New notebook import feature for importing notes from a CSV file. (Beta.)
Uses existing file upload mechanism with slightly modified UI to upload the file. Then there's new CSV parsing code to parse the CSV and import it as a new notebook. Still need a few more unit tests before this feature can be considered complete.
This commit is contained in:
parent
cc317c03ad
commit
52f129e571
1
NEWS
1
NEWS
|
@ -1,4 +1,5 @@
|
|||
1.4.23:
|
||||
* New notebook import feature for importing notes from a CSV file. (Beta.)
|
||||
* Enforcing maximum note length after stripping the note of any disallowed
|
||||
HTML tags instead of before. This makes Luminotes work better when cutting
|
||||
and pasting lots of text from MS Word.
|
||||
|
|
|
@ -36,6 +36,20 @@ class Upload_error( Exception ):
|
|||
)
|
||||
|
||||
|
||||
class Parse_error( Exception ):
  """
  Error raised when an uploaded file can't be parsed as a table or spreadsheet. Carries a
  message suitable for direct display to the user.
  """
  DEFAULT_MESSAGE = u"Sorry, I can't figure out how to read that file. Please try a different file, or contact support for help."

  def __init__( self, message = None ):
    # fall back to a generic user-facing explanation when no specific message is provided
    if message is None:
      message = Parse_error.DEFAULT_MESSAGE

    Exception.__init__( self, message )
    self.__message = message

  def to_dict( self ):
    # shape consumed by the JSON error views
    return { "error": self.__message }
|
||||
|
||||
|
||||
# map of upload id to Upload_file
|
||||
current_uploads = {}
|
||||
current_uploads_lock = Lock()
|
||||
|
@ -471,6 +485,42 @@ class Files( object ):
|
|||
notebook_id = notebook_id,
|
||||
note_id = note_id,
|
||||
file_id = file_id,
|
||||
label_text = u"attach file",
|
||||
instructions_text = u"Please select a file to upload.",
|
||||
)
|
||||
|
||||
  @expose( view = Upload_page )
  @strongly_expire
  @end_transaction
  @grab_user_id
  @validate(
    notebook_id = Valid_id(),
    user_id = Valid_id( none_okay = True ),
  )
  def import_page( self, notebook_id, user_id ):
    """
    Provide the information necessary to display the file import page, including the generation of a
    unique file id.

    @type notebook_id: unicode
    @param notebook_id: id of the notebook that the upload will be to
    @type user_id: unicode or NoneType
    @param user_id: id of current logged-in user (if any)
    @rtype: unicode
    @return: rendered HTML page
    @raise Access_error: the current user doesn't have access to the given notebook
    """
    # importing creates notes, so read-write access to the notebook is required
    if not self.__users.check_access( user_id, notebook_id, read_write = True ):
      raise Access_error()

    # reserve an id now so the upload form can reference the file before it exists
    file_id = self.__database.next_id( File )

    return dict(
      notebook_id = notebook_id,
      note_id = None,  # imports aren't attached to any particular note
      file_id = file_id,
      label_text = u"import file",
      instructions_text = u"Please select a CSV file of notes to import into a new notebook.",
    )
|
||||
|
||||
@expose( view = Blank_page )
|
||||
|
@ -480,7 +530,7 @@ class Files( object ):
|
|||
@validate(
|
||||
upload = (),
|
||||
notebook_id = Valid_id(),
|
||||
note_id = Valid_id(),
|
||||
note_id = Valid_id( none_okay = True ),
|
||||
file_id = Valid_id(),
|
||||
user_id = Valid_id( none_okay = True ),
|
||||
)
|
||||
|
@ -493,8 +543,8 @@ class Files( object ):
|
|||
@param upload: file handle to uploaded file
|
||||
@type notebook_id: unicode
|
||||
@param notebook_id: id of the notebook that the upload is to
|
||||
@type note_id: unicode
|
||||
@param note_id: id of the note that the upload is to
|
||||
@type note_id: unicode or NoneType
|
||||
@param note_id: id of the note that the upload is to (if any)
|
||||
@type file_id: unicode
|
||||
@param file_id: id of the file being uploaded
|
||||
@type user_id: unicode or NoneType
|
||||
|
@ -722,6 +772,118 @@ class Files( object ):
|
|||
|
||||
return dict()
|
||||
|
||||
  def parse_csv( self, file_id, skip_header = False ):
    """
    Attempt to parse a previously uploaded file as a table or spreadsheet. Generate rows as they're
    requested.

    @type file_id: unicode
    @param file_id: id of the file to parse
    @type skip_header: bool
    @param skip_header: if a line of header labels is detected, don't include it in the generated
                        rows (defaults to False)
    @rtype: generator
    @return: rows of data from the parsed file. each row is a list of elements
    @raise Parse_error: there was an error in parsing the given file
    """
    # size hint handed to readlines() when collecting a sample for header detection
    APPROX_SNIFF_SAMPLE_SIZE_BYTES = 1024 * 1024

    try:
      import csv

      table_file = Upload_file.open_file( file_id )
      sniffer = csv.Sniffer()

      # attempt to determine the presence of a header
      # NOTE(review): readlines() keeps each line's terminator, so joining with "\n" doubles
      # the newlines in the sample — the sniffer appears to tolerate this, but confirm
      lines = table_file.readlines( APPROX_SNIFF_SAMPLE_SIZE_BYTES )
      sniff_sample = "\n".join( lines )

      # raises csv.Error on an empty/undecipherable sample, reported below as a Parse_error
      has_header = sniffer.has_header( sniff_sample )

      # rewind so the real parse starts from the top of the file
      table_file.seek( 0 )
      reader = csv.reader( table_file )

      # skip the header if requested to do so
      if has_header and skip_header:
        reader.next()

      expected_row_length = None

      for row in reader:
        # all rows must have the same number of elements. completely empty rows are skipped
        # without counting against that expectation
        current_row_length = len( row )
        if current_row_length == 0:
          continue

        if expected_row_length and current_row_length != expected_row_length:
          raise Parse_error()
        else:
          # the first non-empty row establishes the expected element count
          expected_row_length = current_row_length

        yield row
    # any parse/read failure is surfaced uniformly as a Parse_error. since this is a
    # generator, the error is only raised once iteration begins
    except ( csv.Error, IOError, TypeError ):
      raise Parse_error()
|
||||
|
||||
@expose( view = Json )
|
||||
@end_transaction
|
||||
@grab_user_id
|
||||
@validate(
|
||||
file_id = Valid_id(),
|
||||
user_id = Valid_id( none_okay = True ),
|
||||
)
|
||||
def csv_head( self, file_id, user_id = None ):
|
||||
"""
|
||||
Attempt to parse a previously uploaded file as a table or spreadsheet. Return the first few rows
|
||||
of that table, with each element truncated to a maximum length if necessary.
|
||||
|
||||
Currently, only a CSV file format is supported.
|
||||
|
||||
@type file_id: unicode
|
||||
@param file_id: id of the file to parse
|
||||
@type user_id: unicode or NoneType
|
||||
@param user_id: id of current logged-in user (if any)
|
||||
@rtype: dict
|
||||
@return: {
|
||||
'file_id': file id,
|
||||
'rows': list of parsed rows, each of which is a list of elements,
|
||||
}
|
||||
@raise Access_error: the current user doesn't have access to the notebook that the file is in
|
||||
@raise Parse_error: there was an error in parsing the given file
|
||||
"""
|
||||
MAX_ROW_COUNT = 4
|
||||
MAX_ELEMENT_LENGTH = 30
|
||||
MAX_ROW_ELEMENT_COUNT = 50
|
||||
|
||||
db_file = self.__database.load( File, file_id )
|
||||
|
||||
if not db_file or not self.__users.check_access( user_id, db_file.notebook_id ):
|
||||
raise Access_error()
|
||||
|
||||
parser = self.parse_csv( file_id )
|
||||
rows = []
|
||||
|
||||
def truncate( element ):
|
||||
if len( element ) > MAX_ELEMENT_LENGTH:
|
||||
return "%s ..." % element[ : MAX_ELEMENT_LENGTH ]
|
||||
|
||||
return element
|
||||
|
||||
for row in parser:
|
||||
if len( row ) == 0:
|
||||
continue
|
||||
|
||||
rows.append( [ truncate( element ) for element in row ][ : MAX_ROW_ELEMENT_COUNT ] )
|
||||
if len( rows ) == MAX_ROW_COUNT:
|
||||
break
|
||||
|
||||
if len( rows ) == 0:
|
||||
raise Parse_error()
|
||||
|
||||
return dict(
|
||||
file_id = file_id,
|
||||
rows = rows,
|
||||
)
|
||||
|
||||
def purge_unused( self, note, purge_all_links = False ):
|
||||
"""
|
||||
Delete files that were linked from the given note but no longer are.
|
||||
|
|
|
@ -17,7 +17,7 @@ class Html_cleaner(HTMLParser):
|
|||
"""
|
||||
Cleans HTML of any tags not matching a whitelist.
|
||||
"""
|
||||
def __init__( self ):
|
||||
def __init__( self, require_link_target = False ):
|
||||
HTMLParser.__init__( self, AbstractFormatter( NullWriter() ) )
|
||||
self.result = []
|
||||
self.open_tags = []
|
||||
|
@ -154,6 +154,9 @@ class Html_cleaner(HTMLParser):
|
|||
# Adding "javascript" or "vbscript" to this list would not be smart.
|
||||
self.allowed_schemes = ['http','https','ftp', 'irc', '']
|
||||
|
||||
# Boolean indicating whether links need to have a target attribute.
|
||||
self.require_link_target = require_link_target
|
||||
|
||||
def handle_data(self, data):
|
||||
if data:
|
||||
self.result.append( xssescape(data) )
|
||||
|
@ -191,6 +194,8 @@ class Html_cleaner(HTMLParser):
|
|||
else:
|
||||
bt += ' %s=%s' % \
|
||||
(xssescape(attribute), quoteattr(attrs[attribute]))
|
||||
if self.require_link_target and tag == "a" and not attrs.get( 'target' ):
|
||||
bt += ' target="_new"'
|
||||
if bt == "<a" or bt == "<img":
|
||||
return
|
||||
if tag in self.requires_no_close:
|
||||
|
|
|
@ -9,11 +9,13 @@ from Users import grab_user_id, Access_error
|
|||
from Expire import strongly_expire, weakly_expire
|
||||
from Html_nuker import Html_nuker
|
||||
from Html_differ import Html_differ
|
||||
from Files import Upload_file
|
||||
from model.Notebook import Notebook
|
||||
from model.Note import Note
|
||||
from model.Invite import Invite
|
||||
from model.User import User
|
||||
from model.User_revision import User_revision
|
||||
from model.File import File
|
||||
from view.Main_page import Main_page
|
||||
from view.Json import Json
|
||||
from view.Html_file import Html_file
|
||||
|
@ -23,6 +25,20 @@ from view.Updates_rss import Updates_rss
|
|||
from view.Update_link_page import Update_link_page
|
||||
|
||||
|
||||
class Import_error( Exception ):
  """
  Error raised when an uploaded file can't be imported as notes. Carries a message suitable
  for direct display to the user.
  """
  def __init__( self, message = None ):
    # default to a generic user-facing explanation when no specific message is given
    if message is None:
      message = u"An error occurred when trying to import your file. Please try a different file, or contact support for help."

    self.__message = message
    Exception.__init__( self, message )

  def to_dict( self ):
    # shape consumed by the JSON error views
    return { "error": self.__message }
|
||||
|
||||
|
||||
class Notebooks( object ):
|
||||
WHITESPACE_PATTERN = re.compile( u"\s+" )
|
||||
LINK_PATTERN = re.compile( u'<a\s+((?:[^>]+\s)?href="([^"]+)"(?:\s+target="([^"]*)")?[^>]*)>(<img [^>]+>)?([^<]*)</a>', re.IGNORECASE )
|
||||
|
@ -1183,7 +1199,7 @@ class Notebooks( object ):
|
|||
@type user_id: unicode or NoneType
|
||||
@param user_id: id of current logged-in user (if any)
|
||||
@rtype dict
|
||||
@return { "redirect": notebookurl }
|
||||
@return { 'redirect': new_notebook_url }
|
||||
@raise Access_error: the current user doesn't have access to create a notebook
|
||||
@raise Validation_error: one of the arguments is invalid
|
||||
"""
|
||||
|
@ -1286,7 +1302,7 @@ class Notebooks( object ):
|
|||
@type user_id: unicode or NoneType
|
||||
@param user_id: id of current logged-in user (if any)
|
||||
@rtype dict
|
||||
@return { "redirect": remainingnotebookurl }
|
||||
@return { 'redirect': remaining_notebook_url }
|
||||
@raise Access_error: the current user doesn't have access to the given notebook
|
||||
@raise Validation_error: one of the arguments is invalid
|
||||
"""
|
||||
|
@ -1385,7 +1401,7 @@ class Notebooks( object ):
|
|||
@type user_id: unicode or NoneType
|
||||
@param user_id: id of current logged-in user (if any)
|
||||
@rtype dict
|
||||
@return { "redirect": notebookurl }
|
||||
@return { 'redirect': notebook_url }
|
||||
@raise Access_error: the current user doesn't have access to the given notebook
|
||||
@raise Validation_error: one of the arguments is invalid
|
||||
"""
|
||||
|
@ -1631,3 +1647,101 @@ class Notebooks( object ):
|
|||
result[ "count" ] = count
|
||||
|
||||
return result
|
||||
|
||||
@expose( view = Json )
|
||||
@strongly_expire
|
||||
@end_transaction
|
||||
@grab_user_id
|
||||
@validate(
|
||||
file_id = Valid_id(),
|
||||
content_column = Valid_int( min = 0 ),
|
||||
title_column = Valid_int( min = 0, none_okay = True ),
|
||||
plaintext = Valid_bool(),
|
||||
import_button = unicode,
|
||||
user_id = Valid_id( none_okay = True ),
|
||||
)
|
||||
def import_csv( self, file_id, content_column, title_column, plaintext, import_button, user_id = None ):
|
||||
"""
|
||||
Import a previously uploaded CSV file of notes as a new notebook. Delete the file once the
|
||||
import is complete.
|
||||
|
||||
Plaintext contents are left mostly untouched, just stripping HTML and converting newlines to
|
||||
<br> tags. HTML contents are cleaned of any disallowed/harmful HTML tags, and target="_new"
|
||||
attributes are added to all links without targets.
|
||||
|
||||
@type file_id: unicode
|
||||
@param file_id: id of the previously uploaded CSV file to import
|
||||
@type content_column: int
|
||||
@param content_column: zero-based index of the column containing note contents
|
||||
@type title_column: int or NoneType
|
||||
@param title_column: zero-based index of the column containing note titles (None indicates
|
||||
the lack of any such column, in which case titles are derived from the
|
||||
first few words of each note's contents)
|
||||
@type plaintext: bool
|
||||
@param plaintext: True if the note contents are plaintext, or False if they're HTML
|
||||
@type import_button: unicode
|
||||
@param import_button: ignored
|
||||
@type user_id: unicode or NoneType
|
||||
@param user_id: id of current logged-in user (if any)
|
||||
@rtype: dict
|
||||
@return: { 'redirect': new_notebook_url }
|
||||
@raise Access_error: the current user doesn't have access to the given file
|
||||
@raise Files.Parse_error: there was an error in parsing the given file
|
||||
@raise Import_error: there was an error in importing the notes from the file
|
||||
"""
|
||||
TRUNCATED_TITLE_WORD_COUNT = 5
|
||||
TRUNCATED_TITLE_CHAR_LENGTH = 60
|
||||
WHITESPACE_PATTERN = re.compile( "\s+" )
|
||||
|
||||
if user_id is None:
|
||||
raise Access_error()
|
||||
|
||||
user = self.__database.load( User, user_id )
|
||||
if user is None:
|
||||
raise Access_error()
|
||||
|
||||
db_file = self.__database.load( File, file_id )
|
||||
if not self.__users.check_access( user_id, db_file.notebook_id ):
|
||||
raise Access_error()
|
||||
|
||||
parser = self.__files.parse_csv( file_id, skip_header = True )
|
||||
|
||||
# create a new notebook for the imported notes
|
||||
notebook = self.__create_notebook( u"imported notebook", user, commit = False )
|
||||
|
||||
# import the notes into the new notebook
|
||||
for row in parser:
|
||||
row_length = len( row )
|
||||
if content_column >= row_length:
|
||||
raise Import_error()
|
||||
if title_column is not None and title_column >= row_length:
|
||||
raise Import_error()
|
||||
|
||||
# if there's no title column, then just use the first several words of the content column
|
||||
if title_column is None or title_column == content_column or len( row[ title_column ].strip() ) == 0:
|
||||
title = row[ content_column ].strip()
|
||||
title_words = WHITESPACE_PATTERN.split( title )[ : TRUNCATED_TITLE_WORD_COUNT ]
|
||||
title = u" ".join( title_words )[ : TRUNCATED_TITLE_CHAR_LENGTH ]
|
||||
else:
|
||||
title = row[ title_column ].strip()[ : TRUNCATED_TITLE_CHAR_LENGTH ]
|
||||
|
||||
contents = u"<h3>%s</h3>%s" % (
|
||||
Html_nuker().nuke( title ),
|
||||
Valid_string( max = 25000, escape_html = plaintext, require_link_target = True )( row[ content_column ] ),
|
||||
)
|
||||
|
||||
if plaintext:
|
||||
contents = contents.replace( u"\n", u"<br />" )
|
||||
|
||||
note_id = self.__database.next_id( Note, commit = False )
|
||||
note = Note.create( note_id, contents, notebook_id = notebook.object_id, startup = False, rank = None, user_id = user_id )
|
||||
self.__database.save( note, commit = False )
|
||||
|
||||
# delete the CSV file now that it's been imported
|
||||
self.__database.execute( db_file.sql_delete(), commit = False )
|
||||
self.__database.commit()
|
||||
Upload_file.delete_file( file_id )
|
||||
|
||||
return dict(
|
||||
redirect = u"/notebooks/%s?rename=true" % notebook.object_id,
|
||||
)
|
||||
|
|
|
@ -79,10 +79,11 @@ class Valid_string( object ):
|
|||
u"\u2122": u"(tm)",
|
||||
}
|
||||
|
||||
def __init__( self, min = None, max = None, escape_html = True ):
|
||||
def __init__( self, min = None, max = None, escape_html = True, require_link_target = False ):
|
||||
self.min = min
|
||||
self.max = max
|
||||
self.escape_html = escape_html
|
||||
self.require_link_target = require_link_target
|
||||
self.message = None
|
||||
|
||||
def __call__( self, value ):
|
||||
|
@ -100,7 +101,7 @@ class Valid_string( object ):
|
|||
if self.escape_html:
|
||||
value = escape( value, quote = True )
|
||||
else:
|
||||
cleaner = Html_cleaner()
|
||||
cleaner = Html_cleaner( self.require_link_target )
|
||||
value = cleaner.strip( value )
|
||||
|
||||
# check for max length after cleaning html, as cleaning can reduce the html's size
|
||||
|
|
|
@ -4,6 +4,7 @@ import time
|
|||
import types
|
||||
import urllib
|
||||
import cherrypy
|
||||
from nose.tools import raises
|
||||
from threading import Thread
|
||||
from StringIO import StringIO
|
||||
from PIL import Image
|
||||
|
@ -15,7 +16,7 @@ from model.Invite import Invite
|
|||
from model.File import File
|
||||
from controller.Notebooks import Access_error
|
||||
import controller.Files
|
||||
from controller.Files import Upload_file
|
||||
from controller.Files import Upload_file, Parse_error
|
||||
|
||||
|
||||
class Test_files( Test_controller ):
|
||||
|
@ -873,6 +874,8 @@ class Test_files( Test_controller ):
|
|||
assert result.get( u"notebook_id" ) == self.notebook.object_id
|
||||
assert result.get( u"note_id" ) == self.note.object_id
|
||||
assert result.get( u"file_id" )
|
||||
assert u"attach" in result.get( u"label_text" )
|
||||
assert u"upload" in result.get( u"instructions_text" )
|
||||
|
||||
def test_upload_page_without_login( self ):
|
||||
path = "/files/upload_page?notebook_id=%s¬e_id=%s" % ( self.notebook.object_id, self.note.object_id )
|
||||
|
@ -882,6 +885,28 @@ class Test_files( Test_controller ):
|
|||
assert headers
|
||||
assert headers.get( "Location" ) == u"http:///login?after_login=%s" % urllib.quote( path )
|
||||
|
||||
def test_import_page( self ):
|
||||
self.login()
|
||||
|
||||
result = self.http_get(
|
||||
"/files/import_page?notebook_id=%s" % self.notebook.object_id,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
assert result.get( u"notebook_id" ) == self.notebook.object_id
|
||||
assert result.get( u"note_id" ) == None
|
||||
assert result.get( u"file_id" )
|
||||
assert u"import" in result.get( u"label_text" )
|
||||
assert u"import" in result.get( u"instructions_text" )
|
||||
|
||||
def test_upload_page_without_login( self ):
|
||||
path = "/files/import_page?notebook_id=%s" % self.notebook.object_id
|
||||
result = self.http_get( path )
|
||||
|
||||
headers = result.get( "headers" )
|
||||
assert headers
|
||||
assert headers.get( "Location" ) == u"http:///login?after_login=%s" % urllib.quote( path )
|
||||
|
||||
def test_upload( self, filename = None ):
|
||||
self.login()
|
||||
|
||||
|
@ -1574,6 +1599,273 @@ class Test_files( Test_controller ):
|
|||
|
||||
assert u"access" in result[ u"error" ]
|
||||
|
||||
def test_parse_csv( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"label 1","label 2","label 3"\n5,"blah and stuff",3.3\n"8","whee","hmm\nfoo"'
|
||||
expected_rows = [
|
||||
[ "label 1", "label 2", "label 3" ],
|
||||
[ "5", "blah and stuff", "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
@raises( Parse_error )
|
||||
def test_parse_csv_empty( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = ""
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
parser.next()
|
||||
|
||||
@raises( Parse_error )
|
||||
def test_parse_csv_invalid_text( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"See, Vera? Dress yourself up, you get taken out somewhere fun. -- Jayne'
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
parser.next()
|
||||
|
||||
@raises( Parse_error )
|
||||
def test_parse_csv_invalid_binary( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = self.file_data + "\x00"
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
parser.next()
|
||||
|
||||
def test_parse_csv_embedded_quotes( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"label 1","label 2","label 3"\n5,"blah ""and"" stuff",3.3\n"8","whee","hmm\nfoo"'
|
||||
expected_rows = [
|
||||
[ "label 1", "label 2", "label 3" ],
|
||||
[ "5", 'blah "and" stuff', "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
@raises( Parse_error )
|
||||
def test_parse_csv_different_row_element_counts( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"label 1","label 2","label 3"\n5,"blah and stuff"\n"8","whee","hmm\nfoo",4.4'
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
|
||||
for row in parser:
|
||||
pass
|
||||
|
||||
def test_parse_csv_empty_rows( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"label 1","label 2","label 3"\n\n5,"blah and stuff",3.3\n"8","whee","hmm\nfoo"\n\n'
|
||||
expected_rows = [
|
||||
[ "label 1", "label 2", "label 3" ],
|
||||
[ "5", "blah and stuff", "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
@raises( Parse_error )
|
||||
def test_parse_csv_unknown_file_id( self ):
|
||||
parser = cherrypy.root.files.parse_csv( u"unknownfileid" )
|
||||
|
||||
for row in parser:
|
||||
pass
|
||||
|
||||
def test_parse_csv_without_header( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '5,"blah and stuff",3.3\n"8","whee","hmm\nfoo"'
|
||||
expected_rows = [
|
||||
[ "5", "blah and stuff", "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
def test_parse_csv_skip_header( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '"label 1","label 2","label 3"\n5,"blah and stuff",3.3\n"8","whee","hmm\nfoo"'
|
||||
expected_rows = [
|
||||
[ "5", "blah and stuff", "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id, skip_header = True )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
def test_parse_csv_skip_header_without_header( self ):
|
||||
self.login()
|
||||
|
||||
csv_data = '5,"blah and stuff",3.3\n"8","whee","hmm\nfoo"'
|
||||
expected_rows = [
|
||||
[ "5", "blah and stuff", "3.3" ],
|
||||
[ "8", "whee", "hmm\nfoo" ],
|
||||
]
|
||||
|
||||
result = self.http_upload(
|
||||
"/files/upload?file_id=%s" % self.file_id,
|
||||
dict(
|
||||
notebook_id = self.notebook.object_id,
|
||||
note_id = self.note.object_id,
|
||||
),
|
||||
filename = self.filename,
|
||||
file_data = csv_data,
|
||||
content_type = self.content_type,
|
||||
session_id = self.session_id,
|
||||
)
|
||||
|
||||
parser = cherrypy.root.files.parse_csv( self.file_id, skip_header = True )
|
||||
|
||||
for ( index, row ) in enumerate( parser ):
|
||||
assert row == expected_rows[ index ]
|
||||
|
||||
assert index == len( expected_rows ) - 1
|
||||
|
||||
def test_csv_head( self ):
|
||||
raise NotImplementedError()
|
||||
|
||||
def test_purge_unused( self ):
|
||||
self.login()
|
||||
|
||||
|
|
|
@ -4266,6 +4266,9 @@ class Test_notebooks( Test_controller ):
|
|||
|
||||
assert u"access" in result[ "error" ]
|
||||
|
||||
def test_import( self ):
|
||||
raise NotImplementedError()
|
||||
|
||||
def login( self ):
|
||||
result = self.http_post( "/users/login", dict(
|
||||
username = self.username,
|
||||
|
|
|
@ -142,10 +142,25 @@ ol li {
|
|||
margin-left: 1em;
|
||||
}
|
||||
|
||||
#access_table td {
|
||||
.radio_table td {
|
||||
padding-right: 1em;
|
||||
}
|
||||
|
||||
#import_notebook_table {
|
||||
font-size: 72%;
|
||||
border-collapse: collapse;
|
||||
border: 1px solid #999999;
|
||||
}
|
||||
|
||||
#import_notebook_table td {
|
||||
border: 1px solid #999999;
|
||||
padding: 0.5em;
|
||||
}
|
||||
|
||||
#import_notebook_table .heading_row {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
.thumbnail_left {
|
||||
float: left;
|
||||
margin: 0.5em;
|
||||
|
|
|
@ -347,6 +347,14 @@ Wiki.prototype.populate = function ( startup_notes, current_notes, note_read_wri
|
|||
} );
|
||||
}
|
||||
|
||||
var import_link = getElement( "import_link" );
|
||||
if ( import_link ) {
|
||||
connect( import_link, "onclick", function ( event ) {
|
||||
self.import_clicked();
|
||||
event.stop();
|
||||
} );
|
||||
}
|
||||
|
||||
var rename_notebook_link = getElement( "rename_notebook_link" );
|
||||
if ( rename_notebook_link ) {
|
||||
connect( rename_notebook_link, "onclick", function ( event ) {
|
||||
|
@ -1819,7 +1827,7 @@ Wiki.prototype.share_notebook = function () {
|
|||
if ( this.rate_plan.notebook_collaboration ) {
|
||||
var access_area = createDOM( "p", { "id": "access_choices" },
|
||||
createDOM( "p", {}, "Invite these people as:" ),
|
||||
createDOM( "table" , { "id": "access_table" },
|
||||
createDOM( "table" , { "id": "access_table", "class": "radio_table" },
|
||||
createDOM( "tr", {},
|
||||
createDOM( "td", {}, collaborators_radio, collaborators_label ),
|
||||
createDOM( "td", {}, viewers_radio, viewers_label ),
|
||||
|
@ -2140,6 +2148,111 @@ Wiki.prototype.display_group_settings = function ( result ) {
|
|||
this.create_editor( "group_" + result.group.object_id, "<h3>group admin settings</h3>" + div.innerHTML, undefined, undefined, undefined, false, true, true, getElement( "note_settings" ) );
|
||||
}
|
||||
|
||||
// Display a form asking the user how to interpret the uploaded CSV file (which columns hold
// contents/titles, plaintext vs. HTML), previewing the first few rows returned by the server.
// result: { file_id: ..., rows: [ [ element, ... ], ... ] } from /files/csv_head
Wiki.prototype.display_import_notebook = function ( result ) {
  this.clear_messages();
  this.clear_pulldowns();

  // shut down any previously displayed import editor so it can be replaced below
  var import_frame = getElement( "note_import" );
  if ( import_frame )
    import_frame.editor.shutdown();

  // FIX: the submit URL belongs in the form's "action" attribute. The original code set
  // "target" (which names a browsing context, not a URL), so the form never reached the
  // /notebooks/import_csv handler.
  var form = createDOM( "form", { "id": "import_notebook_form", "action": "/notebooks/import_csv" },
    createDOM( "input",
      { "type": "hidden", "name": "file_id", "value": result.file_id }
    )
  );

  appendChildNodes( form, createDOM( "p", {}, "Here are the first few lines of the file:" ) );

  // render the preview rows as a small table. plain index loops instead of for...in, which
  // can pick up inherited enumerable properties on arrays
  var tbody = createDOM( "tbody", {} );
  var table = createDOM( "table" , { "id": "import_notebook_table" }, tbody );

  appendChildNodes( form, table );

  for ( var i = 0; i < result.rows.length; i++ ) {
    var row = result.rows[ i ];
    var row_node = createDOM( "tr", {} );

    for ( var j = 0; j < row.length; j++ ) {
      appendChildNodes( row_node, createDOM( "td", {}, row[ j ] ) );
    }

    appendChildNodes( tbody, row_node );
  }

  // let the user pick the contents column, using the first row's elements as examples
  var first_row = result.rows[ 0 ];
  var contents_select = createDOM( "select", { "name": "content_column" } );
  var column_number = 1;

  for ( i = 0; i < first_row.length; i++ ) {
    appendChildNodes( contents_select, createDOM( "option", { "value": i }, "column " + column_number + " - " + first_row[ i ] ) );
    column_number += 1;
  }

  appendChildNodes( form, createDOM( "p", {},
    createDOM( "b", {}, "Which column contains the note contents text?" ),
    createDOM( "br", {} ),
    contents_select
  ) );

  // let the user pick an optional titles column. "None" matches the server-side validator's
  // spelling for "no such column". (also dropped a stray, meaningless "target" attribute
  // that the original code put on this paragraph.)
  var titles_select = createDOM( "select", { "name": "title_column" } );
  column_number = 1;

  appendChildNodes( titles_select, createDOM( "option", { "value": "None" }, "There isn't a title column." ) );
  for ( i = 0; i < first_row.length; i++ ) {
    appendChildNodes( titles_select, createDOM( "option", { "value": i }, "column " + column_number + " - " + first_row[ i ] ) );
    column_number += 1;
  }

  appendChildNodes( form, createDOM( "p", {},
    createDOM( "b", {}, "Which column contains the note titles (if any)?" ),
    createDOM( "br", {} ),
    titles_select
  ) );

  // radio buttons selecting whether the contents should be imported as plain text or HTML
  var plaintext_label = createDOM( "label",
    { "for": "plaintext_radio", "class": "radio_label", "title": "The note contents are just plain text." },
    "plain text"
  );
  var html_label = createDOM( "label",
    { "for": "html_radio", "class": "radio_label", "title": "The note contents are formatted as HTML." },
    "HTML"
  );

  var plaintext_radio = createDOM( "input",
    { "type": "radio", "id": "plaintext_radio", "name": "plaintext", "value": "True", "checked": "true" }
  );
  var html_radio = createDOM( "input",
    { "type": "radio", "id": "html_radio", "name": "plaintext", "value": "False" }
  );

  appendChildNodes( form, createDOM( "p", {},
    createDOM( "b", {}, "Should the note contents be treated as plain text or HTML?" ),
    createDOM( "br", {} ),
    createDOM( "table" , { "id": "plaintext_table", "class": "radio_table" },
      createDOM( "tr", {},
        createDOM( "td", {}, plaintext_radio, plaintext_label ),
        createDOM( "td", {}, html_radio, html_label )
      )
    )
  ) );

  appendChildNodes( form, createDOM( "p", {},
    createDOM( "input",
      { "type": "submit", "name": "import_button", "id": "import_button", "class": "button", "value": "import notebook" }
    )
  ) );

  var div = createDOM( "div", {},
    createDOM( "p", {}, "Almost done. I just need a little information about your file before I can complete the import and create a new notebook." ),
    form
  );

  // display the assembled form inside a new wiki note editor
  this.create_editor( "import", "<h3>import a notebook</h3>" + div.innerHTML, undefined, undefined, undefined, false, true, true, undefined );
}
|
||||
|
||||
Wiki.prototype.declutter_clicked = function () {
|
||||
var header = getElement( "header" );
|
||||
if ( header )
|
||||
|
@ -2474,6 +2587,18 @@ Wiki.prototype.zero_total_notes_count = function () {
|
|||
signal( this, "total_notes_count_updated", this.total_notes_count );
|
||||
}
|
||||
|
||||
Wiki.prototype.import_clicked = function () {
  // Toggle behavior: a second click on the "import" link closes an
  // already-open import pulldown instead of opening another one.
  var open_pulldown_div = getElement( "import_pulldown" );

  if ( open_pulldown_div ) {
    open_pulldown_div.pulldown.shutdown();
    open_pulldown_div.pulldown = null;
    return;
  }

  // No pulldown is open yet, so pop one up anchored to the "import" link.
  new Import_pulldown( this, this.notebook_id, this.invoker, getElement( "import_link" ) );
}
|
||||
|
||||
Wiki.prototype.start_notebook_rename = function () {
|
||||
this.clear_pulldowns();
|
||||
|
||||
|
@ -3074,6 +3199,19 @@ Link_pulldown.prototype.shutdown = function () {
|
|||
this.link.pulldown = null;
|
||||
}
|
||||
|
||||
|
||||
// Return just the basename of the file selected in the "upload" form field,
// stripping any leading Unix ("/") or Windows ("\") directory path that the
// browser may have included in the field's value.
function base_upload_filename() {
  var path = getElement( "upload" ).value;

  var slash_index = path.lastIndexOf( "/" );
  if ( slash_index != -1 )
    path = path.substring( slash_index + 1 );

  var backslash_index = path.lastIndexOf( "\\" );
  if ( backslash_index != -1 )
    path = path.substring( backslash_index + 1 );

  return path;
}
|
||||
|
||||
|
||||
function Upload_pulldown( wiki, notebook_id, invoker, editor, link, ephemeral ) {
|
||||
this.link = link || editor.find_link_at_cursor();
|
||||
this.link.pulldown = this;
|
||||
|
@ -3133,21 +3271,10 @@ Upload_pulldown.prototype.init_frame = function () {
|
|||
} );
|
||||
}
|
||||
|
||||
// Return the basename of the file in the "upload" form field.
// This method duplicated the standalone base_upload_filename() line for line;
// delegate to it instead so the path-stripping logic lives in exactly one
// place (the rest of this file already calls the free function directly).
Upload_pulldown.prototype.base_filename = function () {
  return base_upload_filename();
}
|
||||
|
||||
Upload_pulldown.prototype.upload_started = function ( file_id ) {
|
||||
this.file_id = file_id;
|
||||
this.uploading = true;
|
||||
var filename = this.base_filename();
|
||||
var filename = base_upload_filename();
|
||||
|
||||
// make the upload iframe invisible but still present so that the upload continues
|
||||
setElementDimensions( this.iframe, { "h": "0" } );
|
||||
|
@ -3222,6 +3349,130 @@ Upload_pulldown.prototype.shutdown = function () {
|
|||
}
|
||||
|
||||
|
||||
// Pulldown hosting the CSV notebook-import upload form inside an iframe.
// wiki: the enclosing Wiki object. notebook_id: id of the current notebook.
// invoker: used to make async requests back to the server. anchor: the
// "import" link DOM node the pulldown attaches itself to.
function Import_pulldown( wiki, notebook_id, invoker, anchor ) {
  // Let import_clicked() find and toggle this pulldown through the anchor.
  anchor.pulldown = this;

  Pulldown.call( this, wiki, notebook_id, "import_pulldown", anchor, null, false );

  this.invoker = invoker;
  // The iframe loads the server-rendered file-selection form.
  // NOTE(review): reuses the "upload_frame" id/name from the attach-file
  // pulldown — presumably only one such pulldown exists at a time; confirm.
  this.iframe = createDOM( "iframe", {
    "src": "/files/import_page?notebook_id=" + notebook_id,
    "frameBorder": "0",
    "scrolling": "no",
    "id": "upload_frame",
    "name": "upload_frame",
    "class": "upload_frame"
  } );
  this.iframe.pulldown = this;
  // file_id is filled in by upload_started() once the upload begins.
  this.file_id = null;
  this.uploading = false;

  // Hook the iframe's load event so the form's button can be wired up.
  var self = this;
  connect( this.iframe, "onload", function ( event ) { self.init_frame(); } );

  appendChildNodes( this.div, this.iframe );

  // A second, initially hidden iframe displays upload progress; it is
  // revealed by upload_started().
  this.progress_iframe = createDOM( "iframe", {
    "frameBorder": "0",
    "scrolling": "no",
    "id": "progress_frame",
    "name": "progress_frame",
    "class": "upload_frame"
  } );
  addElementClass( this.progress_iframe, "undisplayed" );

  appendChildNodes( this.div, this.progress_iframe );
  Pulldown.prototype.finish_init.call( this );
}
|
||||
|
||||
// Wire Import_pulldown up as a subclass of Pulldown.
// NOTE(review): this idiom creates an object whose "prototype" PROPERTY points
// at Pulldown.prototype rather than placing Pulldown.prototype on the actual
// prototype chain; it appears to mirror the pattern used for the file's other
// pulldown classes, so presumably lookups are resolved the same way there —
// confirm against Upload_pulldown's declaration.
Import_pulldown.prototype = new function () { this.prototype = Pulldown.prototype; };
Import_pulldown.prototype.constructor = Import_pulldown;
|
||||
|
||||
// Once the upload form's iframe has loaded, hook its upload button so that
// clicking it starts progress tracking in this pulldown.
Import_pulldown.prototype.init_frame = function () {
  var pulldown = this;
  var frame_doc = this.iframe.contentDocument || this.iframe.contentWindow.document;

  // The click handler runs later, so it re-enters the iframe's document
  // before reading the hidden file_id field out of the form.
  function on_upload_click( event ) {
    withDocument( frame_doc, function () {
      pulldown.upload_started( getElement( "file_id" ).value );
    } );
  }

  withDocument( frame_doc, function () {
    connect( "upload_button", "onclick", on_upload_click );
  } );
}
|
||||
|
||||
// Called when the user clicks the upload button within the form iframe.
// Records the upload as in progress (so shutdown() won't tear us down),
// hides the form iframe, and reveals the progress-meter iframe.
// file_id: the server-generated id for the file being uploaded.
Import_pulldown.prototype.upload_started = function ( file_id ) {
  this.file_id = file_id;
  this.uploading = true;
  var filename = base_upload_filename();

  // make the upload iframe invisible but still present so that the upload continues
  setElementDimensions( this.iframe, { "h": "0" } );

  removeElementClass( this.progress_iframe, "undisplayed" );
  var progress_url = "/files/progress?file_id=" + file_id + "&filename=" + escape( filename );

  // Bug fix: the existence check previously used the misspelled key
  // "progress_frames", so it could never match the frame actually named
  // "progress_frame" and always fell through to the src assignment.
  if ( frames[ "progress_frame" ] )
    frames[ "progress_frame" ].location.href = progress_url;
  else
    this.progress_iframe.src = progress_url;
}
|
||||
|
||||
// The file has finished uploading. Ask the server for the head rows of the
// uploaded CSV and hand the result to the wiki, which renders the import
// form; then tear down this pulldown.
Import_pulldown.prototype.upload_complete = function () {
  this.uploading = false;

  var wiki = this.wiki;
  var args = { "file_id": this.file_id };

  this.invoker.invoke( "/files/csv_head", "GET", args, function ( result ) {
    wiki.display_import_notebook( result );
  } );

  this.shutdown();
}
|
||||
|
||||
// Position this pulldown exactly as the generic Pulldown does. This override
// adds no behavior of its own; it simply defers to the base class.
Import_pulldown.prototype.update_position = function ( always_left_align ) {
  Pulldown.prototype.update_position.call( this, always_left_align );
}
|
||||
|
||||
// The user clicked elsewhere, dismissing the pulldown mid-upload. Clear the
// in-progress flag first so that shutdown() actually tears the pulldown down.
Import_pulldown.prototype.cancel_due_to_click = function () {
  this.uploading = false;
  this.wiki.display_message( "The file import has been cancelled." );
  this.shutdown();
}
|
||||
|
||||
// The server rejected the upload because it would exceed the user's storage
// quota. Tear the pulldown down, then explain and offer an upgrade link.
Import_pulldown.prototype.cancel_due_to_quota = function () {
  this.uploading = false;
  this.shutdown();

  var message = "That file is too large for your available storage space. Before uploading, please delete some notes or files, empty the trash, or";
  var upgrade_link = createDOM( "a", { "href": "/upgrade" }, "upgrade" );

  this.wiki.display_error( message, [ upgrade_link, " your account." ] );
}
|
||||
|
||||
// A server-reported error aborted the upload. Surface the server's message
// to the user, then tear the pulldown down.
// message: error text from the server, displayed verbatim.
Import_pulldown.prototype.cancel_due_to_error = function ( message ) {
  this.uploading = false;
  this.wiki.display_error( message );
  this.shutdown();
}
|
||||
|
||||
// Tear down the pulldown and detach it from its anchor link. A no-op while an
// upload is in progress, so that dismissing the pulldown doesn't abort it
// (the cancel_due_to_* methods clear this.uploading before calling shutdown).
Import_pulldown.prototype.shutdown = function () {
  if ( this.uploading )
    return;

  // in Internet Explorer, the upload won't actually cancel without an explicit Stop command
  // (the contentDocument check distinguishes IE from standards browsers here)
  if ( !this.iframe.contentDocument && this.iframe.contentWindow ) {
    this.iframe.contentWindow.document.execCommand( 'Stop' );
    this.progress_iframe.contentWindow.document.execCommand( 'Stop' );
  }

  Pulldown.prototype.shutdown.call( this );
  // break the anchor's back-reference so import_clicked() can reopen later
  if ( this.anchor )
    this.anchor.pulldown = null;
}
|
||||
|
||||
|
||||
// Maximum image dimension (presumably pixels — confirm against the resizing
// code elsewhere in this file) for each of the three image-size presets.
SMALL_MAX_IMAGE_SIZE = 125;
MEDIUM_MAX_IMAGE_SIZE = 300;
LARGE_MAX_IMAGE_SIZE = 500;
|
||||
|
@ -3714,6 +3965,7 @@ Suggest_pulldown.prototype.shutdown = function () {
|
|||
}
|
||||
|
||||
|
||||
|
||||
function Note_tree( wiki, notebook_id, invoker ) {
|
||||
this.wiki = wiki;
|
||||
this.notebook_id = notebook_id;
|
||||
|
|
|
@ -81,6 +81,17 @@ class Link_area( Div ):
|
|||
class_ = u"link_area_item",
|
||||
),
|
||||
|
||||
( notebook.name != u"Luminotes" ) and Div(
|
||||
A(
|
||||
u"import",
|
||||
href = u"#",
|
||||
id = u"import_link",
|
||||
title = u"Import notes from other software into Luminotes.",
|
||||
),
|
||||
Span( "beta", class_ = "new_feature_text" ),
|
||||
class_ = u"link_area_item",
|
||||
) or None,
|
||||
|
||||
( notebook.owner and notebook.name != u"trash" ) and Div(
|
||||
A(
|
||||
u"rename",
|
||||
|
|
|
@ -2,7 +2,7 @@ from Tags import Html, Head, Link, Meta, Body, P, Form, Span, Input
|
|||
|
||||
|
||||
class Upload_page( Html ):
|
||||
def __init__( self, notebook_id, note_id, file_id ):
|
||||
def __init__( self, notebook_id, note_id, file_id, label_text, instructions_text ):
|
||||
Html.__init__(
|
||||
self,
|
||||
Head(
|
||||
|
@ -11,16 +11,16 @@ class Upload_page( Html ):
|
|||
),
|
||||
Body(
|
||||
Form(
|
||||
Span( u"attach file: ", class_ = u"field_label" ),
|
||||
Span( u"%s: " % label_text, class_ = u"field_label" ),
|
||||
Input( type = u"hidden", id = u"notebook_id", name = u"notebook_id", value = notebook_id ),
|
||||
Input( type = u"hidden", id = u"note_id", name = u"note_id", value = note_id ),
|
||||
Input( type = u"hidden", id = u"note_id", name = u"note_id", value = note_id or u"" ),
|
||||
Input( type = u"file", id = u"upload", name = u"upload", class_ = "text_field", size = u"30" ),
|
||||
Input( type = u"submit", id = u"upload_button", class_ = u"button", value = u"upload" ),
|
||||
action = u"/files/upload?file_id=%s" % file_id,
|
||||
method = u"post",
|
||||
enctype = u"multipart/form-data",
|
||||
),
|
||||
P( u"Please select a file to upload." ),
|
||||
P( instructions_text ),
|
||||
Span( id = u"tick_preload" ),
|
||||
Input( type = u"hidden", id = u"file_id", value = file_id ),
|
||||
),
|
||||
|
|
Reference in New Issue