ADDED ABOUT Index: ABOUT ================================================================== --- /dev/null +++ ABOUT @@ -0,0 +1,6 @@ +web2py is an open source full-stack framework for agile development +of secure database-driven web-based applications, written and programmable in +Python. + +Created by Massimo Di Pierro + ADDED LICENSE Index: LICENSE ================================================================== --- /dev/null +++ LICENSE @@ -0,0 +1,137 @@ +## Web2py License + +Web2py is Licensed under the LGPL license version 3 +(http://www.gnu.org/licenses/lgpl.html) + +Copyrighted (c) by Massimo Di Pierro (2007-2011) + +### On Commercial Redistribution + +In accordance with LGPL you may: +- redistribute web2py with your apps (including official web2py binary versions) +- release your applications which use official web2py libraries under any license you wish +But you must: +- make clear in the documentation that your application uses web2py +- release any modification of the web2py libraries under the LGPLv3 license + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL +NECESSARY SERVICING, REPAIR OR CORRECTION. 
+ +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT +HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, +BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES +OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +(Earlier versions of web2py, 1.0.*-1.90.*, were released under the GPL2 license plus a +commercial exception which, for practical purposes, was very similar to the current LPGLv3) + +### Licenses for third party contributed software + +web2py contains third party software under the gluon/contrib/ folder. +Each file/module in contrib is distributed with web2py under its original license. +Here we list some of them. + +#### gluon.contrib.simplejson LICENSE + +Copyright (c) 2006 Bob Ippolito - Permission is hereby granted, free of charge, +to any person obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, distribute, +sublicense, and/or sell copies of the Software, and to permit persons to whom +the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +#### gluon.contrib.rss2.py (originally PyRSS2Gen) LICENSE + +This is copyright (c) by Dalke Scientific Software, LLC and released under the +BSD license. See the file LICENSE in the distribution or + for details. + +#### gluon.contrib.markdown (markdown2) LICENSE + +MIT License from from + +#### gluon.contrib.feedparser LICENSE + +Copyright (c) 2002-2005, Mark Pilgrim + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. 
+ +#### gluon.wsgiserver.py LICENSE (borrowed from cherrypy) + +Copyright (c) 2004, CherryPy Team (team@cherrypy.org) +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + * Neither the name of the CherryPy Team nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#### gluon.contrib.pam LICENSE + +Copyright (C) 2007-2009 Chris AtLee Licensed under the MIT license + +#### gluon.contrib.shell LICENSE + +Copyright (C) by Google inc. 
Apache 2.0 License
the default + cp applications/admin/views/appadmin.html applications/welcome/views + cp applications/admin/views/appadmin.html applications/examples/views + cp applications/admin/controllers/appadmin.py applications/welcome/controllers + cp applications/admin/controllers/appadmin.py applications/examples/controllers + ### update the license + cp ABOUT applications/admin/ + cp ABOUT applications/examples/ + cp LICENSE applications/admin/ + cp LICENSE applications/examples/ + ### build web2py_src.zip + echo '' > NEWINSTALL + mv web2py_src.zip web2py_src_old.zip | echo 'no old' + cd ..; zip -r web2py/web2py_src.zip web2py/gluon/*.py web2py/gluon/contrib/* web2py/splashlogo.gif web2py/*.py web2py/ABOUT web2py/LICENSE web2py/README web2py/NEWINSTALL web2py/VERSION web2py/Makefile web2py/epydoc.css web2py/epydoc.conf web2py/app.example.yaml web2py/logging.example.conf web2py_exe.conf web2py/queue.example.yaml MANIFEST.in w2p_apps w2p_clone w2p_run startweb2py web2py/scripts/*.sh web2py/scripts/*.py web2py/applications/admin web2py/applications/examples/ web2py/applications/welcome web2py/applications/__init__.py web2py/site-packages/__init__.py web2py/gluon/tests/*.sh web2py/gluon/tests/*.py + +mdp: + make epydoc + make src + make app + make win +app: + echo 'did you uncomment import_all in gluon/main.py?' 
+ python2.5 -c 'import compileall; compileall.compile_dir("gluon/")' + #python web2py.py -S welcome -R __exit__.py + find gluon -path '*.pyc' -exec cp {} ../web2py_osx/site-packages/{} \; + cd ../web2py_osx/site-packages/; zip -r ../site-packages.zip * + mv ../web2py_osx/site-packages.zip ../web2py_osx/web2py/web2py.app/Contents/Resources/lib/python2.5 + cp ABOUT ../web2py_osx/web2py/web2py.app/Contents/Resources + cp NEWINSTALL ../web2py_osx/web2py/web2py.app/Contents/Resources + cp LICENSE ../web2py_osx/web2py/web2py.app/Contents/Resources + cp VERSION ../web2py_osx/web2py/web2py.app/Contents/Resources + cp README ../web2py_osx/web2py/web2py.app/Contents/Resources + cp splashlogo.gif ../web2py_osx/web2py/web2py.app/Contents/Resources + cp options_std.py ../web2py_osx/web2py/web2py.app/Contents/Resources + cp routes.example.py ../web2py_osx/web2py/web2py.app/Contents/Resources + cp router.example.py ../web2py_osx/web2py/web2py.app/Contents/Resources + cp app.example.yaml ../web2py_osx/web2py/web2py.app/Contents/Resources + cp queue.example.yaml ../web2py_osx/web2py/web2py.app/Contents/Resources + cp -r applications/admin ../web2py_osx/web2py/web2py.app/Contents/Resources/applications + cp -r applications/welcome ../web2py_osx/web2py/web2py.app/Contents/Resources/applications + cp -r applications/examples ../web2py_osx/web2py/web2py.app/Contents/Resources/applications + cp applications/__init__.py ../web2py_osx/web2py/web2py.app/Contents/Resources/applications + cd ../web2py_osx; zip -r web2py_osx.zip web2py + mv ../web2py_osx/web2py_osx.zip . +win: + echo 'did you uncomment import_all in gluon/main.py?' 
+ python2.5 -c 'import compileall; compileall.compile_dir("gluon/")' + find gluon -path '*.pyc' -exec cp {} ../web2py_win/library/{} \; + cd ../web2py_win/library/; zip -r ../library.zip * + mv ../web2py_win/library.zip ../web2py_win/web2py + cp ABOUT ../web2py_win/web2py/ + cp NEWINSTALL ../web2py_win/web2py/ + cp LICENSE ../web2py_win/web2py/ + cp VERSION ../web2py_win/web2py/ + cp README ../web2py_win/web2py/ + cp splashlogo.gif ../web2py_win/web2py/ + cp options_std.py ../web2py_win/web2py/ + cp routes.example.py ../web2py_win/web2py/ + cp router.example.py ../web2py_win/web2py/ + cp app.example.yaml ../web2py_win/web2py/ + cp queue.example.yaml ../web2py_win/web2py/ + cp -r applications/admin ../web2py_win/web2py/applications + cp -r applications/welcome ../web2py_win/web2py/applications + cp -r applications/examples ../web2py_win/web2py/applications + cp applications/__init__.py ../web2py_win/web2py/applications + cd ../web2py_win; zip -r web2py_win.zip web2py + mv ../web2py_win/web2py_win.zip . +pip: + # create Web2py distribution for upload to Pypi + # after upload clean Web2py sources with rm -R ./dist + python setup.py sdist +run: + python2.5 web2py.py -a hello +push: + make src + echo '' > NEWINSTALL + hg push + bzr push bzr+ssh://mdipierro@bazaar.launchpad.net/~mdipierro/web2py/devel --use-existing-dir + ADDED README Index: README ================================================================== --- /dev/null +++ README @@ -0,0 +1,1178 @@ +## INSTALLATION INSTRUCTION - IMPORTANT + +To start web2py there is NO NEED to install it. Just unzip and do: + +> python web2py.py + +Thats is it!!! 
+ +## web2py file structure + +start web2py with: + + python web2py.py + +`` + \project + README + LICENSE + TODO + Makefile ## make all and make app + web2py.py ## the startup script (*) + parameters.py ## created at startup + admin.tar ## the admin app (*) + examples.tar ## examples and documentation app (*) + welcome.tar ## the welcome app (entry point) (*) + \gluon ## the core libraries (*) + \deposit ## used for zip and install apps + setup_app.py ## used by py2app to make OSX executable + setup_exe.py ## used by py2app to make Winows executble + wsgihandler.py ## to use Gluon with mod_wsgi + \dist ## used by py2app, py2exe + \build ## used by py2app, py2exe + \tests ## under development stuff + \docs ## in progress documentation + \applications ## are the apps + \welcome ## application welcome, for example + \models + \views + \controllers + \sessions + \errors + \cache + \static + \uploads + \modules +`` + +## EWF v1.5 -> v1.6 +- load and save .py in ascii, avoids problem with LF+CR on windows +- added path.join in compileapp, fixed problem with Windows compileapp + +## EWF v1.6 -> v1.7 +- in paths replace '\' with '/' to fix problem with windows paths +- using limitby in database administration +- replaced mime/miltupart with multipart/form-data to fix a windows problem + +## EWF v1.7 -> Gluon v1.0 +- Name change +- Improved layout.html + +## Gluon v1.0 -> v1.1 +- bug in sqlhtml with JOINS queries + +## Gluon v1.1 -> v1.2 +- fixed some typos in examples +- IS_IN_SET now supports labels +- cleanup in sql.py does not cleanup, just checks valid field and table names + +## Gluon v1.3 +- added IS_IN_DB, IS_NOT_IN_DB and updated examples accordingly + +## Gluon v1.4 +- fixed problem with IS_INT_IN_RANGE and IS_FLOAT_IN_RANGE. Now an error in a validator is reported as a ticket. Good validators should not raise Exceptions. 
+- IS_IN_DB displays "label (id)" +- it can upload files without extension +- migration is now optional (define_table has migrate=False option) + +## Gluon v1.5 +-
-> in errors.html +- replace('//','////') in sub in template.py + +## Gluon v1.8 +- no more chdir (thread unsafe) +- no more sys.stdout (thread unsafe) +- response.body is StringIO() +- admin/default/site informs about upgrade +- response.locker (optional) + +## Gluon v1.9 +- allow "count(*)" in select +- db.execute() +- fixed problem with continue and return in template +- removed try: ... except in sql.py +- fixed '\t' + +## Gluon v1.10 +- fixed concurrency problems with SQLDB._instances and SQLDB._folders, now use lock +- now, by default, edit SQLFORMs retain uploaded files + +## Gluon v1.11 +- appadmin allows to keep or delete uploaded files + +## Gluon v1.12 +- in sql.py +- handles NULL values properly +- unicode support (data always stored in utf-8) +- 'date' -> datetime.date ,'time' -> datetime.time, 'datetime' -> datetime.datetime, 'boolean' -> True/False +- most types have default validators +- SQLField(...,required=True) option. +- SQLRows has __str__ that serializes in CSV and xml() that serializes in HTML +- SQLTable has import_from_csv_file(...) +- gluon.simplejson for AJAX +- in validators.py +- IS_IN_DB(db,..) - db can be an SQLSet or an SQLDB +- better error messages +- in admin +- new import/export in csv, update and delete interface. +- in appadmin +- edit form allows to keep stored encrypted password +- in main.py +- http://host not defaults to http://host/init/default/index +- New third party modules +- gluon.simplejson(.dumps, .loads) +- gluon.pyrtf(.dumps) +- gluon.rss2(.dumps) + +## Gluon v1.13 +- (this is one of the biggest revisions ever) +- Improved sql.py has support MySQL, cxOracle (experimental), extract, like and better testing +- SQLDB.tables and SQLTable.fields are now SQLCalableList objects +- Fixed bug with editing integer fields storing zero +- Admin interface now says "insert new [tablename]" and display insert, select or update properly in the title. +- Added a cache mechamism. Works for data, controllers, views and SQLRows. 
+- main.py now uses a request.folder absolute path when not os.name in ['nt','posix']. Seems to work on windowsce devices, except no file locking has consequences. +- Now you can put modules in applications/[anyapp]/modules and import them with +- import applications.[anyapp].modules.[module] as [module] +- Fixed problem with init +- New applications/examples/controller/global.py controller for docs. + +## Gluon v1.14 +- Fixed a bug fix in URLs + +## Gluon v1.15 +- New try:... except. in gluon/main.py for when sessions cannot be saved +- Now validator/formatter method allows IS_DATE('%d/%m/%Y') + +## web2py v1.16 +- yes we changed the name! Turns out Gluon was trademarked by somebody else. +- Although we are not infringing the trademark since this is a non-commercial +- product we could have run into some issues. So we have been professional +- and changed the name to web2py. +- Now SQLFORMs and FORM can have a formname and multiple forms are allowed +- per page. +- A new examples/default/index page. +- web2py.py instead of runme.py +- mysql sets utf8 encoding. +- input integer field values are automatically converted int(). + +## web2py v1.17 +- I posted v1.16 too soon. v1.17 was released after 1h to fix some bugs. + +## web2py v1.18 +- removed vulnerability in accept_languages and session_id +- Minor bug fixes. Typos and cleanup cache. Textarea now clears. +- Support for PyAMF. +- T returns a class, not a string +- new template parser (faster?) +- got rid of sintaxhighlighter in favor of server side CODE +- fix problem with cacheondisk locking +- fix 'None' instead of NULL in IS_NOT_IN_DB (I think) +- gluon.contrib.markdown +- notnull and unique in SQLField now supported (tested on sqlite mysql and postgresql) +- Storage now has __getstate__ and __setstate__ needed for pickling. 
+- session files are now locked to make it work better with asynchronous requests +- cxoracle should work, apart for limitby +- .../examples is now mapped to .../examples/default/index etc. +- .../init is now mapped to .../welcome if init is not present + +## web2py 1.19 +- minor typos + +## web2py 1.20 +- new IFRAME, LABEL, FIELDSET validators +- P(..cr2br=True) option +- FORM and SQLFORM have hidden=dict(...) option for REST +- testing framework. +- improved examples pages + +## web2py 1.21 +- replaced paste.httpserver with cherrypy.wsgi server +- temporary sessions are no longer saved +- widget has [stop] button and graph +- logging is done by main by appfactory +- fixed a bug in sql belongs + +## web2py 1.22-1.25 +- fixed minor bugs, added IS_NULL_OR + +## web2py 1.26 +- added shell.py (thanks Limodou!) +- added memcache support + +## web2py 1.27 +- IS_NULL_OR now works will all fields +- admin creates paths to static files +- wsgiserver options are passed to HttpServer +- faking limitby for oracle to make appadmin work +- all objects inherit from object +- fixed bug in app names with . 
+- fixed bug in created RestrictedError object on windows +- shell is now in gluon and accessible via web2py.py + +## web2py 1.28 +- fixed bug with belongs, faster sql.py +- included jquery.js +- minor aestetical fixes +- sortable.js is gone + +## web2py 1.29 +- Now selet mutliple works with get, so does is IS_LENGTH +- Added IS_LIST_OF +- fixed problem with admin from windows and localhost + +## web2py 1.30 +- added flv to contenttype +- added support for appengine + +## web2py 1.31-1.41 +- some bug fixes, mostly better appengine support +- mssql support +- firebird support +- widgets support +- connection pools + +## 1.42 +- fixed security issue by removing slash escape in mysql +- removed random everywhere +- use uuid for session and tickets +- use http_x_forward_for to figure out the client causing a ticket +- use longtext and longblob for mysql +- main now really catches all exceptions +- no more warnings on GAE + +## 1.43-1.48 +- html.py rewrite (better support for custom forms) (Bill Ferrett) +- new stickers in examples (thanks Mateusz) +- on windows can run in taskbar (Mark Larsen) +- in admin|edit page link to edit|controller (Nathan Freeze) +- better error codes and routes_onerror (Timothy Farrell) +- DAL support for groupy and having +- DAL support for expressions instead of values +- DAL has experimental Informix support +- fixed bug with non-printable chars in DAL +- 'text' fields limited to 2**16 (to avoid mysql problems) +- widget has -quiet and -debug (Attila Csipa) +- web2py_session uses BLOB instead of TEXT +- improved IS_URL +- Runs with python 2.6 (Tim) +- On GAE uses GAE for static files (Robin) + + +## 1.49 +- fixed a bug with taskbar widget, thanks Mark +- fixed a bug with form.latest +- made many DIV methods private (_) + + +## 1.50 +- Fixed some bugs introduced in 1.49 + +## 1.51 +- Fixed more bugs introduced in 1.49 (sql _extra and html select) +- support for sqlite:memory: + +## 1.52 +- Fixed a minor bug with _extra[key] and key not str. 
+- check for upgrade via ajax + +## 1.53 +- On GAE upload data goes automatically in datastore (blob created automatically) +- New appadmin runs on GAE (most of it, not all) +- Martin Hufsky patch allow slicing of fields in DAL expressions + +## 1.54 +- fixed minor bugs + +## 1.55? +- rowcount +- fixed bug when IS_IN_DB involved multiple fields on GAE +- T.set_current_languages +- better unittests +- response.custom_commit and response.custom_rollback +- you can next cache calls (like cache a controller that caches a select). Thanks Iceberg +- db(....id==None).select() no longer returns an error but an empty SQLRows on GAE +- db(...).delete(delete_uploads=True) and SQLFORM.accepts(....delete_uploads=True) will delete all referenced uploaded files +- DIV.element and DIV.update +- sqlrows.json() +- SQLFORM.widgets +- URL(r=request,args=0) +- IS_IN_DB(...,multiple=True) for Many2Many (sort of) +- In URL(...,f) f is url encoded +- In routes_in=[['a/b/c/','a/b/c/?var=value']] +- simplejson 2.0.7 + + +## 1.56 +- Consider the following table: + +- db.define_table('cirlce', +- db.Field('radius','double'), +- db.Field('area','double'), +- db.Field('modified_on','datetime')) + +- now you can do: + +## add a comment in the forms +- db.circle.area.comment="(this is a comment)" + +## do not show area in create/edit forms +- db.circle.area.writable=False + +- ## do not show now in display forms +- db.circle.modified_on.readable=False + +## automatically timestamp when record cretaed +- db.circle.modified_on.default=request.now + +## automatically timestamp when record is modified +- db.circle.modified_on.update=request.now + +## make the radius appear in bold in display and table +- db.circle.radius.represent=lambda value: B(value) + +## make a form that automatically computes area +- pi=3.1415 +- form=SQLFOM(db.circle) +- if form.accepts(request.vars, +- onvalidation=lambda form: form.vars.area=pi*form.vars.radius**2): ... 
+ +## make a create form in two possible ways: +- form=SQLFORM(db.circle) +- form=SQLFORM(db.circle,0) + +## make an update form in two possible ways: +- form=SQLFORM(db.circle,record) +- form=SQLFORM(db.circle,record_id) + +## make a display form in two possible ways: +- form=SQLFORM(db.circle,record,readonly=True) +- form=SQLFORM(db.circle,record_id,readonly=True) + +## so now you can do... + +- form=SQLFORM(db.circle,request.args[-1]) + +- and you get a create form if the URL ends in /0, you get an update +- form if the URL ends in /[valid_record_id] + +## you can also define once for all + +- timestamp=SQLTable(None,'timestamp', +- SQLField('created_on','datetime', +- writable=False, +- default=request.now), +- SQLField('modified_on','datetime', +- writable=False, +- default=request.now,update=request.now)) + +## and use it in all your tables + +- db.define_table('mytable',db.Field('somefield'),timestamp) + +## ## ## + +- One more feature in trunk.... + +- db.define_table('image',SQLField('file','upload')) + +- db.image.file.authorize=lambda row: True or False + +- then controller +- def download(): return response.download(request,db) +- id' is now a hidden field sqlform +- gql references converted to long +- admin login has autofocus +- new notation proposed by Robin, db.table[id] +- new UploadWidget shows images +- new generic.html shows request, response, session +- new LEGEND helper (thanks Marcus) +- fixed doctests in sql (thanks Robin) +- new notation for DB + +- record=db.table[id] +- db.table[id]=dict(...) 
+- del db.table[id] + +- request.env.web2py_version +- new class gluon.storage.Settings has lock_keys, lock_values +- jquery 1.3.1 +- PEP8 compliance +- new examples application +- runs on jython (no database drivers yet, thanks Phyo) +- fixed bugs in tests +- passes all unittest but test_rewite (not sure it should pass that one) + +- Lots of patches from Fran Boone (about tools) and Dougla Soares de Andarde (Python 2.6 compliance, user use of hashlib instead of md5, new markdown2.py) + +## 1.56.1-1.56.4 +- fixing lots of small bugs with tool and languages +- jquery.1.3.2 + +## 1.57 +- New ajax edit with keepalive (no longer logged out when editing code) +- Fixed conflict resolution page. +- Removed /user/bin/python from models/controllers + +## 1.58 +- Fixed some CRON bugs +- Fixed a bug with new ajax edit +- Experimental DB2 support in DAL +- Customizable font size in admin edit page +- New welcome/models/db.py shows how to memcache sessions on GAE with MEMDB +- More expressive titles in admin +- DB2 support. Thanks Denes! + +## 1.59-1.60 +- fixed lots of small bugs +- routes_in can filter by http_host + +## 1.61 +- fixed some typos +- auth.add_permissions(0,....) 0 indicates group of current user +- crud.update has deletable=True or False +- fixed issue with GAE detection -> gluon.settings.web2py_runtime -> request + +## 1.62 +- SQLFORMS and crud now show readble fields +- Better WingIDE support +- Languages are automatically translated +- T.force and lazyT works better, optional T.lazy=False +- gluon.storage.Messages are now translated without T +- if routes.py then request.env.web2py_original_uri +- db.table.field.isattachment = True +- internationalizaiton of admin by Yair +- admin.py by Alvaro +- new MENU helper +- new w2p file format +- new welcome app with auth, service and crud turned on + +## 1.63-1.63.4 +- no more import gluon. 
+- support for generic.xxx +- simplejson can handle datetime date and time + +## 1.63.5 +- You can do jQuery.noConflict() without breaking web2py_ajax +- Wigets can have attributes (thanks Hans) +- Lots of internal cleanup and better code reusage (thanks Hans) + +## 1.64 +- Models 2-3 times faster (thanks Alexey) +- Better LDAP support +- Works with Jython (including sqlite and postgresql with zxJDBC): + +- download jython-2.5rc3.jar +- download qlite-jdbc-3.6.14.2.jar +- java -jar jython-xxx.jar +- export CLASSPATH=$CLASSPATH:/Users/mdipierro/jython2.5rc3/sqlite-jdbc-3.6.14.2.jar +- cd web2py +- ../jython2.5rc3/jython web2py.py + +## 1.64.2 +- New IS_COMPLEX validator, thank Mr. Freeze +- Experimental Informix support +- Autologin on registration + +## 1.64.3 +- Some bug fixes + +## 1.64.4 +- Som bug fixes +- Informix Support +- response.render(stream) +- SQLFORM.factory +- SQLFORM.widgets.radio and SQLFORM.widgets.checkboxes + +## 1.65 +- reST docstrings for Sphinx, thanks Hans +- gluon/conrtib/login_methods/gae_google_account.py for google CAS login on GAE, thanks Hans +- fixed problem with Auth and Firebird 'password' issue +- new auth.settings.create_user_groups +- tickets stored on datastore on GAE and also logged, thanks Hans +- imporved IS_LENGTH with max and min, thanks Mateusz +- improved IS_EMAIL with filters, thanks Mateusz +- new IS_IMAGE checks for format and size, thanks Mateusz +- new IS_IPV4, thanks Mateusz + +## 1.65.1 +- spreadsheet +- shell history, thanks sherdim +- crontab editor, thanks Angelo +- gluon/contrib/login_methods/cas_auth.py (thanks Hans) +- DAL(...) instead of SQLDB(...) +- DAL('gae') instead of GQLDB() +- Field instead of SQLField +- (the old syntax still works) + +## 1.65.2 +- Fixed some small auth bugs +- Field.store(...) 
+ +## 1.65.3-10 +- Fixed some small bugs and typos in the docstrings +- Fixed AMF3 support + +## 1.65.11 +- Fixed a sqlhtml bug with image upload + +## 1.65.12 +- lables in auth auto-translate (thanks Alvaro) +- better ldap_auth (thanks Fran) +- auth chacks locally for blocked accounts even for alternate login methods (thanks Fran) + +## 1.65.13 +- request.url (thanks Jonathan) +- restored uploadfield_newfilename +- new examples layout nad logo (thanks Mateusz) + +## 1.66 +- new doctypes +- form.vars.newfilename +- new HTML and XHTML helpers +- better IS_LENGTH + +## 1.67.0 +- Python 2.4 support (again) +- New layout for welcome +- changed defauld field sizes to 512 +- Field(uploadfolder="...") +- appadmin works on GAE (again, somehting got broken at some point) +- new wsgiserver 3.2.0 should fix recurrent broken download problems + +## 1.67.1 +- Bux fixed + +## 1.67.2 +- Security fix in markdown + +## 1.68.1 +- New official markdown with security fix +- rows.first() +- rows.last() +- New cron +- New hindi and spanish translation +- cached uploads allow for progress bars (thanks AndCycle) +- ingres support (thanks Chris) +- legacy database support for db2, mssql with non-int primary keys (thanks Denes) +- default setting of content-type (this may cause strange behavior in old apps when downloading images) +- IS_UPPER and IS_LOWER works with unicode +- CLENUP not takes regex of allowed/now allowed chartares +- New rewrite.py allows dynamic routes +- Better error messages for IS_INT_* and IS_FLOAT_* + +## 1.68.2 +- Fixing bug with admin and missing crontab +- Fixing bug with rewrite.load on GAE (thanks Willian Wang) + +## 1.69.1 +- Fixed a bug introduced in 1.68 about inserting unicode in DAL +- Fixed other small bugs +- Better support for legacy databases (thank Denes) +- response.meta replaces response.author, response.keywords, response.description +- response.files stets dependes in plugins +- better admin for packing/unpacking plugins +- reference fiels nor 
evaluate to DALRef with lazy evaluation (cool, thanks Mr Freeze) +- can insert a record in place of a reference +- record[e] instead of record._extra[e] (tentatively!) +- record.update_record() with no args +- rows.find() (thanks Mr Freeze) +- rows.exclude() +- rows.sort() +- rows[:] + +## 1.70.1 +- Fixed bug with Rows.as_list and DALRef +- Added Rows.as_dict (thanks Mr Freeze and Thedeus) +- Added request.wsgi (thanks hcvst) allows running wsgi apps under web2py and applying wegi middleware to regular web2py actions that return strings. +- Experimental distributed transactions between postgresql, mysql and firebird +- Finally local_import is here! + +## 1.71.1 +- Complete rewrite of Rows +- renamed DALStorage->Rows, DALRef->Reference +- Experimental serializarion of Row and Rows (get serialized to dict and list of dict) +- DAL(...,folder) and template.render(content=, context=) make it more modular + +## 1.72.1 - 1.72.3 +- Better support for legacy databases + +## 1.73.1 +- Fixed problem with storage and comparison of Row objects +- Fixed problem with mail on GAE +- Fixed problem with T in IS_DATE(TIME) error_message and format +- Rows[i].delete_record() +- Even better support for legacy databases +- Experimantal support for non UTF8 encoding in DB +- Better IPV4 (thanks Thandeus) +- T.current_languages default to 'en' and new T.set_current_languages(...) 
(thanks Yarko) +- INPUT(...,hideerror=False) used to fix rare chechbox widget problem +- Admin allows change of admin password +- New gluon/contrib/populate.py +- Size of input/textarea set by CSS no more by jQuery (thanks Iceberg) +- Customizable CSV (thanks Thandeus) +- More bug fixed (thanks Thandeus) +- Better regex for template fixed Jython problem (thank Jonathan) + +## 1.74.1 +- Moved to mercurial +- Default validators use the new define_table(....,format='...') +- New get_vars and post_vars compatible in 2.5 and 2.6 (thanks Tim) +- Major rewrite of gql.py extends DAL syntax on GAE +- No more *.w2p, welcome.w2p is create automatically, base apps are always upgraded +- export_to_csv(delimiter = ',', quotechar = '"', quoting = csv.QUOTE_MINIMAL), thanks Thadeus + +## 1.74.2-4 +- Fix bugs including including unicode in emails and blobs on GAE + +## 1.74.5 +- bug fixes +- restored python 2.4 support,thanks ont.rif +- support for native types on Google App Engine +- cache.ram usage statictics, thanks Thadus +- no more auth manu in scaffolding +- no more spash screen with -Q +- fixed doctest in html.py, thanks Anand Vaidya +- export_to_csv_file has represent, thanks Thadeus + +## 1.74.6 +- bug fixes +- IS_IN_DB(...,_and=IS_NOT_IN_DB) +- Smaller populate, thanks Piotr +- better slicing of fields, thanks Michael Fig +- Cache stats, thanks Thadeus +- Better gql.py +- IS_IN_DB and IS_IN_SET default to zero='', no longer zero=None + +## 1.74.7 +- request_password_reset and password reset verification +- python web2py.py -S app -M -R script.py -A arg1 arg2 arg3 +- T("%(a)s") % dict(a="hello") + +## 1.74.8 +- IS_SLUG, thanks Gustavo and Jonathan +- web2py.py -nogui, thanks Jeff Bauer +- solved a problem with jython, thanks Tim Farrel +- login has "remember be option", thanks Piotr Banasziewicz +- fixed problem with keepvalue in update forms, thanks Miguel Lopez + +## 1.74.9 +- IS_IN_SET(((0,'label0'),(1,'label1'))), thanks Falko Krause +- SQLFORM(...).accpets stores True 
or False in boolean types, not None, thanks Frederik Wagner
- SQLFORM.factory(...,table_name='no_table'), thanks Thedeus
- jQuery 1.4.1
- Fixed major problem with internationalization of multiple languages.
- Fixed a serious security issue with login
- Possibly fixed some issues with cron

## 1.75.1
- better cron
- better fetch
- logging of email failures
- new web2py.fedora.sh
- new setup-web2py-ubuntu.sh
- experimental autocomplete
- menus work on IE6

## 1.75.2
- fetch supports cache
- crud.update(....,onaccept=crud.archive) magic
- new UUID mechanism fixes session conflicts with cloned machine in cloud
- allow to upload app and overwrite existing old ones, thanks Jonathan
- print gluon.tools.prettydate(request.now,T), thanks Richard

## 1.75.3
- added support for PAM authentication for apps and for admin
- INTRODUCED MAJOR BUG IN BEAUTIFY (upgrade to 1.75.4) IMMEDIATELY

## 1.75.4
- customizable BEAUTIFY, thanks John

## 1.75.5
- fixed behaviour with languages.py, thanks Iceberg
- added Chinese (thanks Iceberg) and Hungarian (thanks Gyuris)
- fixed problem with GAE deleted by id (thanks what_ho)
- fixed bug in LOAD with custom views, thanks vhang
- improved IS_IN_SET takes iterator, dict, list and list of tuples, thanks Iceberg
- Auth(...,controller='default')
- Fixed major bug in parsing repeated request.vars, thanks Ben
- IS_DATE and IS_DATETIME can now handle any year since 0
- SQLTABLE(db(db.table.id>0).select(),headers='fieldname:capitalize')
- Oracle limitby improved (thanks Sergey)
- fixed migrations in Firebird, thanks Jose Jachuf
- gluon/contrib/login_methods/linkedin_account.py (to be tested)

## 1.76.5
- Fixed a typo in auth that created some registration problems

## 1.77.1
- Replaced CherryPy with Rocket web server, thanks Tim
- CacheOnDisk allows to specify a folder
- IS_DATE/DATETIME can handle any year since 0
- SQLTABLE(...,headers='fieldname:capitalize')
- Field().with_alias, thanks Nathan and Mengu
- has_membership(group=...,role=...), 
thank Jonathan +- db.define_table(username=True), thanks Jonathan +- gluon.tools.prettydate +- can specify hostname in routes_out (same syntax as routes in), thanks Martin +- db.table.bulk_insert([...records...]) now works on GAE, thanks Jon +- IS_EMAIL validates on 'localhost', thanks Jonathan +- welcome/views/layout.html uses ez.css, thanks Yarko +- mail attachments support utf8, thanks szimszon +- works with PyPy, thanks Joe +- better Firebird support, thanks Jose +- better Oracle support, thanks Gabriele +- cron supports days of week +- SQLFORM(...,formstyle="table3cols") or "table2cols" or "divs" or "ul" +- crud.settings.formstyle +- web2py.py -f folder allows to specify locations of applications, thanks Iceberg +- better/faster regex in template works in Jython +- fixed lots of small bugs + +## 1.77.2 +- fixed x-index in calendar +## 1.77.3 +- some cleanup of code in compileapp + +## 1.78.1 +- new template system allows {{block name}}{{end}}, thanks Thadeus +- fixed mime headers in emails, included PGP in emails, thanks Gyuris +- automatic database retry connect when pooling and lost connections +- OPTGROUP helper, thanks Iceberg +- web2py_ajax_trap captures all form submissions, thank you Skiros +- multicolumn checkwidget and arbitrary chars in multiple is_in_set, thanks hy +- Québécois for welcome, thanks Chris +- crud.search(), thanks Mr Freeze +- DAL(...migrate,fake_migrate), thanks Thadeus + +## 1.78.3 +- reverted temporarily to old template system because of backward compatibility issues + +## 1.79.1 +- x509 emails, thanks Gyuris +- attachment and html in Mail on GAE, thanks PanosJee +- fixed docstring in SQLTABLE, thanks aabelyakov +- TAG(html) parese html into helpers (experimental, still some problems with unicode, , thanks RobertVa for unicode help) +- DIV.elements(find=re.compile(....)) +- DIV.flatten() +- DIV.elements('....') supports jQuery syntax in '....' 
+- better it-it.py and it.py, thanks Marcello Della Longa +- Many Bug fixes: +- improved support for DAL and joins in postgresql/oracle, thanks Nico de Groot +- bux fixex in html.py, thanks Ian +- fixed an issue with registration_key==None, thanks Jay Kelkar +- fixed bug in gql.py, thanks NoNoNo +- fixed problem with multiple and checkboxes, thanks MIchael Howden +- fixed bug in gae, thanks NoNoNo +- restored 2.4 compatibility, thanks Paolo Gasparello +- auth.update() when pictures in profile +- formstyle can be a function, thanks Howden +- escape in sanitizer, thanks Howes +- add missing settings, thanks Hamdy +- find and exclude return empty Rows instead of [], thanks iceberg +- simplejson 2.1.1 should fix compatibility problems +- added sms_utils and Authorize.net in contrib + +## 1.79.2 +- solved simplejson imcompatibility problem + +## 1.80.1 +- MARKMIN helper (no backward compatibility promise yet) +- self._last_reference, thanks Dave (no backward compatibility promise yet) +- IS_EQUAL_TO +- zh-tw and better internationalization page, thanks Daniel Lin and Iceberg +- better crud.search, thanks MrFreeze +- Rocket interfaces, thanks Nik Klever +- db.table.field.uploadseparate=True, thanks Gyuris +- SCOPE_IDENITY for MSSQL, thanks Jose +- fixed email attachment issue, thanks Bob_in_Comox +- fixed problem with groupby and IS_IN_DB +- other bug fixes +- new implementation for local_import +- ajax(..,...,null) +- fixed Chrome bug in calendar.js, thanks Iceberg +- experimental scrips/web2py-setup-fedora.sh +- generic.load, thanks Iceberg + +## 1.81.1 +- rpx (janrain) support out of the box, allows login with Facebook, MySpace, etc. Thanks Mr Freeze +- Increased security (escape single and double quotes, thanks Craig" +- Fixed a bug with db.table.field.uploadseparate=True and autodelete +- New welcome app with superfish and jQuery 1.4.2 +- Deleted openwysiwyg from admin +- In XML and xmlescape quote defaults to True. Both ' and " are escaped. 
Thanks Craig Younkins + +## 1.81.2 +- fixed bug in Auth + +## 1.81.3 +- fixed bug in label names in formstyle +- fixed id names in admin test.html page + +## 1.81.4 +- Fixed gluon.tools to work work with load and base.css to nowrap labels + +## 1.81.5 +- Fixed a few bugs. The most important bugs we fixed are in memcache (thanks Scott) and in a process starvation issue with Rocket (thanks Mike Ellis and Tim). + +## 1.82.1 +- request.ajax to detect if action is called via ajax, tahnks Jonathan and David Mako +- more captcha options, thanks Vidul +- openid and oauth2 thanks Michele and Keith +- better PluginManager and load components +- new template system, thanks Thadeus +- new db.table(id,[field=value]) and db.table(query) syntax +- URL('index') (no more r=request), thanks Thadeus +- mail.send(message='...', ....) +- DAL([....]) for load balancing +- @service.soap(...) with mysimplesoap, thanks Mariano +- hideerror + +## 1.83.1 +- New error reporting mechanism (thanks Mariano) +- New routing system with app level routing (thanks Jonathan) +- Integrated GAE appstat and GAE precompilation (thanks Scott) +- New Field types "list:string", "list:integer", "list:reference" +- request.cid, request.ajax, A(cid=request.cid), response.js + +## 1.83.2 +- mostly cleanup + +## 1.84.1-4 +- flash now stays put in the top right corner +- improved behavior for URL and T objects +- new app level logging with logging.conf (thanks Jonathan) +- improved OpenID (thanks Michele) +- web2py_ajax handles prepend, append, hide (thanks Juris) +- web2py_ajax also handels pre-validation of decimal fields +- ru-ru translation (thanks Michele) +- sk-sk translation (thanks Julius) +- migrations save .table only if changed and after each ALTER TABLE (no more mysql inconsistencies) +- fixed bugs in SQLCustomField, Field(default=...), IS_IMAGE, IS_DECIMAL_IN_RANGE and a few more. 
+- Better validators (IS_DECIMAL_IN_RANGE, IS_INT_IN_RANGE, etc) thanks Jonatham +- Polymmodel support on GAE +- Experimental ListWidget +- moved DAL and routes to thread.local (thanks Jonathan, again) +- scripts/extract_mysql_models.py, thanks Falko Krause and Ron McOuat +- scripts/dbsessions2trash.py, thanks Scott + +## 1.85.1-3 +- fixed some bugs +- added pyfpdf, thank Mariano + +## 1.86.1-1.86.3 +- markmin2latex +- markmin2pdf +- fixed some bugs +- Storage getfirst, getlast, getall by Kevin and Nathan +- db(db.table), db(db.table.id) both suported and equivalent to db(db.table.id>0) +- postresql ssl support +- less un-necessary logging and warnings on GAE +- IS_DECIMAL_IN_RANGE and IS_FLOAT_IN_RANGE support dot="," (dot="." is default) +- on_failed_authorization can be a function, thanks Niphold +- gluon/contrib/login_methods/cas_auth.py for integration between CAS and Auth. + +## 1.86.3 +- Error reporting on save, thanks Mariano +recalled + +## 1.87.1-2 +- new layout for examples, thanks Bruno and Martin +- admin allow ``DEMO_MODE=True`` and ``FILTER_APPS=['welcome']`` +- fixed a possible problem with CRON and paths + + +## 1.87.3 +- fixed a major bug introduced in 1.87.1 that prevents appadmin from working for new apps created with 1.87.1-2. +- upgraded to clockpick 1.28, thanks villas + +## 1.88.1-2 +- better list: string support, thanks Bob +- jquery 1.4.3 +- scripts/autoroutes.py +- new admin wizard +- added retrieve_username to navbar (if username) +- internal rewrite for arbitrary paths (abspath), thanks Jonathan +- populate support for list: and decimal, thanks Chirstian +- markmin2latex has extra attribute +- better mercual admin allows list of files, versions and retrieve +- new error report system, thanks Thadeus and Selecta +- SQLFORM.accepts(detect_record_change).record_changed +- fixed cron for bytecode compiled apps, thanks Álvaro J. 
Iradier Muro +- other bugs fixes and pep8 compliant fixes + +## 1.89.1-.5 +- new admin layout (thanks Branko Vukelic) +- new admin search +- new admin language selector (thanks Yair) +- new Welcome app (thanks Martin Mulone) +- better wizard +- admin support for DEMO_MODE=True +- admin exposes GAE deployment button (always) +- MENU support None links (thanks Michael Wolfe) +- web2py.py -J for running cron (thanks Jonathan Lundell) +- fixed ~db.table.id on GAE (thanks MicLee) +- service.jsonrpc supports service.JsonRpcException (thanks Matt) +- bug fixes + +## 1.90.1 +- new DAL (complete rewrite of the web2py DAL is more modular) +- rewrite has fail safe reload, thanks Jonathan +- better CAS with v2 support, thanks Olivier ROCH VILATO +- better markmin2latex +- session.connect(separate=True) to handle many session files, thanks huaiyu wang +- changed bahvior of impersonate (more secure, can generate form or used as API) +- new rocket, thanks Tim +- new pyfpdf +- no more old style classes +- experimental couchdb support in new dal (only insert, select, update by id) +- mysql support out of the box via pymysql +- SQLITABLE(...,headers='labels') thanks Bruno +- optional: digitally signed URLs, thanks Brian Meredyk +- minor bug fixes + +## 1.90.2-4 +- pymysql no longer requires ssl (if not used) +- fixed bug with virtualfields +- fixed bug in truncate (new dal) +- fixed bug in select with alternate primary key (new dal) +- fixed bug with IS_IN_DB and self refences (also new dal) + +## 1.90.5 +- set poll = False in rocket because of poll python thread bug often unfixed, thanks Jonathan +- fixes issue with crud and reCaptcha + +## 1.90.6 +- fix issue with pickling new dal Row and Rows. + +## 1.91.1 +- LICENSE CHANGE FROM GPLv2 to LGPLv3 +- URL(...,hash_vars=...) 
allows to specify which vars need to be signed +- fixed bug with aliasing in new DAL + +## 1.91.2-1.91.5 +- fixed a problem with deplyment on GAE +- other new dal bug fixes + +## 1.91.6 +- web2py comet via gluon/contrib/comet_messaging.py (html5 websockets) experimental +- fixed problem with services (broken in 1.91.5), thanks Vollrath +- customizable uploadwidget, thanks Fran +- fixed problem with mail unicode support, thanks Richard +- fixed problem with linkto=None and references fields in SQLTABLE, thanks villas +- no more upgrade button on windows since does not work +- better remember-me login, thanks Martin Weissenboeck +- support for recatcha options +- support for GAE namespaces via DAL('gae://namespace') +- new rocket (1.2.2), thanks Tim +- many other bug fixes and improvements (thanks Jonathan) + +## 1.92.1 +- much improved routing (thanks Jonathan) +- Expression.__mod__ (thanks Denes) +- admin has MULTI_USER_MODE (admin/models/0.py) +- support for count(distinct=...) +- has_permissions(...,group_id) +- IS_MATCH(...,strict=True) +- URL(...,scheme=,host=,port=), thanks Jonathan +- admin in Afrikaans, thanks Caleb +- auth.signature (experimental) +- many other bug fixes + +## 1.93.1-2 +- support for multiple interfaces, thanks Jonathan +- jquery 1.5.1 +- simplejson 2.1.3 +- customizable simplejson +- leaner app.yaml +- css3 buttons in welcome +- android support (experimental) +- Field(':hidden'), Field('.readonly'), Field('name=value') +- combined expressions print db.data.body.len().sum() +- wizard can download plugins +- better json serilization (object.custom_json) +- better xml serialization (object.custom_xml) +- better formstyle support +- better comet_messaging.py (needs more testing) +- many bug fixes + +## 1.94.1 +- moderniz 1.17 +- web2py no longer saves session if no change, this makes it up up to 10x faster for simple actions +- experimental REST API +- better support for MSSQL NOT NULL +- small bug fixes + +## 1.94.2 +- reverted wrong behavior 
of auth.requires(condition) in 1.94.1 + +## 1.94.3 +- fixed major bug in auth redirection + +## 1.94.4 +- removed debug print statement that caused problems on GAE and mod_wsgi + +## 1.94.5 +- fixed a major bug with session introdued in 1.94.1 + +## 1.94.6 +- fixed a number of minor bugs including adding some missing files +- better session handling on session._unlock(..), thanks Jonathan +- added experimental pip support, thanks Lifeeth +- added experimental SAP DB support + +## 1.95.1 +- Google MySQL support (experimental) +- pip support, thanks lifeeth +- better setup_exe.py, thanks meredyk +- importved pyfpdf +- domain check in email_auth.py, thanks Gyuris +- added change_password_onvalidation and change_password_onaccept +- DAL(...,migrate_enabled=True) +- login_methods/loginza.py, thanks Vladimir +- bpython shell support, thanks Arun +- request.uuid and response.uuid (for a future toolbar) +- db._timings contains database query timing info +- efficient db(...).isempty() +- setup-web2py-nginx-uwsgi-ubuntu.sh +- Many bug fixes, thanks Jonathan + +## 1.96.1 + +- "from gluon import *" imports in every python module a web2py environment (A, DIV,..SQLFORM, DAL, Field,...) including current.request, current.response, current.session, current.T, current.cache, thanks Jonathan. +- conditional models in + models//a.py and models///a.py +- from mymodule import *, looks for mymodule in applications/thisapp/modules first and then in sys.path. No more need for local_import. Thanks Pierre. +- usage of generic.* views is - by default - restricted to localhost for security. This can be changed in a granular way with: response.generic_patterns=['*']. This is a slight change of behavior for new app but a major security fix. 
+ +- all applications have cas 2.0 provider at http://.../user/cas/login +- all applications can delegate to login to external provider Auth(...,cas_provider='http://.../other_app/default/user/cas') +- A(...,callback=URL(...),larget='id') does Ajax +- URL(...,user_signature=True), LOAD(...,user_signature=True) can sign urls and @auth.requires_signature() will check the signature for any decorated action. + +- DAL(...,migrate_enabled=False) to disable all migrations +- DAL(...,fake_migrate_all=True) to rebuild all corrupted metadata +- new DAL metadata format (databases/*.table) +- DAL(...,adapter_arg={}) allows support for alternate drivers +- DAL now allows circular table defintions +- DAL(..,auto_import=True) automatically imports tables from metadata without need to db.define_table(...)s. +- new alterante syntax for inner joins: db(...).select(join=...) +- experimental cubrid database support +- DAL 'request_tenant' fields are special, the altomatically filer all records based on their default value. +- db._common_fields.append(Field('owner')) allows to add fields to ALL tables +- DAL ignores repeated fields with same names + +- web2py_ajax.html is more modular, thanks Anthony +- request.is_local +- request.is_http +- new sessions2trash.py thanks Jim Karsten +- corrupted cache files are automatically deleted +- new simpler API gluon.contrib.AuthorizeNet.procss(...) +- fixed recaptcha (as they released new API) +- messages in validators have default internationalization +- No more Auth(globals(),db), just Auth(db). Same for Crud and Service. +- scripts/access.wsgi allows apache+mod_wsgi to delegate authentication of any URL to any web2py app +- json now supports T(...) +- scripts/setup-web2py-nginx-uwsgi-ubuntu.sh +- web2py HTTP responses now set: "X-Powered-By: web2py", thanks Bruno +- mostly fixed generic.pdf. You can view any page in PDF if you have pdflatex installed or if your html follows the pyfpdf convention. 
+- auth.settings.extra_fields['auth_user'].append(Field('country')) allows to extend auth_* tables without need of definiting a custom auth_* table. Must be placed before auth.define_tables() +- {{=response.toolbar()}} to help you debug applications +- web based shell now supports object modifications (but no redefinitions of non-serializable types) +- jQuery 1.6.1 +- Lots of bug fixes + +# 1.96.2-1.96.4 +- bug fixes + +# 1.97.1 +- validate_and_update, thanks Bruno +- fixed problem with new custom import, thanks Mart +- fixed pyamf 0.6, thanks Alexei and Nickd +- fixed "+ =" bug in wizard +- fixed problem with allowed_patterns +- fixed problems with LOAD and vars and ajax +- closed lots of google code tickets +- checkboxes should now work with list:string +- web2py works on Android, thanks Corne Dickens +- new cpdb.py, thanks Mart +- improved translation (frech in particuler), thanks Pierre +- improved cas_auth.py, thanks Sergio +- IS_DATE and IS_DATETIME validators now work with native types +- better description of --shell, thanks Anthony +- extra SQLTABLE columns, thanks Martin +- fixed toolbar conflics, thanks Simon +- GAE password shows with **** + +# 1.98.1-1.98.2 +- fixed some problems with LOAD(ajax=False), thanks Anthony +- jquery 1.6.2 +- gevent.pywsgi adds ssl support, thanks Vasile +- import/export of blobs are base64 encoded +- max number of login attemts in admin, thanks Ross +- fixed joins with alias tables +- new field.custom_delete attribute +- removed resctions on large 'text fields, thanks Martin +- field.represent = lambda value,record: .... 
(record is optional) +- FORM.validate() and FORM.process(), thanks Bruno +- faster visrtualfields, thanks Howsec +- mail has ssl support separate from tls, thanks Eric +- TAG objects are now pickable +- new CAT tag for no tags +- request.user_agent(), thanks Ross +- fixed fawps support +- SQLFORM(...,separator=': ') now customizable +- many small bug fixes + +## 1.99.1 +- gluon/contrib/simplejsonrpc.py +- gluon/contrib/redis_cache.py +- support for A(name,callback=url,target='id',delete='tr') +- support for A(name,component=url,target='id',delete='tr') +- new pip installer, thanks Chris Steel +- isapiwsgihandler.py +- dal expression.coalesce(*options) +- gluon/contrib/simplejsonrpc.py, thanks Mariano +- expire_sessions.py respects expiration time, thanks iceberg +- addressed this issue: http://fuelyourcoding.com/jquery-events-stop-misusing-return-false/ +- x509 support (thanks Michele) +- form.process() and for.validate() +- rocket upgrade (1.2.4) +- jQuery upgrade (1.6.3) +- new syntax rows[i]('tablename.fieldname') +- new query syntax field.contains(list,all=True or False) +- new SQLFORM.grid and SQLFORM.smartgrid (should replace crud.search and crud.select) +- support for natural language queries (english only) in SQLFORM.grid +- support for computed columns and additional links in SQLFORM.grid +- new style virtual fields (experimental): db.table.field=Field.Lazy(...) 
+- request.utcnow +- cleaner/simpler welcome/models/db.py and welcome layout.html +- response.include_meta() and response.include_files(), thanks Denes +- dal auto-reconnect on time-out connections +- COL and COLGROUP helpers +- addresed OWASP #10, thanks Anthony and Eric +- auth.settings.login_after_registration=True +- detection of mobile devices and @mobilize helper (view.mobile.html), thanks Angelo +- experimental gluon/scheduler.py +- scripts/make_min_web2py.py +- crud.search has more options, thanks Denes +- many bug fixes (thanks Jonathan, Michele, Fran and others) ADDED VERSION Index: VERSION ================================================================== --- /dev/null +++ VERSION @@ -0,0 +1,1 @@ +Version 1.99.2 (2011-09-26 06:55:33) stable ADDED __init__.py Index: __init__.py ================================================================== --- /dev/null +++ __init__.py @@ -0,0 +1,3 @@ + + + ADDED anyserver.py Index: anyserver.py ================================================================== --- /dev/null +++ anyserver.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +This file is based, althought a rewrite, on MIT code from the Bottle web framework. 
+""" + +import os, sys, optparse, urllib +path = os.path.dirname(os.path.abspath(__file__)) +os.chdir(path) +sys.path = [path]+[p for p in sys.path if not p==path] +import gluon.main +from gluon.fileutils import read_file, write_file + +class Servers: + @staticmethod + def cgi(app, address=None, **options): + from wsgiref.handlers import CGIHandler + CGIHandler().run(app) # Just ignore host and port here + + @staticmethod + def flup(app,address, **options): + import flup.server.fcgi + flup.server.fcgi.WSGIServer(app, bindAddress=address).run() + + @staticmethod + def wsgiref(app,address,**options): # pragma: no cover + from wsgiref.simple_server import make_server, WSGIRequestHandler + class QuietHandler(WSGIRequestHandler): + def log_request(*args, **kw): pass + options['handler_class'] = QuietHandler + srv = make_server(address[0],address[1],app,**options) + srv.serve_forever() + + @staticmethod + def cherrypy(app,address, **options): + from cherrypy import wsgiserver + server = wsgiserver.CherryPyWSGIServer(address, app) + server.start() + + @staticmethod + def rocket(app,address, **options): + from gluon.rocket import CherryPyWSGIServer + server = CherryPyWSGIServer(address, app) + server.start() + + @staticmethod + def rocket_with_repoze_profiler(app,address, **options): + from gluon.rocket import CherryPyWSGIServer + from repoze.profile.profiler import AccumulatingProfileMiddleware + from gluon.settings import global_settings + global_settings.web2py_crontype = 'none' + wrapped = AccumulatingProfileMiddleware( + app, + log_filename='wsgi.prof', + discard_first_request=True, + flush_at_shutdown=True, + path = '/__profile__' + ) + server = CherryPyWSGIServer(address, wrapped) + server.start() + + @staticmethod + def paste(app,address,**options): + from paste import httpserver + from paste.translogger import TransLogger + httpserver.serve(app, host=address[0], port=address[1], **options) + + @staticmethod + def fapws(app,address, **options): + import 
fapws._evwsgi as evwsgi + from fapws import base + evwsgi.start(address[0],str(address[1])) + evwsgi.set_base_module(base) + def app(environ, start_response): + environ['wsgi.multiprocess'] = False + return app(environ, start_response) + evwsgi.wsgi_cb(('',app)) + evwsgi.run() + + + @staticmethod + def gevent(app,address, **options): + from gevent import monkey; monkey.patch_all() + from gevent import pywsgi + from gevent.pool import Pool + pywsgi.WSGIServer(address, app, spawn = 'workers' in options and Pool(int(option.workers)) or 'default').serve_forever() + + @staticmethod + def bjoern(app,address, **options): + import bjoern + bjoern.run(app, *address) + + @staticmethod + def tornado(app,address, **options): + import tornado.wsgi + import tornado.httpserver + import tornado.ioloop + container = tornado.wsgi.WSGIContainer(app) + server = tornado.httpserver.HTTPServer(container) + server.listen(address=address[0], port=address[1]) + tornado.ioloop.IOLoop.instance().start() + + @staticmethod + def twisted(app,address, **options): + from twisted.web import server, wsgi + from twisted.python.threadpool import ThreadPool + from twisted.internet import reactor + thread_pool = ThreadPool() + thread_pool.start() + reactor.addSystemEventTrigger('after', 'shutdown', thread_pool.stop) + factory = server.Site(wsgi.WSGIResource(reactor, thread_pool, app)) + reactor.listenTCP(address[1], factory, interface=address[0]) + reactor.run() + + @staticmethod + def diesel(app,address, **options): + from diesel.protocols.wsgi import WSGIApplication + app = WSGIApplication(app, port=address[1]) + app.run() + + @staticmethod + def gnuicorn(app,address, **options): + import gunicorn.arbiter + gunicorn.arbiter.Arbiter(address, 4, app).run() + + @staticmethod + def eventlet(app,address, **options): + from eventlet import wsgi, listen + wsgi.server(listen(address), app) + + @staticmethod + def mongrel2(app,address,**options): + import uuid + 
sys.path.append(os.path.abspath(os.path.dirname(__file__))) + from mongrel2 import handler + conn = handler.Connection(str(uuid.uuid4()), + "tcp://127.0.0.1:9997", + "tcp://127.0.0.1:9996") + mongrel2_handler(app,conn,debug=False) + + +def run(servername,ip,port,softcron=True,logging=False,profiler=None): + if logging: + application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, + logfilename='httpserver.log', + profilerfilename=profiler) + else: + application = gluon.main.wsgibase + if softcron: + from gluon.settings import global_settings + global_settings.web2py_crontype = 'soft' + getattr(Servers,servername)(application,(ip,int(port))) + +def mongrel2_handler(application,conn,debug=False): + """ + Based on : + https://github.com/berry/Mongrel2-WSGI-Handler/blob/master/wsgi-handler.py + + WSGI handler based on the Python wsgiref SimpleHandler. + A WSGI application should return a iterable op StringTypes. + Any encoding must be handled by the WSGI application itself. + """ + from wsgiref.handlers import SimpleHandler + try: + import cStringIO as StringIO + except: + import StringIO + + # TODO - this wsgi handler executes the application and renders a page + # in memory completely before returning it as a response to the client. + # Thus, it does not "stream" the result back to the client. It should be + # possible though. The SimpleHandler accepts file-like stream objects. So, + # it should be just a matter of connecting 0MQ requests/response streams to + # the SimpleHandler requests and response streams. However, the Python API + # for Mongrel2 doesn't seem to support file-like stream objects for requests + # and responses. Unless I have missed something. 
+ + while True: + if debug: print "WAITING FOR REQUEST" + + # receive a request + req = conn.recv() + if debug: print "REQUEST BODY: %r\n" % req.body + + if req.is_disconnect(): + if debug: print "DISCONNECT" + continue #effectively ignore the disconnect from the client + + # Set a couple of environment attributes a.k.a. header attributes + # that are a must according to PEP 333 + environ = req.headers + environ['SERVER_PROTOCOL'] = 'HTTP/1.1' # SimpleHandler expects a server_protocol, lets assume it is HTTP 1.1 + environ['REQUEST_METHOD'] = environ['METHOD'] + if ':' in environ['Host']: + environ['SERVER_NAME'] = environ['Host'].split(':')[0] + environ['SERVER_PORT'] = environ['Host'].split(':')[1] + else: + environ['SERVER_NAME'] = environ['Host'] + environ['SERVER_PORT'] = '' + environ['SCRIPT_NAME'] = '' # empty for now + environ['PATH_INFO'] = urllib.unquote(environ['PATH']) + if '?' in environ['URI']: + environ['QUERY_STRING'] = environ['URI'].split('?')[1] + else: + environ['QUERY_STRING'] = '' + if environ.has_key('Content-Length'): + environ['CONTENT_LENGTH'] = environ['Content-Length'] # necessary for POST to work with Django + environ['wsgi.input'] = req.body + + if debug: print "ENVIRON: %r\n" % environ + + # SimpleHandler needs file-like stream objects for + # requests, errors and reponses + reqIO = StringIO.StringIO(req.body) + errIO = StringIO.StringIO() + respIO = StringIO.StringIO() + + # execute the application + handler = SimpleHandler(reqIO, respIO, errIO, environ, multithread = False, multiprocess = False) + handler.run(application) + + # Get the response and filter out the response (=data) itself, + # the response headers, + # the response status code and the response status description + response = respIO.getvalue() + response = response.split("\r\n") + data = response[-1] + headers = dict([r.split(": ") for r in response[1:-2]]) + code = response[0][9:12] + status = response[0][13:] + + # strip BOM's from response data + # Especially the 
WSGI handler from Django seems to generate them (2 actually, huh?) + # a BOM isn't really necessary and cause HTML parsing errors in Chrome and Safari + # See also: http://www.xs4all.nl/~mechiel/projects/bomstrip/ + # Although I still find this a ugly hack, it does work. + data = data.replace('\xef\xbb\xbf', '') + + # Get the generated errors + errors = errIO.getvalue() + + # return the response + if debug: print "RESPONSE: %r\n" % response + if errors: + if debug: print "ERRORS: %r" % errors + data = "%s\r\n\r\n%s" % (data, errors) + conn.reply_http(req, data, code = code, status = status, headers = headers) + +def main(): + usage = "python anyserver.py -s tornado -i 127.0.0.1 -p 8000 -l -P" + try: + version = read_file('VERSION') + except IOError: + version = '' + parser = optparse.OptionParser(usage, None, optparse.Option, version) + parser.add_option('-l', + '--logging', + action='store_true', + default=False, + dest='logging', + help='log into httpserver.log') + parser.add_option('-P', + '--profiler', + default=False, + dest='profiler', + help='profiler filename') + servers = ', '.join(x for x in dir(Servers) if not x[0]=='_') + parser.add_option('-s', + '--server', + default='rocket', + dest='server', + help='server name (%s)' % servers) + parser.add_option('-i', + '--ip', + default='127.0.0.1', + dest='ip', + help='ip address') + parser.add_option('-p', + '--port', + default='8000', + dest='port', + help='port number') + parser.add_option('-w', + '--workers', + default='', + dest='workers', + help='number of workers number') + (options, args) = parser.parse_args() + print 'starting %s on %s:%s...' 
% (options.server,options.ip,options.port) + run(options.server,options.ip,options.port,logging=options.logging,profiler=options.profiler) + + +if __name__=='__main__': + main() + ADDED app.example.yaml Index: app.example.yaml ================================================================== --- /dev/null +++ app.example.yaml @@ -0,0 +1,88 @@ +# For Google App Engine deployment, copy this file to app.yaml +# and edit as required +# See http://code.google.com/appengine/docs/python/config/appconfig.html +# and http://web2py.com/book/default/chapter/11?search=app.yaml + +application: web2py +version: 1 +api_version: 1 +runtime: python + +default_expiration: "24h" + +derived_file_type: +- python_precompiled + +handlers: + +- url: /_ah/stats.* + script: $PYTHON_LIB/google/appengine/ext/appstats/ui.py + login: admin + +- url: /(?P.+?)/static/(?P.+) + static_files: applications/\1/static/\2 + upload: applications/(.+?)/static/(.+) + secure: optional + +- url: /favicon.ico + static_files: applications/welcome/static/favicon.ico + upload: applications/welcome/static/favicon.ico + +- url: /robots.txt + static_files: applications/welcome/static/robots.txt + upload: applications/welcome/static/robots.txt + +- url: /_ah/admin/.* + script: $PYTHON_LIB/google/appengine/ext/admin + login: admin + +- url: /_ah/queue/default + script: gaehandler.py + login: admin + +- url: .* + script: gaehandler.py + secure: optional + +admin_console: + pages: + - name: Appstats + url: /_ah/stats + +skip_files: | + ^(.*/)?( + (app\.yaml)| + (app\.yml)| + (index\.yaml)| + (index\.yml)| + (#.*#)| + (.*~)| + (.*\.py[co])| + (.*/RCS/.*)| + (\..*)| + (applications/(admin|examples)/.*)| + ((admin|examples|welcome)\.(w2p|tar))| + (applications/.*?/(cron|databases|errors|cache|sessions)/.*)| + ((logs|scripts)/.*)| + (anyserver\.py)| + (web2py\.py)| + ((cgi|fcgi|modpython|wsgi)handler\.py)| + (epydoc\.(conf|css))| + (httpserver\.log)| + (logging\.example\.conf)| + (route[rs]\.example\.py)| + 
(setup_(app|exe)\.py)| + (splashlogo\.gif)| + (parameters_\d+\.py)| + (options_std.py)| + (gluon/tests/.*)| + (gluon/(rocket|winservice)\.py)| + (contrib/(gateways|markdown|memcache|pymysql)/.*)| + (contrib/(populate|taskbar_widget)\.py)| + (google_appengine/.*)| + (.*\.(bak|orig))| + )$ + +builtins: +- remote_api: on +- datastore_admin: on ADDED appengine_config.py Index: appengine_config.py ================================================================== --- /dev/null +++ appengine_config.py @@ -0,0 +1,5 @@ +def webapp_add_wsgi_middleware(app): + from google.appengine.ext.appstats import recording + app = recording.appstats_wsgi_middleware(app) + return app + ADDED applications/__init__.py Index: applications/__init__.py ================================================================== --- /dev/null +++ applications/__init__.py ADDED applications/admin/ABOUT Index: applications/admin/ABOUT ================================================================== --- /dev/null +++ applications/admin/ABOUT @@ -0,0 +1,6 @@ +web2py is an open source full-stack framework for agile development +of secure database-driven web-based applications, written and programmable in +Python. 
+ +Created by Massimo Di Pierro + ADDED applications/admin/LICENSE Index: applications/admin/LICENSE ================================================================== --- /dev/null +++ applications/admin/LICENSE @@ -0,0 +1,137 @@ +## Web2py License + +Web2py is Licensed under the LGPL license version 3 +(http://www.gnu.org/licenses/lgpl.html) + +Copyrighted (c) by Massimo Di Pierro (2007-2011) + +### On Commercial Redistribution + +In accordance with LGPL you may: +- redistribute web2py with your apps (including official web2py binary versions) +- release your applications which use official web2py libraries under any license you wish +But you must: +- make clear in the documentation that your application uses web2py +- release any modification of the web2py libraries under the LGPLv3 license + +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL +NECESSARY SERVICING, REPAIR OR CORRECTION. + +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT +HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, +BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL +DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES +OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER +PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. 
+
+(Earlier versions of web2py, 1.0.*-1.90.*, were released under the GPL2 license plus a
+commercial exception which, for practical purposes, was very similar to the current LGPLv3)
+
+### Licenses for third party contributed software
+
+web2py contains third party software under the gluon/contrib/ folder.
+Each file/module in contrib is distributed with web2py under its original license.
+Here we list some of them.
+
+#### gluon.contrib.simplejson LICENSE
+
+Copyright (c) 2006 Bob Ippolito - Permission is hereby granted, free of charge,
+to any person obtaining a copy of this software and associated documentation
+files (the "Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish, distribute,
+sublicense, and/or sell copies of the Software, and to permit persons to whom
+the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+#### gluon.contrib.rss2.py (originally PyRSS2Gen) LICENSE
+
+This is copyright (c) by Dalke Scientific Software, LLC and released under the
+BSD license. See the file LICENSE in the distribution or
+ for details.
+
+#### gluon.contrib.markdown (markdown2) LICENSE
+
+MIT License from 
+
+#### gluon.contrib.feedparser LICENSE
+
+Copyright (c) 2002-2005, Mark Pilgrim
+
+All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +* Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. + +#### gluon.wsgiserver.py LICENSE (borrowed from cherrypy) + +Copyright (c) 2004, CherryPy Team (team@cherrypy.org) +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * Neither the name of the CherryPy Team nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#### gluon.contrib.pam LICENSE + +Copyright (C) 2007-2009 Chris AtLee Licensed under the MIT license + +#### gluon.contrib.shell LICENSE + +Copyright (C) by Google inc. 
Apache 2.0 Lincense + +#### The javascript licenses are in the code itself + ADDED applications/admin/__init__.py Index: applications/admin/__init__.py ================================================================== --- /dev/null +++ applications/admin/__init__.py ADDED applications/admin/controllers/appadmin.py Index: applications/admin/controllers/appadmin.py ================================================================== --- /dev/null +++ applications/admin/controllers/appadmin.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- + +# ########################################################## +# ## make sure administrator is on localhost +# ########################################################### + +import os +import socket +import datetime +import copy +import gluon.contenttype +import gluon.fileutils + +# ## critical --- make a copy of the environment + +global_env = copy.copy(globals()) +global_env['datetime'] = datetime + +http_host = request.env.http_host.split(':')[0] +remote_addr = request.env.remote_addr +try: + hosts = (http_host, socket.gethostname(), + socket.gethostbyname(http_host), + '::1','127.0.0.1','::ffff:127.0.0.1') +except: + hosts = (http_host, ) + +if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme\ + in ['https', 'HTTPS']: + session.secure() +elif (remote_addr not in hosts) and (remote_addr != "127.0.0.1"): + raise HTTP(200, T('appadmin is disabled because insecure channel')) + +if (request.application=='admin' and not session.authorized) or \ + (request.application!='admin' and not gluon.fileutils.check_credentials(request)): + redirect(URL('admin', 'default', 'index')) + +ignore_rw = True +response.view = 'appadmin.html' +response.menu = [[T('design'), False, URL('admin', 'default', 'design', + args=[request.application])], [T('db'), False, + URL('index')], [T('state'), False, + URL('state')], [T('cache'), False, + URL('ccache')]] + +# ########################################################## +# ## auxiliary functions 
# ###########################################################
#
# Shared helpers for the appadmin actions below.  They operate on
# global_env, the copy of the controller environment captured at the
# top of this file.


def get_databases(request):
    """Return a {name: connection} map of every DAL instance found in
    the controller environment (the *request* argument is unused)."""
    found = {}
    for name, candidate in global_env.items():
        try:
            is_db = isinstance(candidate, GQLDB)
        except:
            # GQLDB only exists on Google App Engine; elsewhere the
            # name is undefined, so fall back to checking for SQLDB.
            is_db = isinstance(candidate, SQLDB)
        if is_db:
            found[name] = candidate
    return found


databases = get_databases(None)


def eval_in_global_env(text):
    """Evaluate the expression *text* inside global_env and return its
    value (turns user-supplied names/queries into live objects)."""
    exec ('_ret=%s' % text, {}, global_env)
    return global_env['_ret']


def get_database(request):
    """Resolve request.args[0] to a known database connection, or
    flash an error and bounce back to the index page."""
    if not (request.args and request.args[0] in databases):
        session.flash = T('invalid request')
        redirect(URL('index'))
    return eval_in_global_env(request.args[0])


def get_table(request):
    """Resolve (db, tablename) from the first two request args, or
    flash an error and bounce back to the index page."""
    db = get_database(request)
    if len(request.args) > 1 and request.args[1] in db.tables:
        return (db, request.args[1])
    session.flash = T('invalid request')
    redirect(URL('index'))


def get_query(request):
    """Parse request.vars.query into a DAL query object; return None
    when the text does not evaluate."""
    try:
        return eval_in_global_env(request.vars.query)
    except Exception:
        return None


def query_by_table_type(tablename, db, request=request):
    """Build the 'select everything' query string for a table; keyed
    tables get a condition on their first primary-key field instead of
    the default id>0 test."""
    table = db[tablename]
    if hasattr(table, '_primarykey'):
        firstkey = table[table._primarykey[0]]
        cond = '!=""' if firstkey.type in ['string', 'text'] else '>0'
        return '%s.%s.%s%s' % (request.args[0], request.args[1],
                               firstkey.name, cond)
    return '%s.%s.id>0' % tuple(request.args[:2])


# ###########################################################
# ## list all databases and tables
# ###########################################################


def index():
    """List every known database and its tables."""
    return dict(databases=databases)


# ###########################################################
# ## insert a new record
# ###########################################################


def insert():
    """Display and process an insert form for the requested table."""
    db, table = get_table(request)
    form = SQLFORM(db[table], ignore_rw=ignore_rw)
    if form.accepts(request.vars, session):
        response.flash = T('new record inserted')
    return dict(form=form, table=db[table])
########################################################## +# ## list all records in table and insert new record +# ########################################################### + + +def download(): + import os + db = get_database(request) + return response.download(request,db) + +def csv(): + import gluon.contenttype + response.headers['Content-Type'] = \ + gluon.contenttype.contenttype('.csv') + db = get_database(request) + query = get_query(request) + if not query: + return None + response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\ + % tuple(request.vars.query.split('.')[:2]) + return str(db(query).select()) + + +def import_csv(table, file): + table.import_from_csv_file(file) + +def select(): + import re + db = get_database(request) + dbname = request.args[0] + regex = re.compile('(?P\w+)\.(?P\w+)=(?P\d+)') + if len(request.args)>1 and hasattr(db[request.args[1]],'_primarykey'): + regex = re.compile('(?P
\w+)\.(?P\w+)=(?P.+)') + if request.vars.query: + match = regex.match(request.vars.query) + if match: + request.vars.query = '%s.%s.%s==%s' % (request.args[0], + match.group('table'), match.group('field'), + match.group('value')) + else: + request.vars.query = session.last_query + query = get_query(request) + if request.vars.start: + start = int(request.vars.start) + else: + start = 0 + nrows = 0 + stop = start + 100 + table = None + rows = [] + orderby = request.vars.orderby + if orderby: + orderby = dbname + '.' + orderby + if orderby == session.last_orderby: + if orderby[0] == '~': + orderby = orderby[1:] + else: + orderby = '~' + orderby + session.last_orderby = orderby + session.last_query = request.vars.query + form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px', + _name='query', _value=request.vars.query or '', + requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'), + INPUT(_name='update_check', _type='checkbox', + value=False), INPUT(_style='width:400px', + _name='update_fields', _value=request.vars.update_fields + or '')), TR(T('Delete:'), INPUT(_name='delete_check', + _class='delete', _type='checkbox', value=False), ''), + TR('', '', INPUT(_type='submit', _value='submit'))), + _action=URL(r=request,args=request.args)) + if request.vars.csvfile != None: + try: + import_csv(db[request.vars.table], + request.vars.csvfile.file) + response.flash = T('data uploaded') + except Exception, e: + response.flash = DIV(T('unable to parse csv file'),PRE(str(e))) + if form.accepts(request.vars, formname=None): +# regex = re.compile(request.args[0] + '\.(?P
\w+)\.id\>0') + regex = re.compile(request.args[0] + '\.(?P
\w+)\..+') + + match = regex.match(form.vars.query.strip()) + if match: + table = match.group('table') + try: + nrows = db(query).count() + if form.vars.update_check and form.vars.update_fields: + db(query).update(**eval_in_global_env('dict(%s)' + % form.vars.update_fields)) + response.flash = T('%s rows updated', nrows) + elif form.vars.delete_check: + db(query).delete() + response.flash = T('%s rows deleted', nrows) + nrows = db(query).count() + if orderby: + rows = db(query).select(limitby=(start, stop), + orderby=eval_in_global_env(orderby)) + else: + rows = db(query).select(limitby=(start, stop)) + except Exception, e: + (rows, nrows) = ([], 0) + response.flash = DIV(T('Invalid Query'),PRE(str(e))) + return dict( + form=form, + table=table, + start=start, + stop=stop, + nrows=nrows, + rows=rows, + query=request.vars.query, + ) + + +# ########################################################## +# ## edit delete one record +# ########################################################### + + +def update(): + (db, table) = get_table(request) + keyed = hasattr(db[table],'_primarykey') + record = None + if keyed: + key = [f for f in request.vars if f in db[table]._primarykey] + if key: + record = db(db[table][key[0]] == request.vars[key[0]]).select().first() + else: + record = db(db[table].id == request.args(2)).select().first() + + if not record: + qry = query_by_table_type(table, db) + session.flash = T('record does not exist') + redirect(URL('select', args=request.args[:1], + vars=dict(query=qry))) + + if keyed: + for k in db[table]._primarykey: + db[table][k].writable=False + + form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'), + ignore_rw=ignore_rw and not keyed, + linkto=URL('select', + args=request.args[:1]), upload=URL(r=request, + f='download', args=request.args[:1])) + + if form.accepts(request.vars, session): + session.flash = T('done!') + qry = query_by_table_type(table, db) + redirect(URL('select', args=request.args[:1], + 
vars=dict(query=qry))) + return dict(form=form,table=db[table]) + + +# ########################################################## +# ## get global variables +# ########################################################### + + +def state(): + return dict() + +def ccache(): + form = FORM( + P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")), + P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")), + P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")), + ) + + if form.accepts(request.vars, session): + clear_ram = False + clear_disk = False + session.flash = "" + if request.vars.yes: + clear_ram = clear_disk = True + if request.vars.ram: + clear_ram = True + if request.vars.disk: + clear_disk = True + + if clear_ram: + cache.ram.clear() + session.flash += "Ram Cleared " + if clear_disk: + cache.disk.clear() + session.flash += "Disk Cleared" + + redirect(URL(r=request)) + + try: + from guppy import hpy; hp=hpy() + except ImportError: + hp = False + + import shelve, os, copy, time, math + from gluon import portalocker + + ram = { + 'bytes': 0, + 'objects': 0, + 'hits': 0, + 'misses': 0, + 'ratio': 0, + 'oldest': time.time() + } + disk = copy.copy(ram) + total = copy.copy(ram) + + for key, value in cache.ram.storage.items(): + if isinstance(value, dict): + ram['hits'] = value['hit_total'] - value['misses'] + ram['misses'] = value['misses'] + try: + ram['ratio'] = ram['hits'] * 100 / value['hit_total'] + except (KeyError, ZeroDivisionError): + ram['ratio'] = 0 + else: + if hp: + ram['bytes'] += hp.iso(value[1]).size + ram['objects'] += hp.iso(value[1]).count + + if value[0] < ram['oldest']: + ram['oldest'] = value[0] + + locker = open(os.path.join(request.folder, + 'cache/cache.lock'), 'a') + portalocker.lock(locker, portalocker.LOCK_EX) + disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve')) + try: + for key, value in disk_storage.items(): + if isinstance(value, dict): + disk['hits'] = 
value['hit_total'] - value['misses'] + disk['misses'] = value['misses'] + try: + disk['ratio'] = disk['hits'] * 100 / value['hit_total'] + except (KeyError, ZeroDivisionError): + disk['ratio'] = 0 + else: + if hp: + disk['bytes'] += hp.iso(value[1]).size + disk['objects'] += hp.iso(value[1]).count + if value[0] < disk['oldest']: + disk['oldest'] = value[0] + finally: + portalocker.unlock(locker) + locker.close() + disk_storage.close() + + total['bytes'] = ram['bytes'] + disk['bytes'] + total['objects'] = ram['objects'] + disk['objects'] + total['hits'] = ram['hits'] + disk['hits'] + total['misses'] = ram['misses'] + disk['misses'] + try: + total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses']) + except (KeyError, ZeroDivisionError): + total['ratio'] = 0 + + if disk['oldest'] < ram['oldest']: + total['oldest'] = disk['oldest'] + else: + total['oldest'] = ram['oldest'] + + def GetInHMS(seconds): + hours = math.floor(seconds / 3600) + seconds -= hours * 3600 + minutes = math.floor(seconds / 60) + seconds -= minutes * 60 + seconds = math.floor(seconds) + + return (hours, minutes, seconds) + + ram['oldest'] = GetInHMS(time.time() - ram['oldest']) + disk['oldest'] = GetInHMS(time.time() - disk['oldest']) + total['oldest'] = GetInHMS(time.time() - total['oldest']) + + return dict(form=form, total=total, + ram=ram, disk=disk) + ADDED applications/admin/controllers/debug.py Index: applications/admin/controllers/debug.py ================================================================== --- /dev/null +++ applications/admin/controllers/debug.py @@ -0,0 +1,33 @@ +import sys +import cStringIO +import gluon.contrib.shell +import code, thread +from gluon.debug import communicate + + +if DEMO_MODE or MULTI_USER_MODE: + session.flash = T('disabled in demo mode') + redirect(URL('default','site')) + +FE=10**9 + +def index(): + app = request.args(0) or 'admin' + reset() + # read buffer + data = communicate() + return dict(app=app,data=data) + +def callback(): + app = 
request.args[0] + command = request.vars.statement + session['debug_commands:'+app].append(command) + output = communicate(command) + k = len(session['debug_commands:'+app]) - 1 + return '[%i] %s%s\n' % (k + 1, command, output) + +def reset(): + app = request.args(0) or 'admin' + session['debug_commands:'+app] = [] + return 'done' + ADDED applications/admin/controllers/default.py Index: applications/admin/controllers/default.py ================================================================== --- /dev/null +++ applications/admin/controllers/default.py @@ -0,0 +1,1223 @@ +# coding: utf8 + +from gluon.admin import * +from gluon.fileutils import abspath, read_file, write_file +from glob import glob +import shutil +import platform + +if DEMO_MODE and request.function in ['change_password','pack','pack_plugin','upgrade_web2py','uninstall','cleanup','compile_app','remove_compiled_app','delete','delete_plugin','create_file','upload_file','update_languages','reload_routes']: + session.flash = T('disabled in demo mode') + redirect(URL('site')) + +if not is_manager() and request.function in ['change_password','upgrade_web2py']: + session.flash = T('disabled in multi user mode') + redirect(URL('site')) + +if FILTER_APPS and request.args(0) and not request.args(0) in FILTER_APPS: + session.flash = T('disabled in demo mode') + redirect(URL('site')) + +def safe_open(a,b): + if DEMO_MODE and 'w' in b: + class tmp: + def write(self,data): pass + return tmp() + return open(a,b) + +def safe_read(a, b='r'): + safe_file = safe_open(a, b) + try: + return safe_file.read() + finally: + safe_file.close() + +def safe_write(a, value, b='w'): + safe_file = safe_open(a, b) + try: + safe_file.write(value) + finally: + safe_file.close() + +def get_app(name=None): + app = name or request.args(0) + if app and (not MULTI_USER_MODE or db(db.app.name==app)(db.app.owner==auth.user.id).count()): + return app + session.flash = 'App does not exist or your are not authorized' + redirect(URL('site')) + 
+def index(): + """ Index handler """ + + send = request.vars.send + if DEMO_MODE: + session.authorized = True + session.last_time = t0 + if not send: + send = URL('site') + if session.authorized: + redirect(send) + elif request.vars.password: + if verify_password(request.vars.password): + session.authorized = True + login_record(True) + + if CHECK_VERSION: + session.check_version = True + else: + session.check_version = False + + session.last_time = t0 + if isinstance(send, list): # ## why does this happen? + send = str(send[0]) + + redirect(send) + else: + times_denied = login_record(False) + if times_denied >= allowed_number_of_attempts: + response.flash = \ + T('admin disabled because too many invalid login attempts') + elif times_denied == allowed_number_of_attempts - 1: + response.flash = \ + T('You have one more login attempt before you are locked out') + else: + response.flash = T('invalid password.') + return dict(send=send) + + +def check_version(): + """ Checks if web2py is up to date """ + + session.forget() + session._unlock(response) + + new_version, version_number = check_new_version(request.env.web2py_version, + WEB2PY_VERSION_URL) + + if new_version == -1: + return A(T('Unable to check for upgrades'), _href=WEB2PY_URL) + elif new_version != True: + return A(T('web2py is up to date'), _href=WEB2PY_URL) + elif platform.system().lower() in ('windows','win32','win64') and os.path.exists("web2py.exe"): + return SPAN('You should upgrade to version %s' % version_number) + else: + return sp_button(URL('upgrade_web2py'), T('upgrade now')) \ + + XML(' %s' % version_number) + + +def logout(): + """ Logout handler """ + session.authorized = None + if MULTI_USER_MODE: + redirect(URL('user/logout')) + redirect(URL('index')) + + +def change_password(): + + if session.pam_user: + session.flash = T('PAM authenticated user, cannot change password here') + redirect(URL('site')) + form=SQLFORM.factory(Field('current_admin_password','password'), + 
Field('new_admin_password','password',requires=IS_STRONG()), + Field('new_admin_password_again','password')) + if form.accepts(request.vars): + if not verify_password(request.vars.current_admin_password): + form.errors.current_admin_password = T('invalid password') + elif form.vars.new_admin_password != form.vars.new_admin_password_again: + form.errors.new_admin_password_again = T('no match') + else: + path = abspath('parameters_%s.py' % request.env.server_port) + safe_write(path, 'password="%s"' % CRYPT()(request.vars.new_admin_password)[0]) + session.flash = T('password changed') + redirect(URL('site')) + return dict(form=form) + +def site(): + """ Site handler """ + + myversion = request.env.web2py_version + + # Shortcut to make the elif statements more legible + file_or_appurl = 'file' in request.vars or 'appurl' in request.vars + + if DEMO_MODE: + pass + + elif request.vars.filename and not 'file' in request.vars: + # create a new application + appname = cleanpath(request.vars.filename).replace('.', '_') + if app_create(appname, request): + if MULTI_USER_MODE: + db.app.insert(name=appname,owner=auth.user.id) + session.flash = T('new application "%s" created', appname) + redirect(URL('design',args=appname)) + else: + session.flash = \ + T('unable to create application "%s" (it may exist already)', request.vars.filename) + redirect(URL(r=request)) + + elif file_or_appurl and not request.vars.filename: + # can't do anything without an app name + msg = 'you must specify a name for the uploaded application' + response.flash = T(msg) + + elif file_or_appurl and request.vars.filename: + # fetch an application via URL or file upload + f = None + if request.vars.appurl is not '': + try: + f = urllib.urlopen(request.vars.appurl) + except Exception, e: + session.flash = DIV(T('Unable to download app because:'),PRE(str(e))) + redirect(URL(r=request)) + fname = request.vars.appurl + elif request.vars.file is not '': + f = request.vars.file.file + fname = 
request.vars.file.filename + + if f: + appname = cleanpath(request.vars.filename).replace('.', '_') + installed = app_install(appname, f, request, fname, + overwrite=request.vars.overwrite_check) + if f and installed: + msg = 'application %(appname)s installed with md5sum: %(digest)s' + session.flash = T(msg, dict(appname=appname, + digest=md5_hash(installed))) + elif f and request.vars.overwrite_check: + msg = 'unable to install application "%(appname)s"' + session.flash = T(msg, dict(appname=request.vars.filename)) + + else: + msg = 'unable to install application "%(appname)s"' + session.flash = T(msg, dict(appname=request.vars.filename)) + + redirect(URL(r=request)) + + regex = re.compile('^\w+$') + + if is_manager(): + apps = [f for f in os.listdir(apath(r=request)) if regex.match(f)] + else: + apps = [f.name for f in db(db.app.owner==auth.user_id).select()] + + if FILTER_APPS: + apps = [f for f in apps if f in FILTER_APPS] + + apps = sorted(apps,lambda a,b:cmp(a.upper(),b.upper())) + + return dict(app=None, apps=apps, myversion=myversion) + + +def pack(): + app = get_app() + + if len(request.args) == 1: + fname = 'web2py.app.%s.w2p' % app + filename = app_pack(app, request) + else: + fname = 'web2py.app.%s.compiled.w2p' % app + filename = app_pack_compiled(app, request) + + if filename: + response.headers['Content-Type'] = 'application/w2p' + disposition = 'attachment; filename=%s' % fname + response.headers['Content-Disposition'] = disposition + return safe_read(filename, 'rb') + else: + session.flash = T('internal error') + redirect(URL('site')) + +def pack_plugin(): + app = get_app() + if len(request.args) == 2: + fname = 'web2py.plugin.%s.w2p' % request.args[1] + filename = plugin_pack(app, request.args[1], request) + if filename: + response.headers['Content-Type'] = 'application/w2p' + disposition = 'attachment; filename=%s' % fname + response.headers['Content-Disposition'] = disposition + return safe_read(filename, 'rb') + else: + session.flash = 
T('internal error') + redirect(URL('plugin',args=request.args)) + +def upgrade_web2py(): + if 'upgrade' in request.vars: + (success, error) = upgrade(request) + if success: + session.flash = T('web2py upgraded; please restart it') + else: + session.flash = T('unable to upgrade because "%s"', error) + redirect(URL('site')) + elif 'noupgrade' in request.vars: + redirect(URL('site')) + return dict() + +def uninstall(): + app = get_app() + if 'delete' in request.vars: + if MULTI_USER_MODE: + if is_manager() and db(db.app.name==app).delete(): + pass + elif db(db.app.name==app)(db.app.owner==auth.user.id).delete(): + pass + else: + session.flash = T('no permission to uninstall "%s"', app) + redirect(URL('site')) + if app_uninstall(app, request): + session.flash = T('application "%s" uninstalled', app) + else: + session.flash = T('unable to uninstall "%s"', app) + redirect(URL('site')) + elif 'nodelete' in request.vars: + redirect(URL('site')) + return dict(app=app) + + +def cleanup(): + app = get_app() + clean = app_cleanup(app, request) + if not clean: + session.flash = T("some files could not be removed") + else: + session.flash = T('cache, errors and sessions cleaned') + + redirect(URL('site')) + + +def compile_app(): + app = get_app() + c = app_compile(app, request) + if not c: + session.flash = T('application compiled') + else: + session.flash = DIV(T('Cannot compile: there are errors in your app:'), + CODE(c)) + redirect(URL('site')) + + +def remove_compiled_app(): + """ Remove the compiled application """ + app = get_app() + remove_compiled_application(apath(app, r=request)) + session.flash = T('compiled application removed') + redirect(URL('site')) + +def delete(): + """ Object delete handler """ + app = get_app() + filename = '/'.join(request.args) + sender = request.vars.sender + + if isinstance(sender, list): # ## fix a problem with Vista + sender = sender[0] + + if 'nodelete' in request.vars: + redirect(URL(sender)) + elif 'delete' in request.vars: + try: + 
os.unlink(apath(filename, r=request)) + session.flash = T('file "%(filename)s" deleted', + dict(filename=filename)) + except Exception: + session.flash = T('unable to delete file "%(filename)s"', + dict(filename=filename)) + redirect(URL(sender)) + return dict(filename=filename, sender=sender) + +def peek(): + """ Visualize object code """ + app = get_app() + filename = '/'.join(request.args) + try: + data = safe_read(apath(filename, r=request)).replace('\r','') + except IOError: + session.flash = T('file does not exist') + redirect(URL('site')) + + extension = filename[filename.rfind('.') + 1:].lower() + + return dict(app=request.args[0], + filename=filename, + data=data, + extension=extension) + + +def test(): + """ Execute controller tests """ + app = get_app() + if len(request.args) > 1: + file = request.args[1] + else: + file = '.*\.py' + + controllers = listdir(apath('%s/controllers/' % app, r=request), file + '$') + + return dict(app=app, controllers=controllers) + +def keepalive(): + return '' + +def search(): + keywords=request.vars.keywords or '' + app = get_app() + def match(filename,keywords): + filename=os.path.join(apath(app, r=request),filename) + if keywords in read_file(filename,'rb'): + return True + return False + path = apath(request.args[0], r=request) + files1 = glob(os.path.join(path,'*/*.py')) + files2 = glob(os.path.join(path,'*/*.html')) + files3 = glob(os.path.join(path,'*/*/*.html')) + files=[x[len(path)+1:].replace('\\','/') for x in files1+files2+files3 if match(x,keywords)] + return response.json({'files':files}) + +def edit(): + """ File edit handler """ + # Load json only if it is ajax edited... 
+ app = get_app() + filename = '/'.join(request.args) + # Try to discover the file type + if filename[-3:] == '.py': + filetype = 'python' + elif filename[-5:] == '.html': + filetype = 'html' + elif filename[-5:] == '.load': + filetype = 'html' + elif filename[-4:] == '.css': + filetype = 'css' + elif filename[-3:] == '.js': + filetype = 'js' + else: + filetype = 'html' + + # ## check if file is not there + + path = apath(filename, r=request) + + if request.vars.revert and os.path.exists(path + '.bak'): + try: + data = safe_read(path + '.bak') + data1 = safe_read(path) + except IOError: + session.flash = T('Invalid action') + if 'from_ajax' in request.vars: + return response.json({'error': str(T('Invalid action'))}) + else: + redirect(URL('site')) + + safe_write(path, data) + file_hash = md5_hash(data) + saved_on = time.ctime(os.stat(path)[stat.ST_MTIME]) + safe_write(path + '.bak', data1) + response.flash = T('file "%s" of %s restored', (filename, saved_on)) + else: + try: + data = safe_read(path) + except IOError: + session.flash = T('Invalid action') + if 'from_ajax' in request.vars: + return response.json({'error': str(T('Invalid action'))}) + else: + redirect(URL('site')) + + file_hash = md5_hash(data) + saved_on = time.ctime(os.stat(path)[stat.ST_MTIME]) + + if request.vars.file_hash and request.vars.file_hash != file_hash: + session.flash = T('file changed on disk') + data = request.vars.data.replace('\r\n', '\n').strip() + '\n' + safe_write(path + '.1', data) + if 'from_ajax' in request.vars: + return response.json({'error': str(T('file changed on disk')), + 'redirect': URL('resolve', + args=request.args)}) + else: + redirect(URL('resolve', args=request.args)) + elif request.vars.data: + safe_write(path + '.bak', data) + data = request.vars.data.replace('\r\n', '\n').strip() + '\n' + safe_write(path, data) + file_hash = md5_hash(data) + saved_on = time.ctime(os.stat(path)[stat.ST_MTIME]) + response.flash = T('file saved on %s', saved_on) + + data_or_revert 
= (request.vars.data or request.vars.revert) + + # Check compile errors + highlight = None + if filetype == 'python' and request.vars.data: + import _ast + try: + code = request.vars.data.rstrip().replace('\r\n','\n')+'\n' + compile(code, path, "exec", _ast.PyCF_ONLY_AST) + except Exception, e: + start = sum([len(line)+1 for l, line + in enumerate(request.vars.data.split("\n")) + if l < e.lineno-1]) + if e.text and e.offset: + offset = e.offset - (len(e.text) - len(e.text.splitlines()[-1])) + else: + offset = 0 + highlight = {'start': start, 'end': start + offset + 1} + try: + ex_name = e.__class__.__name__ + except: + ex_name = 'unknown exception!' + response.flash = DIV(T('failed to compile file because:'), BR(), + B(ex_name), T(' at line %s') % e.lineno, + offset and T(' at char %s') % offset or '', + PRE(str(e))) + + if data_or_revert and request.args[1] == 'modules': + # Lets try to reload the modules + try: + mopath = '.'.join(request.args[2:])[:-3] + exec 'import applications.%s.modules.%s' % (request.args[0], mopath) + reload(sys.modules['applications.%s.modules.%s' + % (request.args[0], mopath)]) + except Exception, e: + response.flash = DIV(T('failed to reload module because:'),PRE(str(e))) + + edit_controller = None + editviewlinks = None + view_link = None + if filetype == 'html' and len(request.args) >= 3: + cfilename = os.path.join(request.args[0], 'controllers', + request.args[2] + '.py') + if os.path.exists(apath(cfilename, r=request)): + edit_controller = URL('edit', args=[cfilename]) + view = request.args[3].replace('.html','') + view_link = URL(request.args[0],request.args[2],view) + elif filetype == 'python' and request.args[1] == 'controllers': + ## it's a controller file. + ## Create links to all of the associated view files. 
+ app = get_app() + viewname = os.path.splitext(request.args[2])[0] + viewpath = os.path.join(app,'views',viewname) + aviewpath = apath(viewpath, r=request) + viewlist = [] + if os.path.exists(aviewpath): + if os.path.isdir(aviewpath): + viewlist = glob(os.path.join(aviewpath,'*.html')) + elif os.path.exists(aviewpath+'.html'): + viewlist.append(aviewpath+'.html') + if len(viewlist): + editviewlinks = [] + for v in viewlist: + vf = os.path.split(v)[-1] + vargs = "/".join([viewpath.replace(os.sep,"/"),vf]) + editviewlinks.append(A(T(vf.split(".")[0]),\ + _href=URL('edit',args=[vargs]))) + + if len(request.args) > 2 and request.args[1] == 'controllers': + controller = (request.args[2])[:-3] + functions = regex_expose.findall(data) + else: + (controller, functions) = (None, None) + + if 'from_ajax' in request.vars: + return response.json({'file_hash': file_hash, 'saved_on': saved_on, 'functions':functions, 'controller': controller, 'application': request.args[0], 'highlight': highlight }) + else: + + editarea_preferences = {} + editarea_preferences['FONT_SIZE'] = '10' + editarea_preferences['FULL_SCREEN'] = 'false' + editarea_preferences['ALLOW_TOGGLE'] = 'true' + editarea_preferences['REPLACE_TAB_BY_SPACES'] = '4' + editarea_preferences['DISPLAY'] = 'onload' + for key in editarea_preferences: + if globals().has_key(key): + editarea_preferences[key]=globals()[key] + return dict(app=request.args[0], + filename=filename, + filetype=filetype, + data=data, + edit_controller=edit_controller, + file_hash=file_hash, + saved_on=saved_on, + controller=controller, + functions=functions, + view_link=view_link, + editarea_preferences=editarea_preferences, + editviewlinks=editviewlinks) + +def resolve(): + """ + """ + + filename = '/'.join(request.args) + # ## check if file is not there + path = apath(filename, r=request) + a = safe_read(path).split('\n') + try: + b = safe_read(path + '.1').split('\n') + except IOError: + session.flash = 'Other file, no longer there' + 
redirect(URL('edit', args=request.args)) + + d = difflib.ndiff(a, b) + + def leading(line): + """ """ + + # TODO: we really need to comment this + z = '' + for (k, c) in enumerate(line): + if c == ' ': + z += ' ' + elif c == ' \t': + z += ' ' + elif k == 0 and c == '?': + pass + else: + break + + return XML(z) + + def getclass(item): + """ Determine item class """ + + if item[0] == ' ': + return 'normal' + if item[0] == '+': + return 'plus' + if item[0] == '-': + return 'minus' + + if request.vars: + c = '\n'.join([item[2:].rstrip() for (i, item) in enumerate(d) if item[0] \ + == ' ' or 'line%i' % i in request.vars]) + safe_write(path, c) + session.flash = 'files merged' + redirect(URL('edit', args=request.args)) + else: + # Making the short circuit compatible with <= python2.4 + gen_data = lambda index,item: not item[:1] in ['+','-'] and "" \ + or INPUT(_type='checkbox', + _name='line%i' % index, + value=item[0] == '+') + + diff = TABLE(*[TR(TD(gen_data(i,item)), + TD(item[0]), + TD(leading(item[2:]), + TT(item[2:].rstrip())), _class=getclass(item)) + for (i, item) in enumerate(d) if item[0] != '?']) + + return dict(diff=diff, filename=filename) + + +def edit_language(): + """ Edit language file """ + app = get_app() + filename = '/'.join(request.args) + from gluon.languages import read_dict, write_dict + strings = read_dict(apath(filename, r=request)) + keys = sorted(strings.keys(),lambda x,y: cmp(x.lower(), y.lower())) + rows = [] + rows.append(H2(T('Original/Translation'))) + + for key in keys: + name = md5_hash(key) + if key==strings[key]: + _class='untranslated' + else: + _class='translated' + if len(key) <= 40: + elem = INPUT(_type='text', _name=name,value=strings[key], + _size=70,_class=_class) + else: + elem = TEXTAREA(_name=name, value=strings[key], _cols=70, + _rows=5, _class=_class) + + # Making the short circuit compatible with <= python2.4 + k = (strings[key] != key) and key or B(key) + + rows.append(P(k, BR(), elem, TAG.BUTTON(T('delete'), + 
_onclick='return delkey("%s")' % name), _id=name)) + + rows.append(INPUT(_type='submit', _value=T('update'))) + form = FORM(*rows) + if form.accepts(request.vars, keepvalues=True): + strs = dict() + for key in keys: + name = md5_hash(key) + if form.vars[name]==chr(127): continue + strs[key] = form.vars[name] + write_dict(apath(filename, r=request), strs) + session.flash = T('file saved on %(time)s', dict(time=time.ctime())) + redirect(URL(r=request,args=request.args)) + return dict(app=request.args[0], filename=filename, form=form) + + +def about(): + """ Read about info """ + app = get_app() + # ## check if file is not there + about = safe_read(apath('%s/ABOUT' % app, r=request)) + license = safe_read(apath('%s/LICENSE' % app, r=request)) + return dict(app=app, about=MARKMIN(about), license=MARKMIN(license)) + + +def design(): + """ Application design handler """ + app = get_app() + + if not response.flash and app == request.application: + msg = T('ATTENTION: you cannot edit the running application!') + response.flash = msg + + if request.vars.pluginfile!=None and not isinstance(request.vars.pluginfile,str): + filename=os.path.basename(request.vars.pluginfile.filename) + if plugin_install(app, request.vars.pluginfile.file, + request, filename): + session.flash = T('new plugin installed') + redirect(URL('design',args=app)) + else: + session.flash = \ + T('unable to create application "%s"', request.vars.filename) + redirect(URL(r=request)) + elif isinstance(request.vars.pluginfile,str): + session.flash = T('plugin not specified') + redirect(URL(r=request)) + + + # If we have only pyc files it means that + # we cannot design + if os.path.exists(apath('%s/compiled' % app, r=request)): + session.flash = \ + T('application is compiled and cannot be designed') + redirect(URL('site')) + + # Get all models + models = listdir(apath('%s/models/' % app, r=request), '.*\.py$') + models=[x.replace('\\','/') for x in models] + defines = {} + for m in models: + data = 
safe_read(apath('%s/models/%s' % (app, m), r=request)) + defines[m] = regex_tables.findall(data) + defines[m].sort() + + # Get all controllers + controllers = sorted(listdir(apath('%s/controllers/' % app, r=request), '.*\.py$')) + controllers = [x.replace('\\','/') for x in controllers] + functions = {} + for c in controllers: + data = safe_read(apath('%s/controllers/%s' % (app, c), r=request)) + items = regex_expose.findall(data) + functions[c] = items + + # Get all views + views = sorted(listdir(apath('%s/views/' % app, r=request), '[\w/\-]+(\.\w+)+$')) + views = [x.replace('\\','/') for x in views if not x.endswith('.bak')] + extend = {} + include = {} + for c in views: + data = safe_read(apath('%s/views/%s' % (app, c), r=request)) + items = regex_extend.findall(data) + + if items: + extend[c] = items[0][1] + + items = regex_include.findall(data) + include[c] = [i[1] for i in items] + + # Get all modules + modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$') + modules = modules=[x.replace('\\','/') for x in modules] + modules.sort() + + # Get all static files + statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*') + statics = [x.replace('\\','/') for x in statics] + statics.sort() + + # Get all languages + languages = listdir(apath('%s/languages/' % app, r=request), '[\w-]*\.py') + + #Get crontab + cronfolder = apath('%s/cron' % app, r=request) + if not os.path.exists(cronfolder): os.mkdir(cronfolder) + crontab = apath('%s/cron/crontab' % app, r=request) + if not os.path.exists(crontab): + safe_write(crontab, '#crontab') + + plugins=[] + def filter_plugins(items,plugins): + plugins+=[item[7:].split('/')[0].split('.')[0] for item in items if item.startswith('plugin_')] + plugins[:]=list(set(plugins)) + plugins.sort() + return [item for item in items if not item.startswith('plugin_')] + + return dict(app=app, + models=filter_plugins(models,plugins), + defines=defines, + controllers=filter_plugins(controllers,plugins), + 
functions=functions, + views=filter_plugins(views,plugins), + modules=filter_plugins(modules,plugins), + extend=extend, + include=include, + statics=filter_plugins(statics,plugins), + languages=languages, + crontab=crontab, + plugins=plugins) + +def delete_plugin(): + """ Object delete handler """ + app=request.args(0) + plugin = request.args(1) + plugin_name='plugin_'+plugin + if 'nodelete' in request.vars: + redirect(URL('design',args=app)) + elif 'delete' in request.vars: + try: + for folder in ['models','views','controllers','static','modules']: + path=os.path.join(apath(app,r=request),folder) + for item in os.listdir(path): + if item.startswith(plugin_name): + filename=os.path.join(path,item) + if os.path.isdir(filename): + shutil.rmtree(filename) + else: + os.unlink(filename) + session.flash = T('plugin "%(plugin)s" deleted', + dict(plugin=plugin)) + except Exception: + session.flash = T('unable to delete file plugin "%(plugin)s"', + dict(plugin=plugin)) + redirect(URL('design',args=request.args(0))) + return dict(plugin=plugin) + +def plugin(): + """ Application design handler """ + app = get_app() + plugin = request.args(1) + + if not response.flash and app == request.application: + msg = T('ATTENTION: you cannot edit the running application!') + response.flash = msg + + # If we have only pyc files it means that + # we cannot design + if os.path.exists(apath('%s/compiled' % app, r=request)): + session.flash = \ + T('application is compiled and cannot be designed') + redirect(URL('site')) + + # Get all models + models = listdir(apath('%s/models/' % app, r=request), '.*\.py$') + models=[x.replace('\\','/') for x in models] + defines = {} + for m in models: + data = safe_read(apath('%s/models/%s' % (app, m), r=request)) + defines[m] = regex_tables.findall(data) + defines[m].sort() + + # Get all controllers + controllers = sorted(listdir(apath('%s/controllers/' % app, r=request), '.*\.py$')) + controllers = [x.replace('\\','/') for x in controllers] + functions 
= {} + for c in controllers: + data = safe_read(apath('%s/controllers/%s' % (app, c), r=request)) + items = regex_expose.findall(data) + functions[c] = items + + # Get all views + views = sorted(listdir(apath('%s/views/' % app, r=request), '[\w/\-]+\.\w+$')) + views = [x.replace('\\','/') for x in views] + extend = {} + include = {} + for c in views: + data = safe_read(apath('%s/views/%s' % (app, c), r=request)) + items = regex_extend.findall(data) + if items: + extend[c] = items[0][1] + + items = regex_include.findall(data) + include[c] = [i[1] for i in items] + + # Get all modules + modules = listdir(apath('%s/modules/' % app, r=request), '.*\.py$') + modules = modules=[x.replace('\\','/') for x in modules] + modules.sort() + + # Get all static files + statics = listdir(apath('%s/static/' % app, r=request), '[^\.#].*') + statics = [x.replace('\\','/') for x in statics] + statics.sort() + + # Get all languages + languages = listdir(apath('%s/languages/' % app, r=request), '[\w-]*\.py') + + #Get crontab + crontab = apath('%s/cron/crontab' % app, r=request) + if not os.path.exists(crontab): + safe_write(crontab, '#crontab') + + def filter_plugins(items): + regex=re.compile('^plugin_'+plugin+'(/.*|\..*)?$') + return [item for item in items if regex.match(item)] + + return dict(app=app, + models=filter_plugins(models), + defines=defines, + controllers=filter_plugins(controllers), + functions=functions, + views=filter_plugins(views), + modules=filter_plugins(modules), + extend=extend, + include=include, + statics=filter_plugins(statics), + languages=languages, + crontab=crontab) + + +def create_file(): + """ Create files handler """ + try: + app = get_app(name=request.vars.location.split('/')[0]) + path = apath(request.vars.location, r=request) + filename = re.sub('[^\w./-]+', '_', request.vars.filename) + + if path[-11:] == '/languages/': + # Handle language files + if len(filename) == 0: + raise SyntaxError + if not filename[-3:] == '.py': + filename += '.py' + app = 
path.split('/')[-3] + path=os.path.join(apath(app, r=request),'languages',filename) + if not os.path.exists(path): + safe_write(path, '') + findT(apath(app, r=request), filename[:-3]) + session.flash = T('language file "%(filename)s" created/updated', + dict(filename=filename)) + redirect(request.vars.sender) + + elif path[-8:] == '/models/': + # Handle python models + if not filename[-3:] == '.py': + filename += '.py' + + if len(filename) == 3: + raise SyntaxError + + text = '# coding: utf8\n' + + elif path[-13:] == '/controllers/': + # Handle python controllers + if not filename[-3:] == '.py': + filename += '.py' + + if len(filename) == 3: + raise SyntaxError + + text = '# coding: utf8\n# %s\ndef index(): return dict(message="hello from %s")' + text = text % (T('try something like'), filename) + + elif path[-7:] == '/views/': + if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin): + filename = 'plugin_%s/%s' % (request.vars.plugin, filename) + # Handle template (html) views + if filename.find('.')<0: + filename += '.html' + extension = filename.split('.')[-1].lower() + + if len(filename) == 5: + raise SyntaxError + + msg = T('This is the %(filename)s template', + dict(filename=filename)) + if extension == 'html': + text = dedent(""" + {{extend 'layout.html'}} +

%s

+ {{=BEAUTIFY(response._vars)}}""" % msg) + else: + generic = os.path.join(path,'generic.'+extension) + if os.path.exists(generic): + text = read_file(generic) + else: + text = '' + + elif path[-9:] == '/modules/': + if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin): + filename = 'plugin_%s/%s' % (request.vars.plugin, filename) + # Handle python module files + if not filename[-3:] == '.py': + filename += '.py' + + if len(filename) == 3: + raise SyntaxError + + text = dedent(""" + #!/usr/bin/env python + # coding: utf8 + from gluon import *\n""") + + elif path[-8:] == '/static/': + if request.vars.plugin and not filename.startswith('plugin_%s/' % request.vars.plugin): + filename = 'plugin_%s/%s' % (request.vars.plugin, filename) + text = '' + else: + redirect(request.vars.sender) + + full_filename = os.path.join(path, filename) + dirpath = os.path.dirname(full_filename) + + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + if os.path.exists(full_filename): + raise SyntaxError + + safe_write(full_filename, text) + session.flash = T('file "%(filename)s" created', + dict(filename=full_filename[len(path):])) + redirect(URL('edit', + args=[os.path.join(request.vars.location, filename)])) + except Exception, e: + if not isinstance(e,HTTP): + session.flash = T('cannot create file') + + redirect(request.vars.sender) + + +def upload_file(): + """ File uploading handler """ + + try: + filename = None + app = get_app(name=request.vars.location.split('/')[0]) + path = apath(request.vars.location, r=request) + + if request.vars.filename: + filename = re.sub('[^\w\./]+', '_', request.vars.filename) + else: + filename = os.path.split(request.vars.file.filename)[-1] + + if path[-8:] == '/models/' and not filename[-3:] == '.py': + filename += '.py' + + if path[-9:] == '/modules/' and not filename[-3:] == '.py': + filename += '.py' + + if path[-13:] == '/controllers/' and not filename[-3:] == '.py': + filename += '.py' + + if path[-7:] == 
'/views/' and not filename[-5:] == '.html': + filename += '.html' + + if path[-11:] == '/languages/' and not filename[-3:] == '.py': + filename += '.py' + + filename = os.path.join(path, filename) + dirpath = os.path.dirname(filename) + + if not os.path.exists(dirpath): + os.makedirs(dirpath) + + safe_write(filename, request.vars.file.file.read(), 'wb') + session.flash = T('file "%(filename)s" uploaded', + dict(filename=filename[len(path):])) + except Exception: + if filename: + d = dict(filename = filename[len(path):]) + else: + d = dict(filename = 'unkown') + session.flash = T('cannot upload file "%(filename)s"', d) + + redirect(request.vars.sender) + + +def errors(): + """ Error handler """ + import operator + import os + import pickle + import hashlib + + app = get_app() + + method = request.args(1) or 'new' + + + if method == 'new': + errors_path = apath('%s/errors' % app, r=request) + + delete_hashes = [] + for item in request.vars: + if item[:7] == 'delete_': + delete_hashes.append(item[7:]) + + hash2error = dict() + + for fn in listdir(errors_path, '^\w.*'): + fullpath = os.path.join(errors_path, fn) + if not os.path.isfile(fullpath): continue + try: + fullpath_file = open(fullpath, 'r') + try: + error = pickle.load(fullpath_file) + finally: + fullpath_file.close() + except IOError: + continue + + hash = hashlib.md5(error['traceback']).hexdigest() + + if hash in delete_hashes: + os.unlink(fullpath) + else: + try: + hash2error[hash]['count'] += 1 + except KeyError: + error_lines = error['traceback'].split("\n") + last_line = error_lines[-2] + error_causer = os.path.split(error['layer'])[1] + hash2error[hash] = dict(count=1, pickel=error, + causer=error_causer, + last_line=last_line, + hash=hash,ticket=fn) + + decorated = [(x['count'], x) for x in hash2error.values()] + decorated.sort(key=operator.itemgetter(0), reverse=True) + + return dict(errors = [x[1] for x in decorated], app=app, method=method) + else: + for item in request.vars: + if item[:7] == 
'delete_': + os.unlink(apath('%s/errors/%s' % (app, item[7:]), r=request)) + func = lambda p: os.stat(apath('%s/errors/%s' % \ + (app, p), r=request)).st_mtime + tickets = sorted(listdir(apath('%s/errors/' % app, r=request), '^\w.*'), + key=func, + reverse=True) + + return dict(app=app, tickets=tickets, method=method) + + +def make_link(path): + """ Create a link from a path """ + tryFile = path.replace('\\', '/') + + if os.path.isabs(tryFile) and os.path.isfile(tryFile): + (folder, filename) = os.path.split(tryFile) + (base, ext) = os.path.splitext(filename) + app = get_app() + + editable = {'controllers': '.py', 'models': '.py', 'views': '.html'} + for key in editable.keys(): + check_extension = folder.endswith("%s/%s" % (app,key)) + if ext.lower() == editable[key] and check_extension: + return A('"' + tryFile + '"', + _href=URL(r=request, + f='edit/%s/%s/%s' % (app, key, filename))).xml() + return '' + + +def make_links(traceback): + """ Make links using the given traceback """ + + lwords = traceback.split('"') + + # Making the short circuit compatible with <= python2.4 + result = (len(lwords) != 0) and lwords[0] or '' + + i = 1 + + while i < len(lwords): + link = make_link(lwords[i]) + + if link == '': + result += '"' + lwords[i] + else: + result += link + + if i + 1 < len(lwords): + result += lwords[i + 1] + i = i + 1 + + i = i + 1 + + return result + + +class TRACEBACK(object): + """ Generate the traceback """ + + def __init__(self, text): + """ TRACEBACK constructor """ + + self.s = make_links(CODE(text).xml()) + + def xml(self): + """ Returns the xml """ + + return self.s + + +def ticket(): + """ Ticket handler """ + + if len(request.args) != 2: + session.flash = T('invalid ticket') + redirect(URL('site')) + + app = get_app() + myversion = request.env.web2py_version + ticket = request.args[1] + e = RestrictedError() + e.load(request, app, ticket) + + return dict(app=app, + ticket=ticket, + output=e.output, + traceback=(e.traceback and 
TRACEBACK(e.traceback)), + snapshot=e.snapshot, + code=e.code, + layer=e.layer, + myversion=myversion) + +def error(): + """ Generate a ticket (for testing) """ + raise RuntimeError('admin ticket generator at your service') + +def update_languages(): + """ Update available languages """ + + app = get_app() + update_all_languages(apath(app, r=request)) + session.flash = T('Language files (static strings) updated') + redirect(URL('design',args=app,anchor='languages')) + +def twitter(): + session.forget() + session._unlock(response) + import gluon.tools + import gluon.contrib.simplejson as sj + try: + if TWITTER_HASH: + page = gluon.tools.fetch('http://twitter.com/%s?format=json'%TWITTER_HASH) + return sj.loads(page)['#timeline'] + else: + return 'disabled' + except Exception, e: + return DIV(T('Unable to download because:'),BR(),str(e)) + +def user(): + if MULTI_USER_MODE: + if not db(db.auth_user).count(): + auth.settings.registration_requires_approval = False + return dict(form=auth()) + else: + return dict(form=T("Disabled")) + +def reload_routes(): + """ Reload routes.py """ + import gluon.rewrite + gluon.rewrite.load() + redirect(URL('site')) ADDED applications/admin/controllers/gae.py Index: applications/admin/controllers/gae.py ================================================================== --- /dev/null +++ applications/admin/controllers/gae.py @@ -0,0 +1,87 @@ +### this works on linux only + +import re +try: + import fcntl + import subprocess + import signal + import os + import shutil + from gluon.fileutils import read_file, write_file +except: + session.flash='sorry, only on Unix systems' + redirect(URL(request.application,'default','site')) + +forever=10**8 + +def kill(): + p = cache.ram('gae_upload',lambda:None,forever) + if not p or p.poll()!=None: + return 'oops' + os.kill(p.pid, signal.SIGKILL) + cache.ram('gae_upload',lambda:None,-1) + +class EXISTS(object): + def __init__(self, error_message='file not found'): + self.error_message = error_message 
+ def __call__(self, value): + if os.path.exists(value): + return (value,None) + return (value,self.error_message) + +def deploy(): + regex = re.compile('^\w+$') + apps = sorted(file for file in os.listdir(apath(r=request)) if regex.match(file)) + form = SQLFORM.factory( + Field('appcfg',default=GAE_APPCFG,label='Path to appcfg.py', + requires=EXISTS(error_message=T('file not found'))), + Field('google_application_id',requires=IS_ALPHANUMERIC()), + Field('applications','list:string', + requires=IS_IN_SET(apps,multiple=True), + label=T('web2py apps to deploy')), + Field('email',requires=IS_EMAIL(),label=T('GAE Email')), + Field('password','password',requires=IS_NOT_EMPTY(),label=T('GAE Password'))) + cmd = output = errors= "" + if form.accepts(request,session): + try: + kill() + except: + pass + ignore_apps = [item for item in apps \ + if not item in form.vars.applications] + regex = re.compile('\(applications/\(.*') + yaml = apath('../app.yaml', r=request) + if not os.path.exists(yaml): + example = apath('../app.example.yaml', r=request) + shutil.copyfile(example,yaml) + data = read_file(yaml) + data = re.sub('application:.*','application: %s' % form.vars.google_application_id,data) + data = regex.sub('(applications/(%s)/.*)|' % '|'.join(ignore_apps),data) + write_file(yaml, data) + + path = request.env.applications_parent + cmd = '%s --email=%s --passin update %s' % \ + (form.vars.appcfg, form.vars.email, path) + p = cache.ram('gae_upload', + lambda s=subprocess,c=cmd:s.Popen(c, shell=True, + stdin=s.PIPE, + stdout=s.PIPE, + stderr=s.PIPE, close_fds=True),-1) + p.stdin.write(form.vars.password+'\n') + fcntl.fcntl(p.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) + fcntl.fcntl(p.stderr.fileno(), fcntl.F_SETFL, os.O_NONBLOCK) + return dict(form=form,command=cmd) + +def callback(): + p = cache.ram('gae_upload',lambda:None,forever) + if not p or p.poll()!=None: + return '' + try: + output = p.stdout.read() + except: + output='' + try: + errors = p.stderr.read() + 
except: + errors='' + return (output+errors).replace('\n','
') ADDED applications/admin/controllers/mercurial.py Index: applications/admin/controllers/mercurial.py ================================================================== --- /dev/null +++ applications/admin/controllers/mercurial.py @@ -0,0 +1,82 @@ +from gluon.fileutils import read_file, write_file + +if DEMO_MODE or MULTI_USER_MODE: + session.flash = T('disabled in demo mode') + redirect(URL('default','site')) +if not have_mercurial: + session.flash=T("Sorry, could not find mercurial installed") + redirect(URL('default','design',args=request.args(0))) + +_hgignore_content = """\ +syntax: glob +*~ +*.pyc +*.pyo +*.bak +*.bak2 +cache/* +private/* +uploads/* +databases/* +sessions/* +errors/* +""" + +def hg_repo(path): + import os + uio = ui.ui() + uio.quiet = True + if not os.environ.get('HGUSER') and not uio.config("ui", "username"): + os.environ['HGUSER'] = 'web2py@localhost' + try: + repo = hg.repository(ui=uio, path=path) + except: + repo = hg.repository(ui=uio, path=path, create=True) + hgignore = os.path.join(path, '.hgignore') + if not os.path.exists(hgignore): + write_file(hgignore, _hgignore_content) + return repo + +def commit(): + app = request.args(0) + path = apath(app, r=request) + repo = hg_repo(path) + form = FORM('Comment:',INPUT(_name='comment',requires=IS_NOT_EMPTY()), + INPUT(_type='submit',_value='Commit')) + if form.accepts(request.vars,session): + oldid = repo[repo.lookup('.')] + cmdutil.addremove(repo) + repo.commit(text=form.vars.comment) + if repo[repo.lookup('.')] == oldid: + response.flash = 'no changes' + try: + files = TABLE(*[TR(file) for file in repo[repo.lookup('.')].files()]) + changes = TABLE(TR(TH('revision'),TH('description'))) + for change in repo.changelog: + ctx=repo.changectx(change) + revision, description = ctx.rev(), ctx.description() + changes.append(TR(A(revision,_href=URL('revision', + args=(app,revision))), + description)) + except: + files = [] + changes = [] + return 
dict(form=form,files=files,changes=changes,repo=repo) + +def revision(): + app = request.args(0) + path = apath(app, r=request) + repo = hg_repo(path) + revision = request.args(1) + ctx=repo.changectx(revision) + form=FORM(INPUT(_type='submit',_value='revert')) + if form.accepts(request.vars): + hg.update(repo, revision) + session.flash = "reverted to revision %s" % ctx.rev() + redirect(URL('default','design',args=app)) + return dict( + files=ctx.files(), + rev=str(ctx.rev()), + desc=ctx.description(), + form=form + ) ADDED applications/admin/controllers/shell.py Index: applications/admin/controllers/shell.py ================================================================== --- /dev/null +++ applications/admin/controllers/shell.py @@ -0,0 +1,45 @@ +import sys +import cStringIO +import gluon.contrib.shell +import code, thread +from gluon.shell import env + +if DEMO_MODE or MULTI_USER_MODE: + session.flash = T('disabled in demo mode') + redirect(URL('default','site')) + +FE=10**9 + +def index(): + app = request.args(0) or 'admin' + reset() + return dict(app=app) + +def callback(): + app = request.args[0] + command = request.vars.statement + escape = command[:1]!='!' 
+ history = session['history:'+app] = session.get('history:'+app,gluon.contrib.shell.History()) + if not escape: + command = command[1:] + if command == '%reset': + reset() + return '*** reset ***' + elif command[0] == '%': + try: + command=session['commands:'+app][int(command[1:])] + except ValueError: + return '' + session['commands:'+app].append(command) + environ=env(app,True) + output = gluon.contrib.shell.run(history,command,environ) + k = len(session['commands:'+app]) - 1 + #output = PRE(output) + #return TABLE(TR('In[%i]:'%k,PRE(command)),TR('Out[%i]:'%k,output)) + return 'In [%i] : %s%s\n' % (k + 1, command, output) + +def reset(): + app = request.args(0) or 'admin' + session['commands:'+app] = [] + session['history:'+app] = gluon.contrib.shell.History() + return 'done' ADDED applications/admin/controllers/toolbar.py Index: applications/admin/controllers/toolbar.py ================================================================== --- /dev/null +++ applications/admin/controllers/toolbar.py @@ -0,0 +1,29 @@ +import os +from gluon.settings import global_settings, read_file +# + +def index(): + app = request.args(0) + return dict(app=app) + +def profiler(): + """ + to use the profiler start web2py with -F profiler.log + """ + KEY = 'web2py_profiler_size' + filename = global_settings.cmd_options.profiler_filename + data = 'profiler disabled' + if filename: + if KEY in request.cookies: + size = int(request.cookies[KEY].value) + else: + size = 0 + if os.path.exists(filename): + data = read_file('profiler.log','rb') + if size=m: redirect(URL('step2')) + table=session.app['tables'][n] + form=SQLFORM.factory(Field('field_names','list:string', + default=session.app.get('table_'+table,[]))) + if form.accepts(request.vars) and form.vars.field_names: + fields=listify(form.vars.field_names) + if table=='auth_user': + for field in ['first_name','last_name','username','email','password']: + if not field in fields: + fields.append(field) + 
session.app['table_'+table]=[t.strip().lower() + for t in listify(form.vars.field_names) + if t.strip()] + try: + tables=sort_tables(session.app['tables']) + except RuntimeError: + response.flash=T('invalid circual reference') + else: + if n=m: redirect(URL('step4')) + page=session.app['pages'][n] + markmin_url='http://web2py.com/examples/static/markmin.html' + form=SQLFORM.factory(Field('content','text', + default=session.app.get('page_'+page,[]), + comment=A('use markmin', + _href=markmin_url,_target='_blank')), + formstyle='table2cols') + if form.accepts(request.vars): + session.app['page_'+page]=form.vars.content + if n 0 +FONT_SIZE = 10 + +# Displays the editor in full screen mode. The value must be 'true' or 'false' +FULL_SCREEN = 'false' + +# Display a check box under the editor to allow the user to switch +# between the editor and a simple +# HTML text area. The value must be 'true' or 'false' +ALLOW_TOGGLE = 'true' + +# Replaces tab characters with space characters. +# The value can be 'false' (meaning that tabs are not replaced), +# or an integer > 0 that specifies the number of spaces to replace a tab with. 
+REPLACE_TAB_BY_SPACES = 4 + +# Toggle on/off the code editor instead of textarea on startup +DISPLAY = "onload" or "later" + +# if demo mode is True then admin works readonly and does not require login +DEMO_MODE = False + +# if visible_apps is not empty only listed apps will be accessible +FILTER_APPS = [] + +# To upload on google app engine this has to point to the proper appengine +# config file +import os +# extract google_appengine_x.x.x.zip to web2py root directory +#GAE_APPCFG = os.path.abspath(os.path.join('appcfg.py')) +# extract google_appengine_x.x.x.zip to applications/admin/private/ +GAE_APPCFG = os.path.abspath(os.path.join('/usr/local/bin/appcfg.py')) + +# To use web2py as a teaching tool, set MULTI_USER_MODE to True +MULTI_USER_MODE = False + +# configurable twitterbox, set to None/False to suppress +TWITTER_HASH = "web2py" + +# parameter for downloading LAYOUTS +LAYOUTS_APP = 'http://web2py.com/layouts' +#LAYOUTS_APP = 'http://127.0.0.1:8000/layouts' + + +# parameter for downloading PLUGINS +PLUGINS_APP = 'http://web2py.com/plugins' +#PLUGINS_APP = 'http://127.0.0.1:8000/plugins' + +# set the language +if 'adminLanguage' in request.cookies and not (request.cookies['adminLanguage'] is None): + T.force(request.cookies['adminLanguage'].value) ADDED applications/admin/models/0_imports.py Index: applications/admin/models/0_imports.py ================================================================== --- /dev/null +++ applications/admin/models/0_imports.py @@ -0,0 +1,26 @@ +import time +import os +import sys +import re +import urllib +import cgi +import difflib +import shutil +import stat +import socket + +from textwrap import dedent + +try: + from mercurial import ui, hg, cmdutil + have_mercurial = True +except ImportError: + have_mercurial = False + +from gluon.utils import md5_hash +from gluon.fileutils import listdir, cleanpath, up +from gluon.fileutils import tar, tar_compiled, untar, fix_newlines +from gluon.languages import findT, 
update_all_languages +from gluon.myregex import * +from gluon.restricted import * +from gluon.compileapp import compile_application, remove_compiled_application ADDED applications/admin/models/access.py Index: applications/admin/models/access.py ================================================================== --- /dev/null +++ applications/admin/models/access.py @@ -0,0 +1,146 @@ +import os, time +from gluon import portalocker +from gluon.admin import apath +from gluon.fileutils import read_file +# ########################################################### +# ## make sure administrator is on localhost or https +# ########################################################### + +http_host = request.env.http_host.split(':')[0] + +if request.env.web2py_runtime_gae: + session_db = DAL('gae') + session.connect(request, response, db=session_db) + hosts = (http_host, ) + +if request.env.http_x_forwarded_for or request.is_https: + session.secure() +elif not request.is_local and not DEMO_MODE: + raise HTTP(200, T('Admin is disabled because insecure channel')) + +try: + _config = {} + port = int(request.env.server_port or 0) + restricted(read_file(apath('../parameters_%i.py' % port, request)), _config) + + if not 'password' in _config or not _config['password']: + raise HTTP(200, T('admin disabled because no admin password')) +except IOError: + import gluon.fileutils + if request.env.web2py_runtime_gae: + if gluon.fileutils.check_credentials(request): + session.authorized = True + session.last_time = time.time() + else: + raise HTTP(200, + T('admin disabled because not supported on google app engine')) + else: + raise HTTP(200, T('admin disabled because unable to access password file')) + + +def verify_password(password): + session.pam_user = None + if DEMO_MODE: + return True + elif not 'password' in _config: + return False + elif _config['password'].startswith('pam_user:'): + session.pam_user = _config['password'][9:].strip() + import gluon.contrib.pam + return 
gluon.contrib.pam.authenticate(session.pam_user,password) + else: + return _config['password'] == CRYPT()(password)[0] + + +# ########################################################### +# ## handle brute-force login attacks +# ########################################################### + +deny_file = os.path.join(request.folder, 'private', 'hosts.deny') +allowed_number_of_attempts = 5 +expiration_failed_logins = 3600 + +def read_hosts_deny(): + import datetime + hosts = {} + if os.path.exists(deny_file): + hosts = {} + f = open(deny_file, 'r') + portalocker.lock(f, portalocker.LOCK_SH) + for line in f.readlines(): + if not line.strip() or line.startswith('#'): + continue + fields = line.strip().split() + if len(fields) > 2: + hosts[fields[0].strip()] = ( # ip + int(fields[1].strip()), # n attemps + int(fields[2].strip()) # last attempts + ) + portalocker.unlock(f) + f.close() + return hosts + +def write_hosts_deny(denied_hosts): + f = open(deny_file, 'w') + portalocker.lock(f, portalocker.LOCK_EX) + for key, val in denied_hosts.items(): + if time.time()-val[1] < expiration_failed_logins: + line = '%s %s %s\n' % (key, val[0], val[1]) + f.write(line) + portalocker.unlock(f) + f.close() + +def login_record(success=True): + denied_hosts = read_hosts_deny() + val = (0,0) + if success and request.client in denied_hosts: + del denied_hosts[request.client] + elif not success and not request.is_local: + val = denied_hosts.get(request.client,(0,0)) + if time.time()-val[1]= allowed_number_of_attempts: + return val[0] # locked out + time.sleep(2**val[0]) + val = (val[0]+1,int(time.time())) + denied_hosts[request.client] = val + write_hosts_deny(denied_hosts) + return val[0] + + +# ########################################################### +# ## session expiration +# ########################################################### + +t0 = time.time() +if session.authorized: + + if session.last_time and session.last_time < t0 - EXPIRATION: + session.flash = T('session expired') + 
session.authorized = False + else: + session.last_time = t0 + +if not session.authorized and not \ + (request.controller == 'default' and \ + request.function in ('index','user')): + + if request.env.query_string: + query_string = '?' + request.env.query_string + else: + query_string = '' + + if request.env.web2py_original_uri: + url = request.env.web2py_original_uri + else: + url = request.env.path_info + query_string + redirect(URL(request.application, 'default', 'index', vars=dict(send=url))) +elif session.authorized and \ + request.controller == 'default' and \ + request.function == 'index': + redirect(URL(request.application, 'default', 'site')) + + +if request.controller=='appadmin' and DEMO_MODE: + session.flash = 'Appadmin disabled in demo mode' + redirect(URL('default','sites')) + ADDED applications/admin/models/buttons.py Index: applications/admin/models/buttons.py ================================================================== --- /dev/null +++ applications/admin/models/buttons.py @@ -0,0 +1,15 @@ +# Template helpers + +import os + +def button(href, label): + return A(SPAN(label),_class='button',_href=href) + +def sp_button(href, label): + return A(SPAN(label),_class='button special',_href=href) + +def helpicon(): + return IMG(_src=URL('static', 'images/help.png'), _alt='help') + +def searchbox(elementid): + return TAG[''](LABEL(IMG(_src=URL('static', 'images/search.png'), _alt=T('filter')), _class='icon', _for=elementid), ' ', INPUT(_id=elementid, _type='text', _size=12)) ADDED applications/admin/models/db.py Index: applications/admin/models/db.py ================================================================== --- /dev/null +++ applications/admin/models/db.py @@ -0,0 +1,40 @@ +# -*- coding: utf-8 -*- +# this file is released under public domain and you can use without limitations + +if MULTI_USER_MODE: + db = DAL('sqlite://storage.sqlite') # if not, use SQLite or other DB + from gluon.tools import * + mail = Mail() # mailer + auth = 
Auth(globals(),db) # authentication/authorization + crud = Crud(globals(),db) # for CRUD helpers using auth + service = Service(globals()) # for json, xml, jsonrpc, xmlrpc, amfrpc + plugins = PluginManager() + + mail.settings.server = 'logging' or 'smtp.gmail.com:587' # your SMTP server + mail.settings.sender = 'you@gmail.com' # your email + mail.settings.login = 'username:password' # your credentials or None + + auth.settings.hmac_key = '' # before define_tables() + auth.define_tables() # creates all needed tables + auth.settings.mailer = mail # for user email verification + auth.settings.registration_requires_verification = False + auth.settings.registration_requires_approval = True + auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['verify_email'])+'/%(key)s to verify your email' + auth.settings.reset_password_requires_verification = True + auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['reset_password'])+'/%(key)s to reset your password' + + db.define_table('app',Field('name'),Field('owner',db.auth_user)) + +if not session.authorized and MULTI_USER_MODE: + if auth.user and not request.function=='user': + session.authorized = True + elif not request.function=='user': + redirect(URL('default','user/login')) + +def is_manager(): + if not MULTI_USER_MODE: + return True + elif auth.user and auth.user.id==1: + return True + else: + return False ADDED applications/admin/models/menu.py Index: applications/admin/models/menu.py ================================================================== --- /dev/null +++ applications/admin/models/menu.py @@ -0,0 +1,33 @@ +# ########################################################### +# ## generate menu +# ########################################################### + +_a = request.application +_c = request.controller +_f = request.function +response.title = '%s %s' % (_f, '/'.join(request.args)) +response.subtitle = 
'admin' +response.menu = [(T('Site'), _f == 'site', URL(_a,'default','site'))] + +if request.args: + _t = request.args[0] + response.menu.append((T('Edit'), _c == 'default' and _f == 'design', + URL(_a,'default','design',args=_t))) + response.menu.append((T('About'), _c == 'default' and _f == 'about', + URL(_a,'default','about',args=_t))) + response.menu.append((T('Errors'), _c == 'default' and _f == 'errors', + URL(_a,'default','errors',args=_t))) + response.menu.append((T('Versioning'), + _c == 'mercurial' and _f == 'commit', + URL(_a,'mercurial','commit',args=_t))) + +if not session.authorized: + response.menu = [(T('Login'), True, '')] +else: + response.menu.append((T('Logout'), False, + URL(_a,'default',f='logout'))) + +if os.path.exists('applications/examples'): + response.menu.append((T('Help'), False, URL('examples','default','index'))) +else: + response.menu.append((T('Help'), False, 'http://web2py.com/examples')) ADDED applications/admin/models/plugin_multiselect.py Index: applications/admin/models/plugin_multiselect.py ================================================================== --- /dev/null +++ applications/admin/models/plugin_multiselect.py @@ -0,0 +1,4 @@ +response.files.append(URL('static','plugin_multiselect/jquery.dimensions.js')) +response.files.append(URL('static','plugin_multiselect/jquery.multiselect.js')) +response.files.append(URL('static','plugin_multiselect/jquery.multiselect.css')) +response.files.append(URL('static','plugin_multiselect/start.js')) ADDED applications/admin/modules/__init__.py Index: applications/admin/modules/__init__.py ================================================================== --- /dev/null +++ applications/admin/modules/__init__.py ADDED applications/admin/static/css/calendar.css Index: applications/admin/static/css/calendar.css ================================================================== --- /dev/null +++ applications/admin/static/css/calendar.css @@ -0,0 +1,4 @@ 
+.calendar{z-index:99;position:relative;display:none;border-top:2px solid #fff;border-right:2px solid #000;border-bottom:2px solid #000;border-left:2px solid #fff;font-size:11px;color:#000;cursor:default;background:#d4d0c8;font-family:tahoma,verdana,sans-serif;}.calendar table{border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;font-size:11px;color:#000;cursor:default;background:#d4d0c8;font-family:tahoma,verdana,sans-serif;}.calendar .button{text-align:center;padding:1px;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;}.calendar .nav{background:transparent}.calendar thead .title{font-weight:bold;padding:1px;border:1px solid #000;background:#848078;color:#fff;text-align:center;}.calendar thead .name{border-bottom:1px solid #000;padding:2px;text-align:center;background:#f4f0e8;}.calendar thead .weekend{color:#f00;}.calendar thead .hilite{border-top:2px solid #fff;border-right:2px solid #000;border-bottom:2px solid #000;border-left:2px solid #fff;padding:0;background-color:#e4e0d8;}.calendar thead .active{padding:2px 0 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;background-color:#c4c0b8;}.calendar tbody .day{width:2em;text-align:right;padding:2px 4px 2px 2px;}.calendar tbody .day.othermonth{font-size:80%;color:#aaa;}.calendar tbody .day.othermonth.oweekend{color:#faa;}.calendar table .wn{padding:2px 3px 2px 2px;border-right:1px solid #000;background:#f4f0e8;}.calendar tbody .rowhilite td{background:#e4e0d8;}.calendar tbody .rowhilite td.wn{background:#d4d0c8;}.calendar tbody td.hilite{padding:1px 3px 1px 1px;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;}.calendar tbody td.active{padding:2px 2px 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid 
#000;}.calendar tbody td.selected{font-weight:bold;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;padding:2px 2px 0 2px;background:#e4e0d8;}.calendar tbody td.weekend{color:#f00;}.calendar tbody td.today{font-weight:bold;color:#00f;}.calendar tbody .disabled{color:#999;}.calendar tbody .emptycell{visibility:hidden;}.calendar tbody .emptyrow{display:none;}.calendar tfoot .ttip{background:#f4f0e8;padding:1px;border:1px solid #000;background:#848078;color:#fff;text-align:center;}.calendar tfoot .hilite{border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;padding:1px;background:#e4e0d8;}.calendar tfoot .active{padding:2px 0 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;}.calendar .combo{position:absolute;display:none;width:4em;top:0;left:0;cursor:default;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;background:#e4e0d8;font-size:90%;padding:1px;z-index:100;}.calendar .combo .label,.calendar .combo .label-IEfix{text-align:center;padding:1px;}.calendar .combo .label-IEfix{width:4em;}.calendar .combo .active{background:#c4c0b8;padding:0;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;}.calendar .combo .hilite{background:#048;color:#fea;}.calendar td.time{border-top:1px solid #000;padding:1px 0;text-align:center;background-color:#f4f0e8;}.calendar td.time .hour,.calendar td.time .minute,.calendar td.time .ampm{padding:0 3px 0 4px;border:1px solid #889;font-weight:bold;background-color:#fff;}.calendar td.time .ampm{text-align:center;}.calendar td.time .colon{padding:0 2px 0 3px;font-weight:bold;}.calendar td.time span.hilite{border-color:#000;background-color:#766;color:#fff;}.calendar td.time span.active{border-color:#f00;background-color:#000;color:#0f0;} + 
+#CP_hourcont {z-index:99;padding: 0px;position:absolute;border:1px dashed #bbbbbb;background-color:#dddddd;display:none;} #CP_minutecont {z-index:99;background-color:#dddddd;padding: 0px;position:absolute;width:45px;border: 1px dashed #cccccc;display:none;} .floatleft {float:left;} .CP_hour {z-index:99;padding:1px;font-family: Arial, Helvetica, sans-serif;font-size:9px;white-space:nowrap;cursor:pointer;width:35px;margin:1px;background-color:#eeeeee;} .CP_minute {z-index:99;padding:1px;background-color:#eeeeee;font-family: Arial, Helvetica, sans-serif;font-size:9px;white-space:nowrap;cursor:pointer;width:auto;margin:1px;} .CP_over {z-index:99;background-color:#ffffff;} + ADDED applications/admin/static/css/jqueryMultiSelect.css Index: applications/admin/static/css/jqueryMultiSelect.css ================================================================== --- /dev/null +++ applications/admin/static/css/jqueryMultiSelect.css @@ -0,0 +1,47 @@ +.multiSelect { + width: 200px; + border: solid 1px #BBB; + background: #FFF right center no-repeat; + padding: 2px 4px; + padding-right: 20px; + display: inline; +} + +.multiSelect.hover { + background: right center no-repeat; +} + +.multiSelect.active, +.multiSelect.focus { + border: inset 1px #000; +} + +.multiSelect.active { + background: right center no-repeat; +} + +.multiSelectOptions { + width: 500px; + max-height: 150px; + margin-top: -1px; + overflow: auto; + border: solid 1px #B2B2B2; + background: #FFF; +} + +.multiSelectOptions LABEL { + padding: 2px 5px; + display: block; +} + +.multiSelectOptions LABEL.checked { + background: #E6E6E6; +} + +.multiSelectOptions LABEL.selectAll { + border-bottom: dotted 1px #CCC; +} + +.multiSelectOptions LABEL.hover { + background: #CFCFCF; +} ADDED applications/admin/static/css/styles.css Index: applications/admin/static/css/styles.css ================================================================== --- /dev/null +++ applications/admin/static/css/styles.css @@ -0,0 +1,1065 @@ +/** 
+* cSans v0.6.3 +* 2009 Copyright A navalla suíza http://anavallasuiza.com +* cSans is released under the GNU Affero GPL version 3 - more information at http://www.fsf.org/licensing/licenses/agpl-3.0.html +*/ + +/* 1. RESET */ +html,body,div,span,object,iframe, +h1,h2,h3,h4,h5,h6,p,blockquote,pre, +a,abbr,acronym,address,code, +del,dfn,em,img,q,dl,dt,dd,ol,ul,li, +fieldset,form,label,legend,textarea, +table,caption,tbody,tfoot,thead,tr,th,td { + margin:0; + padding:0; + border:0; +} +textarea,select,input { font-size:1em; } +html,body { height:100%; } +body { + font-family:Arial,Helvetica,"Liberation Sans",Sans,sans-serif; + font-size: 12pt; + line-height:1.25em; + text-align:center; + background:#fff; + color:#000; +} +.page,.expanded-page { text-align:left; } + +/* 2. DEBUG: Show borders to stress an element */ +.omg-red,.omg-yellow,.omg-blue,.omg-green,.omg-black,.omg-white { + border-top:dotted 3px; + border-bottom:dotted 3px; +} +.omg-red { border-color:red; } +.omg-yellow { border-color:yellow; } +.omg-blue { border-color:blue; } +.omg-green { border-color:green; } +.omg-black { border-color:black; } +.omg-white { border-color:white; } + +/* 3. BASIC STYLES */ +/* 3.1. Titles */ +h1,h2,h3,h4,h5,h6 { + font-weight:bold; + line-height:1em; + margin:1em 0 0.5em 0; +} +h1 { font-size:2em; } +h2 { font-size:1.75em; } +h3 { font-size:1.5em; } +h4 { font-size:1.125em; } +h5 { font-size:1em; } +h6 { font-size:1em; font-weight:normal; } + +/* 3.2. Lists */ +ul,dd { margin-left:1em; } +ol { list-style-type:decimal; margin-left:1.5em; } +dl dt { font-weight:bold; } + +/* 3.3. Tables */ +table { border-collapse:collapse; border-spacing:0; } +caption,th { font-weight:bold; } +th,td { text-align:left; padding:0; border:1px solid #ccc; } +tfoot { font-style:italic; } + +/* 3.4. Images */ +a img { border:none; } +img.right { margin-left:1em; } +img.left { margin-right:1em; } + +/* 3.5. 
Forms */ +fieldset { border:1px solid #ccc; } +legend { font-weight:bold; font-size:1.2em; } +input { margin:0; } +input.text,input.password { overflow-y:visible; } +textarea { width:400px; height:100px; border:1px solid #ccc; overflow-y: auto; } +select { margin:0; font-size:1em; } +input,textarea,select { font-family:Arial,sans-serif; font-size: 1em; } + +/* 3.6. Monospace elements */ +pre,code,tt { font-family:"Courier New", Courier, monospace; line-height:1.5; } +pre,code { white-space:pre; } +tt { display:block; line-height:1.5; } + +/* 3.7. Block elements */ +p,form,table,address,blockquote,pre,code,tt,ul,ol,dl { margin-bottom:1em; } + +/* 3.8. Inline elements */ +em,dfn { font-style:italic; } + +/* 3.9. HR element */ +hr { + background:none; + visibility:hidden; + clear:both; + float:none; + width:100%; + height:1px; + border:none; + margin:-1px 0; +} + +/* 3.10. Flash objects */ +object { outline:none; } + +/* 4. UTILS */ +/* 4.1. Images with link and without link must overwrite background, width and height properties in your css */ +.image a,.image li,.image { + background-repeat:no-repeat; + background-color:transparent; + margin:0; + padding:0; + outline:none; + font-size:0px !important; + line-height:0em !important; + letter-spacing:-20px; + text-indent: -2px; + display:block; + overflow:hidden; + text-align:left; + border: none; +} +.image a { + display:block; + width:100%; + height:100%; + height:inherit; +} +ol.image,ul.image { + background:none; + width:100%; + height:auto; + float:left; +} +.image li { + float:left; +} +span.image,strong.image,em.image,a.image { + display:inline-block; + vertical-align:bottom; +} +input.image { cursor:pointer; } + +/* 4.2. Simple tabs system. 
You can overwrite background and text styles */ +.tabs { + list-style:none; + padding:0; + margin:0; + float:left; + width:100%; +} +.tabs li { + float:left; + margin:0 1px 0 0; +} +.tabs li span, +.tabs li a { + float: left; + padding:2px 5px; + white-space:nowrap; + text-align:center; + cursor:pointer; + outline:0; + text-decoration:none; +} +.tabs li span { cursor:default; } +.tabs .select a,.tabs a:hover { + background:#ddd; +} + +/* 4.3. Convert a block element (like a fieldset) into a inline element */ +.inline { + border:none; + margin:0; + padding:0; + display:inline; +} +fieldset.inline { + display:block; + float:left; + width: 100%; +} + +/* 4.4. Inline-block element must content any element inside (div, p, etc) for the correct visualization in FF<3 */ +.inline-block-top,.inline-block-middle,.inline-block-bottom { + float:none !important; + display:inline-block; +} +.inline-block-top { vertical-align:top !important; } +.inline-block-middle { vertical-align:middle !important; } +.inline-block-bottom { vertical-align:bottom !important; } + +/* 4.5. Float elements */ +.left { float:left !important; } +.right,.right-right { float:right !important; } +.right-right { text-align:right !important; } +.right-full { width:100%; text-align:right !important; } + +/* 4.6. Clear */ +.clear,.content,.page,.expanded-page { display:block; } +.clear:after,.tabs:after,.content:after,.page:after,.expanded-page:after { + content:" "; + display:block; + height:0; + clear:both; + visibility:hidden; + font-size:0; +} + +/* 4.7. Destroy margin collapse */ +.no-collapse { padding-top:1px; } + +/* 4.8. Hide an element */ +.hide { display:none; } + +/* 5. LAYOUT */ +/* 5.1. Row */ +.row { + float:left; + width:100%; + margin:0; + padding:0; + border:none; +} + +/* 5.2. Last column in a row */ +.last { margin-right:0 !important; } + +/* 5.3. Content */ +.content { + padding:0; + margin:0; +} + +/* 6. 
HACKS */ +img { -ms-interpolation-mode:bicubic; } /* IE */ +.clear,.tabs,.content,.page,.expanded-page { + *overflow-y:auto; /* IE7 */ + *overflow-x:hidden; /* IE7 */ + _height:1%; /* IE6 */ + _overflow-y:visible; /* IE6 */ + _overflow-x:visible; /* IE6 */ +} +a.image,span.image,strong.image,em.image,.inline-block-top,.inline-block-middle,.inline-block-bottom { + display:-moz-inline-box; /* FF<3 */ + -moz-box-orient:vertical; /* FF<3 */ + *display:inline; /* IE */ +} + +/** +* cSans Button plugin v0.3.1 +* 2009 Copyright A navalla suíza http://anavallasuiza.com +* cSans is released under the GNU Affero GPL version 3 - more information at http://www.fsf.org/licensing/licenses/agpl-3.0.html +*/ + +/* 1. BASIC STYLES */ + +ul.button { + list-style: none; + padding: 0; + margin: 0; + display:block; + } +ul.button { + overflow-y: auto; + overflow-x: hidden; + } +ul.button li { + float: left; + margin: 0; + } +ul.button li a { + display: block; + z-index: 2; + } +a.button { + display: inline-block; + vertical-align: middle; + } + +/* 1.1. Normal status */ +a.button, +ul.button li a { + background-position: 100% 0; + background-repeat: no-repeat; + white-space: nowrap; + text-align: center; + cursor: pointer; + outline: 0; + } +a.button span, +ul.button li a span { + display: block; + background-position: 0 0; + background-repeat: no-repeat; + } + +/* 1.2. Hover/selected status */ +a.button:hover, +a.button.select, +ul.button li a:hover, +ul.button li.select a { + background-position: 100% -70px !important; + } +a.button:hover span, +a.button.select span, +ul.button li a:hover span, +ul.button li.select a span { + background-position: 0 -70px !important; + } + +/* 2. EDITABLE STYLES */ + +/* 2.1. Image background used */ +a.button, +.button a, +.button span { + background-image: url(../images/button.png); + } + +/* 2.2. 
Normal status (Example for padding 10px) */ +a.button, +.button a { + padding: 0 10px 0 0; /* Padding-right: 10px */ + margin: 0 1px 0 10px; /* Margin-left: 10px */ + text-decoration: none; + } +.button span { + padding: 4px 0 6px 10px; /*Padding-left: 10px */ + margin: 0 0 0 -10px; /* Margin-left: -10px */ + } + +/* 3. HACKS */ + +ul.button { + _height: 1%; /* IE6 */ + _overflow-y: visible; /* IE6 */ + _overflow-x: visible; /* IE6 */ + } +a.button span { + _float: left; /* Only IE6 */ + _position: relative; /* Only IE6 */ + } +a.button { + display: -moz-inline-box; /* FF<3 */ + display: inline-block; /* FF<3 */ + -moz-box-orient: vertical; /* FF<3 */ + *display: inline; /* IE */ + } + +/** +* cSans Tooltip pluging v0.1 +* 2009 Copyright A navalla suíza http://anavallasuiza.com +* cSans is released under the GNU Affero GPL version 3 - more information at http://www.fsf.org/licensing/licenses/agpl-3.0.html +*/ + +/* BASIC STYLES */ + +.tooltip:hover { + background:transparent; + text-decoration:none; + } +.tooltip span { + display:none; + padding:5px; + margin-left:10px; + width:150px; + } +.tooltip:hover span { + cursor: default; + display:inline; + position:absolute; + } + +/** +* cSans Flexible v0.1 +* 2009 Copyright A navalla suíza http://anavallasuiza.com +* cSans is released under the GNU Affero GPL version 3 - more information at http://www.fsf.org/licensing/licenses/agpl-3.0.html +*/ + +/* 1. 
BASIC STYLES */ +/* 1.1 Widths */ +.f10 { width:10%; } +.f20 { width:20%; } +.f25 { width:25%; } +.f30 { width:30%; } +.f33 { width:33.33%; } +.f40 { width:40%; } +.f50 { width:50%; } +.f60 { width:60%; } +.f66 { width:66.66%; } +.f70 { width:70%; } +.f75 { width:75%; } +.f80 { width:80%; } +.f90 { width:90%; } +.f100 { width:100%; } + +.f10,.f20,.f25,.f30,.f33,.f40,.f50,.f60,.f66,.f70,.f75,.f80,.f90,.f100 { + float:left; + overflow:hidden; +} + +/* 1.2 Margin-left */ +.fl10 { margin-left:10%; } +.fl20 { margin-left:20%; } +.fl25 { margin-left:25%; } +.fl30 { margin-left:30%; } +.fl33 { margin-left:33.33%; } +.fl40 { margin-left:40%; } +.fl50 { margin-left:50%; } +.fl60 { margin-left:60%; } +.fl66 { margin-left:66.66%; } +.fl70 { margin-left:70%; } +.fl75 { margin-left:75%; } +.fl80 { margin-left:80%; } +.fl90 { margin-left:90%; } +.fl100 { margin-left:100%; } + +/* 1.3 Margin-right */ +.fr10 { margin-right:10%; } +.fr20 { margin-right:20%; } +.fr25 { margin-right:25%; } +.fr30 { margin-right:30%; } +.fr33 { margin-right:33.33%; } +.fr40 { margin-right:40%; } +.fr50 { margin-right:50%; } +.fr60 { margin-right:60%; } +.fr66 { margin-right:66.66%; } +.fr70 { margin-right:70%; } +.fr75 { margin-right:75%; } +.fr80 { margin-right:80%; } +.fr90 { margin-right:90%; } +.fr100 { margin-right:100%; } + +/* 2. 
HACKS */ +.f10,.f20,.f25,.f30,.f33,.f40,.f50,.f60,.f66,.f70,.f75,.f80,.f90,.f100 { + _display:inline; /* IE5-6 */ + *margin-left:-1px; /* IE5-7 */ +} + +/** +* web2py Nuovo Theme +* ------------------ +**/ + +/* Basics */ + +html, body { + font-size: 13px; + text-align: left; + color: #333; + padding: 0; + margin: 0; + background: #fff url(../images/header_shadow.png) repeat-x left 33px; +} + +a { + color: #e8953c; + text-decoration: none; +} + +label { + color: #777; + font-weight: bold; + font-size: 100%; +} + +img { + vertical-align: baseline; +} + +td, th { + border: none; +} + +.center { + text-align: center; +} + +.centerblock { + margin: 0 auto; +} + +.clear { + clear: both; +} + +.att { + color: #d22; +} + +/* General */ + +h3 { + padding-left: 18px; + background: url(../images/sidebar_bullet.gif) no-repeat; + color: #555; + font-weight: normal; + font-size: 130%; +} + +.formfield { + padding: 0.7em 0.5em; + -moz-border-radius: 0.3em; + border-radius: 0.3em; + margin: 1em 0; + border: 1px solid #ddd; +} + +.buttongroup { + padding: 0.7em 0.5em; +} + +.formfield { + background: #eee; +} + +.flash { + position: fixed; + top: 2em; + right: 2em; + background: #e8953c; + color: #fff; + border: 2px solid #fff; + -moz-border-radius: 0.7em; + border-radius: 0.7em; + padding: 0.5em 1em; +} + +.tooltip span { + background: #9fb364; + color: #eef1d9; + border: 1px solid #eef1d9; + font-style: italic; + width: 20%; + padding: 0.3em; + -moz-border-radius: 0.5em; + border-radius: 0.5em; + font-size: 13px; + text-transform: none; +} + +.help { + width: 60%; + font-size: 1em; + padding: 0.3em; + -moz-border-radius: 0.5em; + border-radius: 0.5em; + background: #eef1d9; + color: #9fb364; + font-style: italic; + text-transform: none; +} + +.help h3 { + color: #9fb364; + font-size: 1.2em; + background: transparent; + font-weight: bold; +} + +.icon img { + vertical-align: middle; + cursor: pointer; +} + +.form td { + padding: 0.2em 1em 0.2em 0; +} + +/* Buttons */ + +.controls 
a.button, +.controls a.button span { + background-image: url(../images/small_button.png); +} + +.controls a.button span { + padding-top: 2px; +} + +.controls a.button { + color: #333; +} + +.controls a.special, +.controls a.special span { + background-image: url(../images/small_special_button.png); +} + +.controls a.special { + color: #ddd; +} + +/* Header */ + +#header { + background: #292929 url(../images/header_bg.png) repeat-x; + height: 33px; + overflow: hidden; +} + +/* Home button */ + +#start { + position: absolute; + top: 2px; + left: 13px; + margin: 0; +} + +#start a.button { + display: block; +} + +#start a.button, +#start a.button span { + background-image: url(../images/start.png); +} + +#start a { + text-indent: -999px; + overflow: hidden; + padding: 0; + margin: 0; +} + +#start a.button span { + width: 112px; + height: 36px; + padding: 0; + margin: 0; +} + +/* Menu */ + +#menu { + float: right; + margin: 3px 13px 0 0; +} + +#menu li { + float: left; + list-style: none; + margin-right: 0.4em; +} + +#menu a.button, +#menu a.button span { + background-image: url(../images/menu.png); +} + +#menu a.button { + padding-right: 1em; +} + +#menu a.button span { + padding-left: 1em; +} + +#menu a { + color: #333; +} + +/* Main area */ + +#main { + padding: 2em 1em 5em; + position: relative; +} + +#main h2 { + margin-top: 0; + font-weight: normal; + text-transform: uppercase; + border-bottom: 1px dotted #aaa; + padding-left: 18px; + background: transparent url(../images/section_bullet.png) no-repeat left 3px; + color: #aaa; +} + +/* Applist */ + +.applist h3 { + color: #aaa; + font-weight: normal; +} + +.applist ul { + margin: 0; +} + +.applist li { + list-style: none; + padding: 0; +} + +h3.editableapp, +h3.currentapp { + padding: 5px 0 5px 54px; +} + +h3.editableapp { + background: #fff url(../images/folder.png) no-repeat; +} + +h3.currentapp { + background: #fff url(../images/folder_locked.png) no-repeat; +} + +.applist .controls { + margin-left: 1.5em; +} + 
+ +/* Site sidebar */ + +.sidebar_inner { + margin: 0 1em 0; + -moz-border-radius: 0.5em; + border-radius: 0.5em; + border: 1px solid #ddd; + min-width:400px; +} + +.sidebar h4 { + color: #888; +} + +.pwdchange { + padding: 1em; + float: right !important; +} + +.sidebar .box { + clear: right; + margin-top: 2em; + border-top: 1px solid #eee; + padding: 0 1em; +} + +.sidebar .box { + background: url(../images/sidebar_background.jpg) no-repeat; +} + +.sidebar .upgrade_version { + color: #71c837; +} + +/* Tweets */ + +#tweets ol { + margin: 1em 0; +} + +#tweets ol li { + background: #ebe8d0; + list-style: none; + -moz-border-radius: 0.5em; + border-radius: 0.5em; + padding: 0.5em; + margin: 1em 0; + border: 1px solid #aaa; +} + +#tweets .entry-date { + font-weight: bold; + display: block; +} + +/* Design/Plugin page */ + +.component { + cursor: pointer; +} + +.component_contents { + padding-left: 20px; +} + +.component_contents li { + list-style: none; +} + +.component_contents div.comptools { + margin-bottom: 1em; + padding-bottom: 0.5em; +} + +.component_contents div.formfield form { + margin-bottom: 0.2em; + margin-top: 0.2em; +} + +.file { + font-weight: bold; +} + +.folder { + display: block; + padding: 4px 0 4px 40px; + background: url(../images/folder_sm.png) no-repeat; + margin: 0.5em 0; +} + +.folder .file { + font-weight: bold; +} + +.sublist { + margin-left: 0; + border-left: 1px dotted #aaa; + padding-left: 0.5em; + margin-top: 0.5em; + margin-bottom: 0.0em; +} + +/* About */ + +.legalese { + background: #eee url(../images/embossed.png) repeat-y; + padding: 1em 1em 1em 2em; +} + +/* Wizard */ + +.step li { + list-style: none; + margin-left: 1em; + margin-top: 0.5em; +} + +.step #wizard_nav .box { + border-bottom: 1px dotted #aaa; + padding: 0.5em; +} + +.step #wizard_form { + padding: 0.5em 0 2em 2em; +} + +/* Editor */ + +.edit #body { + height: auto; + width: 100%; +} + +.edit .help li { + list-style: none; + padding: 0.5em 0; +} + +.edit .help tt { + 
font-weight: bold; + font-style: normal; + display: inline; /* Rest cSans base style */ + border: 1px solid #999; + background: #333; + color: #ddd; + padding: 0.3em; + -moz-border-radius: 0.3em; + border-radius: 0.3em; +} + +/* Ticket */ + +ul#snapshot > li { + list-style: none; +} + +.inspect td, +.versions td, +.inspect th, +.versions th { + padding: 0.3em; + // border: 1px solid #aaa; +} + +.inspect th, +.versions th { + background: #ddd; + color: #777; +} + +.ticket h3 { + margin-top: 0.5em; + background: url(../images/ticket_section.png) no-repeat; + padding: 30px; + text-transform: uppercase; +} + + +.ticket .inspect li { + list-style: none; +} + +#frames ul { + margin: 0; +} + +#frames li { + margin: 0.5em 0; + padding: 0; + list-style: none; +} + +/* Errors */ + +.errors table.sortable th { + background: url(../images/header_bg.png) repeat-x; + color: #eee; + // border-right: 1px solid #eee; + padding-top: 0.5em; +} + +.errors table.sortable td { + border-bottom: 1px dotted #ddd; + padding: 0.4em 0.2em; +} + +/* Tests */ + +.test h3.failed { + background-image: url(../images/red_bullet.gif); +} + +.test h3.nodoctests { + background-image: url(../images/dim_bullet.gif); +} + +/* Footer */ + +#footer { + padding: 1em 0 0; + color: #eee; + text-align: center; + background: #292929 url(../images/header_bg.png) repeat-x; + height: 45px; + overflow: hidden; + clear: both; +} + +/* Shell */ + +.shell #wrapper { + margin: 0 auto; +} + +.shell #output { + width: 75%; + height:30em; +} + +.shell #output, +.shell #output pre { + color: #e8953c; + background: white; + border: 1px solid #333; +} + +.shell .prompt, +.shell #output, +.shell pre, +.shell #caret { + font-family: monospace; +} + +.shell .prompt, +.shell #output, +.shell #caret { + font-size: 10pt; + padding: 6px; + padding-right: 0em; +} + +.shell #shellwrapper { + background: white; + border: 1px solid #333; + color: #e8953c; + width: 75%; + // padding: 6px; + // -moz-border-radius: 1em; + // 
border-radius: 1em; + margin: 1em 0 +} + +.shell #caret { + border: 0; + float: left; +} + +.shell .prompt { + color: #e8953c; + width: 85%; + height: 4em; + border: 0; +} + +.shell .prompt, .shell #output { + overflow: auto; +} + +.shell table, tr, td { + text-align: left; + vertical-align: top; +} + +.shell pre { + border: 0; + padding: 0; + margin: 0; + color: #333333; +} + +.shell .message { + width: 100%; + color: #8AD; + font-weight: bold; + font-style: italic; +} + +.shell .error { + color: #F44; +} + +.shell .username { + font-weight: bold; +} + +.shell dd{ + color: #000033; +} + +.shell dt{ + color: #333333; +} + +.shell #ajax-status { + font-weight: bold; +} + +.shell .processing { + background-image: url('../images/spinner.gif'); +} + +.shell #caret { + width: 2.5em; + margin-right: 0px; + padding-right: 0px; + border-right: 0px; +} + + +/* ie7 hacks */ + +.sublist { + zoom: 1; +} + +.untranslated { background-color: #FFCC00; } +.translated { background-color: white; } +.ui-multiselect { border: 1px solid #ccc; width:400px;} +#editor_area textarea { height: 400px; width: 100% } ADDED applications/admin/static/eamy/bundle_markup.js Index: applications/admin/static/eamy/bundle_markup.js ================================================================== --- /dev/null +++ applications/admin/static/eamy/bundle_markup.js @@ -0,0 +1,389 @@ +/* + * eAmy.Offline - Amy Editor embedded for offline use. + * http://www.april-child.com/amy + * + * Published under MIT License. 
+ * Copyright (c) 2007-2008 Petr Krontorád, April-Child.com + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + * + * + * This file is auto-generated from original Fry Framework and Amy Editor sources.. + */ + + +// Generated from theme definition file. 
+$class('ac.chap.theme.EAmy < ac.chap.Theme'); + + ac.chap.theme.EAmy.prototype.initDefinition = function() + { +// $call(this, 'ac.chap.Theme.initDefinition');this.cssId = 'black'; +//this.background = '#072240'; +//this.textColor = '#DFEFFF'; +//this.caretColor = 'lime'; +//this.caretRowStyleActive = '#041629'; +//this.selectionStyle = '#86553b'; +//this.colorScheme[ac.chap.TOKEN_MULTIROW_COMMENT] = 'color:#0084FF;font-style:italic'; +//this.colorScheme[ac.chap.TOKEN_SINGLEROW_COMMENT] = 'color:#0084FF;font-style:italic'; +//this.colorScheme[ac.chap.TOKEN_SINGLE_QUOTED] = 'color:#00DF00'; +//this.colorScheme[ac.chap.TOKEN_DOUBLE_QUOTED] = 'color:#00DF00'; +//this.colorScheme[ac.chap.CHUNK_KEYWORD] = 'color:#FF9D00'; +//this.colorScheme[ac.chap.CHUNK_NUMBER] = 'color:#FF5B8C'; +//this.colorScheme[ac.chap.CHUNK_OPERATOR] = 'color:#FF9D00;'; +//this.colorScheme[ac.chap.CHUNK_PARENTHESIS] = 'color:#FFF177'; +//this.colorScheme[ac.chap.CHUNK_KEYWORD_CUSTOM] = 'color:#54FFB8'; +//this.colorScheme[ac.chap.CHUNK_FUNCTION_NAME] = 'color:#FFE000'; +//this.colorScheme[ac.chap.CHUNK_LIBRARY] = 'color:#71E5B6'; +//this.colorScheme[ac.chap.CHUNK_LIBRARY_CUSTOM] = 'color:#FF78E5'; + + $call(this, 'ac.chap.Theme.initDefinition');this.cssId = 'twilight'; +this.background = '#141414'; +this.textColor = '#F8F8F8'; +this.caretColor = '#A7A7A7'; +this.caretRowStyleActive = '#1B1B1B'; +this.selectionStyle = '#3C4043'; +this.colorScheme[ac.chap.TOKEN_MULTIROW_COMMENT] = 'color:#605A60;font-style:italic'; +this.colorScheme[ac.chap.TOKEN_SINGLEROW_COMMENT] = 'color:#605A60;font-style:italic'; +this.colorScheme[ac.chap.TOKEN_SINGLE_QUOTED] = 'color:#8B9F67'; +this.colorScheme[ac.chap.TOKEN_DOUBLE_QUOTED] = 'color:#D4F29E'; +this.colorScheme[ac.chap.CHUNK_KEYWORD] = 'color:#D2A964'; +this.colorScheme[ac.chap.CHUNK_NUMBER] = 'color:#DE6848'; +this.colorScheme[ac.chap.CHUNK_OPERATOR] = 'color:#EFC25A;'; +this.colorScheme[ac.chap.CHUNK_PARENTHESIS] = 'color:#ABC4DC'; 
+this.colorScheme[ac.chap.CHUNK_KEYWORD_CUSTOM] = 'color:#A0849E'; +this.colorScheme[ac.chap.CHUNK_FUNCTION_NAME] = 'color:#DAD280'; +this.colorScheme[ac.chap.CHUNK_LIBRARY] = 'color:#7286A8'; +this.colorScheme[ac.chap.CHUNK_LIBRARY_CUSTOM] = 'color:#A55C29'; +} + + +// Generated from bundle keymap definition file. +ac.chap.KeyMap.prototype.initDefinition = function() + { + var _ = '\n'; + this.compile + (""+_+ "KEY: 0" ++_+ " insert(character:true)" ++_+ "KEY: -37" ++_+ " caret(move:'left')" ++_+ "KEY: -37+shift" ++_+ " caret(move:'left')" ++_+ " selection(add:true)" ++_+ "KEY: -37+ctrl" ++_+ " caret(move:'prev_regexp', re:'[^|._A-Z ,|(|);]*$')" ++_+ "KEY: -37+alt" ++_+ " caret(move:'prev_word')" ++_+ "KEY: -37+ctrl+shift" ++_+ " caret(move:'prev_regexp', re:'[^|._A-Z ,|(|);]*$')" ++_+ " selection(add:true)" ++_+ "KEY: -37+alt+shift" ++_+ " caret(move:'prev_word')" ++_+ " selection(add:true)" ++_+ "KEY: -37+meta" ++_+ " caret(move:'row_start')" ++_+ "KEY: -37+meta+shift" ++_+ " caret(move:'row_start')" ++_+ " selection(add:true)" ++_+ "KEY: -39" ++_+ " caret(move:'right')" ++_+ "KEY: -39+shift" ++_+ " caret(move:'right')" ++_+ " selection(add:true)" ++_+ "KEY: -39+ctrl" ++_+ " caret(move:'next_regexp', re:'^[^|._A-Z ,|(|);]*')" ++_+ "KEY: -39+alt" ++_+ " caret(move:'next_word')" ++_+ "KEY: -39+ctrl+shift" ++_+ " caret(move:'next_regexp', re:'^[^|._A-Z ,|(|);]*')" ++_+ " selection(add:true)" ++_+ "KEY: -39+alt+shift" ++_+ " caret(move:'next_word')" ++_+ " selection(add:true)" ++_+ "KEY: -39+meta" ++_+ " caret(move:'row_end')" ++_+ "KEY: -39+meta+shift" ++_+ " caret(move:'row_end')" ++_+ " selection(add:true)" ++_+ "KEY: -38" ++_+ " caret(move:'up')" ++_+ "KEY: -38+shift" ++_+ " caret(move:'up')" ++_+ " selection(add:true)" ++_+ "KEY: -40" ++_+ " caret(move:'down')" ++_+ "KEY: -40+shift" ++_+ " caret(move:'down')" ++_+ " selection(add:true)" ++_+ "KEY: -13" ++_+ " insert(row:true)" ++_+ "KEY: -8" ++_+ " delete(character:true)" ++_+ "KEY: -46" ++_+ " 
delete(character:false)" ++_+ "KEY: 75+ctrl+shift" ++_+ " delete(row:true)" ++_+ "KEY: -27" ++_+ " custom(action:'WordComplete', direction:true)" ++_+ "KEY: -27+shift" ++_+ " custom(action:'WordComplete', direction:false)" ++_+ "KEY: -9" ++_+ " custom(action:'SnippetComplete')" ++_+ "KEY: 123" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'}')" ++_+ "KEY: 34" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'\"')" ++_+ "KEY: 91" ++_+ " custom(action:'AutoComplete', use_selection:true, text:']')" ++_+ "KEY: 40" ++_+ " custom(action:'AutoComplete', use_selection:true, text:')')" ++_+ "KEY: -36" ++_+ " caret(move:'doc_start')" ++_+ "KEY: -36+shift" ++_+ " caret(move:'doc_start')" ++_+ " selection(add:true)" ++_+ "KEY: -35" ++_+ " caret(move:'doc_end')" ++_+ "KEY: -35+shift" ++_+ " caret(move:'doc_end')" ++_+ " selection(add:true)" ++_+ "KEY: -34+meta" ++_+ " caret(move:'page_down')" ++_+ "KEY: -34+meta+shift" ++_+ " caret(move:'page_down')" ++_+ " selection(add:true)" ++_+ "KEY: -33+meta" ++_+ " caret(move:'page_up')" ++_+ "KEY: -33+meta+shift" ++_+ " caret(move:'page_down')" ++_+ " selection(add:true)" ++_+ "KEY: 99+meta" ++_+ " clipboard(copy:true)" ++_+ "KEY: 120+meta" ++_+ " clipboard(cut:true)" ++_+ "KEY: 122+meta" ++_+ " undo()" ++_+ "KEY: 90+meta+shift" ++_+ " redo()" ++_+ "KEY: 97+meta" ++_+ " selection(all:true)" ++_+ "KEY: 97+ctrl" ++_+ " selection(all:true)" ++_+ "KEY: -113" ++_+ " custom(action:'GoToBookmark', direction:1)" ++_+ "KEY: -113+shift" ++_+ " custom(action:'GoToBookmark', direction:-1)" ++_+ "KEY: -113+meta" ++_+ " custom(action:'ToggleBookmark')" ++_+ "KEY: 91+meta" ++_+ " custom(action:'Indent', direction:'left')" ++_+ "KEY: 93+meta" ++_+ " custom(action:'Indent', direction:'right')" ++_+ "KEY: 47+meta" ++_+ " custom(action:'Comment')" ++_+ "KEY: 43+meta" ++_+ " custom(action:'RuntimeOption', key:'font.size', value:'bigger')" ++_+ "KEY: 45+meta" ++_+ " custom(action:'RuntimeOption', key:'font.size', 
value:'smaller')" ++_+ "KEY: 101+meta" ++_+ " custom(action:'SetSearchKeyword')" ++_+ "KEY: 103+meta" ++_+ " custom(action:'SearchKeyword', direction:'down')" ++_+ "KEY: 71+shift+meta" ++_+ " custom(action:'SearchKeyword', direction:'up')" ++_+ "KEY: 102+ctrl" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: 83+ctrl+shift" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: 102+meta" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: -13" ++_+ " custom(action:'SmartIndent', split_line:true, indent_tab_when_starts:'class module def if else unless rescue ensure while do __class__')" ++_+ "KEY: -13+meta" ++_+ " custom(action:'SmartIndent', split_line:false, indent_tab_when_starts:'class module def if else unless rescue ensure while do __class__')" ++_+ "KEY: 39" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'\\'')" +)}; + +$class('ac.chap.lang.EAmy < ac.chap.Language'); + + ac.chap.lang.EAmy.prototype.initDefinition = function() + { + $call(this, 'ac.chap.Language.initDefinition'); +this.singleQuoteStringMarker = "'"; +this.singleQuoteStringMarkerException = "\\"; +this.doubleQuoteStringMarker = "\""; +this.doubleQuoteStringMarkerException = "\\" +this.wordDelimiter = /[\w\.\d]/; +this.indentIgnoreMarker = /[\.]/; +this.foldingStartMarkers = [/^\s*<(div)\b.*>/i, /^\s*<(ul)\b.*>/i]; +this.foldingParityMarkers = [/^\s*<(div)\b.*>/i, /^\s*<(ul)\b.*>/i]; +this.foldingStopMarkers = [/^\s*<\/(div)>/i, /^\s*<\/(ul)>/i]; +this.singleRowCommentStartMarkers = []; +this.multiRowCommentStartMarker = ""; +this.chunkRules.push([/(([^\w]|^)(\d{1,}[\d\.Ee]*)([^w]|$))/i, 3, ac.chap.CHUNK_NUMBER]) +this.chunkRules.push([/(\+|\-|\*|\/|\=|\!|\^|\%|\||\&|\<|\>)/i, 0, ac.chap.CHUNK_OPERATOR]) +this.chunkRules.push([/(\(|\)|\[|\]|\{|\})/i, 0, ac.chap.CHUNK_PARENTHESIS]) +this.chunkRules.push([/((<|<\/)([\w-_\:]*)([ >]))/i, 3, ac.chap.CHUNK_KEYWORD]) +this.chunkRules.push([/(([ \t])([\w-_\:]*)(=$))/i, 3, ac.chap.CHUNK_KEYWORD_CUSTOM]) 
+this.chunkRules.push([/(([^\w]|^)(!DOCTYPE)([^\w]|$))/i, 3, ac.chap.CHUNK_LIBRARY]) +this.chunkRules.push([/(([^\w]|^)(\d{1,}[\d\.Ee]*)([^w]|$))/i, 3, ac.chap.CHUNK_NUMBER]) +this.chunkRules.push([/(\+|\-|\*|\/|\=|\!|\^|\%|\||\&|\<|\>)/i, 0, ac.chap.CHUNK_OPERATOR]) +this.chunkRules.push([/(\(|\)|\[|\]|\{|\})/i, 0, ac.chap.CHUNK_PARENTHESIS]) +this.chunkRules.push([/((<|<\/)([\w-_\:]*)([ >]))/i, 3, ac.chap.CHUNK_KEYWORD]) +this.chunkRules.push([/(([ \t])([\w-_\:]*)(=$))/i, 3, ac.chap.CHUNK_KEYWORD_CUSTOM]) +this.chunkRules.push([/(([^\w]|^)(!DOCTYPE)([^\w]|$))/i, 3, ac.chap.CHUNK_LIBRARY]) +} +var snippet = {}; +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ie6', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'iegte7', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ie5', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ie', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ienot', code: '${1:${AMY_SELECTED_TEXT: IE Conditional Comment: NOT Internet Explorer }}$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ielte6', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ielt6', code: '$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: '${0:${AMY_SELECTED_TEXT/\A(.*)<\/em>\z|.*/(?1:$1:$0<\/em>)/m}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: '${0:${AMY_SELECTED_TEXT/\A(.*)<\/strong>\z|.*/(?1:$1:$0<\/strong>)/m}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'left', code: '←'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 
'backtab', code: '⇤'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'enter', code: '⌅'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'arrow', code: '→'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'option', code: '⌥'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'shift', code: '⇧'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ' '}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'delete', code: '⌦'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'backspace', code: '⌫'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'escape', code: '⎋'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'tab', code: '⇥'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'up', code: '↑'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'control', code: '⌃'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'return', code: '↩'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'down', code: '↓'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'command', code: '⌘'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctype', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctype', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctypexf', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctypext', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctypex', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'doctypexs', code: '\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 't', code: '<{${1:tag_name}}>$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 
'body', code: '\n $0\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'textarea', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'div', code: '\n ${0:$AMY_SELECTED_TEXT}\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: '
'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'title', code: '${1:${AMY_FILENAME/((.+)\..*)?/(?2:$2:Page Title)/}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'movie', code: '\n \n \n \n \n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'input', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'head', code: '\n \n ${1:${AMY_FILENAME/((.+)\..*)?/(?2:$2:Page Title)/}}\n $0\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'meta', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'h1', code: '

${1:$AMY_SELECTED_TEXT}

'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'form', code: '\n $0\n\n

\n'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'link', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'style', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'table', code: '
\n \n \n
${5:Header}
${0:Data}
'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'base', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'scriptsrc', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'mailto', code: '
${3:email me}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'script', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'c', code: 'class="$1"'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'i', code: 'id="$1"'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'p', code: '{{pass}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ex', code: '{{extend \'${1:layout.html}\'}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'for', code: '{{for ${1:bar} in ${2:foo}:}}\n $0\n{{pass}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'if', code: '{{if ${1:foo} ${2:==/!=/=>/=/<} ${3:bar}:}}\n $0\n{{pass}}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '=', code: '{{=$0}}'}; +eamy.snippets.push(snippet); + + ADDED applications/admin/static/eamy/bundle_python.js Index: applications/admin/static/eamy/bundle_python.js ================================================================== --- /dev/null +++ applications/admin/static/eamy/bundle_python.js @@ -0,0 +1,296 @@ +/* + * eAmy.Offline - Amy Editor embedded for offline use. + * http://www.april-child.com/amy + * + * Published under MIT License. + * Copyright (c) 2007-2008 Petr Krontorád, April-Child.com + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + * + * + * This file is auto-generated from original Fry Framework and Amy Editor sources.. + */ + + +// Generated from theme definition file. +$class('ac.chap.theme.EAmy < ac.chap.Theme'); + + ac.chap.theme.EAmy.prototype.initDefinition = function() + { +// $call(this, 'ac.chap.Theme.initDefinition');this.cssId = 'black'; +//this.background = '#072240'; +//this.textColor = '#DFEFFF'; +//this.caretColor = 'lime'; +//this.caretRowStyleActive = '#041629'; +//this.selectionStyle = '#86553b'; +//this.colorScheme[ac.chap.TOKEN_MULTIROW_COMMENT] = 'color:#0084FF;font-style:italic'; +//this.colorScheme[ac.chap.TOKEN_SINGLEROW_COMMENT] = 'color:#0084FF;font-style:italic'; +//this.colorScheme[ac.chap.TOKEN_SINGLE_QUOTED] = 'color:#00DF00'; +//this.colorScheme[ac.chap.TOKEN_DOUBLE_QUOTED] = 'color:#00DF00'; +//this.colorScheme[ac.chap.CHUNK_KEYWORD] = 'color:#FF9D00'; +//this.colorScheme[ac.chap.CHUNK_NUMBER] = 'color:#FF5B8C'; +//this.colorScheme[ac.chap.CHUNK_OPERATOR] = 'color:#FF9D00;'; +//this.colorScheme[ac.chap.CHUNK_PARENTHESIS] = 'color:#FFF177'; +//this.colorScheme[ac.chap.CHUNK_KEYWORD_CUSTOM] = 'color:#54FFB8'; +//this.colorScheme[ac.chap.CHUNK_FUNCTION_NAME] = 'color:#FFE000'; +//this.colorScheme[ac.chap.CHUNK_LIBRARY] = 'color:#71E5B6'; +//this.colorScheme[ac.chap.CHUNK_LIBRARY_CUSTOM] = 'color:#FF78E5'; +// + $call(this, 'ac.chap.Theme.initDefinition'); +this.cssId = 'twilight'; +this.background = '#141414'; +this.textColor = '#F8F8F8'; +this.caretColor = '#A7A7A7'; 
+this.caretRowStyleActive = '#1B1B1B'; +this.selectionStyle = '#3C4043'; +this.colorScheme[ac.chap.TOKEN_MULTIROW_COMMENT] = 'color:#605A60;font-style:italic'; +this.colorScheme[ac.chap.TOKEN_SINGLEROW_COMMENT] = 'color:#605A60;font-style:italic'; +this.colorScheme[ac.chap.TOKEN_SINGLE_QUOTED] = 'color:#8B9F67'; +this.colorScheme[ac.chap.TOKEN_DOUBLE_QUOTED] = 'color:#D4F29E'; +this.colorScheme[ac.chap.CHUNK_KEYWORD] = 'color:#D2A964'; +this.colorScheme[ac.chap.CHUNK_NUMBER] = 'color:#DE6848'; +this.colorScheme[ac.chap.CHUNK_OPERATOR] = 'color:#EFC25A;'; +this.colorScheme[ac.chap.CHUNK_PARENTHESIS] = 'color:#ABC4DC'; +this.colorScheme[ac.chap.CHUNK_KEYWORD_CUSTOM] = 'color:#A0849E'; +this.colorScheme[ac.chap.CHUNK_FUNCTION_NAME] = 'color:#DAD280'; +this.colorScheme[ac.chap.CHUNK_LIBRARY] = 'color:#7286A8'; +this.colorScheme[ac.chap.CHUNK_LIBRARY_CUSTOM] = 'color:#A55C29'; +} + + + +// Generated from bundle keymap definition file. +ac.chap.KeyMap.prototype.initDefinition = function() + { + var _ = '\n'; + this.compile + (""+_+ "KEY: 0" ++_+ " insert(character:true)" ++_+ "KEY: -37" ++_+ " caret(move:'left')" ++_+ "KEY: -37+shift" ++_+ " caret(move:'left')" ++_+ " selection(add:true)" ++_+ "KEY: -37+ctrl" ++_+ " caret(move:'prev_regexp', re:'[^|._A-Z ,|(|);]*$')" ++_+ "KEY: -37+alt" ++_+ " caret(move:'prev_word')" ++_+ "KEY: -37+ctrl+shift" ++_+ " caret(move:'prev_regexp', re:'[^|._A-Z ,|(|);]*$')" ++_+ " selection(add:true)" ++_+ "KEY: -37+alt+shift" ++_+ " caret(move:'prev_word')" ++_+ " selection(add:true)" ++_+ "KEY: -37+meta" ++_+ " caret(move:'row_start')" ++_+ "KEY: -37+meta+shift" ++_+ " caret(move:'row_start')" ++_+ " selection(add:true)" ++_+ "KEY: -39" ++_+ " caret(move:'right')" ++_+ "KEY: -39+shift" ++_+ " caret(move:'right')" ++_+ " selection(add:true)" ++_+ "KEY: -39+ctrl" ++_+ " caret(move:'next_regexp', re:'^[^|._A-Z ,|(|);]*')" ++_+ "KEY: -39+alt" ++_+ " caret(move:'next_word')" ++_+ "KEY: -39+ctrl+shift" ++_+ " caret(move:'next_regexp', 
re:'^[^|._A-Z ,|(|);]*')" ++_+ " selection(add:true)" ++_+ "KEY: -39+alt+shift" ++_+ " caret(move:'next_word')" ++_+ " selection(add:true)" ++_+ "KEY: -39+meta" ++_+ " caret(move:'row_end')" ++_+ "KEY: -39+meta+shift" ++_+ " caret(move:'row_end')" ++_+ " selection(add:true)" ++_+ "KEY: -38" ++_+ " caret(move:'up')" ++_+ "KEY: -38+shift" ++_+ " caret(move:'up')" ++_+ " selection(add:true)" ++_+ "KEY: -40" ++_+ " caret(move:'down')" ++_+ "KEY: -40+shift" ++_+ " caret(move:'down')" ++_+ " selection(add:true)" ++_+ "KEY: -13" ++_+ " insert(row:true)" ++_+ "KEY: -8" ++_+ " delete(character:true)" ++_+ "KEY: -46" ++_+ " delete(character:false)" ++_+ "KEY: 75+ctrl+shift" ++_+ " delete(row:true)" ++_+ "KEY: -27" ++_+ " custom(action:'WordComplete', direction:true)" ++_+ "KEY: -27+shift" ++_+ " custom(action:'WordComplete', direction:false)" ++_+ "KEY: -9" ++_+ " custom(action:'SnippetComplete')" ++_+ "KEY: 123" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'}')" ++_+ "KEY: 34" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'\"')" ++_+ "KEY: 91" ++_+ " custom(action:'AutoComplete', use_selection:true, text:']')" ++_+ "KEY: 40" ++_+ " custom(action:'AutoComplete', use_selection:true, text:')')" ++_+ "KEY: -36" ++_+ " caret(move:'doc_start')" ++_+ "KEY: -36+shift" ++_+ " caret(move:'doc_start')" ++_+ " selection(add:true)" ++_+ "KEY: -35" ++_+ " caret(move:'doc_end')" ++_+ "KEY: -35+shift" ++_+ " caret(move:'doc_end')" ++_+ " selection(add:true)" ++_+ "KEY: -34+meta" ++_+ " caret(move:'page_down')" ++_+ "KEY: -34+meta+shift" ++_+ " caret(move:'page_down')" ++_+ " selection(add:true)" ++_+ "KEY: -33+meta" ++_+ " caret(move:'page_up')" ++_+ "KEY: -33+meta+shift" ++_+ " caret(move:'page_down')" ++_+ " selection(add:true)" ++_+ "KEY: 99+meta" ++_+ " clipboard(copy:true)" ++_+ "KEY: 120+meta" ++_+ " clipboard(cut:true)" ++_+ "KEY: 122+meta" ++_+ " undo()" ++_+ "KEY: 90+meta+shift" ++_+ " redo()" ++_+ "KEY: 97+meta" ++_+ " selection(all:true)" ++_+ 
"KEY: 97+ctrl" ++_+ " selection(all:true)" ++_+ "KEY: -113" ++_+ " custom(action:'GoToBookmark', direction:1)" ++_+ "KEY: -113+shift" ++_+ " custom(action:'GoToBookmark', direction:-1)" ++_+ "KEY: -113+meta" ++_+ " custom(action:'ToggleBookmark')" ++_+ "KEY: 91+meta" ++_+ " custom(action:'Indent', direction:'left')" ++_+ "KEY: 93+meta" ++_+ " custom(action:'Indent', direction:'right')" ++_+ "KEY: 47+meta" ++_+ " custom(action:'Comment')" ++_+ "KEY: 43+meta" ++_+ " custom(action:'RuntimeOption', key:'font.size', value:'bigger')" ++_+ "KEY: 45+meta" ++_+ " custom(action:'RuntimeOption', key:'font.size', value:'smaller')" ++_+ "KEY: 101+meta" ++_+ " custom(action:'SetSearchKeyword')" ++_+ "KEY: 103+meta" ++_+ " custom(action:'SearchKeyword', direction:'down')" ++_+ "KEY: 71+shift+meta" ++_+ " custom(action:'SearchKeyword', direction:'up')" ++_+ "KEY: 102+ctrl" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: 83+ctrl+shift" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: 102+meta" ++_+ " custom(action:'SearchInteractive')" ++_+ "KEY: -13" ++_+ " custom(action:'SmartIndent', split_line:true, indent_tab_when_starts:'class module def if else unless rescue ensure while do __class__')" ++_+ "KEY: -13+meta" ++_+ " custom(action:'SmartIndent', split_line:false, indent_tab_when_starts:'class module def if else unless rescue ensure while do __class__')" ++_+ "KEY: 39" ++_+ " custom(action:'AutoComplete', use_selection:true, text:'\\'')" +)}; + +$class('ac.chap.lang.EAmy < ac.chap.Language'); + + ac.chap.lang.EAmy.prototype.initDefinition = function() + { + $call(this, 'ac.chap.Language.initDefinition'); +this.singleQuoteStringMarker = "'"; +this.singleQuoteStringMarkerException = "\\"; +this.doubleQuoteStringMarker = "\""; +this.doubleQuoteStringMarkerException = "\\"; +this.wordDelimiter = /[\w\d]/; +this.indentIgnoreMarker = /[\t \s]/; +this.foldingStartMarkers = [/^\s*def|class/i]; +this.foldingParityMarkers = [/do|(^\s*if)|(^\s*def)|(^\s*class)/i]; 
+this.foldingStopMarkers = [/^\s{0,1}$/i]; +this.singleRowCommentStartMarkers = ['#']; +this.multiRowCommentStartMarker = "\"\"\""; +this.multiRowCommentEndMarker = "\"\"\""; +this.stringInterpolation = ['(#\{[^\}]*\})', 1]; +this.chunkRules.push([/(([^\w]|^)(\d{1,}[\d\.Ee]*)([^w]|$))/i, 3, ac.chap.CHUNK_NUMBER]) +this.chunkRules.push([/(\+|\-|\*|\/|\=|\!|\^|\%|\||\&|\<|\>)/i, 0, ac.chap.CHUNK_OPERATOR]) +this.chunkRules.push([/(\(|\)|\[|\]|\{|\})/i, 0, ac.chap.CHUNK_PARENTHESIS]) +this.chunkRules.push([/(([^\w]|^)(elif|else|except|finally|for|if|try|while|with)([^\w]|$))/i, 3, ac.chap.CHUNK_KEYWORD]) +this.chunkRules.push([/(([^\w]|^)(@[\w]*|break|continue|pass|raise|return|yield|and|in|is|not|or|as|assert|del|exec|print)([^\w]|$))/i, 3, ac.chap.CHUNK_KEYWORD_CUSTOM]) +this.chunkRules.push([/((def[ ]{1,})([\w]{1,}))/i, 3, ac.chap.CHUNK_FUNCTION_NAME]) +this.chunkRules.push([/(([^\w]|^)(__import__|all|abs|any|apply|callable|chr|cmp|coerce|compile|delattr|dir|divmod|eval|execfile|filter|getattr|globals|hasattr|hash|hex|id|input|intern|isinstance|issubclass|iter|len|locals|map|max|min|oct|ord|pow|range|raw_input|reduce|reload|repr|round|setattr|sorted|sum|unichr|vars|zip|basestring|bool|buffer|classmethod|complex|dict|enumerate|file|float|frozenset|int|list|long|object|open|property|reversed|set|slice|staticmethod|str|super|tuple|type|unicode|xrange)([^\w]|$))/i, 3, ac.chap.CHUNK_LIBRARY]) +this.chunkRules.push([/(([^\w]|^)((__(all|bases|class|debug|dict|doc|file|members|metaclass|methods|name|slots|weakref)__)|(import|from| 
abs|add|and|call|cmp|coerce|complex|contains|del|delattr|delete|delitem|delslice|div|divmod|enter|eq|exit|float|floordiv|ge|get|getattr|getattribute|getitem|getslice|gt|hash|hex|iadd|iand|idiv|ifloordiv|ilshift|imod|imul|init|int|invert|ior|ipow|irshift|isub|iter|itruediv|ixor|le|len|long|lshift|lt|mod|mul|ne|neg|new|nonzero|oct|or|pos|pow|radd|rand|rdiv|rdivmod|repr|rfloordiv|rlshift|rmod|rmul|ror|rpow|rrshift|rshift|rsub|rtruediv|rxor|set|setattr|setitem|setslice|str|sub|truediv|unicode|xor))([^\w]|$))/i, 3, ac.chap.CHUNK_LIBRARY_CUSTOM]) +} +var snippet = {}; +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ifmain', code: 'if __name__ == '+"'"+'__main__'+"'"+':\n ${1:main()}$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'try', code: 'try:\n ${1:pass}\nexcept ${2:Exception}, ${3:e}:\n ${4:raise e}\nelse:\n ${5:pass}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'property', code: 'def ${1:foo}():\n doc = "${2:The $1 property.}"\n def fget(self):\n ${3:return self._$1}\n def fset(self, value):\n ${4:self._$1 = value}\n def fdel(self):\n ${5:del self._$1}\n return locals()\n$1 = property(**$1())$0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '__', code: '__${1:init}__'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '.', code: 'self.'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: '', code: ''}; +eamy.snippets.push(snippet); +//snippet = {tab_activation: 'def', code: 'def ${1:fname}(${2:`if [ "$TM_CURRENT_LINE" != "" ]\n # poor man'+"'"+'s way ... 
check if there is an indent or not\n # (cuz we would have lost the class scope by this point)\n then\n echo "self"\n fi`}):\n ${3/.+/"""/}${3:docstring for $1}${3/.+/"""\n/}${3/.+/\t/}${0:pass}'}; +//eamy.snippets.push(snippet); +snippet = {tab_activation: 'def', code: 'def ${1:fname}(${2:`if [ "$TM_CURRENT_LINE" != "" ]\n # poor man'+"'"+'s way ... check if there is an indent or not\n # (cuz we would have lost the class scope by this point)\n then\n echo "self"\n fi`}):\n ${3:}\n ${0:return dict()}'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'class', code: 'class ${1:ClassName}(${2:object}):\n ${3/.+/"""/}${3:docstring for $1}${3/.+/"""\n/}${3/.+/\t/}def __init__(self${4/([^,])?(.*)/(?1:, )/}${4:arg}):\n ${5:super($1, self).__init__()}\n${4/(\A\s*,\s*\Z)|,?\s*([A-Za-z_][a-zA-Z0-9_]*)\s*(=[^,]*)?(,\s*|$)/(?2:\t\tself.$2 = $2\n)/g} $0'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'aurm', code: '@auth.requires_membership(\'$0\'):'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'dbt', code: '${1:db_name}.define_table("${2:table_name}",\n SQLField("${3:field_name}", "${4:string/text/password/blob/upload/boolean/integer/double/time/date/datetime/db.reference_table}", ${5:length=$6}, ${7:default="$8"}),$9\n)'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'dbf', code: 'SQLField("${1:field_name}", "${2:string/text/password/blob/upload/boolean/integer/double/time/date/datetime/db.reference_table}", ${3:length=$4}, ${5:default="$6"}),$7'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'dbi', code: '${1:db_name}.${2:table_name}.insert(\n ${3:field_name}="$4" $5\n)'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 't', code: 'T("$0")'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'rev', code: 'response.view=\'$0\''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'ref', code: 'response.flash=\'$0\''}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 're', code: 
'redirect(\'$0\')'}; +eamy.snippets.push(snippet); +snippet = {tab_activation: 'rej', code: 'response.json=\'$0\''}; +eamy.snippets.push(snippet); ADDED applications/admin/static/eamy/chap-bg-sidebar.gif Index: applications/admin/static/eamy/chap-bg-sidebar.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-bg-sidebar.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/chap-bookmark-default.gif Index: applications/admin/static/eamy/chap-bookmark-default.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-bookmark-default.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/chap-folding-expand-inner.gif Index: applications/admin/static/eamy/chap-folding-expand-inner.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-folding-expand-inner.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/chap-folding-expand.gif Index: applications/admin/static/eamy/chap-folding-expand.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-folding-expand.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/chap-folding-start.gif Index: applications/admin/static/eamy/chap-folding-start.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-folding-start.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/chap-folding-stop.gif Index: applications/admin/static/eamy/chap-folding-stop.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-folding-stop.gif cannot compute difference between 
binary files ADDED applications/admin/static/eamy/chap-wrapped-row.gif Index: applications/admin/static/eamy/chap-wrapped-row.gif ================================================================== --- /dev/null +++ applications/admin/static/eamy/chap-wrapped-row.gif cannot compute difference between binary files ADDED applications/admin/static/eamy/eamy.js Index: applications/admin/static/eamy/eamy.js ================================================================== --- /dev/null +++ applications/admin/static/eamy/eamy.js @@ -0,0 +1,8143 @@ +/* + * eAmy.Offline - Amy Editor embedded for offline use. + * http://www.april-child.com/amy + * + * Published under MIT License. + * Copyright (c) 2007-2008 Petr Krontorád, April-Child.com + + Permission is hereby granted, free of charge, to any person + obtaining a copy of this software and associated documentation + files (the "Software"), to deal in the Software without + restriction, including without limitation the rights to use, + copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the + Software is furnished to do so, subject to the following + conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES + OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT + HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, + WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING + FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + OTHER DEALINGS IN THE SOFTWARE. + + * + * + * This file is auto-generated from original Fry Framework and Amy Editor sources.. 
+ */ + + + + + +/* + * AC Fry - JavaScript Framework v1.0 + * (c)2006 Petr Krontorad, April-Child.com + * Portions of code based on WHOA Bender Framework, (c)2002-2005 Petr Krontorad, WHOA Group. + * http://www.april-child.com. All rights reserved. + * See the license/license.txt for additional details regarding the license. + * Special thanks to Matt Groening and David X. Cohen for all the robots. + */ + +/* Reserving global `fry` object */ +var fry = +{ + version:1.0, + __production_mode:false +}; + +// String prototype enhancements +String.prototype.camelize = function() +{ + return this.replace( /([-_].)/g, function(){ return arguments[0].substr(1).toUpperCase();} ); +} + +String.prototype.decamelize = function() +{ + return this.replace( /([A-Z])/g, function(){ return '-'+arguments[0].toLowerCase();} ); +} + +String.prototype.trim = function() +{ + return this.replace(/(^\s*)|(\s*$)/g, '' ); +} + +String.prototype.stripLines = function() +{ + return this.replace( /\n/g, '' ); +} + +String.prototype.stripMarkup = function() +{ + return this.replace( /<(.|\n)+?>/g, '' ); +} + +String.prototype.replaceMarkup = function( charRep ) +{ + return this.replace( /(<(.|\n)+?>)/g, function() + { + var t = ''; + for ( var i=0; i/g, '>' ).replace( /' ).replace( /&/g, '&' ); +} + +String.prototype.surround = function(t, side) +{ + side = side || 3; + return (1==1&side?t:'')+this+(2==2&side?t:''); +} + +String.prototype.surroundTag = function(t) +{ + return '<'+t+'>'+this+''; +} + +// example of use: var pattern = '? 
is ?; alert(pattern.embed('Decin', 'sunny')); pattern = '@city is @weather'; alert(pattern.embed({weather:'cloudy', city:'Decin'})) +String.prototype.embed = function() +{ + var t = this; + if ( 1 == arguments.length && 'object' == typeof arguments[0] ) + { + // named placeholders + for ( var i in arguments[0] ) + { + eval('var re=/@'+i+'/g;'); + t = t.replace(re, arguments[0][i]); + } + } + else + { + // anonymous placeholders `?` + for ( var i=0; i opacity ) + { + opacity = 0; + } + if ( 1 < opacity ) + { + opacity = 1; + } + if ( $__tune.isIE ) + { + node.style.filter = 'alpha(opacity='+(100*opacity)+')'; + } + else + { + node.style.opacity = opacity; + node.style.MozOpacity = opacity; + } + }, + getPageScrollPosition:function() + { + var d = document.documentElement; + if ( d && d.scrollTop) + { + return [d.scrollLeft, d.scrollTop]; + } + else if (document.body) + { + return [document.body.scrollLeft, document.body.scrollTop]; + } + else + { + return [0, 0]; + } + } + }, + event: + { + get:function(evt, type) + { + if ( $notset(evt.target) ) + { + evt.target = evt.srcElement; + evt.stopPropagation = function() + { + window.event.cancelBubble = true; + }; + } + evt.stop = function() + { + evt.stopPropagation(); + evt.stopped = true; + } + evt.$ = $(evt.target); + if ( $notset(evt.pageX) ) + { + evt.pageX = evt.clientX + document.body.scrollLeft; + evt.pageY = evt.clientY + document.body.scrollTop; + } + evt.getOffsetX = function() + { + if ( $notset(evt.offsetX) ) + { + var pos = evt.$.abspos(); + evt.offsetX = evt.pageX - pos.x; + evt.offsetY = evt.pageY - pos.y; + } + return evt.offsetX; + } + evt.getOffsetY = function() + { + if ( $notset(evt.offsetY) ) + { + var pos = evt.$.abspos(); + evt.offsetX = evt.pageX - pos.x; + evt.offsetY = evt.pageY - pos.y; + } + return evt.offsetY; + } + evt.isAnyControlKeyPressed = function() + { + return evt.metaKey||evt.ctrlKey||evt.altKey||evt.shiftKey; + } + evt.KEY_ESCAPE = 27; + evt.KEY_ENTER = 13; + evt.KEY_ARR_RIGHT = 
39; + evt.KEY_ARR_LEFT = 37; + evt.KEY_ARR_UP = 38; + evt.KEY_ARR_DOWN = 40; + return evt; + }, + addListener:function(node, type, listener) + { + if ( $__tune.isIE && node.attachEvent ) + { + node.attachEvent('on'+type, listener); + } + else + { + node.addEventListener(type, listener, false); + } + }, + removeListener:function(node, type, listener) + { + if ( node.detachEvent ) + { + node.detachEvent('on'+type, listener); + node['on'+type] = null; + } + else if ( node.removeEventListener ) + { + node.removeEventListener(type, listener, false); + } + } + }, + behavior: + { + disablePageScroll:function() + { + if ( $notset($__tune.__prop.page_scroll) ) + { + $__tune.__prop.page_scroll = [$().s().overflow, $().ga('scroll')]; + } + $().s('overflow:hidden').sa('scroll', 'no'); + }, + enablePageScroll:function() + { + $().s('overflow:auto').sa('scroll', 'yes'); + }, + disableCombos:function() + { + $().g('select', function(node) + { + node.sa('__dis_combo', node.s().visibility); + $(node).v(false); + }); + }, + enableCombos:function() + { + $().g('select', function(node) + { + node.s({visibility:node.ga('__dis_combo') || 'visible'}); + }); + }, + clearSelection:function() + { + try + { + if ( window.getSelection ) + { + if ( $__tune.isSafari ) + { + window.getSelection().collapse(); + } + else + { + window.getSelection().removeAllRanges(); + } + } + else + { + if ( document.selection ) + { + if ( document.selection.empty ) + { + document.selection.empty(); + } + else + { + if ( document.selection.clear ) + { + document.selection.clear(); + } + } + } + } + } + catch (e) {} + }, + makeBodyUnscrollable:function() + { + $().s('position:fixed').w(fry.ui.info.page.width); + } + }, + ui: + { + scrollbarWidth:-1!=navigator.appVersion.indexOf('intosh')?15:17 + }, + selection: + { + setRange:function(el, selectionStart, selectionEnd) + { + if (el.setSelectionRange) + { + el.focus(); + el.setSelectionRange(selectionStart, selectionEnd); + } + else if (el.createTextRange) + { + var 
range = el.createTextRange(); + range.collapse(true); + range.moveEnd('character', selectionEnd); + range.moveStart('character', selectionStart); + range.select(); + } + } + } +} +// some browsers masks its presence having Gecko string somewhere inside its userAgent field... +$__tune.isGecko = !$__tune.isSafari&&!$__tune.isIE&&-1!=navigator.userAgent.indexOf('ecko'); +$__tune.isSafari2 = $__tune.isSafari && -1 != navigator.appVersion.indexOf('Kit/4'); +$__tune.isSafari3 = $__tune.isSafari && -1 != navigator.appVersion.indexOf('Kit/5'); + + +// Node manipulations + +function ACNode(node) +{ + this.$ = node; + if ( node ) + { + node.setAttribute('fryis', '1'); + } +} +// `$$` creates new node +ACNode.prototype.$$ = function(tagName) +{ + return $$(tagName); +} +// *is* - tells whether node is a part of the active DOM tree (that is displayed on page). Node may exist only in memory before appending or after removing when references, in which case node.is() will return false. +ACNode.prototype.is = function() +{ + return this.$ && null != this.$.parentNode; +} +// *i*d +ACNode.prototype.i = function(id) +{ + if ( 'undefined' == typeof id ) + { + return this.$.id||''; + } + this.$.id = id; + return this; +} +// class *n*ame +ACNode.prototype.n = function(n) +{ + if ( 'undefined' == typeof n) + { + return this.$.className||''; + } + this.$.className = n; + return this; +} +// *e*vent listener, if called with one argument only, previously registered listeners are removed +ACNode.prototype.e = function(t, c, oneUseOnly) +{ + var ser_type_id = 'fryse-'+t; + if ( !c ) + { + if ( null != this.$.getAttribute(ser_type_id) ) + { + var ser_listeners = this.$.getAttribute(ser_type_id).split(','); + // console.log('*E* removing listeners for %s, listeners: %s', t, ser_listeners); + for ( var i=0; i h ) + { + this.$.style.fontSize = '1px'; + } + return this; +} +// *s*tyle information - argument can be either "{color:'red', backgroundColor:'blue'}" or 
"'color:red;background-color:blue'" +ACNode.prototype.s = function(s) +{ + if ( 'undefined' == typeof s ) + { + return this.$.style; + } + if ( 'object' == typeof s ) + { + for ( var n in s ) + { + this.$.style[n] = s[n]; + } + } + else if ( 'string' == typeof s ) + { + if ( '' != s ) + { + var styles = s.split(';'); + for ( var i=0; idiv>table>tbody>tr>td>` and node at `td` +// to return second div you would call `gp('tr/tbody/table/div')` or `tr/table/div:1`, first div could be acquired using `table/div:2` etc. you can use `*` for any node. +ACNode.prototype.gp = function(q) +{ + if ( 'string' == typeof q ) + { + q = q.split('/'); + } + var fq = []; + for ( var i=0; i to && to>=from ) + { + self.clearInterval(t); + } + else + { + c(i, control); + if ( control.stopped ) + { + self.clearInterval(t); + } + } + i++; + }, interval); +} +// $dotimes +// ======== +// Repeats embedded code n times. +/* Usage: + $dotimes(20, function(i) + { + // your code, i is the counter parameter + }) +*/ +var $dotimes = function(n, c) +{ + for ( var i=0; i i ) + { + control.skip(); + return; + } + tr.n(0==i%2 ? 'even' : 'odd'); + if ( 20 < i ) + { + control.stop(); + } + }) +*/ +var $foreach = function(o, c) +{ + if ( !o ) + { + c = null; + return; + } + if ( 'undefined' == typeof o.length && 'function' != typeof o.__length ) + { + c = null; + return; + } + var n = 'function' == typeof o.__length ? o.__length() : o.length; + var control = + { + stopped:false, + stop:function() + { + this.stopped = true; + }, + skipped:false, + skip:function() + { + this.skipped = true; + }, + removed:false, + remove:function(stopAfterwards) + { + this.removed = true; + this.stopped = true == stopAfterwards; + } + } + // cannot just extend Array.prototype for `item()` method due bug in IE6 iteration mechanism. 
Some day (>2010 :) this might get fixed and will become obsolete + for ( var i=0; i= evt.keyCode) + { + fry.keyboard.pushKey(evt.keyCode + 32, fry.keyboard.META_KEY); + evt.preventDefault(); + return true; + } + } + } + fry.keyboard.initialized = true; + fry.keyboard.start(); +} + +fry.keyboard.start = function() +{ + fry.keyboard.stopped = false; +} + +fry.keyboard.stop = function() +{ + fry.keyboard.stopped = true; +} + +fry.keyboard.disableTextfieldsEditation = function() +{ + fry.keyboard.allowTextfieldsEditation(true); +} + +fry.keyboard.allowTextfieldsEditation = function(disable) +{ + var lst = document.getElementsByTagName('input'); + var n = lst.length; + for (var i=0; i evt.keyCode)) + { + // control code + mask++; + } + if (!fry.keyboard.pushKey(code, mask)) + { + return true; + } + } + evt.preventDefault(); + evt.stopPropagation(); + return false; +} + +fry.keyboard.paste.ff_mac = function(evt) +{ + fry.keyboard.last_down_evt = null; + // catching Command+C, Command+X, it's a FF.mac hack + if (evt.metaKey && ((67 == evt.keyCode && 0 == evt.charCode && 67 == evt.which) || (88 == evt.keyCode && 0 == evt.charCode && 88 == evt.which))) + { + return fry.keyboard.shared.copy(evt); + } + else + { + return 86 == evt.keyCode && 0 == evt.charCode && 86 == evt.which && evt.metaKey; + } +} + +fry.keyboard.down.ff_mac = function(evt) +{ + return false; +} + +fry.keyboard.press.ff_mac = function(evt) +{ + if (null != fry.keyboard.last_down_evt) + { + return; + } + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 
16 : 0); + if (!evt.charCode || (evt.charCode == evt.keyCode)) + { + // control code + fry.keyboard.pushKey(evt.keyCode, 1 + mask); + } + else + { + if (!fry.keyboard.pushKey(evt.charCode, mask)) + { + return true; + } + } + evt.preventDefault(); + evt.stopPropagation(); + return false; +} + +fry.keyboard.paste.webkit = function(evt) +{ + if ($__tune.isMac) + { + return (86 == evt.keyCode && (0 == evt.charCode || 118 == evt.charCode) && evt.metaKey); + } + else + { + return (86 == evt.keyCode && (0 == evt.charCode || 118 == evt.charCode) && evt.ctrlKey); + } +} + +fry.keyboard.down.webkit = function(evt) +{ + if (0 != evt.keyCode && (48 > evt.keyCode || (111 < evt.keyCode && 128 > evt.keyCode) || 60000 < evt.charCode)) + { + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 16 : 0); + if (!evt.charCode || 111 < evt.keyCode || 32 > evt.charCode || 60000 < evt.charCode) + { + // control code + fry.keyboard.pushKey(evt.keyCode, 1 + mask); + } + else + { + if (!fry.keyboard.pushKey(evt.charCode, mask)) + { + return true; + } + } + evt.preventDefault(); + evt.stopPropagation(); + fry.keyboard.last_down_evt = null; + return false; + } + fry.keyboard.last_down_evt = evt; + return true; +} + +fry.keyboard.press.webkit = function(evt) +{ + if (null != fry.keyboard.last_down_evt) + { + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 16 : 0); + var code = !evt.keyCode ? 
evt.charCode : evt.keyCode; + if (evt.keyCode == evt.charCode && evt.keyCode == fry.keyboard.last_down_evt.charCode && evt.keyCode > 60000) + { + code = fry.keyboard.last_down_evt.keyCode; + } + if (evt.keyCode == fry.keyboard.last_down_evt.keyCode && 48 > evt.keyCode) + { + // control code + mask++; + } + else + { + var r_mask = fry.keyboard.SHIFT_KEY + fry.keyboard.META_KEY; + if (r_mask == (mask & r_mask) && 97 <= code && 122 >= code) + { + code -= 32; + } + } + if (!fry.keyboard.pushKey(code, mask)) + { + return true; + } + } + evt.preventDefault(); + evt.stopPropagation(); + return false; +} + +fry.keyboard.paste.ie = function(evt) +{ + if (evt.ctrlKey && (67 == evt.keyCode || 88 == evt.keyCode)) + { + // ctrl+c, ctrl+x + return fry.keyboard.shared.copy(evt); + } + else + { + return false; + } +} + +fry.keyboard.down.ie = function(evt) +{ + fry.keyboard.last_down_evt = evt; + if (48 > evt.keyCode || (111 < evt.keyCode && 128 > evt.keyCode)) + { + // control code for IE + var mask = 1 + (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 16 : 0); + return !fry.keyboard.pushKey(evt.keyCode, mask) + } + else + { + var code = evt.keyCode; + // disabling some other keys (A, F, N, R, S, T) + if (82 == evt.keyCode || 65 == evt.keyCode || 83 == evt.keyCode || 70 == evt.keyCode || 78 == evt.keyCode || 84 == evt.keyCode) + { + if (!evt.shiftKey) + { + code += 32; + } + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 16 : 0); + return !fry.keyboard.pushKey(code, mask); + } + } + return true; +} + +fry.keyboard.press.ie = function(evt) +{ + if (null != fry.keyboard.last_down_evt) + { + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 
16 : 0); + return !fry.keyboard.pushKey(evt.keyCode, mask); + } + return false; +} + + +fry.keyboard.paste.opera = function(evt) +{ + return 86 == evt.keyCode && 86 == evt.which && evt.ctrlKey; +} + +fry.keyboard.down.opera = function(evt) +{ + fry.keyboard.last_down_evt = evt; + return false; +} + +fry.keyboard.press.opera = function(evt) +{ + var e = fry.keyboard.last_down_evt; + var mask = (evt.altKey ? 2 : 0) + (evt.ctrlKey ? 4 : 0) + (evt.shiftKey ? 8 : 0) + (evt.metaKey ? 16 : 0); + var prev_mask = (e.altKey ? 2 : 0) + (e.ctrlKey ? 4 : 0) + (e.shiftKey ? 8 : 0) + (e.metaKey ? 16 : 0); + if ((evt.keyCode == fry.keyboard.last_down_evt.keyCode || 0 == e.keyCode) && (0 == evt.which || 48 > e.keyCode || 111 < e.keyCode)) + { + mask++; + } + if (!fry.keyboard.pushKey(evt.keyCode, mask)) + { + return true; + } + evt.preventDefault(); + evt.stopPropagation(); + return false; +} + +fry.keyboard.addListener = function(listener) +{ + fry.keyboard.listener = listener; +} + +fry.keyboard.removeListener = function(listener) +{ + fry.keyboard.listener = null; +} + + +fry.keyboard.pushKey = function(code, mask) +{ + if (32 == code) + { + mask = mask & 65534; + } + var was_clipboard_copy = false; + var was_clipboard_cut = false; + if ($__tune.isMac) + { + was_clipboard_copy = (99 == code) && (fry.keyboard.META_KEY == (mask & fry.keyboard.META_KEY)); + was_clipboard_cut = (120 == code) && (fry.keyboard.META_KEY == (mask & fry.keyboard.META_KEY)); + } + else + { + was_clipboard_copy = (1 == code || 99 == code) && (fry.keyboard.CTRL_KEY == (mask & fry.keyboard.CTRL_KEY)); + was_clipboard_cut = (24 == code || 120 == code) && (fry.keyboard.CTRL_KEY == (mask & fry.keyboard.CTRL_KEY)); + } + if (was_clipboard_copy || was_clipboard_cut) + { + fry.keyboard.prepareClipboard(); + fry.keyboard.clipboard.copiedContent = ''; + var was_custom_content = false; + if (fry.keyboard.listener) + { + var content = fry.keyboard.listener(0, fry.keyboard.CONTROL_CODE | 
fry.keyboard.SIG_CLIPBOARD_GET); + if ('string' == typeof content) + { + was_custom_content = true; + fry.keyboard.clipboard.copiedContent = content; + } + } + if (was_custom_content) + { + fry.keyboard.clipboard.node.value = fry.keyboard.clipboard.copiedContent; + fry.keyboard.clipboard.node.select(); + fry.keyboard.clipboard.node.focus(); + } + fry.keyboard.pushKey(0, fry.keyboard.CONTROL_CODE | (was_clipboard_cut ? fry.keyboard.CUT : fry.keyboard.COPY)); + fry.keyboard.clipboard.content = fry.keyboard.clipboard.copiedContent; + // returning false will enforce propagation + return !was_custom_content; + } + if (fry.keyboard.PASTE == (mask & fry.keyboard.PASTE)) + { + fry.keyboard.clipboard.pastedContent = code; + fry.keyboard.clipboard.content = fry.keyboard.clipboard.pastedContent; + code = 0; + } + // filtering out solo-keys Ctrl, Shift, Alt that are triggered by some browsers. + if ((17 == code && 5 == (mask & 5)) || (16 == code && 9 == (mask & 9)) || (18 == code && 3 == (mask & 3))) + { + return true; + } + // filtering out Command+V on FF.mac + if (118 == code && 16 == mask) + { + return true; + } + if (fry.keyboard.listener) + { + // console.info(code); + if (13 != code && 0 != (mask & (fry.keyboard.CONTROL_CODE + fry.keyboard.META_KEY + fry.keyboard.CTRL_KEY))) + { + // some keystroke occured that we really want to know about the listener result, we have to call it immediatelly + return fry.keyboard.listener(code, mask); + } + else + { + // let's ease the pain of the browser + setTimeout('fry.keyboard.listener('+code+','+mask+')', 10); + } + } + else + { + fry.keyboard.buffer.unshift([code, mask]); + } + return true; +} + +fry.keyboard.popKey = function() +{ + return fry.keyboard.buffer.pop(); +} + +fry.keyboard.getClipboardContent = function() +{ + return fry.keyboard.clipboard.content; +} + + +/*--------*/ +var client = {conf:{fry:{backendURL:''}}}; +var eamy = +{ + snippets:[], + instances:[] +}; + +/* + * ac.Chap - Text Editing Component - Core + */ + 
+if ( 'undefined' == typeof ac ) +{ + var ac = {chap:{}}; +} + +ac.chap = +{ + state: + { + active:null + }, + + TOKEN_MULTIROW_COMMENT:0, + TOKEN_SINGLEROW_COMMENT:1, + TOKEN_SINGLE_QUOTED:2, + TOKEN_DOUBLE_QUOTED:3, + TOKEN_NEWROW:4, + TOKEN_WHITESPACE:5, + + ROWSTATE_NONE:0, + ROWSTATE_FOLD_START:1, + ROWSTATE_FOLD_STOP:2, + ROWSTATE_FOLD_EXPAND:4, + ROWSTATE_FOLD_COLLAPSED:8, + ROWSTATE_SELECTION:16, + ROWSTATE_BOOKMARK:32, + + CHUNK_KEYWORD:4, + CHUNK_NUMBER:5, + CHUNK_OPERATOR:6, + CHUNK_PARENTHESIS:7, + CHUNK_KEYWORD_CUSTOM:8, + CHUNK_FUNCTION_NAME:9, + CHUNK_LIBRARY:10, + CHUNK_LIBRARY_CUSTOM:11, + + ACTION_CARET:1, + ACTION_SELECTION:2, + ACTION_INSERT:3, + ACTION_UPDATE:4, + ACTION_DELETE:5, + ACTION_CLIPBOARD:6, + ACTION_UNDO:7, + ACTION_REDO:8, + ACTION_CUSTOM:9, + + ACTION_RES_REDRAWCARET:1, + ACTION_RES_REDRAWTEXT:2, + ACTION_RES_SELECTIONCHANGED:4, + ACTION_RES_SCROLLTOCARET:8, + + CKEY_NONE:0, + CKEY_ALT:2, + CKEY_CTRL:4, + CKEY_SHIFT:8, + CKEY_META:16, + + TRANSLOG_TYPE_INSERT:1, + TRANSLOG_TYPE_REMOVE:2, + + ACTION_LISTENER_BEFORE:1, + ACTION_LISTENER_AFTER:2, + ACTION_LISTENER_BOTH:3 + +} + + + +ac.chap.activeComponent = null; +ac.chap.instanceId = 1; + +ac.chap.getActiveComponent = function() +{ + return ac.chap.activeComponent; +} + +ac.chap.setActiveComponent = function(component) +{ + fry.keyboard.initialize(); + if (null != ac.chap.activeComponent) + { + ac.chap.activeComponent.blur(); + } + ac.chap.activeComponent = component; + if (null != component) + { + if (ac.widget) + { + ac.widget.focus(component); + } + ac.chap.activeComponent.focus(); + } + else + { + if (!ac.widget) + { + fry.keyboard.stop(); + } + } +} + +ac.chap.route = function(type, windowId, viewIndex, pars) +{ + if ( null == ac.chap.activeComponent ) + { + return; + } + if ( 'undefined' == typeof ac.chap.activeComponent.views[viewIndex] ) + { + return; + } + switch ( type ) + { + case 'expand-folding': + { + ac.chap.activeComponent.expandFolding(pars); + } + } +} + 
+ac.chap.caretThread = setInterval(function() +{ + if (null != ac.chap.activeComponent) + { + ac.chap.activeComponent.showCaret(true, true); + } +}, 600); + +$(document.documentElement).e($__tune.isSafari2?'mousedown':'click', function(evt) +{ + var elem = evt.$.$; + while ( null != elem && document.documentElement != elem ) + { + if ( 'true' == elem.getAttribute('chap-view') ) + { + evt.stop(); + return; + } + elem = elem.parentNode; + } + ac.chap.setActiveComponent(null); +}); + + +ac.chap.keyboardListener = function(code, mask) +{ + if (null == ac.chap.activeComponent) + { + return; + } + return ac.chap.activeComponent.standaloneKeyboardListener(code, mask); +} +if ('undefined' == typeof ac['widget']) +{ + // chap is not a part of Fry MVC, must handle keyboardListener itself + fry.keyboard.addListener(ac.chap.keyboardListener); +} + +$class('ac.chap.Window', +{ + construct:function(options, userId) + { + this.instanceId = ac.chap.instanceId++; + this.ident = 'ac-chap-' + this.instanceId; + this.userId = userId | 0; + this.caret = null; + this.options = null; + this.state = null; + + this.views = []; + this.activeView = null; + this.viewLayoutNodes = []; + + this.char_map = []; + this.row_id_map = []; + this.syntax_map = []; + this.style_map = []; + + this.row_id_sequence = 1; + + this.language = null; + this.keymap = null; + this.snippets = []; + this.commands = []; + + this.selection = null; + this.transaction_log = []; + this.redo_log = []; + + this.setOptions(options||{}); + this.setState(); + }, + destruct:function() + { + $delete(this.state); + $delete(this.options); + $delete(this.char_map); + $delete(this.row_id_map); + $delete(this.syntax_map); + $delete(this.style_map); + this.hide(); + $delete(this.activeView); + } +}); + +ac.chap.Window.prototype.focus = function() +{ + this.showCaret(); +} + +ac.chap.Window.prototype.blur = function() +{ + this.hideCaret(); +} + +// compatibility layer with AC Fry Widget library +ac.chap.Window.prototype.onFocus = 
function() +{ + ac.chap.setActiveComponent(this); + this.focus(); +} + +ac.chap.Window.prototype.onBlur = function() +{ + ac.chap.setActiveComponent(null); + this.blur(); +} + +ac.chap.Window.prototype.onResize = function(width, height) +{ +} + +ac.chap.Window.prototype.onSystemClipboardCopy = function() +{ + return this.getSelection(); +} + +ac.chap.Window.prototype.onSystemClipboardCut = function() +{ + this.runAction(ac.chap.ACTION_CLIPBOARD, {cut:true}); + return this.processActionResult(true, true); +} + +ac.chap.Window.prototype.onSystemClipboardPaste = function(content) +{ + this.runAction(ac.chap.ACTION_CLIPBOARD, {paste:true, content:content}); + return this.processActionResult(true, true); +} + +ac.chap.Window.prototype.hasKeyboardListenerActive = function() +{ + return true; +} +ac.chap.Window.prototype.onCut = function(selection, callbackOk) +{ +} + +ac.chap.Window.prototype.onPaste = function(selection, wasCut) +{ +} + +ac.chap.Window.prototype.setOptions = function(options) +{ + this.options = + { + initialCaretPosition:[0,0], + tokenizerLazyLaunch:900, + syntaxHighlightingEnabled:true, + remoteBackendURL:'', + font:{ + size:11, + family: $__tune.isMac ? 
"Consolas, 'Bitstream Vera Sans mono', 'Courier', 'Monaco', monospaced" : "Consolas, 'Courier New', 'Courier', monospaced", + allowedSizes: [8, 9, 10, 11, 12, 13, 14, 17, 21, 24, 27, 30, 34, 38, 42] + } + }; + if ( $isset(options.initial_caret_position) ) + { + this.options.initialCaretPosition = [options.initial_caret_position[0], options.initial_caret_position[1]]; + } + if ( $isset(options.language) ) + { + this.language = $new(options.language); + } + else + { + this.language = $new(ac.chap.Language); + } + if ( $isset(options.keymap) ) + { + this.keymap = $new(options.keymap); + } + else + { + this.keymap = $new(ac.chap.KeyMap); + } + if ( $isset(options.syntaxHighlightingEnabled) ) + { + this.options.syntaxHighlightingEnabled = options.syntaxHighlightingEnabled; + } + if ( $isset(options.remoteBackendURL) ) + { + this.options.remoteBackendURL = options.remoteBackendURL; + } + else + { + if ( client && client.conf && client.conf.fry ) + { + this.options.remoteBackendURL = client.conf.fry.backendURL; + } + } + if ($isset(options.font)) + { + if ($isset(options.font['size'])) + { + this.options.font.size = options.font.size; + } + if ($isset(options.font['family'])) + { + this.options.font.family = options.font.family; + } + } +} + +ac.chap.Window.prototype.setState = function() +{ + this.state = + { + lastKeyTimePressed:0, + caretPhase:1, + lastKeyCode:0, + lastControlKey:0, + lastCaretPosition:[], + tokenizerTimer:null, + scheduledTokenizerTime:0, + transactionLogStopped:false, + actionListeners:[], + actionListenersStopped:false, + caretListener:null, + commandListener:null, + transactionListener:[null,800], + passThroughKeysListener:null + } + this.caret = + { + position:[this.options.initialCaretPosition[0], this.options.initialCaretPosition[1]], + mode:1 // 1 normal, 2 overwrite + } +} + +ac.chap.Window.prototype.addView = function(layoutNode, options, renderAfter) +{ + var view_index = this.views.length; + this.viewLayoutNodes.push(layoutNode); + 
this.views.push($new(ac.chap.View, this, view_index, options||{})); + this.row_id_map[view_index] = []; + if ( 0 < view_index ) + { +// console.log(view_index); + // creating duplicate + var n = this.row_id_map[0].length; + for ( var i=0; i= 0; i--) + { + if (font_size > font_sizes[i]) + { + font_size = font_sizes[i]; + break; + } + } + } + else + { + if (font_sizes[0] <= value && value <= font_sizes[font_sizes.length-1]) + { + font_size = value; + } + } + this.options.font.size = font_size; + redraw = true; + } + else if ('font.family' == key) + { + this.options.font.family = value; + redraw = true; + } + else if ('word.wrap' == key) + { + if (this.activeView) + { + this.hideCaret(); + this.activeView.options.wordWrap = value; + this.activeView.reloadOptions(); + this.showCaret(); + } + } + if (redraw) + { + this.hideCaret(); + var num_views = this.views.length; + for (var i=0; i caret_col ) + { + caret_col++; + } + else + { + if ( 'undefined' != typeof this.char_map[caret_row+1] ) + { + caret_row++; + caret_col = 0; + } + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'up' == direction ) + { + if ( 0 < caret_row ) + { + var move_end = this.char_map[caret_row].length == caret_col; + if ( move_end ) + { + caret_col = this.char_map[caret_row-1].length; + } + else + { + caret_col = Math.min(this.char_map[caret_row-1].length, caret_col); + } + this.setCaretPosition(caret_row-1, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + } + else if ( 'down' == direction ) + { + if ( 'undefined' != typeof this.char_map[caret_row+1] ) + { + var move_end = this.char_map[caret_row].length == caret_col; + if ( move_end ) + { + caret_col = this.char_map[caret_row+1].length; + } + else + { + caret_col = Math.min(this.char_map[caret_row+1].length, caret_col); + } + this.setCaretPosition(caret_row+1, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + } + else if ( 'prev_word' == direction ) + { + if ( 0 < 
caret_col ) + { + var ch = this.char_map[caret_row].charAt(caret_col-1); + var re = this.language.wordDelimiter; + var look_for_wch = re.test(ch); + while ( 0 != caret_col ) + { + ch = this.char_map[caret_row].charAt(caret_col-1); + if ( look_for_wch != re.test(ch) ) + { + break; + } + caret_col--; + } + } + else + { + if ( 0 < caret_row ) + { + caret_row--; + caret_col = this.char_map[caret_row].length; + } + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'next_word' == direction ) + { + if ( this.char_map[caret_row].length > caret_col ) + { + var ch = this.char_map[caret_row].charAt(caret_col); + var re = this.language.wordDelimiter; + var look_for_wch = re.test(ch); + while ( this.char_map[caret_row].length > caret_col ) + { + ch = this.char_map[caret_row].charAt(caret_col); + if ( look_for_wch != re.test(ch) ) + { + break; + } + caret_col++; + } + } + else + { + if ( 'undefined' != typeof this.char_map[caret_row+1] ) + { + caret_row++; + caret_col = 0; + } + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'prev_regexp' == direction ) + { + if ( 0 < caret_col ) + { + var row = this.char_map[caret_row].substring(0, caret_col); + var re = new RegExp(params['re'].replace('|', '\\')); + var matches = re.exec(row); + if (0 == matches.length) + { + console.warning('Invalid RE definition for `prev_regexp\' direction in ACTION_CARET.move action in keymap.'); + caret_col--; + } + else + { + caret_col -= matches[0].length + 1; + } + } + else + { + if ( 0 < caret_row ) + { + caret_row--; + caret_col = this.char_map[caret_row].length; + } + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'next_regexp' == direction ) + { + if ( this.char_map[caret_row].length > caret_col ) + { + var row = this.char_map[caret_row].substr(caret_col + 1); + var re = new RegExp(params['re'].replace('|', '\\')); + var 
matches = re.exec(row); + if (0 == matches.length) + { + console.warning('Invalid RE definition for `next_regexp\' direction in ACTION_CARET.move action in keymap.'); + caret_col++; + } + else + { + caret_col += matches[0].length + 1; + } + } + else + { + if ( 'undefined' != typeof this.char_map[caret_row+1] ) + { + caret_row++; + caret_col = 0; + } + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'row_start' == direction ) + { + if ( 0 < caret_col ) + { + caret_col = 0; + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + } + else if ( 'row_end' == direction ) + { + if ( this.char_map[caret_row].length > caret_col ) + { + caret_col = this.char_map[caret_row].length; + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + } + else if ( 'page_up' == direction ) + { + if (this.activeView) + { + var row = caret_row - this.activeView.numRows; + if (0 > row) + { + row = 0; + } + this.setCaretPosition(row, 0 != caret_col ? this.char_map[row].length : 0); + return ac.chap.ACTION_RES_REDRAWCARET; + } + return 0; + } + else if ( 'page_down' == direction ) + { + if (this.activeView) + { + var row = caret_row + this.activeView.numRows; + if (this.char_map.length <= row) + { + row = this.char_map.length-1; + } + this.setCaretPosition(row, 0 != caret_col ? 
this.char_map[row].length : 0); + return ac.chap.ACTION_RES_REDRAWCARET; + } + return 0; + } + else if ( 'doc_start' == direction ) + { + this.setCaretPosition(0,0); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'doc_end' == direction ) + { + var last_index = this.char_map.length-1 + this.setCaretPosition(last_index, this.char_map[last_index].length-1); + return ac.chap.ACTION_RES_REDRAWCARET; + } + } + else if ( $isset(params.moveBy) ) + { + var offset = params.moveBy; + if ( 'column' == offset ) + { + //move by params.value columns, newlines are counted as column, params.value may be negative indication caret moving to the left + if ( 0 < params.value ) + { + var range_source = (this.char_map[caret_row].substr(caret_col)+'\n'+this.char_map.slice(caret_row+1).join('\n')).substr(0, params.value); + range_source = range_source.split('\n'); + caret_row += range_source.length-1; + caret_col = range_source[range_source.length-1].length + (1==range_source.length ? caret_col : 0); + } + else + { + var range_source = (this.char_map.slice(0, caret_row).join('\n')+'\n'+this.char_map[caret_row].substr(0, caret_col)); + range_source = range_source.substr(range_source.length+params.value); + range_source = range_source.split('\n'); + caret_row -= (range_source.length-1); + caret_col = (1==range_source.length ? 
caret_col : this.char_map[caret_row].length) - range_source[0].length; + } + this.setCaretPosition(caret_row, caret_col); + return ac.chap.ACTION_RES_REDRAWCARET; + } + else if ( 'row' == offset ) + { + // move by params.value rows + } + else if ( 'page' == offset ) + { + // move by params.value pages + } + } + else if ( $isset(params.moveTo) ) + { + // move to params.moveTo[0], params.moveTo[1] + this.setCaretPosition(params.moveTo[0], params.moveTo[1]); + return ac.chap.ACTION_RES_REDRAWCARET; + } + };break; + case ac.chap.ACTION_SELECTION: + { + if ( $isset(params.remove) ) + { + var changed = this.removeSelection(); + return ac.chap.ACTION_RES_REDRAWCARET | (changed ? ac.chap.ACTION_RES_REDRAWTEXT : 0); + } + else if ( $isset(params.add) ) + { + this.addSelection([caret_row, caret_col], this.state.lastCaretPosition); + this.renderSelection(); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_SELECTIONCHANGED; + } + else if ( $isset(params.all) ) + { + $__tune.behavior.clearSelection(); + this.addAllSelection(); + this.renderSelection(); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_SELECTIONCHANGED; + } + };break; + case ac.chap.ACTION_INSERT: + { + var str = null; + if ( $isset(params.row) ) + { + var ins_content = '\n'; + caret_col = 0; + // if ( 0 < caret_row ) + // { + // // indenting by previous row + // var t = this.char_map[caret_row]; + // var n = t.length; + // var re = this.language.indentIgnoreMarker; + // while ( caret_col width ) + { + // will go before [row,column] + source = this.char_map.slice(0, row).join('\n')+'\n'+this.char_map[row].substr(0, column); + return source.substr(source.length+width); + } + else + { + // will go after [row,column] + source = this.char_map.slice(row).join('\n').substr(column); + return source.substr(0, width); + } +} + +ac.chap.Window.prototype.getWordAt = function(row, column, numWords) +{ + // if numWords <0 returns words before otherwise after position. 
if omitted, default value is -1 that is word before caret + // also, if more than one word is required, returns array of words as result + row = $getdef(row, this.caret.position[0]); + column = $getdef(column, this.caret.position[1]); + numWords = $getdef(numWords, -1); + var words = []; + var re = this.language.wordDelimiter; + var required_words = numWords; + var direction_after = 0 < numWords; + var next_word = true; + required_words = Math.abs(-numWords); + if ( !direction_after ) + { + column--; + if ( -1 == column ) + { + row--; + if ( -1 == row ) + { + return words; + } + column = this.char_map[row].length-1; + } + } + while (true) + { + var ch = this.char_map[row].charAt(column) + if ( re.test(ch) ) + { + if ( next_word ) + { + words.push(''); + next_word = false; + } + // word character found + if ( direction_after ) + { + words[words.length-1] += ch; + } + else + { + words[words.length-1] = ch + words[words.length-1]; + } + } + else + { + if ( required_words == words.length ) + { + break; + } + next_word = true; + } + + column += (direction_after ? 1 : -1); + if ( 0 > column ) + { + row--; + if ( -1 == row ) + { + break; + } + next_word = true; + column = this.char_map[row].length-1; + } + else if ( this.char_map[row].length <= column ) + { + row++; + if ( this.char_map.length == row ) + { + break; + } + next_word = true; + column = 0; + } + } + if ( 1 == required_words ) + { + return words[0] ? 
words[0] : false; + } + return words; +} + +ac.chap.Window.prototype.getLineAt = function(row) +{ + return this.char_map[row]; +} + +ac.chap.Window.prototype.getText = function() +{ + return this.char_map.join('\n'); +} + +ac.chap.Window.prototype.getNumRows = function() +{ + return this.char_map.length; +} + +ac.chap.Window.prototype.getSyntaxHighlightingSource = function() +{ + if (null != this.activeView) + { + return this.activeView.getSyntaxHighlightingSource(); + } + return 'ni'; +} + +ac.chap.Window.prototype.getCaretAbsolutePosition = function() +{ + if ( null != this.activeView ) + { + var pos = this.activeView.getRenderedCharPosition(this.caret.position[0], this.caret.position[1]); + if ( null != pos ) + { + var root_pos = this.activeView.nodeScrollArea.abspos(); + return [pos[0]+root_pos.x+this.activeView.options.colWidth, pos[1]+root_pos.y+this.activeView.options.rowHeight]; + } + } + return null; +} + +ac.chap.Window.prototype.stopTransactionLog = function() +{ + this.state.transactionLogStopped = true; +} + +ac.chap.Window.prototype.startTransactionLog = function() +{ + this.state.transactionLogStopped = false; +} + +ac.chap.Window.prototype.addAllSelection = function() +{ + var num_rows = this.char_map.length; + var num_views = this.views.length; + for ( var i=0; i offset_x - mid_char_w ) + { + col_index = i; + break; + } + else if ( w - mid_char_w < offset_x && (dim[0] % w + 2*mid_char_w >= offset_x)) + { + // last char + col_index = i+1; + break; + } + } + i++; + } +// console.log('%s, %s', row_index, col_index); + if ( i == num_chars ) + { + col_index = i; + } +// console.log('CHANGE CARET to: %s', col_index); + this.hideCaret(); + if ( evt.shiftKey ) + { + this.addSelection([row_index, col_index], this.state.lastCaretPosition); + this.renderText(); + } + else + { + this.setCaretPosition(row_index, col_index); + this.state.caretPhase = 1; + view.showCaret(); + this.state.lastCaretPosition = [this.caret.position[0], this.caret.position[1]]; + if ( 
this.removeSelection() ) + { + this.renderText(); + } + } + ac.chap.setActiveComponent(this); +} + +ac.chap.Window.prototype.foldingize = function() +{ + var startRowIndex = 0; + + var source_rows = this.char_map.slice(startRowIndex); + + // creating folding info + var n = source_rows.length; + var foldings = []; + var foldings_index = -1; + for ( var i=0; i ixs[i][1] ) + { + found_marker_index = i; + lowest = ixs[i][1]; + } + } + } + if ( -1 == found_marker_index ) + { + break; + } + var start_index = ixs[found_marker_index][1]; + var skipped_source = source.substr(0, start_index); + var num_skipped_rows = skipped_source.split('\n').length; + cursor.row += num_skipped_rows - 1; + cursor.col = (1 == num_skipped_rows ? col_offset : 0) + skipped_source.length - ('\n'+skipped_source).lastIndexOf('\n'); + + if ( 'undefined' == typeof syntax_map[cursor.row] ) + { + syntax_map[cursor.row] = []; + } + + var start_marker_len = ixs[found_marker_index][2].length; + var end_marker_len = ixs[found_marker_index][3].length; + source = source.substr(start_index+start_marker_len); + + var token_type = ixs[found_marker_index][0]; + + var end_index = source.indexOf(ixs[found_marker_index][3]); + var sub_source = source; + var end_index_offset = 0; + var except = false; + while ( 0 < end_index && '' != ixs[found_marker_index][4] && ixs[found_marker_index][4] == sub_source.charAt(end_index-end_marker_len) ) + { + except = true; + end_index_offset += end_index + end_marker_len; + sub_source = sub_source.substr(end_index+end_marker_len); + end_index = sub_source.indexOf(ixs[found_marker_index][3]); + } + if ( except && -1 != end_index ) + { + end_index += end_index_offset; + } + if ( -1 == end_index ) + { + syntax_map[cursor.row].push([token_type, cursor.col, -1, '']); + fillRowTokens(token_type, cursor.row+1, -1); + break; + } + else + { + var block_source = source.substr(0, end_index); + var num_block_rows = '\n' == ixs[found_marker_index][3] ? 
1 : block_source.split('\n').length; + var cursor_col_end = block_source.length - ('\n'+block_source).lastIndexOf('\n'); + + syntax_map[cursor.row].push([token_type, cursor.col, 1 == num_block_rows ? (cursor.col+end_index+start_marker_len+end_marker_len) : -1, ixs[found_marker_index][2]]); + fillRowTokens(token_type, cursor.row+1, cursor.row+num_block_rows-1); + if ( 1 == num_block_rows ) + { + col_offset = cursor.col + end_index + start_marker_len + end_marker_len; + if ( '\n' == ixs[found_marker_index][3] ) + { + cursor.row++; + col_offset = 0; + } + } + else + { + if ( 'undefined' == typeof syntax_map[cursor.row+num_block_rows] ) + { + syntax_map[cursor.row+num_block_rows-1] = []; + } + syntax_map[cursor.row+num_block_rows-1].push([token_type, -1, cursor_col_end + end_marker_len, '']); +// var a = block_source.split('\n'); + col_offset = cursor_col_end + end_marker_len; + cursor.row += num_block_rows -1; + } +// console.log(num_block_rows); + source = source.substr(end_index+end_marker_len); + } + } + delete ixs; + delete source; + + var n = Math.max(syntax_map.length, this.syntax_map.length); + for ( i=0; i'; + } + } + return ht; + } + var nodes = this.nodeEditArea.childNodes; + var ht = ''; + for (var i=0; i') ) + { + str = str.replace(/>/g, '>'); + } + if ( -1 != str.indexOf('<') ) + { + str = str.replace( / 1 2 3 =  1 2 3 +function ch_encode_markup_spaces(str) +{ + var n = str.length - str.replace(/ /g, '').length; + for ( var i=0; i/g, '').length); + return arguments[1]+(is_inside?'~`~`~`~`':' '); + }); + } + return str.replace(/~`~`~`~`/g, ' '); +} + + +ac.chap.View.prototype.getRenderedCharDimension = function(rowIndex, colIndex) +{ + return [this.options.colWidth, this.options.rowHeight]; +} + +ac.chap.View.prototype.getRenderedStringDimension = function(rowIndex, colIndex, width) +{ + if ( 'undefined' != typeof this.window.char_map[rowIndex] ) + { + if ( colIndex < this.window.char_map[rowIndex].length ) + { + var str = 
this.window.char_map[rowIndex].substr(colIndex, width); + var ix = 0; + var tab = this.options.tabelator; + while ( -1 != ix ) + { + ix = str.indexOf('\t'); + if ( -1 != ix ) + { + str = str.substr(0,ix)+tab.substr(0, tab.length-(ix % tab.length))+str.substr(ix+1); + } + } +// console.log('(getrenderedstringdimension) = [%s], ix:%s w:%s %s', this.options.colWidth*str.length, colIndex, width, str); + return [this.options.colWidth*str.length, this.options.rowHeight]; + } + } + return [0,0]; +} + +ac.chap.View.prototype.getVirtualStringDimension = function(row, colIndex, width) +{ + if ( colIndex < row.length ) + { + var str = row.substr(colIndex, width); + var ix = 0; + var tab = this.options.tabelator; + while ( -1 != ix ) + { + ix = str.indexOf('\t'); + if ( -1 != ix ) + { + str = str.substr(0,ix)+tab.substr(0, tab.length-(ix % tab.length))+str.substr(ix+1); + } + } +// console.log('(getrenderedstringdimension) = [%s], ix:%s w:%s %s', this.options.colWidth*str.length, colIndex, width, str); + return [this.options.colWidth*str.length, this.options.rowHeight]; + } + return [0,0]; +} + +ac.chap.View.prototype.getRenderedCharPosition = function(rowIndex, colIndex) +{ + var node_row = this.getRowNode(rowIndex); + if ( null != node_row && null != node_row.parentNode ) + { + var offset_x = this.getRenderedStringDimension(rowIndex, 0, colIndex)[0]; + var offset_y = 0; + var dim = this.getRenderedCharDimension(rowIndex, colIndex); + offset_x -= dim[0]; + if ( this.options.wordWrap ) + { + if ( 0 < colIndex ) + { + var w = this.options.colWidth * (this.numCols); + offset_y = this.options.rowHeight * (Math.floor(offset_x/w)); + offset_x = (offset_x) % w; + } + } + return [offset_x, node_row.offsetTop+offset_y, node_row]; + } + return null; +} + +ac.chap.View.prototype.getRowNode = function(rowIndex) +{ + return document.getElementById('row-'+this.window.instanceId+'-'+this.index+'-'+rowIndex); +} + +ac.chap.View.prototype.getVirtualCharPosition = function(nodeRow, row, 
colIndex) +{ + var offset_x = this.getVirtualStringDimension(row, 0, colIndex)[0]; + var offset_y = 0; + var dim = this.getRenderedCharDimension(0, colIndex); + offset_x -= dim[0]; + if ( this.options.wordWrap ) + { + if ( 0 < colIndex ) + { + var w = this.options.colWidth * (this.numCols); + offset_y = this.options.rowHeight * (Math.floor(offset_x/w)); + offset_x = (offset_x) % w; + } + } + return [offset_x, nodeRow.offsetTop+offset_y]; +} + +ac.chap.View.prototype.showCaret = function(skipScroll) +{ + var caret_row = this.window.caret.position[0]; + var caret_col = this.window.caret.position[1]; + + pos = this.getRenderedCharPosition(caret_row, caret_col); + if ( null != pos ) + { + // caret is visible + var node_row = pos[2]; + var node = document.getElementById('ac-chap-caret-'+this.window.instanceId); + if ( null != node ) + { + node.parentNode.removeChild(node); + } + if ( 1 == this.window.state.caretPhase ) + { + // displaying caret + node = document.createElement('div'); + node.id = 'ac-chap-caret-'+this.window.instanceId; + node.style.position = 'absolute'; + node.style.font = '1px arial'; // IE + node.style.width = this.options.colWidth + 'px'; + node.style.height = this.options.rowHeight + 'px'; + pos[2] = this.options.colWidth; + pos[3] = this.options.rowHeight; + pos = this.theme.adjustCaretPosition(this.window.caret.mode, pos); + node.style.left = pos[0]+'px'; + node.style.top = pos[1]+'px'; + this.theme.renderCaret(this.window.caret.mode, node); + this.nodeCaret = node_row.appendChild(node); + node_row.style.background = this.theme.caretRowStyleActive; + + if ( !skipScroll ) + { + // might be out of borders, at least partially + if ( 0 > node_row.offsetTop - (this.nodeScrollArea.$.scrollTop % this.options.rowHeight) ) + { + // top margin overlay, first rendered row is partially hidden + this.scrollToRow(caret_row); + } + else if ( node_row.offsetTop > this.options.rowHeight*(this.numRows-1)-$__tune.ui.scrollbarWidth ) + { + // bottom margin overlay + 
this.scrollToRow(caret_row-Math.floor(this.numRows/2)); + } + } + this.nodeCaretRow = node_row; + } + if ('undefined' != typeof this.state.lastCaretRowIndex && this.state.lastCaretRowIndex != caret_row) + { + var last_node_row = this.getRowNode(this.state.lastCaretRowIndex); + if (last_node_row) + { + last_node_row.style.background = 'transparent'; + } + } + this.state.lastCaretRowIndex = caret_row; + } + else + { + if ( !skipScroll ) + { + // scrolling into view + this.scrollToRow(caret_row - Math.floor(this.numRows/2)); + } + } +} + +ac.chap.View.prototype.hideCaret = function(skipCaretRow) +{ + if ( null != this.nodeCaret && null != this.nodeCaret.parentNode ) + { + this.nodeCaret.parentNode.removeChild(this.nodeCaret); + this.nodeCaret = null; + } + if ( !skipCaretRow && null != this.nodeCaretRow ) + { +// console.log('off(hide) caret line background for %s', this.nodeCaretRow.id); + this.nodeCaretRow.style.background = 'transparent'; + } +// console.log('hide caret'); +} + +ac.chap.View.prototype.scrollToRow = function(rowIndex, setCaretToo, dontRefreshCaret) +{ + this.nodeScrollArea.$.scrollTop = this.options.rowHeight * rowIndex - Math.floor(this.nodeRoot.$.offsetHeight/3); + if (setCaretToo) + { + this.window.runAction(ac.chap.ACTION_CARET, {moveTo:[rowIndex, 0]}); + this.window.runAction(ac.chap.ACTION_CARET, {move:'row_end'}); + } + if (!dontRefreshCaret) + { + this.window.state.caretPhase = 1; + this.showCaret(true); + } +} + +ac.chap.View.prototype.expandFolding = function(rowIndex) +{ + if ( 'undefined' == typeof this.window.row_id_map[this.index][rowIndex] ) + { + return; + } + var row_state = this.window.row_id_map[this.index][rowIndex][2]; + if (0 == (ac.chap.ROWSTATE_FOLD_EXPAND & row_state)) + { + return; + } + var end_row_index = this.window.row_id_map[this.index][rowIndex][3][1]; + this.window.row_id_map[this.index][rowIndex][2] &= (65535 - ac.chap.ROWSTATE_FOLD_EXPAND); + this.window.row_id_map[this.index][rowIndex][1] = false; + for ( var 
i=rowIndex+1; i<=end_row_index; i++ ) + { + this.window.row_id_map[this.index][i][1] = false; + this.window.row_id_map[this.index][i][2] &= (65535 - ac.chap.ROWSTATE_FOLD_COLLAPSED); + } + this.recalculateVisibleRows(); + var me = this; + // console.log('expanding: %i, start: %i', rowIndex, end_row_index); + $runafter(40, function(){me.renderText(true)}); +} + +ac.chap.View.prototype.resize = function() +{ + var h = this.nodeRoot.p().h(); + this.nodeRoot.h(h); + $(this.nodeSidebar).h(h); + this.nodeScrollArea.h(h); + this.nodeFillArea.h(h-$__tune.ui.scrollbarWidth); + $(this.nodeSelectionArea).h(h-$__tune.ui.scrollbarWidth+this.options.rowHeight); + this.recalculateNumRows(); + this.recalculateVisibleRows(); + this.renderText(true); +} + +ac.chap.View.prototype.reloadOptions = function() +{ + this.calculateColRowDim(); + this.recalculateNumCols(false, true); + this.recalculateNumRows(); + this.recalculateVisibleRows(); + this.renderSidebarStub(); + this.renderText(true); +} + +ac.chap.View.prototype.recalculateNumCols = function(node, withoutScrollbar) +{ + node = node || this.nodeRoot; + var w = node.$.offsetWidth; + if (withoutScrollbar) + { + w -= $__tune.ui.scrollbarWidth+61; + } + this.numCols = Math.floor(w/this.options.colWidth); +} + +ac.chap.View.prototype.recalculateNumRows = function(node) +{ + node = node || this.nodeRoot; + this.numRows = Math.floor(node.$.offsetHeight/this.options.rowHeight); +} + +ac.chap.View.prototype.showInteractiveSearch = function() +{ + this.hideInteractiveSearch(); + var pos = this.nodeRoot.abspos(); + var node = $().a($$()).pos(true).x(pos.x+58).y(pos.y).z(2000).w(this.nodeRoot.w()-$__tune.ui.scrollbarWidth-61).h(24).o(0.8); + node.s('background:#000;border:1px solid #777;border-top:0;'); + var search_key_id = 'is_key_?'.embed(this.window.ident); + var ht = '
'; + node.t(ht); + var me = this; + var status_node = node.g('td:0'); + var search_key_node = node.g('input:0'); + var original_caret_pos = [me.window.caret.position[0], me.window.caret.position[1]]; + var last_keyword = ''; + var selection = me.window.getSelection(); + search_key_node.e('keydown', function(evt) + { + evt.stopPropagation(); + if (40 == evt.keyCode) + { + me.window.runAction(ac.chap.ACTION_CUSTOM, {action:'SearchKeyword', direction:'down'}); + me.scrollToRow(me.window.caret.position[0], false, true); + me.window.processActionResult(true, true); + evt.preventDefault(); + return true; + } + else if (38 == evt.keyCode) + { + me.window.runAction(ac.chap.ACTION_CUSTOM, {action:'SearchKeyword', direction:'up'}); + me.scrollToRow(me.window.caret.position[0], false, true); + me.window.processActionResult(true, true); + evt.preventDefault(); + return true; + } + else if (27 == evt.keyCode) + { + finish(true); + evt.preventDefault(); + return true; + } + else if (13 == evt.keyCode) + { + finish(); + evt.preventDefault(); + return true; + } + }).e('keyup', function(evt) + { + evt.stopPropagation(); + search(); + + }).$.focus(); + node.g('img:0').e('click', function(evt) + { + evt.stopPropagation(); + finish(true); + }); + + function finish(canceled) + { + if (canceled) + { + me.window.removeSelection(); + me.window.runAction(ac.chap.ACTION_CARET, {moveTo:[original_caret_pos[0], original_caret_pos[1]]}); + me.scrollToRow(me.window.caret.position[0], false, true); + me.window.processActionResult(true, true); + } + ac.chap.setActiveComponent(me.window); + me.hideInteractiveSearch(); + } + + function update_status(numFound) + { + status_node.t('Found ? 
results.'.embed(numFound)); + } + + function search() + { + var keyword = search_key_node.$.value.trim(); + if ('' == keyword || last_keyword == keyword) + { + if ('' == keyword) + { + update_status(0); + } + return; + } + update_status(me.window.getText().split(keyword).length - 1); + last_keyword = keyword; + me.window.removeSelection(); + me.window.runAction(ac.chap.ACTION_CARET, {moveTo:[original_caret_pos[0], original_caret_pos[1]]}); + me.window.runAction(ac.chap.ACTION_CUSTOM, {action:'SetSearchKeyword', keyword:keyword}); + me.window.runAction(ac.chap.ACTION_CUSTOM, {action:'SearchKeyword', direction:'down'}); + me.scrollToRow(me.window.caret.position[0], false, true); + me.window.processActionResult(true, true); + } + + if (null != selection) + { + search_key_node.$.value = selection; + search_key_node.$.select(); + search_key_node.$.focus(); + search(); + } + + + + this.interactiveSearchNode = node; +} + +ac.chap.View.prototype.hideInteractiveSearch = function() +{ + if (this.interactiveSearchNode && this.interactiveSearchNode.is()) + { + this.interactiveSearchNode.rs(); + } +} + +ac.chap.View.prototype.render = function(node) +{ + var w = node.$.offsetWidth; + var h = node.$.offsetHeight; + this.recalculateNumRows(node); + this.recalculateNumCols(node); + node.sa('chap-view', 'true'); + var me = this; + this.nodeRoot = node.a($$()).pos(true).w(w).h(h).n('acw-chap').s('background:?'.embed(this.theme.background)); + this.interactiveSearchNode = null; + + var w_rows = 58; + w -= w_rows; + this.nodeSidebar = this.nodeRoot.a($$()).pos(true).x(0).y(0).w(w_rows).h(h).s('overflow:hidden').n('sidebar'); + + // rendering sidebar stub + this.renderSidebarStub(); + + + this.nodeScrollArea = this.nodeRoot.a($$()).pos(true).x(w_rows).y(0).w(w).h(h).n('scroll-area').s('overflow:auto'); + this.nodeScrollArea.e('scroll', function(evt) + { + var offset = Math.floor(me.nodeScrollArea.$.scrollTop/me.options.rowHeight); + var map = me.window.row_id_map[me.index]; + var 
row_index = 0; + for ( var i=0; i' + ch_encode_markup(token[1]) + ''; + } + else + { + rend_chunk += ch_encode_markup(token[1]); + } + i += token[1].length - 1; + // offset = i + token[1].length; + offset = i + 1; + // console.log(token, offset, rend_chunk); + } + rend_chunk += ch_encode_markup(chunk.substr(offset)); + chunk = rend_chunk; + } + else + { + chunk = ch_encode_markup(chunk); + } + return chunk; +} + +ac.chap.View.prototype.renderTextRow = function(node, rowIndex, renderedPreviously) +{ + var row = this.window.char_map[rowIndex]; + var rendered_row = ''; + var offset = 0; + var font_style = ';font:' + this.window.options.font.size + 'px ' + this.window.options.font.family; + var interpolation = this.window.language.stringInterpolation; + + var row_state = this.window.row_id_map[this.index][rowIndex][2]; + + if ( 'undefined' != typeof this.window.syntax_map[rowIndex] && 0 < this.window.syntax_map[rowIndex].length ) + { + // console.log(this.window.syntax_map[rowIndex]); + var n = this.window.syntax_map[rowIndex].length; + for ( var i=0; i' + ch_encode_markup(chunk.substring(0, m.index)) + ''; + new_chunk += this.renderChunk(chunk.substr(m.index, m[interpolation[1]].length)); + chunk = chunk.substr(m.index + m[interpolation[1]].length); + // console.warn(chunk); + } + new_chunk += '' + ch_encode_markup(chunk) + ''; + // console.info(new_chunk); + rendered_row += new_chunk; + } + else + { + rendered_row += ''+ch_encode_markup(chunk)+''; + // console.log(rendered_row); + } + } + else + { + rendered_row += ch_encode_markup(chunk); + } + offset = -1 == end_offset ? row.length : end_offset; + } + } + rendered_row += this.renderChunk(row.substr(offset)); + // console.log(rendered_row); + // rendering custom selection (search results, errors and such) + // !!!!!!!!!! + // NOT USED NOW !!!!! + // !!!!!!!!!! + // !!!!!!!!!! + // !!!!!!!!!! + // !!!!!!!!!! 
+ if ( false && ac.chap.ROWSTATE_SELECTION == (row_state & ac.chap.ROWSTATE_SELECTION) ) + { + var range = this.window.row_id_map[this.index][rowIndex][5]; + if ( 0 == range[0] && this.window.char_map[rowIndex].length == range[1] ) + { + rendered_row = ''+rendered_row+''; + } + else + { + // console.log(range); + var raw = rendered_row; + var n = raw.length; + var col_index = 0; + var selection_started = false; + var offset = 0; + // console.log('before: %s', raw); + for ( var i=0; i'); + i += ix; + // console.log('ix: %s', ix); + + if ( selection_started ) + { + var c = ''; + rendered_row = rendered_row.substr(0, i+offset+1)+c+rendered_row.substr(i+offset+1); + offset += c.length; + } + continue; + } + if ( range[0] == col_index ) + { + selection_started = true; + var c = ''; + rendered_row = rendered_row.substr(0, i+offset)+c+rendered_row.substr(i+offset); + offset += c.length; + } + if ( selection_started && range[1]-1 < col_index ) + { + selection_started = false; + var c = ''; + rendered_row = rendered_row.substr(0, i+offset)+c+rendered_row.substr(i+offset); + break; + } + if ( '&' == ch ) + { + if ( '<' == raw.substr(i, 4) || '>' == raw.substr(i, 4) ) + { + i += 3; + } + else if ( '&' == raw.substr(i, 5) ) + { + i += 4; + } + } + col_index++; + } + if ( selection_started ) + { + rendered_row += ''; + } + } + } +// console.log(rendered_row); + // making intelligent tabelators - note, using simple replace of \t doesn't work + var ix = 0; + var tab = this.options.tabelator; + var raw = this.window.char_map[rowIndex]; + var tab_stack = []; + while ( -1 != ix ) + { + ix = raw.indexOf('\t'); + if ( -1 != ix ) + { + var tab_length = tab.length - (ix % tab.length); + raw = raw.substr(0,ix)+tab.substr(0, tab_length)+raw.substr(ix+1); + tab_stack.push(tab_length); + } + } + for ( var i=0; i'); + if ( -1 == ix ) + { + break; + } + i += ix; + continue; + } + var n_ch = 0; + if ( '&' == ch ) + { + if ( '<' == rendered_row.substr(i,4) || '>' == rendered_row.substr(i,4) ) + 
{ + n_ch = 3; + } + else if ( '&' == rendered_row.substr(i,5) ) + { + n_ch = 4; + } + } + printable += ch.charAt(0); + if ( this.numCols == printable.length ) + { + raw = raw.substr(0, i+offset+n_ch)+'
'+raw.substr(i+offset+n_ch); + num_subrows++; + offset += 4; + printable = ''; + } + i += n_ch; + } + rendered_row = raw; + } + if ( ac.chap.ROWSTATE_FOLD_EXPAND == (row_state & ac.chap.ROWSTATE_FOLD_EXPAND) ) + { + var end_index = this.window.row_id_map[this.index][rowIndex][3][1]; + var content = ch_encode_markup(this.window.char_map.slice(rowIndex, end_index+1).join('\n').replace(/"/ig, "''")); + rendered_row += '
'.embed(content); + } + node.setAttribute('num-subrows', num_subrows); + + if ( $__tune.isIE ) + { + // IE trims input source in innerHTML + rendered_row = ch_encode_markup_spaces(rendered_row); + } + node.innerHTML = rendered_row; +} + +ac.chap.View.prototype.recalculateVisibleRows = function() +{ + var map = this.window.row_id_map[this.index]; + var n = map.length; + var i = 0; + var num_visibles = 0; + while ( i < n ) + { + var state = map[i][2]; + if ( 0 == (ac.chap.ROWSTATE_FOLD_COLLAPSED & state) ) + { + num_visibles++; + } + i++; + } + this.numVisibleRows = num_visibles; +} + +ac.chap.View.prototype.getVisibleRowIndices = function() +{ + var map = this.window.row_id_map[this.index]; + var i = 0; + var index = this.startRow; + var indices = []; + while ( i++ <= this.numRows ) + { + if ( 'undefined' == typeof this.window.row_id_map[this.index][index] ) + { + break; + } + var state = this.window.row_id_map[this.index][index][2]; + if ( ac.chap.ROWSTATE_FOLD_COLLAPSED == (ac.chap.ROWSTATE_FOLD_COLLAPSED & state) ) + { + // collapsed + i--; + index++; + continue; + } + indices.push(index); + if ( ac.chap.ROWSTATE_FOLD_EXPAND == (state & ac.chap.ROWSTATE_FOLD_EXPAND) ) + { + // collapsed folding + var refered_row_index = this.window.row_id_map[this.index][index][3][1]; + index = refered_row_index + 1; + } + else + { + index++; + } + } + return indices; +} + +ac.chap.View.prototype.renderRowSidebar = function(position, rowIndex, rowNode, forceCompleteRedraw) +{ + if (!this.nodeSidebar.firstChild.childNodes.item(position)) + { + return; + } + var bar_node = this.nodeSidebar.firstChild.childNodes.item(position).firstChild; + if ( 0 == rowNode.offsetHeight ) + { + rowNode.style.height = this.options.rowHeight; + } + var num_subrows = parseInt(rowNode.getAttribute('num-subrows')); + var cache_id = forceCompleteRedraw ? 
'none' : (num_subrows+':'+this.window.row_id_map[this.index][rowIndex].join('-')); + if (bar_node.getAttribute('sidebar-cache-id') == cache_id && 'none' != cache_id) + { + return; + } + if ('none' != cache_id) + { + bar_node.setAttribute('sidebar-cache-id', cache_id); + bar_node.firstChild.style.fontSize = (this.window.options.font.size-2) + 'px'; + } + // console.log(cache_id); + + var row_height = num_subrows * this.options.rowHeight; + bar_node.parentNode.style.height = row_height + 'px'; + if (forceCompleteRedraw) + { + bar_node.firstChild.style.fontSize = (this.window.options.font.size-2) + 'px'; + } + var ht = rowIndex+1; + if ( this.options.wordWrap ) + { + var htt = '
'; + for ( var i=1; i this.numCols ) + { + ix_r++; + ix_c -= this.numCols; + } + if ( ii == range[1] ) + { + offset[2] = ix_r; + offset[3] = ix_c; + break; + } + } + if ( -1 == offset[3] ) + { + offset[2] = ix_r+1;//offset[0]+1; + } + node_row_selection.style.top = (node_row.offsetTop + this.options.rowHeight*offset[0]) + 'px'; +// console.log('%o', offset); + if ( offset[0] == offset[2] ) + { + // selection stays non-wrapped + node_row_selection.style.left = offset[1]*this.options.colWidth + 'px'; + node_row_selection.style.width = ((offset[3]-offset[1])*this.options.colWidth) + 'px'; +// console.log(node_row_selection.style.width); + } + else + { + // finishing current node + if ( -1 == offset[1] ) + { + // caret stays on the end of the row + node_row_selection.style.left = (ix_c*this.options.colWidth) + 'px'; + node_row_selection.style.width = (node_row.offsetWidth - (ix_c*this.options.colWidth)) + 'px'; + } + else + { + node_row_selection.style.left = (offset[1]*this.options.colWidth) + 'px'; + node_row_selection.style.width = (node_row.offsetWidth - (offset[1]*this.options.colWidth)) + 'px'; + } + // marking as non-cacheable + node_row_selection.removeAttribute('cachid'); + // creating additional ones + for ( ii=offset[0]+1; ii<=offset[2]; ii++ ) + { + node_row_selection = node_cache.appendChild(document.createElement('div')); + node_row_selection.style.background = this.theme.selectionStyle; + node_row_selection.style.position = 'absolute'; + node_row_selection.style.left = '0px'; + node_row_selection.style.top = (node_row.offsetTop+ii*this.options.rowHeight) + 'px'; + node_row_selection.style.height = this.options.rowHeight + 'px'; + if ( ii != offset[2] ) + { + node_row_selection.style.width = node_row.offsetWidth + 'px'; + } + else + { + node_row_selection.style.width = ((offset[3])*this.options.colWidth) + 'px'; + } + } + } + + } + else + { + var offset_x1 = this.getRenderedStringDimension(row_index, 0, range[0])[0]; + var offset_x2 = 
this.getRenderedStringDimension(row_index, 0, range[1]+1)[0]; + node_row_selection.style.left = offset_x1 + 'px'; + node_row_selection.style.width = (offset_x2 - offset_x1) + 'px'; + } + } + } + // console.log('selection after range: %o', this.window.row_id_map[this.index][row_index][3]); + + } + } + // console.log('%o', this.window.row_id_map[this.index][0]); + var ht = node_cache.innerHTML; + this.nodeSelectionArea.innerHTML = ht; +} + +ac.chap.View.prototype.renderText = function(forceCompleteRedraw) +{ + var row_indices = this.getVisibleRowIndices(); + var num_rows = row_indices.length; +// console.log('view: %s - row indices: %o', this.index, row_indices); + +// console.log('view: %s - num rows x cols [%s x %s]', this.index, this.numRows, this.numCols); + // checking to see if only one row changed - the most usual case + var changed_row_index = -1; + var changed_row_position = -1; + for ( var i=0; i fill_area_h ) + { + fill_area_h = this.nodeRoot.h()-$__tune.ui.scrollbarWidth; + } + this.nodeFillArea.h(fill_area_h); + + } + else + { + this.nodeEditAreaCache.innerHTML = ''; + } + if ( parseInt(this.nodeSidebar.firstChild.style.top) != top_offset ) + { + this.nodeSidebar.firstChild.style.top = (top_offset)+'px'; + this.nodeSidebar.firstChild.style.height = (this.nodeSidebar.offsetHeight - $__tune.ui.scrollbarWidth - top_offset)+'px'; + } + this.renderSelection(); +} + + + +/* + * ac.Chap - Text Editing Component widget - Settings file + */ + +if ( 'undefined' == typeof ac ) +{ + var ac = {chap:{}}; +} + + +$class('ac.chap.KeyMap', +{ + construct:function() + { + this.definition = {}; + this.isMac = true; + this.wordCompleteCache = null; + this.snippetCompleteCache = null; + this.searchKeyword = ''; + this.initDefinition(); + } +}); + +ac.chap.KeyMap.prototype.initDefinition = function() +{ +} + +ac.chap.KeyMap.prototype.importCommands = function(commands) +{ + var n = commands.length; + for ( var i=0; i looking_for_len && words_prev[i+1].substr(0, 
looking_for_len) == looking_for ) + { + if ( -1 == found_words_index.indexOf(' '+words_prev[i+1]) ) + { + found_words.push(words_prev[i+1]); + found_words_index += ' '+words_prev[i+1]; + } + } + if ( words_next[i] && words_next[i].length > looking_for_len && words_next[i].substr(0, looking_for_len) == looking_for ) + { + if ( -1 == found_words_index.indexOf(' '+words_next[i]) ) + { + found_words.push(words_next[i]); + found_words_index += ' '+words_next[i]; + } + } + } + if ( 1 < found_words.length ) + { +// console.log('results found: %o', found_words); + wcc.results = found_words; + wcc.index = 0; + proceed_complete = true; + } + } + else + { + proceed_complete = true; + } + var num_results = wcc.results.length; + if ( proceed_complete && 0 < num_results ) + { + var index = wcc.index; + index += params.direction ? 1 : -1; + if ( num_results <= index ) + { + index = 0; + } + else if ( 0 > index ) + { + index = num_results-1; + } +// console.log('n:%s i:%s', num_results, index); + // let's not add the following operation to the transaction/undo log + component.stopTransactionLog(); + component.runAction(ac.chap.ACTION_CARET, {move:'prev_word'}); + component.runAction(ac.chap.ACTION_SELECTION, {add:true}); + component.runAction(ac.chap.ACTION_DELETE, {character:false}); + component.runAction(ac.chap.ACTION_INSERT, {string:wcc.results[index]}); + component.startTransactionLog(); + wcc.index = index; + wcc.position = [component.caret.position[0], component.caret.position[1]]; + } + else + { + wcc.results = []; + } + this.wordCompleteCache = wcc; + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT; +} + +ac.chap.KeyMap.prototype.getAffectedRows = function(component, caretRow) +{ + var from_row = caretRow; + var to_row = caretRow; + if (null != component.selection) + { + var start_pos = component.selection.startPosition[0]; + var end_pos = component.selection.endPosition[0]; + if (-1 == component.selection.endPosition[1]) + { + end_pos--; + } + 
from_row = Math.min(start_pos, end_pos); + to_row = Math.max(start_pos, end_pos); + } + return [from_row, to_row]; +} + +ac.chap.KeyMap.prototype.action_Indent = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + var affected_rows = this.getAffectedRows(component, caretRow); + var tab = component.getTabelator(); + for (var i=affected_rows[0]; i<=affected_rows[1]; i++) + { + if ('right' == params.direction) + { + component.insertIntoCharacterMap(tab, i, 0); + } + else + { + var row = component.char_map[i]; + var index = 0; + while (('\t' == row.charAt(index) || ' ' == row.charAt(index)) && (row.length > index) && (tab.length > index)) index++; + if (0 < index) + { + component.removeFromCharacterMap(i, 0, i, index); + } + } + } + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT | ac.chap.ACTION_RES_SELECTIONCHANGED; +} + +ac.chap.KeyMap.prototype.action_Comment = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + if (!component.language) + { + return 0; + } + var markers = component.language.singleRowCommentStartMarkers; + if (0 == markers.length) + { + return 0; + } + var marker = markers[0]; + var tab = component.getTabelator(); + var affected_rows = this.getAffectedRows(component, caretRow); + var tab = component.getTabelator(); + var prepend_text = marker + ' '; + for (var i=affected_rows[0]; i<=affected_rows[1]; i++) + { + var row = component.char_map[i]; + var index = row.indexOf(marker); + if (-1 != index && 0 == row.substring(0, index).replace(tab, '').replace(' ', '')) + { + // was commented + component.removeFromCharacterMap(i, 0, i, index+marker.length); + } + else + { + // will be commented + component.insertIntoCharacterMap(marker, i, 0); + } + } + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT | ac.chap.ACTION_RES_SELECTIONCHANGED; +} + +ac.chap.KeyMap.prototype.action_RuntimeOption = function(keyCode, controlKeysMask, caretRow, caretCol, component, 
params) +{ + component.setRuntimeOption(params['key'], params['value']); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT | ac.chap.ACTION_RES_SELECTIONCHANGED; +} + +ac.chap.KeyMap.prototype.action_SearchInteractive = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + component.showInteractiveSearch(); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_SELECTIONCHANGED; +} + +ac.chap.KeyMap.prototype.action_SetSearchKeyword = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + if (params['keyword']) + { + this.searchKeyword = params['keyword']; + return 0; + } + if (component.selection && component.selection.startPosition[0] == component.selection.endPosition[0]) + { + this.searchKeyword = component.getSelection(); + return ac.chap.ACTION_RES_SELECTIONCHANGED; + } + return 0; +} + +ac.chap.KeyMap.prototype.action_SearchKeyword = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + if ('' == this.searchKeyword) + { + return 0; + } + row_index = caretRow; + var index = 0; + var search_down = 'down' == params['direction']; + do + { + var row = component.char_map[row_index]; + var offset = 0; + console.log(ac.chap.activeComponent.activeView); + if (row_index == caretRow) + { + if (search_down) + { + // if (row.substring(caretCol, this.searchKeyword.length) == this.searchKeyword) + // { + // // offset = this.searchKeyword.length; + // } + row = row.substr(caretCol) + offset += caretCol; + } + else + { + if (row.substring(caretCol-this.searchKeyword.length, caretCol)) + { + offset = this.searchKeyword.length; + } + row = row.substring(0, caretCol - offset); + offset = 0; + } + } + index = search_down ? 
row.indexOf(this.searchKeyword) : row.lastIndexOf(this.searchKeyword); + if (-1 != index) + { + index += offset; + component.runAction(ac.chap.ACTION_SELECTION, {remove:true}); + component.runAction(ac.chap.ACTION_CARET, {moveTo:[row_index, index]}); + component.runAction(ac.chap.ACTION_CARET, {store:true}); + component.runAction(ac.chap.ACTION_CARET, {moveTo:[row_index, index+this.searchKeyword.length]}); + component.runAction(ac.chap.ACTION_SELECTION, {add:true}); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_SELECTIONCHANGED | ac.chap.ACTION_RES_SCROLLTOCARET; + } + row_index += search_down ? 1 : -1; + if (search_down && row_index == component.char_map.length) + { + row_index = 0; + } + else if (!search_down && -1 == row_index) + { + row_index = component.char_map.length-1; + } + } + while (caretRow != row_index); + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_SELECTIONCHANGED; +} + +ac.chap.KeyMap.prototype.action_SmartIndent = function(keyCode, controlKeysMask, caretRow, caretCol, component, params) +{ + // console.log(params); + var line = component.getLineAt(caretRow); + var m = params['indent_tab_when_ends'] ? line.match(/^([ \t]*)(.*)$/) : line.match(/^([ \t]*)([^ \t]*)/); + // console.log(m); + var prepend_text = m[1]; + if (params['indent_tab_when_ends'] || params['indent_tab_when_starts']) + { + m[2] = m[2].trim(); + var indent_when_values = params['indent_tab_when_ends'] ? 
params['indent_tab_when_ends'].split(' ') : params['indent_tab_when_starts'].split(' '); + for (var i=0; i#'); + code = code.substr(start_ix+4, m[3].length)+'##'+code.substr(start_ix+m[0].length-m[1].length); + any_change = true; + } + else + { + break; + } + } + code_chunks.push(code); + code = code_chunks.join(''); + } + re = /##/; + while ( true ) + { + m = re.exec(code) + if ( null == m ) + { + break; + } + tabstops[m[1]][1] = tabstops[m[1]][1].replace(/##/g, ''); + tabstops[m[1]][2] = m.index; + tabstops[m[1]][3] = tabstops[m[1]][1].length; + code = code.substr(0, m.index)+code.substr(m.index+m[0].length).replace('##', ''); + } + + // [getting mirrors] + re = /\{\$\{(\d)\:([^\}]*)\}\}/; + while ( true ) + { + m = re.exec(code) + if ( null == m ) + { + break; + } + code = code.substr(0, m.index)+m[2]+code.substr(m.index+m[0].length); + var ix_end = code.indexOf('{$'+m[1]+'}'); + if ( -1 == ix_end ) + { + console.error('Invalid snippet definition. Mirror `?` does not have `{$?}` mirrored location specified.'.embed(m[1], m[1])); + break; + } + tabstops[m[1]] = ['mi', m[2], m.index, m[2].length, ix_end]; + if ( m.index > ix_end ) + { + console.error('Unsupported feature. Mirror `?` should have mirrored location positioned AFTER itself.'.embed(m[1])); + } + code = code.substr(0, ix_end)+code.substr(ix_end+4); + } + + // [getting tabstops] + re = /(^|[^\\])\$(\d)/; + while (true) + { + m = re.exec(code); + if ( null == m ) + { + break; + } + tab_id = m[2]; + if ( tabstops[tab_id] ) + { + console.error('Invalid snippet definition. Tabstop `?` already defined as placeholder at position `?`. 
Snippet source: `?`.'.embed(tab_id, m.index, code)); + break; + } + var start_ix = m.index+m[1].length; + tabstops[tab_id] = ['ts', '', start_ix, 0]; + code = code.substr(0, start_ix)+code.substr(start_ix+2); + var offset = m[1].length + 2; + for ( var tab_id in tabstops ) + { + // console.log('adjusting: %s, %s < %s', tab_id, start_ix, tabstops[tab_id][2]); + if ( 'mi' == tabstops[tab_id][0] ) + { + if ( start_ix < tabstops[tab_id][2] ) + { + tabstops[tab_id][2] -= offset; + } + if ( start_ix < tabstops[tab_id][4] ) + { + tabstops[tab_id][4] -= offset; + } + } + else if ( 'ph' == tabstops[tab_id][0] && start_ix < tabstops[tab_id][2] ) + { + tabstops[tab_id][2] -= offset; + } + } + } + // $0 not defined, will be at the end of the snippet by default + if ( !tabstops[0] ) + { + tabstops[0] = ['ts', '', code.length, 0]; + } + + // [postprocessing - unescape] + code = code.replace(/\0/g, '$'); + + + for ( var tab_id in tabstops ) + { + var placeholder = tabstops[tab_id]; +// console.log('#%s : %o', tab_id, placeholder); + } + var scc = + { + firstInitialized:true, + insertCaretPosition:[caretRow, caretCol], + tabstops: tabstops, + callbackIndex: -1, + activeTabStopIndex:tabstops[1] ? 
1 : 0, + activeTabStopRange:[], + activeTabStopContent:'', + activeTabStopNested:[], + wasSelection:wasSelection, + tabActivation:tabActivation + } + + if ( !scc.wasSelection ) + { + component.runAction(ac.chap.ACTION_CARET, {move:'prev_word'}); + component.runAction(ac.chap.ACTION_SELECTION, {add:true}); + component.runAction(ac.chap.ACTION_DELETE, {character:true}); + } + + var tabstop = tabstops[scc.activeTabStopIndex]; + + + component.runAction(ac.chap.ACTION_INSERT, {string:code.substr(0, tabstop[2])}); + component.runAction(ac.chap.ACTION_INSERT, {string:code.substr(tabstop[2]), skipCaretChange:true}); + + // selecting default value + var selection_changed = false; + if ( '' != tabstop[1] ) + { + component.runAction(ac.chap.ACTION_CARET, {store:true}); + component.runAction(ac.chap.ACTION_CARET, {moveBy:'column', value:tabstop[1].length}); + component.runAction(ac.chap.ACTION_SELECTION, {add:true}); + selection_changed = true; + } + + if ( 0 != scc.activeTabStopIndex ) + { + // starting action listener + this.snippetCompleteCache = scc; + this.snippetCompleteCache.callbackIndex = component.addActionListener(ac.chap.ACTION_LISTENER_BOTH, this, this.snippetCompleteActionListener); + } + } + return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT | (selection_changed ? 
ac.chap.ACTION_RES_SELECTIONCHANGED : 0); +// return ac.chap.ACTION_RES_REDRAWCARET | ac.chap.ACTION_RES_REDRAWTEXT; +} + +ac.chap.KeyMap.prototype.snippetCompleteActionListener = function(component, action, type, actionResult, actionType, params, caretRow, caretCol) +{ + var scc = action.snippetCompleteCache; + + if ( ac.chap.ACTION_LISTENER_BEFORE == type && !scc.firstInitialized ) + { + // before action listener + // check if we are still in the tabstop range +// var offset = component.char_map[caretRow].substr() + // console.log('activeTabStopRange: %o', scc.activeTabStopRange); + if ( caretRow < scc.activeTabStopRange[0] || caretRow > scc.activeTabStopRange[2] || caretCol < scc.activeTabStopRange[1] || caretCol > scc.activeTabStopRange[3] ) + { + // out of range, canceling whole snippet logic + component.removeActionListener(scc.callbackIndex); + component.removeSelection(); + delete action.snippetCompleteCache; + // console.log('CANCELED'); + return; + } +// console.log('before: %s - [%s,%s]', actionType, caretRow, caretCol); + } + else + { + // after action listener + if ( scc.firstInitialized ) + { + scc.firstInitialized = false; + var tabstop = scc.tabstops[scc.activeTabStopIndex]; + caretCol -= tabstop[1].length; + var code_rows = tabstop[1].split('\n'); + var num_code_rows = code_rows.length; + var to_caret_row = caretRow + num_code_rows - 1; + var to_caret_col = (to_caret_row == caretRow ? 
caretCol : 0) + code_rows[num_code_rows-1].length; + scc.activeTabStopRange = [caretRow, caretCol, to_caret_row, to_caret_col]; + scc.stopMarker = component.char_map[to_caret_row].substr(to_caret_col); + // creating list of nested tabstops + for ( var i in scc.tabstops ) + { + if ( i == scc.activeTabStopIndex ) + { + continue; + } + var c_tabstop = scc.tabstops[i]; + if ( c_tabstop[2] >= tabstop[2] && (c_tabstop[2] + c_tabstop[3] <= tabstop[2] + tabstop[3]) ) + { + scc.activeTabStopNested[i] = true; + } + } + if ( 'mi' != tabstop[0] ) + { + scc.activeTabStopContent = tabstop[1]; + } + scc.firstRealRun = false; + +// action.snippetCompletePostInit(component, caretRow, caretCol); + // console.log('firstRealRun: %o', scc.activeTabStopRange); + } + else + { + // console.log('next: %o', scc.activeTabStopRange); + // adjust the range by finding the stop marker + var n = component.char_map.length; + var i = scc.activeTabStopRange[0]; + var found = false; + var max_iter = 50; +// var offset_range = [scc.activeTabStopRange[2], scc.activeTabStopRange[3]]; + var new_content = ''; + var old_content = scc.activeTabStopContent; + while ( i active_offset ) + { + if ( scc.activeTabStopNested[i] ) + { + // nested + delete scc.tabstops[i]; + } + else + { + tabstop[2] += offset; + // console.log('ADJUSTING: #%s by %s, new: %s', i, offset, tabstop[2]); + } + } + } + } + } + // console.log('after %s(%s, %s) : %o', actionType, caretRow, caretCol, scc.activeTabStopRange); + } +} + + +ac.chap.KeyMap.prototype.compile = function(source) +{ + /* example: + + KEY: -13+shift + selection(add:true) + caret(move:'up') + + KEY: -27 + caret(move:'row_end') + + ... + .. + . 
+ */ + var rows = source.split('\n'); + var n = rows.length; + var re_definition = /^KEY\:\s*[-\d]*[\+\w\s]*$/; + var re_chain = /^[^\(]*\(.*\)\s*$/; + var src = ''; + var chain = []; + var last_key_code = null; + for ( var i=0; i)/i, 0, ac.chap.CHUNK_OPERATOR], + [/(\(|\)|\[|\]|\{|\})/i, 0, ac.chap.CHUNK_PARENTHESIS] + ]; + this.wordDelimiter = /[\w\.\d]/; + this.indentIgnoreMarker = /[\.]/; +} + + +ac.chap.lang = {}; + + + + + + +/* loader stuff - you are free to modify as needed */ + + +// !! Make sure, bundle_*.js is loaded prior launching this function - the bundle defines ac.chap.langEAmy, EAmyJavaScript etc. +function showEditor(templateNode) +{ + var source = templateNode.value; + templateNode = $(templateNode); + var w = templateNode.w(); + var h = templateNode.h(); + + var node = templateNode.p().ib($$(), templateNode).w(w).h(h); + templateNode.d(false); + + var language = ac.chap.lang.JavaScript; + var keymap = ac.chap.keymap.EAmyJavaScript; + + var instance = $new(ac.chap.Window, {language:ac.chap.lang.EAmy, keymap:ac.chap.Keymap}); + instance.addView(node, {theme:ac.chap.theme.EAmy, rowHeight:11, colWidth:7, wordWrap:true, tabelator:' '}); + + instance.show(); + instance.setSnippets(eamy.snippets); + instance.keymap.importSnippets(eamy.snippets); + instance.edit(source); + eamy.instances.push(instance); + +} + +// !! Remove from here and include in your section if you want. +// document.write(''); + + +// Performed upon loading the page. You are free to remove it and call the showEditor() (or modified version of it) in order to launch the editing component. Code of the showEditor should give you enough clue. +$__tune.event.addListener(self, 'load', function(evt) +{ + // this is basically a search for any
"; + html= t.translate(html, editAreas[id]["settings"]["language"]); + span.innerHTML= html; + father= d.getElementById(id).parentNode; + next= d.getElementById(id).nextSibling; + if(next==null) + father.appendChild(span); + else + father.insertBefore(span, next); + } + + if(!editAreas[id]["initialized"]) + { + t.execCommand(id, "EA_init"); // ini callback + if(editAreas[id]["settings"]["display"]=="later"){ + editAreas[id]["initialized"]= true; + return; + } + } + + if(t.isIE){ // launch IE selection checkup + t.init_ie_textarea(id); + } + + // get toolbar content + var area=editAreas[id]; + + for(i=0; i'; + } + + // add plugins scripts if not already loaded by the compressor (but need to load language in all the case) + for(i=0; i'; + t.iframe_script+=''; + } + + + // create css link for the iframe if the whole css text has not been already loaded by the compressor + if(!t.iframe_css){ + t.iframe_css=""; + } + + + // create template + template= t.template.replace(/\[__BASEURL__\]/g, t.baseURL); + template= template.replace("[__TOOLBAR__]",html_toolbar_content); + + + // fill template with good language sentences + template= t.translate(template, area["settings"]["language"], "template"); + + // add css_code + template= template.replace("[__CSSRULES__]", t.iframe_css); + // add js_code + template= template.replace("[__JSCODE__]", t.iframe_script); + + // add version_code + template= template.replace("[__EA_VERSION__]", t.version); + //template=template.replace(/\{\$([^\}]+)\}/gm, this.traduc_template); + + //editAreas[area["settings"]["id"]]["template"]= template; + + area.textarea=d.getElementById(area["settings"]["id"]); + editAreas[area["settings"]["id"]]["textarea"]=area.textarea; + + // if removing previous instances from DOM before (fix from Marcin) + if(typeof(window.frames["frame_"+area["settings"]["id"]])!='undefined') + delete window.frames["frame_"+area["settings"]["id"]]; + + // insert template in the document after the textarea + father= 
area.textarea.parentNode; + /* var container= document.createElement("div"); + container.id= "EditArea_frame_container_"+area["settings"]["id"]; + */ + content= d.createElement("iframe"); + content.name= "frame_"+area["settings"]["id"]; + content.id= "frame_"+area["settings"]["id"]; + content.style.borderWidth= "0px"; + setAttribute(content, "frameBorder", "0"); // IE + content.style.overflow="hidden"; + content.style.display="none"; + + + next= area.textarea.nextSibling; + if(next==null) + father.appendChild(content); + else + father.insertBefore(content, next) ; + f=window.frames["frame_"+area["settings"]["id"]]; + f.document.open(); + f.editAreas=editAreas; + f.area_id= area["settings"]["id"]; + f.document.area_id= area["settings"]["id"]; + f.document.write(template); + f.document.close(); + + // frame.editAreaLoader=this; + //editAreas[area["settings"]["id"]]["displayed"]=true; + + }, + + toggle : function(id, toggle_to){ + + /* if((editAreas[id]["displayed"]==true && toggle_to!="on") || toggle_to=="off"){ + this.toggle_off(id); + }else if((editAreas[id]["displayed"]==false && toggle_to!="off") || toggle_to=="on"){ + this.toggle_on(id); + }*/ + if(!toggle_to) + toggle_to= (editAreas[id]["displayed"]==true)?"off":"on"; + if(editAreas[id]["displayed"]==true && toggle_to=="off"){ + this.toggle_off(id); + }else if(editAreas[id]["displayed"]==false && toggle_to=="on"){ + this.toggle_on(id); + } + + return false; + }, + + // static function + toggle_off : function(id){ + var fs=window.frames,f,t,parNod,nxtSib,selStart,selEnd,scrollTop,scrollLeft; + if(fs["frame_"+id]) + { + f = fs["frame_"+id]; + t = editAreas[id]["textarea"]; + if(f.editArea.fullscreen['isFull']) + f.editArea.toggle_full_screen(false); + editAreas[id]["displayed"]=false; + + // set wrap to off to keep same display mode (some browser get problem with this, so it need more complex operation + t.wrap = "off"; // for IE + setAttribute(t, "wrap", "off"); // for Firefox + parNod = t.parentNode; + nxtSib = 
t.nextSibling; + parNod.removeChild(t); + parNod.insertBefore(t, nxtSib); + + // restore values + t.value= f.editArea.textarea.value; + selStart = f.editArea.last_selection["selectionStart"]; + selEnd = f.editArea.last_selection["selectionEnd"]; + scrollTop = f.document.getElementById("result").scrollTop; + scrollLeft = f.document.getElementById("result").scrollLeft; + + + document.getElementById("frame_"+id).style.display='none'; + + t.style.display="inline"; + + try{ // IE will give an error when trying to focus an invisible or disabled textarea + t.focus(); + } catch(e){}; + if(this.isIE){ + t.selectionStart= selStart; + t.selectionEnd = selEnd; + t.focused = true; + set_IE_selection(t); + }else{ + if(this.isOpera && this.isOpera < 9.6 ){ // Opera bug when moving selection start and selection end + t.setSelectionRange(0, 0); + } + try{ + t.setSelectionRange(selStart, selEnd); + } catch(e) {}; + } + t.scrollTop= scrollTop; + t.scrollLeft= scrollLeft; + f.editArea.execCommand("toggle_off"); + + } + }, + + // static function + toggle_on : function(id){ + var fs=window.frames,f,t,selStart=0,selEnd=0,scrollTop=0,scrollLeft=0,curPos,elem; + + if(fs["frame_"+id]) + { + f = fs["frame_"+id]; + t = editAreas[id]["textarea"]; + area= f.editArea; + area.textarea.value= t.value; + + // store display values; + curPos = editAreas[id]["settings"]["cursor_position"]; + + if(t.use_last==true) + { + selStart = t.last_selectionStart; + selEnd = t.last_selectionEnd; + scrollTop = t.last_scrollTop; + scrollLeft = t.last_scrollLeft; + t.use_last=false; + } + else if( curPos == "auto" ) + { + try{ + selStart = t.selectionStart; + selEnd = t.selectionEnd; + scrollTop = t.scrollTop; + scrollLeft = t.scrollLeft; + //alert(scrollTop); + }catch(ex){} + } + + // set to good size + this.set_editarea_size_from_textarea(id, document.getElementById("frame_"+id)); + t.style.display="none"; + document.getElementById("frame_"+id).style.display="inline"; + area.execCommand("focus"); // without this 
focus opera doesn't manage well the iframe body height + + + // restore display values + editAreas[id]["displayed"]=true; + area.execCommand("update_size"); + + f.document.getElementById("result").scrollTop= scrollTop; + f.document.getElementById("result").scrollLeft= scrollLeft; + area.area_select(selStart, selEnd-selStart); + area.execCommand("toggle_on"); + + + } + else + { + /* if(this.isIE) + get_IE_selection(document.getElementById(id)); */ + elem= document.getElementById(id); + elem.last_selectionStart= elem.selectionStart; + elem.last_selectionEnd= elem.selectionEnd; + elem.last_scrollTop= elem.scrollTop; + elem.last_scrollLeft= elem.scrollLeft; + elem.use_last=true; + editAreaLoader.start(id); + } + }, + + set_editarea_size_from_textarea : function(id, frame){ + var elem,width,height; + elem = document.getElementById(id); + + width = Math.max(editAreas[id]["settings"]["min_width"], elem.offsetWidth)+"px"; + height = Math.max(editAreas[id]["settings"]["min_height"], elem.offsetHeight)+"px"; + if(elem.style.width.indexOf("%")!=-1) + width = elem.style.width; + if(elem.style.height.indexOf("%")!=-1) + height = elem.style.height; + //alert("h: "+height+" w: "+width); + + frame.style.width= width; + frame.style.height= height; + }, + + set_base_url : function(){ + var t=this,elems,i,docBasePath; + + if( !this.baseURL ){ + elems = document.getElementsByTagName('script'); + + for( i=0; i'; + html += ''; + return html; + }, + + get_control_html : function(button_name, lang) { + var t=this,i,but,html,si; + for (i=0; i"; + case "|": + case "separator": + return ''; + case "select_font": + html= ""; + return html; + case "syntax_selection": + html= ""; + return html; + } + + return "["+button_name+"]"; + }, + + + get_template : function(){ + if(this.template=="") + { + var xhr_object = null; + if(window.XMLHttpRequest) // Firefox + xhr_object = new XMLHttpRequest(); + else if(window.ActiveXObject) // Internet Explorer + xhr_object = new 
ActiveXObject("Microsoft.XMLHTTP"); + else { // XMLHttpRequest not supported + alert("XMLHTTPRequest not supported. EditArea not loaded"); + return; + } + + xhr_object.open("GET", this.baseURL+"template.html", false); + xhr_object.send(null); + if(xhr_object.readyState == 4) + this.template=xhr_object.responseText; + else + this.has_error(); + } + }, + + // translate text + translate : function(text, lang, mode){ + if(mode=="word") + text=editAreaLoader.get_word_translation(text, lang); + else if(mode="template"){ + editAreaLoader.current_language= lang; + text=text.replace(/\{\$([^\}]+)\}/gm, editAreaLoader.translate_template); + } + return text; + }, + + translate_template : function(){ + return editAreaLoader.get_word_translation(EditAreaLoader.prototype.translate_template.arguments[1], editAreaLoader.current_language); + }, + + get_word_translation : function(val, lang){ + var i; + + for( i in editAreaLoader.lang[lang]){ + if(i == val) + return editAreaLoader.lang[lang][i]; + } + return "_"+val; + }, + + load_script : function(url){ + var t=this,d=document,script,head; + + if( t.loadedFiles[url] ) + return; + //alert("load: "+url); + try{ + script= d.createElement("script"); + script.type= "text/javascript"; + script.src= url; + script.charset= "UTF-8"; + d.getElementsByTagName("head")[0].appendChild(script); + }catch(e){ + d.write(''); + } + + t.loadedFiles[url] = true; + }, + + add_event : function(obj, name, handler) { + try{ + if (obj.attachEvent) { + obj.attachEvent("on" + name, handler); + } else{ + obj.addEventListener(name, handler, false); + } + }catch(e){} + }, + + remove_event : function(obj, name, handler){ + try{ + if (obj.detachEvent) + obj.detachEvent("on" + name, handler); + else + obj.removeEventListener(name, handler, false); + }catch(e){} + }, + + + // reset all the editareas in the form that have been reseted + reset : function(e){ + var formObj,is_child,i,x; + + formObj = editAreaLoader.isIE ? 
window.event.srcElement : e.target; + if(formObj.tagName!='FORM') + formObj= formObj.form; + + for( i in editAreas ){ + is_child= false; + for( x=0;x old_sel["start"]) // if text was selected, cursor at the end + this.setSelectionRange(id, new_sel["end"], new_sel["end"]); + else // cursor in the middle + this.setSelectionRange(id, old_sel["start"]+open_tag.length, old_sel["start"]+open_tag.length); + }, + + // hide both EditArea and normal textarea + hide : function(id){ + var fs= window.frames,d=document,t=this,scrollTop,scrollLeft,span; + if(d.getElementById(id) && !t.hidden[id]) + { + t.hidden[id]= {}; + t.hidden[id]["selectionRange"]= t.getSelectionRange(id); + if(d.getElementById(id).style.display!="none") + { + t.hidden[id]["scrollTop"]= d.getElementById(id).scrollTop; + t.hidden[id]["scrollLeft"]= d.getElementById(id).scrollLeft; + } + + if(fs["frame_"+id]) + { + t.hidden[id]["toggle"]= editAreas[id]["displayed"]; + + if(fs["frame_"+id] && editAreas[id]["displayed"]==true){ + scrollTop = fs["frame_"+ id].document.getElementById("result").scrollTop; + scrollLeft = fs["frame_"+ id].document.getElementById("result").scrollLeft; + }else{ + scrollTop = d.getElementById(id).scrollTop; + scrollLeft = d.getElementById(id).scrollLeft; + } + t.hidden[id]["scrollTop"]= scrollTop; + t.hidden[id]["scrollLeft"]= scrollLeft; + + if(editAreas[id]["displayed"]==true) + editAreaLoader.toggle_off(id); + } + + // hide toggle button and debug box + span= d.getElementById("EditAreaArroundInfos_"+id); + if(span){ + span.style.display='none'; + } + + // hide textarea + d.getElementById(id).style.display= "none"; + } + }, + + // restore hidden EditArea and normal textarea + show : function(id){ + var fs= window.frames,d=document,t=this,span; + if((elem=d.getElementById(id)) && t.hidden[id]) + { + elem.style.display= "inline"; + elem.scrollTop= t.hidden[id]["scrollTop"]; + elem.scrollLeft= t.hidden[id]["scrollLeft"]; + span= d.getElementById("EditAreaArroundInfos_"+id); + if(span){ + 
span.style.display='inline'; + } + + if(fs["frame_"+id]) + { + + // restore toggle button and debug box + + + // restore textarea + elem.style.display= "inline"; + + // restore EditArea + if(t.hidden[id]["toggle"]==true) + editAreaLoader.toggle_on(id); + + scrollTop = t.hidden[id]["scrollTop"]; + scrollLeft = t.hidden[id]["scrollLeft"]; + + if(fs["frame_"+id] && editAreas[id]["displayed"]==true){ + fs["frame_"+ id].document.getElementById("result").scrollTop = scrollTop; + fs["frame_"+ id].document.getElementById("result").scrollLeft = scrollLeft; + }else{ + elem.scrollTop = scrollTop; + elem.scrollLeft = scrollLeft; + } + + } + // restore selection + sel = t.hidden[id]["selectionRange"]; + t.setSelectionRange(id, sel["start"], sel["end"]); + delete t.hidden[id]; + } + }, + + // get the current file datas (for multi file editing mode) + getCurrentFile : function(id){ + return this.execCommand(id, 'get_file', this.execCommand(id, 'curr_file')); + }, + + // get the given file datas (for multi file editing mode) + getFile : function(id, file_id){ + return this.execCommand(id, 'get_file', file_id); + }, + + // get all the openned files datas (for multi file editing mode) + getAllFiles : function(id){ + return this.execCommand(id, 'get_all_files()'); + }, + + // open a file (for multi file editing mode) + openFile : function(id, file_infos){ + return this.execCommand(id, 'open_file', file_infos); + }, + + // close the given file (for multi file editing mode) + closeFile : function(id, file_id){ + return this.execCommand(id, 'close_file', file_id); + }, + + // close the given file (for multi file editing mode) + setFileEditedMode : function(id, file_id, to){ + var reg1,reg2; + reg1 = new RegExp('\\\\', 'g'); + reg2 = new RegExp('"', 'g'); + return this.execCommand(id, 'set_file_edited_mode("'+ file_id.replace(reg1, '\\\\').replace(reg2, '\\"') +'", '+ to +')'); + }, + + + // allow to access to editarea functions and datas (for advanced users only) + execCommand : 
function(id, cmd, fct_param){ + switch(cmd){ + case "EA_init": + if(editAreas[id]['settings']["EA_init_callback"].length>0) + eval(editAreas[id]['settings']["EA_init_callback"]+"('"+ id +"');"); + break; + case "EA_delete": + if(editAreas[id]['settings']["EA_delete_callback"].length>0) + eval(editAreas[id]['settings']["EA_delete_callback"]+"('"+ id +"');"); + break; + case "EA_submit": + if(editAreas[id]['settings']["submit_callback"].length>0) + eval(editAreas[id]['settings']["submit_callback"]+"('"+ id +"');"); + break; + } + if(window.frames["frame_"+id] && window.frames["frame_"+ id].editArea){ + if(fct_param!=undefined) + return eval('window.frames["frame_'+ id +'"].editArea.'+ cmd +'(fct_param);'); + else + return eval('window.frames["frame_'+ id +'"].editArea.'+ cmd +';'); + } + return false; + } +}; + + var editAreaLoader= new EditAreaLoader(); + var editAreas= {}; + ADDED applications/admin/static/edit_area/elements_functions.js Index: applications/admin/static/edit_area/elements_functions.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/elements_functions.js @@ -0,0 +1,336 @@ +/**** + * This page contains some general usefull functions for javascript + * + ****/ + + + // need to redefine this functiondue to IE problem + function getAttribute( elm, aName ) { + var aValue,taName,i; + try{ + aValue = elm.getAttribute( aName ); + }catch(exept){} + + if( ! 
aValue ){ + for( i = 0; i < elm.attributes.length; i ++ ) { + taName = elm.attributes[i] .name.toLowerCase(); + if( taName == aName ) { + aValue = elm.attributes[i] .value; + return aValue; + } + } + } + return aValue; + }; + + // need to redefine this function due to IE problem + function setAttribute( elm, attr, val ) { + if(attr=="class"){ + elm.setAttribute("className", val); + elm.setAttribute("class", val); + }else{ + elm.setAttribute(attr, val); + } + }; + + /* return a child element + elem: element we are searching in + elem_type: type of the eleemnt we are searching (DIV, A, etc...) + elem_attribute: attribute of the searched element that must match + elem_attribute_match: value that elem_attribute must match + option: "all" if must return an array of all children, otherwise return the first match element + depth: depth of search (-1 or no set => unlimited) + */ + function getChildren(elem, elem_type, elem_attribute, elem_attribute_match, option, depth) + { + if(!option) + var option="single"; + if(!depth) + var depth=-1; + if(elem){ + var children= elem.childNodes; + var result=null; + var results= []; + for (var x=0;x0){ + results= results.concat(result); + } + }else if(result!=null){ + return result; + } + } + } + } + if(option=="all") + return results; + } + return null; + }; + + function isChildOf(elem, parent){ + if(elem){ + if(elem==parent) + return true; + while(elem.parentNode != 'undefined'){ + return isChildOf(elem.parentNode, parent); + } + } + return false; + }; + + function getMouseX(e){ + + if(e!=null && typeof(e.pageX)!="undefined"){ + return e.pageX; + }else{ + return (e!=null?e.x:event.x)+ document.documentElement.scrollLeft; + } + }; + + function getMouseY(e){ + if(e!=null && typeof(e.pageY)!="undefined"){ + return e.pageY; + }else{ + return (e!=null?e.y:event.y)+ document.documentElement.scrollTop; + } + }; + + function calculeOffsetLeft(r){ + return calculeOffset(r,"offsetLeft") + }; + + function calculeOffsetTop(r){ + return 
calculeOffset(r,"offsetTop") + }; + + function calculeOffset(element,attr){ + var offset=0; + while(element){ + offset+=element[attr]; + element=element.offsetParent + } + return offset; + }; + + /** return the computed style + * @param: elem: the reference to the element + * @param: prop: the name of the css property + */ + function get_css_property(elem, prop) + { + if(document.defaultView) + { + return document.defaultView.getComputedStyle(elem, null).getPropertyValue(prop); + } + else if(elem.currentStyle) + { + var prop = prop.replace(/-\D/gi, function(sMatch) + { + return sMatch.charAt(sMatch.length - 1).toUpperCase(); + }); + return elem.currentStyle[prop]; + } + else return null; + } + +/**** + * Moving an element + ***/ + + var _mCE; // currently moving element + + /* allow to move an element in a window + e: the event + id: the id of the element + frame: the frame of the element + ex of use: + in html: + or + in javascript: document.getElementById("my_div").onmousedown= start_move_element + */ + function start_move_element(e, id, frame){ + var elem_id=(e.target || e.srcElement).id; + if(id) + elem_id=id; + if(!frame) + frame=window; + if(frame.event) + e=frame.event; + + _mCE= frame.document.getElementById(elem_id); + _mCE.frame=frame; + frame.document.onmousemove= move_element; + frame.document.onmouseup= end_move_element; + /*_mCE.onmousemove= move_element; + _mCE.onmouseup= end_move_element;*/ + + //alert(_mCE.frame.document.body.offsetHeight); + + mouse_x= getMouseX(e); + mouse_y= getMouseY(e); + //window.status=frame+ " elem: "+elem_id+" elem: "+ _mCE + " mouse_x: "+mouse_x; + _mCE.start_pos_x = mouse_x - (_mCE.style.left.replace("px","") || calculeOffsetLeft(_mCE)); + _mCE.start_pos_y = mouse_y - (_mCE.style.top.replace("px","") || calculeOffsetTop(_mCE)); + return false; + }; + + function end_move_element(e){ + _mCE.frame.document.onmousemove= ""; + _mCE.frame.document.onmouseup= ""; + _mCE=null; + }; + + function move_element(e){ + var 
newTop,newLeft,maxLeft; + + if( _mCE.frame && _mCE.frame.event ) + e=_mCE.frame.event; + newTop = getMouseY(e) - _mCE.start_pos_y; + newLeft = getMouseX(e) - _mCE.start_pos_x; + + maxLeft = _mCE.frame.document.body.offsetWidth- _mCE.offsetWidth; + max_top = _mCE.frame.document.body.offsetHeight- _mCE.offsetHeight; + newTop = Math.min(Math.max(0, newTop), max_top); + newLeft = Math.min(Math.max(0, newLeft), maxLeft); + + _mCE.style.top = newTop+"px"; + _mCE.style.left = newLeft+"px"; + return false; + }; + +/*** + * Managing a textarea (this part need the navigator infos from editAreaLoader + ***/ + + var nav= editAreaLoader.nav; + + // allow to get infos on the selection: array(start, end) + function getSelectionRange(textarea){ + return {"start": textarea.selectionStart, "end": textarea.selectionEnd}; + }; + + // allow to set the selection + function setSelectionRange(t, start, end){ + t.focus(); + + start = Math.max(0, Math.min(t.value.length, start)); + end = Math.max(start, Math.min(t.value.length, end)); + + if( nav.isOpera && nav.isOpera < 9.6 ){ // Opera bug when moving selection start and selection end + t.selectionEnd = 1; + t.selectionStart = 0; + t.selectionEnd = 1; + t.selectionStart = 0; + } + t.selectionStart = start; + t.selectionEnd = end; + //textarea.setSelectionRange(start, end); + + if(nav.isIE) + set_IE_selection(t); + }; + + + // set IE position in Firefox mode (textarea.selectionStart and textarea.selectionEnd). 
should work as a repeated task + function get_IE_selection(t){ + var d=document,div,range,stored_range,elem,scrollTop,relative_top,line_start,line_nb,range_start,range_end,tab; + if(t && t.focused) + { + if(!t.ea_line_height) + { // calculate the lineHeight + div= d.createElement("div"); + div.style.fontFamily= get_css_property(t, "font-family"); + div.style.fontSize= get_css_property(t, "font-size"); + div.style.visibility= "hidden"; + div.innerHTML="0"; + d.body.appendChild(div); + t.ea_line_height= div.offsetHeight; + d.body.removeChild(div); + } + //t.focus(); + range = d.selection.createRange(); + try + { + stored_range = range.duplicate(); + stored_range.moveToElementText( t ); + stored_range.setEndPoint( 'EndToEnd', range ); + if(stored_range.parentElement() == t){ + // the range don't take care of empty lines in the end of the selection + elem = t; + scrollTop = 0; + while(elem.parentNode){ + scrollTop+= elem.scrollTop; + elem = elem.parentNode; + } + + // var scrollTop= t.scrollTop + document.body.scrollTop; + + // var relative_top= range.offsetTop - calculeOffsetTop(t) + scrollTop; + relative_top= range.offsetTop - calculeOffsetTop(t)+ scrollTop; + // alert("rangeoffset: "+ range.offsetTop +"\ncalcoffsetTop: "+ calculeOffsetTop(t) +"\nrelativeTop: "+ relative_top); + line_start = Math.round((relative_top / t.ea_line_height) +1); + + line_nb = Math.round(range.boundingHeight / t.ea_line_height); + + range_start = stored_range.text.length - range.text.length; + tab = t.value.substr(0, range_start).split("\n"); + range_start += (line_start - tab.length)*2; // add missing empty lines to the selection + t.selectionStart = range_start; + + range_end = t.selectionStart + range.text.length; + tab = t.value.substr(0, range_start + range.text.length).split("\n"); + range_end += (line_start + line_nb - 1 - tab.length)*2; + t.selectionEnd = range_end; + } + } + catch(e){} + } + if( t && t.id ) + { + setTimeout("get_IE_selection(document.getElementById('"+ t.id 
+"'));", 50); + } + }; + + function IE_textarea_focus(){ + event.srcElement.focused= true; + } + + function IE_textarea_blur(){ + event.srcElement.focused= false; + } + + // select the text for IE (take into account the \r difference) + function set_IE_selection( t ){ + var nbLineStart,nbLineStart,nbLineEnd,range; + if(!window.closed){ + nbLineStart=t.value.substr(0, t.selectionStart).split("\n").length - 1; + nbLineEnd=t.value.substr(0, t.selectionEnd).split("\n").length - 1; + try + { + range = document.selection.createRange(); + range.moveToElementText( t ); + range.setEndPoint( 'EndToStart', range ); + range.moveStart('character', t.selectionStart - nbLineStart); + range.moveEnd('character', t.selectionEnd - nbLineEnd - (t.selectionStart - nbLineStart) ); + range.select(); + } + catch(e){} + } + }; + + + editAreaLoader.waiting_loading["elements_functions.js"]= "loaded"; ADDED applications/admin/static/edit_area/highlight.js Index: applications/admin/static/edit_area/highlight.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/highlight.js @@ -0,0 +1,407 @@ + // change_to: "on" or "off" + EditArea.prototype.change_highlight= function(change_to){ + if(this.settings["syntax"].length==0 && change_to==false){ + this.switchClassSticky(_$("highlight"), 'editAreaButtonDisabled', true); + this.switchClassSticky(_$("reset_highlight"), 'editAreaButtonDisabled', true); + return false; + } + + if(this.do_highlight==change_to) + return false; + + + this.getIESelection(); + var pos_start= this.textarea.selectionStart; + var pos_end= this.textarea.selectionEnd; + + if(this.do_highlight===true || change_to==false) + this.disable_highlight(); + else + this.enable_highlight(); + this.textarea.focus(); + this.textarea.selectionStart = pos_start; + this.textarea.selectionEnd = pos_end; + this.setIESelection(); + + }; + + EditArea.prototype.disable_highlight= function(displayOnly){ + var t= this, a=t.textarea, 
new_Obj, old_class, new_class; + + t.selection_field.innerHTML=""; + t.selection_field_text.innerHTML=""; + t.content_highlight.style.visibility="hidden"; + // replacing the node is far more faster than deleting it's content in firefox + new_Obj= t.content_highlight.cloneNode(false); + new_Obj.innerHTML= ""; + t.content_highlight.parentNode.insertBefore(new_Obj, t.content_highlight); + t.content_highlight.parentNode.removeChild(t.content_highlight); + t.content_highlight= new_Obj; + old_class= parent.getAttribute( a,"class" ); + if(old_class){ + new_class= old_class.replace( "hidden","" ); + parent.setAttribute( a, "class", new_class ); + } + + a.style.backgroundColor="transparent"; // needed in order to see the bracket finders + + //var icon= document.getElementById("highlight"); + //setAttribute(icon, "class", getAttribute(icon, "class").replace(/ selected/g, "") ); + //t.restoreClass(icon); + //t.switchClass(icon,'editAreaButtonNormal'); + t.switchClassSticky(_$("highlight"), 'editAreaButtonNormal', true); + t.switchClassSticky(_$("reset_highlight"), 'editAreaButtonDisabled', true); + + t.do_highlight=false; + + t.switchClassSticky(_$("change_smooth_selection"), 'editAreaButtonSelected', true); + if(typeof(t.smooth_selection_before_highlight)!="undefined" && t.smooth_selection_before_highlight===false){ + t.change_smooth_selection_mode(false); + } + + // this.textarea.style.backgroundColor="#FFFFFF"; + }; + + EditArea.prototype.enable_highlight= function(){ + var t=this, a=t.textarea, new_class; + t.show_waiting_screen(); + + t.content_highlight.style.visibility="visible"; + new_class =parent.getAttribute(a,"class")+" hidden"; + parent.setAttribute( a, "class", new_class ); + + // IE can't manage mouse click outside text range without this + if( t.isIE ) + a.style.backgroundColor="#FFFFFF"; + + t.switchClassSticky(_$("highlight"), 'editAreaButtonSelected', false); + t.switchClassSticky(_$("reset_highlight"), 'editAreaButtonNormal', false); + + 
t.smooth_selection_before_highlight=t.smooth_selection; + if(!t.smooth_selection) + t.change_smooth_selection_mode(true); + t.switchClassSticky(_$("change_smooth_selection"), 'editAreaButtonDisabled', true); + + + t.do_highlight=true; + t.resync_highlight(); + + t.hide_waiting_screen(); + }; + + /** + * Ask to update highlighted text + * @param Array infos - Array of datas returned by EditArea.get_selection_infos() + */ + EditArea.prototype.maj_highlight= function(infos){ + // for speed mesure + var debug_opti="",tps_start= new Date().getTime(), tps_middle_opti=new Date().getTime(); + var t=this, hightlighted_text, updated_highlight; + var textToHighlight=infos["full_text"], doSyntaxOpti = false, doHtmlOpti = false, stay_begin="", stay_end="", trace_new , trace_last; + + if(t.last_text_to_highlight==infos["full_text"] && t.resync_highlight!==true) + return; + + // OPTIMISATION: will search to update only changed lines + if(t.reload_highlight===true){ + t.reload_highlight=false; + }else if(textToHighlight.length==0){ + textToHighlight="\n "; + }else{ + // get text change datas + changes = t.checkTextEvolution(t.last_text_to_highlight,textToHighlight); + + // check if it can only reparse the changed text + trace_new = t.get_syntax_trace(changes.newTextLine).replace(/\r/g, ''); + trace_last = t.get_syntax_trace(changes.lastTextLine).replace(/\r/g, ''); + doSyntaxOpti = ( trace_new == trace_last ); + + // check if the difference comes only from a new line created + // => we have to remember that the editor can automaticaly add tabulation or space after the new line) + if( !doSyntaxOpti && trace_new == "\n"+trace_last && /^[ \t\s]*\n[ \t\s]*$/.test( changes.newText.replace(/\r/g, '') ) && changes.lastText =="" ) + { + doSyntaxOpti = true; + } + + // we do the syntax optimisation + if( doSyntaxOpti ){ + + tps_middle_opti=new Date().getTime(); + + stay_begin= t.last_hightlighted_text.split("\n").slice(0, changes.lineStart).join("\n"); + if(changes.lineStart>0) + 
stay_begin+= "\n"; + stay_end= t.last_hightlighted_text.split("\n").slice(changes.lineLastEnd+1).join("\n"); + if(stay_end.length>0) + stay_end= "\n"+stay_end; + + // Final check to see that we're not in the middle of span tags + if( stay_begin.split(' trace: "+trace_new + +"\nchanged_last_text: "+ch.lastText+" => trace: "+trace_last + //debug_opti+= "\nchanged: "+ infos["full_text"].substring(ch.posStart, ch.posNewEnd); + + "\nchanged_line: "+ch.newTextLine + + "\nlast_changed_line: "+ch.lastTextLine + +"\nstay_begin: "+ stay_begin.slice(-100) + +"\nstay_end: "+ stay_end.substr( 0, 100 ); + //debug_opti="start: "+stay_begin_len+ "("+nb_line_start_unchanged+") end: "+ (stay_end_len)+ "("+(splited.length-nb_line_end_unchanged)+") "; + //debug_opti+="changed: "+ textToHighlight.substring(stay_begin_len, textToHighlight.length-stay_end_len)+" \n"; + + //debug_opti+="changed: "+ stay_begin.substr(stay_begin.length-200)+ "----------"+ textToHighlight+"------------------"+ stay_end.substr(0,200) +"\n"; + +"\n"; + } + + + // END OPTIMISATION + } + + tps_end_opti = new Date().getTime(); + + // apply highlight + updated_highlight = t.colorize_text(textToHighlight); + tpsAfterReg = new Date().getTime(); + + /*** + * see if we can optimize for updating only the required part of the HTML code + * + * The goal here will be to find the text node concerned by the modification and to update it + */ + //------------------------------------------- + + // disable latest optimization tricks (introduced in 0.8.1 and removed in 0.8.2), TODO: check for another try later + doSyntaxOpti = doHtmlOpti = false; + if( doSyntaxOpti ) + { + try + { + var replacedBloc, i, nbStart = '', nbEnd = '', newHtml, lengthOld, lengthNew; + replacedBloc = t.last_hightlighted_text.substring( stay_begin.length, t.last_hightlighted_text.length - stay_end.length ); + + lengthOld = replacedBloc.length; + lengthNew = updated_highlight.length; + + // find the identical caracters at the beginning + for( i=0; i < 
lengthOld && i < lengthNew && replacedBloc.charAt(i) == updated_highlight.charAt(i) ; i++ ) + { + } + nbStart = i; + // find the identical caracters at the end + for( i=0; i + nbStart < lengthOld && i + nbStart < lengthNew && replacedBloc.charAt(lengthOld-i-1) == updated_highlight.charAt(lengthNew-i-1) ; i++ ) + { + } + nbEnd = i; + //console.log( nbStart, nbEnd, replacedBloc, updated_highlight ); + // get the changes + lastHtml = replacedBloc.substring( nbStart, lengthOld - nbEnd ); + newHtml = updated_highlight.substring( nbStart, lengthNew - nbEnd ); + + // We can do the optimisation only if we havn't touch to span elements + if( newHtml.indexOf('').replace( /&/g, '&'); + + nbOpendedSpan = beginStr.split(' 0 ) + { + nbClosed--; + parentSpan = parentSpan.parentNode; + } + + // find the position of the last opended tag + while( parentSpan.parentNode != t.content_highlight && parentSpan.parentNode.tagName != 'PRE' && ( tmpMaxStartOffset = Math.max( 0, beginStr.lastIndexOf( '', maxStartOffset ) ); + + // count the number of sub spans + nbSubSpanBefore = beginStr.substr( lastEndPos ).split('' ) ) == -1 ? beginStr.length : beginStr.length - ( lastIndex + 1 ); + //nbUnchangedChars = ? 
beginStr.length : beginStr.substr( lastIndex + 1 ).replace( /</g, '<').replace( />/g, '>').replace( /&/g, '&').length; + + if( ( lastIndex = beginStr.lastIndexOf( '>' ) ) == -1 ) + { + nbUnchangedChars = beginStr.length; + } + else + { + nbUnchangedChars = beginStr.substr( lastIndex + 1 ).replace( /</g, '<').replace( />/g, '>').replace( /&/g, '&').length; + //nbUnchangedChars += beginStr.substr( ).replace( /&/g, '&').replace( //g, '>').length - beginStr.length; + } + //alert( nbUnchangedChars ); + // console.log( span, textNode, nbOpendedSpan,nbClosedSpan, span.nextSibling, textNode.length, nbUnchangedChars, lastHtml, lastHtml.length, newHtml, newHtml.length ); + // alert( textNode.parentNode.className +'-'+ textNode.parentNode.tagName+"\n"+ textNode.data +"\n"+ nbUnchangedChars +"\n"+ lastHtml.length +"\n"+ newHtml +"\n"+ newHtml.length ); + // console.log( nbUnchangedChars, lastIndex, beginStr.length, beginStr.replace(/&/g, '&'), lastHtml.length, '|', newHtml.replace( /\t/g, 't').replace( /\n/g, 'n').replace( /\r/g, 'r'), lastHtml.replace( /\t/g, 't').replace( /\n/g, 'n').replace( /\r/, 'r') ); + // console.log( textNode.data.replace(/&/g, '&') ); + // IE only manage \r for cariage return in textNode and not \n or \r\n + if( t.isIE ) + { + nbUnchangedChars -= ( beginStr.substr( beginStr.length - nbUnchangedChars ).split("\n").length - 1 ); + //alert( textNode.data.replace(/\r/g, '_r').replace(/\n/g, '_n')); + textNode.replaceData( nbUnchangedChars, lastHtml.replace(/\n/g, '').length, newHtml.replace(/\n/g, '') ); + } + else + { + textNode.replaceData( nbUnchangedChars, lastHtml.length, newHtml ); + } + //--------] + } + } + // an exception shouldn't occured but if replaceData failed at least it won't break everything + catch( e ) + { + // throw e; + // console.log( e ); + doHtmlOpti = false; + } + + } + + /*** END HTML update's optimisation ***/ + // end test + + // console.log( (TPS6-TPS5), (TPS5-TPS4), (TPS4-TPS3), (TPS3-TPS2), (TPS2-TPS1), _CPT ); + // get the 
new highlight content + tpsAfterOpti2 = new Date().getTime(); + hightlighted_text = stay_begin + updated_highlight + stay_end; + if( !doHtmlOpti ) + { + // update the content of the highlight div by first updating a clone node (as there is no display in the same time for t node it's quite faster (5*)) + var new_Obj= t.content_highlight.cloneNode(false); + if( ( t.isIE && t.isIE < 8 ) || ( t.isOpera && t.isOpera < 9.6 ) ) + new_Obj.innerHTML= "
" + hightlighted_text + "
"; + else + new_Obj.innerHTML= ""+ hightlighted_text +""; + + t.content_highlight.parentNode.replaceChild(new_Obj, t.content_highlight); + + t.content_highlight= new_Obj; + } + + t.last_text_to_highlight= infos["full_text"]; + t.last_hightlighted_text= hightlighted_text; + + tps3=new Date().getTime(); + + if(t.settings["debug"]){ + //lineNumber=tab_text.length; + //t.debug.value+=" \nNB char: "+_$("src").value.length+" Nb line: "+ lineNumber; + + t.debug.value= "Tps optimisation "+(tps_end_opti-tps_start) + +" | tps reg exp: "+ (tpsAfterReg-tps_end_opti) + +" | tps opti HTML : "+ (tpsAfterOpti2-tpsAfterReg) + ' '+ ( doHtmlOpti ? 'yes' : 'no' ) + +" | tps update highlight content: "+ (tps3-tpsAfterOpti2) + +" | tpsTotal: "+ (tps3-tps_start) + + "("+tps3+")\n"+ debug_opti; + // t.debug.value+= "highlight\n"+hightlighted_text;*/ + } + + }; + + EditArea.prototype.resync_highlight= function(reload_now){ + this.reload_highlight=true; + this.last_text_to_highlight=""; + this.focus(); + if(reload_now) + this.check_line_selection(false); + }; ADDED applications/admin/static/edit_area/images/autocompletion.gif Index: applications/admin/static/edit_area/images/autocompletion.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/autocompletion.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/close.gif Index: applications/admin/static/edit_area/images/close.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/close.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/fullscreen.gif Index: applications/admin/static/edit_area/images/fullscreen.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/fullscreen.gif cannot compute difference between binary 
files ADDED applications/admin/static/edit_area/images/go_to_line.gif Index: applications/admin/static/edit_area/images/go_to_line.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/go_to_line.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/help.gif Index: applications/admin/static/edit_area/images/help.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/help.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/highlight.gif Index: applications/admin/static/edit_area/images/highlight.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/highlight.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/load.gif Index: applications/admin/static/edit_area/images/load.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/load.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/move.gif Index: applications/admin/static/edit_area/images/move.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/move.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/newdocument.gif Index: applications/admin/static/edit_area/images/newdocument.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/newdocument.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/opacity.png Index: applications/admin/static/edit_area/images/opacity.png 
================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/opacity.png cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/processing.gif Index: applications/admin/static/edit_area/images/processing.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/processing.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/redo.gif Index: applications/admin/static/edit_area/images/redo.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/redo.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/reset_highlight.gif Index: applications/admin/static/edit_area/images/reset_highlight.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/reset_highlight.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/save.gif Index: applications/admin/static/edit_area/images/save.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/save.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/search.gif Index: applications/admin/static/edit_area/images/search.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/search.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/smooth_selection.gif Index: applications/admin/static/edit_area/images/smooth_selection.gif ================================================================== --- /dev/null +++ 
applications/admin/static/edit_area/images/smooth_selection.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/spacer.gif Index: applications/admin/static/edit_area/images/spacer.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/spacer.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/statusbar_resize.gif Index: applications/admin/static/edit_area/images/statusbar_resize.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/statusbar_resize.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/undo.gif Index: applications/admin/static/edit_area/images/undo.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/undo.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/images/word_wrap.gif Index: applications/admin/static/edit_area/images/word_wrap.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/images/word_wrap.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/keyboard.js Index: applications/admin/static/edit_area/keyboard.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/keyboard.js @@ -0,0 +1,145 @@ +var EA_keys = {8:"Retour arriere",9:"Tabulation",12:"Milieu (pave numerique)",13:"Entrer",16:"Shift",17:"Ctrl",18:"Alt",19:"Pause",20:"Verr Maj",27:"Esc",32:"Space",33:"Page up",34:"Page down",35:"End",36:"Begin",37:"Left",38:"Up",39:"Right",40:"Down",44:"Impr ecran",45:"Inser",46:"Suppr",91:"Menu Demarrer Windows / touche pomme Mac",92:"Menu Demarrer 
Windows",93:"Menu contextuel Windows",112:"F1",113:"F2",114:"F3",115:"F4",116:"F5",117:"F6",118:"F7",119:"F8",120:"F9",121:"F10",122:"F11",123:"F12",144:"Verr Num",145:"Arret defil"}; + + + +function keyDown(e){ + if(!e){ // if IE + e=event; + } + + // send the event to the plugins + for(var i in editArea.plugins){ + if(typeof(editArea.plugins[i].onkeydown)=="function"){ + if(editArea.plugins[i].onkeydown(e)===false){ // stop propaging + if(editArea.isIE) + e.keyCode=0; + return false; + } + } + } + + var target_id=(e.target || e.srcElement).id; + var use=false; + if (EA_keys[e.keyCode]) + letter=EA_keys[e.keyCode]; + else + letter=String.fromCharCode(e.keyCode); + + var low_letter= letter.toLowerCase(); + + if(letter=="Page up" && !AltPressed(e) && !editArea.isOpera){ + editArea.execCommand("scroll_page", {"dir": "up", "shift": ShiftPressed(e)}); + use=true; + }else if(letter=="Page down" && !AltPressed(e) && !editArea.isOpera){ + editArea.execCommand("scroll_page", {"dir": "down", "shift": ShiftPressed(e)}); + use=true; + }else if(editArea.is_editable==false){ + // do nothing but also do nothing else (allow to navigate with page up and page down) + return true; + }else if(letter=="Tabulation" && target_id=="textarea" && !CtrlPressed(e) && !AltPressed(e)){ + if(ShiftPressed(e)) + editArea.execCommand("invert_tab_selection"); + else + editArea.execCommand("tab_selection"); + + use=true; + if(editArea.isOpera || (editArea.isFirefox && editArea.isMac) ) // opera && firefox mac can't cancel tabulation events... 
+ setTimeout("editArea.execCommand('focus');", 1); + }else if(letter=="Entrer" && target_id=="textarea"){ + if(editArea.press_enter()) + use=true; + }else if(letter=="Entrer" && target_id=="area_search"){ + editArea.execCommand("area_search"); + use=true; + }else if(letter=="Esc"){ + editArea.execCommand("close_all_inline_popup", e); + use=true; + }else if(CtrlPressed(e) && !AltPressed(e) && !ShiftPressed(e)){ + switch(low_letter){ + case "f": + editArea.execCommand("area_search"); + use=true; + break; + case "r": + editArea.execCommand("area_replace"); + use=true; + break; + case "q": + editArea.execCommand("close_all_inline_popup", e); + use=true; + break; + case "h": + editArea.execCommand("change_highlight"); + use=true; + break; + case "g": + setTimeout("editArea.execCommand('go_to_line');", 5); // the prompt stop the return false otherwise + use=true; + break; + case "e": + editArea.execCommand("show_help"); + use=true; + break; + case "z": + use=true; + editArea.execCommand("undo"); + break; + case "y": + use=true; + editArea.execCommand("redo"); + break; + default: + break; + } + } + + // check to disable the redo possibility if the textarea content change + if(editArea.next.length > 0){ + setTimeout("editArea.check_redo();", 10); + } + + setTimeout("editArea.check_file_changes();", 10); + + + if(use){ + // in case of a control that sould'nt be used by IE but that is used => THROW a javascript error that will stop key action + if(editArea.isIE) + e.keyCode=0; + return false; + } + //alert("Test: "+ letter + " ("+e.keyCode+") ALT: "+ AltPressed(e) + " CTRL "+ CtrlPressed(e) + " SHIFT "+ ShiftPressed(e)); + + return true; + +}; + + +// return true if Alt key is pressed +function AltPressed(e) { + if (window.event) { + return (window.event.altKey); + } else { + if(e.modifiers) + return (e.altKey || (e.modifiers % 2)); + else + return e.altKey; + } +}; + +// return true if Ctrl key is pressed +function CtrlPressed(e) { + if (window.event) { + return 
(window.event.ctrlKey); + } else { + return (e.ctrlKey || (e.modifiers==2) || (e.modifiers==3) || (e.modifiers>5)); + } +}; + +// return true if Shift key is pressed +function ShiftPressed(e) { + if (window.event) { + return (window.event.shiftKey); + } else { + return (e.shiftKey || (e.modifiers>3)); + } +}; ADDED applications/admin/static/edit_area/langs/bg.js Index: applications/admin/static/edit_area/langs/bg.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/bg.js @@ -0,0 +1,54 @@ +/* + * Bulgarian translation + * Author: Valentin Hristov + * Company: SOFTKIT Bulgarian + * Site: http://www.softkit-bg.com + */ +editAreaLoader.lang["bg"]={ +new_document: "нов документ", +search_button: "търсене и замяна", +search_command: "търси следващия / отвори прозорец с търсачка", +search: "търсене", +replace: "замяна", +replace_command: "замяна / отвори прозорец с търсачка", +find_next: "намери следващия", +replace_all: "замени всички", +reg_exp: "реголярни изрази", +match_case: "чуствителен към регистъра", +not_found: "няма резултат.", +occurrence_replaced: "замяната е осъществена.", +search_field_empty: "Полето за търсене е празно", +restart_search_at_begin: "До края на документа. 
Почни с началото.", +move_popup: "премести прозореца с търсачката", +font_size: "--Размер на шрифта--", +go_to_line: "премени към реда", +go_to_line_prompt: "премени към номера на реда:", +undo: "отмени", +redo: "върни", +change_smooth_selection: "включи/изключи някой от функциите за преглед (по красиво, но повече натоварва)", +highlight: "превключване на оцветяване на синтаксиса включена/изключена", +reset_highlight: "въстанови оцветяване на синтаксиса (ако не е синхронизиран с текста)", +word_wrap: "режим на пренасяне на дълги редове", +help: "за програмата", +save: "съхрани", +load: "зареди", +line_abbr: "Стр", +char_abbr: "Стлб", +position: "Позиция", +total: "Всичко", +close_popup: "затвори прозореца", +shortcuts: "Бързи клавиши", +add_tab: "добави табулация в текста", +remove_tab: "премахни табулацията в текста", +about_notice: "Внимание: използвайте функцията оцветяване на синтаксиса само за малки текстове", +toggle: "Превключи редактор", +accesskey: "Бърз клавиш", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Зареждане...", +fullscreen: "на цял екран", +syntax_selection: "--Синтаксис--", +close_tab: "Затвори файла" +}; ADDED applications/admin/static/edit_area/langs/cs.js Index: applications/admin/static/edit_area/langs/cs.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/cs.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["cs"]={ +new_document: "Nový dokument", +search_button: "Najdi a nahraď", +search_command: "Hledej další / otevři vyhledávací pole", +search: "Hledej", +replace: "Nahraď", +replace_command: "Nahraď / otevři vyhledávací pole", +find_next: "Najdi další", +replace_all: "Nahraď vše", +reg_exp: "platné výrazy", +match_case: "vyhodnocené výrazy", +not_found: "nenalezené.", +occurrence_replaced: "výskyty nahrazené.", +search_field_empty: "Pole vyhledávání je prázdné", +restart_search_at_begin: "Dosažen konec souboru, začínám od začátku.", 
+move_popup: "Přesuň vyhledávací okno", +font_size: "--Velikost textu--", +go_to_line: "Přejdi na řádek", +go_to_line_prompt: "Přejdi na řádek:", +undo: "krok zpět", +redo: "znovu", +change_smooth_selection: "Povolit nebo zakázat některé ze zobrazených funkcí (účelnější zobrazení požaduje větší zatížení procesoru)", +highlight: "Zvýrazňování syntaxe zap./vyp.", +reset_highlight: "Obnovit zvýraznění (v případě nesrovnalostí)", +word_wrap: "toggle word wrapping mode", +help: "O programu", +save: "Uložit", +load: "Otevřít", +line_abbr: "Ř.", +char_abbr: "S.", +position: "Pozice", +total: "Celkem", +close_popup: "Zavřít okno", +shortcuts: "Zkratky", +add_tab: "Přidat tabulování textu", +remove_tab: "Odtsranit tabulování textu", +about_notice: "Upozornění! Funkce zvýrazňování textu je k dispozici pouze pro malý text", +toggle: "Přepnout editor", +accesskey: "Přístupová klávesa", +tab: "Záložka", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Zpracovávám ...", +fullscreen: "Celá obrazovka", +syntax_selection: "--vyber zvýrazňovač--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/de.js Index: applications/admin/static/edit_area/langs/de.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/de.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["de"]={ +new_document: "Neues Dokument", +search_button: "Suchen und Ersetzen", +search_command: "Weitersuchen / öffne Suchfeld", +search: "Suchen", +replace: "Ersetzen", +replace_command: "Ersetzen / öffne Suchfeld", +find_next: "Weitersuchen", +replace_all: "Ersetze alle Treffer", +reg_exp: "reguläre Ausdrücke", +match_case: "passt auf den Begriff
", +not_found: "Nicht gefunden.", +occurrence_replaced: "Die Vorkommen wurden ersetzt.", +search_field_empty: "Leeres Suchfeld", +restart_search_at_begin: "Ende des zu durchsuchenden Bereiches erreicht. Es wird die Suche von Anfang an fortgesetzt.", //find a shorter translation +move_popup: "Suchfenster bewegen", +font_size: "--Schriftgröße--", +go_to_line: "Gehe zu Zeile", +go_to_line_prompt: "Gehe zu Zeilennummmer:", +undo: "Rückgängig", +redo: "Wiederherstellen", +change_smooth_selection: "Aktiviere/Deaktiviere einige Features (weniger Bildschirmnutzung aber mehr CPU-Belastung)", +highlight: "Syntax Highlighting an- und ausschalten", +reset_highlight: "Highlighting zurücksetzen (falls mit Text nicht konform)", +word_wrap: "Toggle word wrapping mode", +help: "Info", +save: "Speichern", +load: "Öffnen", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Position", +total: "Gesamt", +close_popup: "Popup schließen", +shortcuts: "Shortcuts", +add_tab: "Tab zum Text hinzufügen", +remove_tab: "Tab aus Text entfernen", +about_notice: "Bemerkung: Syntax Highlighting ist nur für kurze Texte", +toggle: "Editor an- und ausschalten", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "In Bearbeitung...", +fullscreen: "Full-Screen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/dk.js Index: applications/admin/static/edit_area/langs/dk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/dk.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["dk"]={ +new_document: "nyt tomt dokument", +search_button: "søg og erstat", +search_command: "find næste / åben søgefelt", +search: "søg", +replace: "erstat", +replace_command: "erstat / åben søgefelt", +find_next: "find næste", +replace_all: "erstat alle", +reg_exp: "regular expressions", +match_case: "forskel på store/små bogstaver
", +not_found: "not found.", +occurrence_replaced: "occurences replaced.", +search_field_empty: "Search field empty", +restart_search_at_begin: "End of area reached. Restart at begin.", +move_popup: "flyt søgepopup", +font_size: "--Skriftstørrelse--", +go_to_line: "gå til linie", +go_to_line_prompt: "gå til linienummer:", +undo: "fortryd", +redo: "gentag", +change_smooth_selection: "slå display funktioner til/fra (smartere display men mere CPU krævende)", +highlight: "slå syntax highlight til/fra", +reset_highlight: "nulstil highlight (hvis den er desynkroniseret fra teksten)", +word_wrap: "toggle word wrapping mode", +help: "om", +save: "gem", +load: "hent", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Position", +total: "Total", +close_popup: "luk popup", +shortcuts: "Genveje", +add_tab: "tilføj tabulation til tekst", +remove_tab: "fjern tabulation fra tekst", +about_notice: "Husk: syntax highlight funktionen bør kun bruge til små tekster", +toggle: "Slå editor til / fra", +accesskey: "Accesskey", +tab: "Tab", +shift: "Skift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Processing...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/en.js Index: applications/admin/static/edit_area/langs/en.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/en.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["en"]={ +new_document: "new empty document", +search_button: "search and replace", +search_command: "search next / open search area", +search: "search", +replace: "replace", +replace_command: "replace / open search area", +find_next: "find next", +replace_all: "replace all", +reg_exp: "regular expressions", +match_case: "match case", +not_found: "not found.", +occurrence_replaced: "occurences replaced.", +search_field_empty: "Search field empty", +restart_search_at_begin: "End of area reached. 
Restart at begin.", +move_popup: "move search popup", +font_size: "--Font size--", +go_to_line: "go to line", +go_to_line_prompt: "go to line number:", +undo: "undo", +redo: "redo", +change_smooth_selection: "enable/disable some display features (smarter display but more CPU charge)", +highlight: "toggle syntax highlight on/off", +reset_highlight: "reset highlight (if desyncronized from text)", +word_wrap: "toggle word wrapping mode", +help: "about", +save: "save", +load: "load", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Position", +total: "Total", +close_popup: "close popup", +shortcuts: "Shortcuts", +add_tab: "add tabulation to text", +remove_tab: "remove tabulation to text", +about_notice: "Notice: syntax highlight function is only for small text", +toggle: "Toggle editor", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Processing...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/eo.js Index: applications/admin/static/edit_area/langs/eo.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/eo.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["eo"]={ +new_document: "nova dokumento (vakigas la enhavon)", +search_button: "serĉi / anstataŭigi", +search_command: "pluserĉi / malfermi la serĉo-fenestron", +search: "serĉi", +replace: "anstataŭigi", +replace_command: "anstataŭigi / malfermi la serĉo-fenestron", +find_next: "serĉi", +replace_all: "anstataŭigi ĉion", +reg_exp: "regula esprimo", +match_case: "respekti la usklecon", +not_found: "ne trovita.", +occurrence_replaced: "anstataŭigoj plenumitaj.", +search_field_empty: "La kampo estas malplena.", +restart_search_at_begin: "Fino de teksto ĝisrirata, ĉu daŭrigi el la komenco?", +move_popup: "movi la serĉo-fenestron", +font_size: "--Tipara grando--", +go_to_line: "iri al la linio", +go_to_line_prompt: "iri 
al la linio numero:", +undo: "rezigni", +redo: "refari", +change_smooth_selection: "ebligi/malebligi la funkcioj de vidigo (pli bona vidigo, sed pli da ŝarĝo de la ĉeforgano)", +highlight: "ebligi/malebligi la sintaksan kolorigon", +reset_highlight: "repravalorizi la sintaksan kolorigon (se malsinkronigon de la teksto)", +word_wrap: "toggle word wrapping mode", +help: "pri", +save: "registri", +load: "ŝarĝi", +line_abbr: "Ln", +char_abbr: "Sg", +position: "Pozicio", +total: "Sumo", +close_popup: "fermi la ŝprucfenestron", +shortcuts: "Fulmoklavo", +add_tab: "aldoni tabon en la tekston", +remove_tab: "forigi tablon el la teksto", +about_notice: "Noto: la sintaksa kolorigo estas nur prikalkulita por mallongaj tekstoj.", +toggle: "baskuligi la redaktilon", +accesskey: "Fulmoklavo", +tab: "Tab", +shift: "Maj", +ctrl: "Ktrl", +esc: "Esk", +processing: "ŝargante...", +fullscreen: "plenekrane", +syntax_selection: "--Sintakso--", +close_tab: "Fermi la dosieron" +}; ADDED applications/admin/static/edit_area/langs/es.js Index: applications/admin/static/edit_area/langs/es.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/es.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["es"]={ +new_document: "nuevo documento vacío", +search_button: "buscar y reemplazar", +search_command: "buscar siguiente / abrir área de búsqueda", +search: "buscar", +replace: "reemplazar", +replace_command: "reemplazar / abrir área de búsqueda", +find_next: "encontrar siguiente", +replace_all: "reemplazar todos", +reg_exp: "expresiones regulares", +match_case: "coincidir capitalización", +not_found: "no encontrado.", +occurrence_replaced: "ocurrencias reemplazadas.", +search_field_empty: "Campo de búsqueda vacío", +restart_search_at_begin: "Se ha llegado al final del área. 
Se va a seguir desde el principio.", +move_popup: "mover la ventana de búsqueda", +font_size: "--Tamaño de la fuente--", +go_to_line: "ir a la línea", +go_to_line_prompt: "ir a la línea número:", +undo: "deshacer", +redo: "rehacer", +change_smooth_selection: "activar/desactivar algunas características de visualización (visualización más inteligente pero más carga de CPU)", +highlight: "intercambiar resaltado de sintaxis", +reset_highlight: "reinicializar resaltado (si no esta sincronizado con el texto)", +word_wrap: "toggle word wrapping mode", +help: "acerca", +save: "guardar", +load: "cargar", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Posición", +total: "Total", +close_popup: "recuadro de cierre", +shortcuts: "Atajos", +add_tab: "añadir tabulado al texto", +remove_tab: "borrar tabulado del texto", +about_notice: "Aviso: el resaltado de sintaxis sólo funciona para texto pequeño", +toggle: "Cambiar editor", +accesskey: "Tecla de acceso", +tab: "Tab", +shift: "Mayúsc", +ctrl: "Ctrl", +esc: "Esc", +processing: "Procesando...", +fullscreen: "pantalla completa", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/fi.js Index: applications/admin/static/edit_area/langs/fi.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/fi.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["fi"]={ +new_document: "uusi tyhjä dokumentti", +search_button: "etsi ja korvaa", +search_command: "etsi seuraava / avaa etsintävalikko", +search: "etsi", +replace: "korvaa", +replace_command: "korvaa / avaa etsintävalikko", +find_next: "etsi seuraava", +replace_all: "korvaa kaikki", +reg_exp: "säännölliset lausekkeet", +match_case: "täsmää kirjainkokoon", +not_found: "ei löytynyt.", +occurrence_replaced: "esiintymää korvattu.", +search_field_empty: "Haettava merkkijono on tyhjä", +restart_search_at_begin: "Alueen loppu saavutettiin. 
Aloitetaan alusta.", +move_popup: "siirrä etsintävalikkoa", +font_size: "--Fontin koko--", +go_to_line: "siirry riville", +go_to_line_prompt: "mene riville:", +undo: "peruuta", +redo: "tee uudelleen", +change_smooth_selection: "kytke/sammuta joitakin näyttötoimintoja (Älykkäämpi toiminta, mutta suurempi CPU kuormitus)", +highlight: "kytke syntaksikorostus päälle/pois", +reset_highlight: "resetoi syntaksikorostus (jos teksti ei ole synkassa korostuksen kanssa)", +word_wrap: "toggle word wrapping mode", +help: "tietoja", +save: "tallenna", +load: "lataa", +line_abbr: "Rv", +char_abbr: "Pos", +position: "Paikka", +total: "Yhteensä", +close_popup: "sulje valikko", +shortcuts: "Pikatoiminnot", +add_tab: "lisää sisennys tekstiin", +remove_tab: "poista sisennys tekstistä", +about_notice: "Huomautus: syntaksinkorostus toimii vain pienelle tekstille", +toggle: "Kytke editori", +accesskey: "Pikanäppäin", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Odota...", +fullscreen: "koko ruutu", +syntax_selection: "--Syntaksi--", +close_tab: "Sulje tiedosto" +}; ADDED applications/admin/static/edit_area/langs/fr.js Index: applications/admin/static/edit_area/langs/fr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/fr.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["fr"]={ +new_document: "nouveau document (efface le contenu)", +search_button: "rechercher / remplacer", +search_command: "rechercher suivant / ouvrir la fenêtre de recherche", +search: "rechercher", +replace: "remplacer", +replace_command: "remplacer / ouvrir la fenêtre de recherche", +find_next: "rechercher", +replace_all: "tout remplacer", +reg_exp: "expr. 
régulière", +match_case: "respecter la casse", +not_found: "pas trouvé.", +occurrence_replaced: "remplacements éffectués.", +search_field_empty: "Le champ de recherche est vide.", +restart_search_at_begin: "Fin du texte atteint, poursuite au début.", +move_popup: "déplacer la fenêtre de recherche", +font_size: "--Taille police--", +go_to_line: "aller à la ligne", +go_to_line_prompt: "aller a la ligne numero:", +undo: "annuler", +redo: "refaire", +change_smooth_selection: "activer/désactiver des fonctions d'affichage (meilleur affichage mais plus de charge processeur)", +highlight: "activer/désactiver la coloration syntaxique", +reset_highlight: "réinitialiser la coloration syntaxique (si désyncronisée du texte)", +word_wrap: "activer/désactiver les retours à la ligne automatiques", +help: "à propos", +save: "sauvegarder", +load: "charger", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Position", +total: "Total", +close_popup: "fermer le popup", +shortcuts: "Racourcis clavier", +add_tab: "ajouter une tabulation dans le texte", +remove_tab: "retirer une tabulation dans le texte", +about_notice: "Note: la coloration syntaxique n'est prévue que pour de courts textes.", +toggle: "basculer l'éditeur", +accesskey: "Accesskey", +tab: "Tab", +shift: "Maj", +ctrl: "Ctrl", +esc: "Esc", +processing: "chargement...", +fullscreen: "plein écran", +syntax_selection: "--Syntaxe--", +close_tab: "Fermer le fichier" +}; ADDED applications/admin/static/edit_area/langs/hr.js Index: applications/admin/static/edit_area/langs/hr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/hr.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["hr"]={ +new_document: "Novi dokument", +search_button: "Traži i izmijeni", +search_command: "Traži dalje / Otvori prozor za traženje", +search: "Traži", +replace: "Izmijeni", +replace_command: "Izmijeni / Otvori prozor za traženje", +find_next: "Traži dalje", +replace_all: "Izmjeni sve", 
+reg_exp: "Regularni izrazi", +match_case: "Bitna vel. slova", +not_found: "nije naðeno.", +occurrence_replaced: "izmjenjenih.", +search_field_empty: "Prazno polje za traženje!", +restart_search_at_begin: "Došao do kraja. Poèeo od poèetka.", +move_popup: "Pomakni prozor", +font_size: "--Velièina teksta--", +go_to_line: "Odi na redak", +go_to_line_prompt: "Odi na redak:", +undo: "Vrati natrag", +redo: "Napravi ponovo", +change_smooth_selection: "Ukljuèi/iskljuèi neke moguænosti prikaza (pametniji prikaz, ali zagušeniji CPU)", +highlight: "Ukljuèi/iskljuèi bojanje sintakse", +reset_highlight: "Ponovi kolorizaciju (ako je nesinkronizirana s tekstom)", +word_wrap: "toggle word wrapping mode", +help: "O edit_area", +save: "Spremi", +load: "Uèitaj", +line_abbr: "Ln", +char_abbr: "Zn", +position: "Pozicija", +total: "Ukupno", +close_popup: "Zatvori prozor", +shortcuts: "Kratice", +add_tab: "Dodaj tabulaciju", +remove_tab: "Makni tabulaciju", +about_notice: "Napomena: koloriziranje sintakse je samo za kratke kodove", +toggle: "Prebaci naèin ureðivanja", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Procesiram...", +fullscreen: "Cijeli prozor", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/it.js Index: applications/admin/static/edit_area/langs/it.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/it.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["it"]={ +new_document: "nuovo documento vuoto", +search_button: "cerca e sostituisci", +search_command: "trova successivo / apri finestra di ricerca", +search: "cerca", +replace: "sostituisci", +replace_command: "sostituisci / apri finestra di ricerca", +find_next: "trova successivo", +replace_all: "sostituisci tutti", +reg_exp: "espressioni regolari", +match_case: "confronta maiuscole/minuscole
", +not_found: "non trovato.", +occurrence_replaced: "occorrenze sostituite.", +search_field_empty: "Campo ricerca vuoto", +restart_search_at_begin: "Fine del testo raggiunta. Ricomincio dall'inizio.", +move_popup: "sposta popup di ricerca", +font_size: "-- Dimensione --", +go_to_line: "vai alla linea", +go_to_line_prompt: "vai alla linea numero:", +undo: "annulla", +redo: "ripeti", +change_smooth_selection: "abilita/disabilita alcune caratteristiche della visualizzazione", +highlight: "abilita/disabilita colorazione della sintassi", +reset_highlight: "aggiorna colorazione (se non sincronizzata)", +word_wrap: "toggle word wrapping mode", +help: "informazioni su...", +save: "salva", +load: "carica", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Posizione", +total: "Totale", +close_popup: "chiudi popup", +shortcuts: "Scorciatoie", +add_tab: "aggiungi tabulazione", +remove_tab: "rimuovi tabulazione", +about_notice: "Avviso: la colorazione della sintassi vale solo con testo piccolo", +toggle: "Abilita/disabilita editor", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "In corso...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/ja.js Index: applications/admin/static/edit_area/langs/ja.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/ja.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["ja"]={ +new_document: "新規作成", +search_button: "検索・置換", +search_command: "次を検索 / 検索窓を表示", +search: "検索", +replace: "置換", +replace_command: "置換 / 置換窓を表示", +find_next: "次を検索", +replace_all: "全置換", +reg_exp: "正規表現", +match_case: "大文字小文字の区別", +not_found: "見つかりません。", +occurrence_replaced: "置換しました。", +search_field_empty: "検索対象文字列が空です。", +restart_search_at_begin: "終端に達しました、始めに戻ります", +move_popup: "検索窓を移動", +font_size: "--フォントサイズ--", +go_to_line: "指定行へ移動", +go_to_line_prompt: 
"指定行へ移動します:", +undo: "元に戻す", +redo: "やり直し", +change_smooth_selection: "スムース表示の切り替え(CPUを使います)", +highlight: "構文強調表示の切り替え", +reset_highlight: "構文強調表示のリセット", +word_wrap: "toggle word wrapping mode", +help: "ヘルプを表示", +save: "保存", +load: "読み込み", +line_abbr: "行", +char_abbr: "文字", +position: "位置", +total: "合計", +close_popup: "ポップアップを閉じる", +shortcuts: "ショートカット", +add_tab: "タブを挿入する", +remove_tab: "タブを削除する", +about_notice: "注意:構文強調表示は短いテキストでしか有効に機能しません。", +toggle: "テキストエリアとeditAreaの切り替え", +accesskey: "アクセスキー", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "処理中です...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/mk.js Index: applications/admin/static/edit_area/langs/mk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/mk.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["mk"]={ +new_document: "Нов документ", +search_button: "Најди и замени", +search_command: "Барај следно / Отвори нов прозорец за пребарување", +search: "Барај", +replace: "Замени", +replace_command: "Замени / Отвори прозорец за пребарување", +find_next: "најди следно", +replace_all: "Замени ги сите", +reg_exp: "Регуларни изрази", +match_case: "Битна е големината на буквите", +not_found: "не е пронајдено.", +occurrence_replaced: "замени.", +search_field_empty: "Полето за пребарување е празно", +restart_search_at_begin: "Крај на областа. 
Стартувај од почеток.", +move_popup: "Помести го прозорецот", +font_size: "--Големина на текстот--", +go_to_line: "Оди на линија", +go_to_line_prompt: "Оди на линија со број:", +undo: "Врати", +redo: "Повтори", +change_smooth_selection: "Вклучи/исклучи некои карактеристики за приказ (попаметен приказ, но поголемо оптеретување за процесорот)", +highlight: "Вклучи/исклучи осветлување на синтакса", +reset_highlight: "Ресетирај го осветлувањето на синтакса (доколку е десинхронизиранo со текстот)", +word_wrap: "toggle word wrapping mode", +help: "За", +save: "Зачувај", +load: "Вчитај", +line_abbr: "Лн", +char_abbr: "Зн", +position: "Позиција", +total: "Вкупно", +close_popup: "Затвори го прозорецот", +shortcuts: "Кратенки", +add_tab: "Додај табулација на текстот", +remove_tab: "Отстрани ја табулацијата", +about_notice: "Напомена: Осветлувањето на синтанса е само за краток текст", +toggle: "Смени начин на уредување", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Обработувам...", +fullscreen: "Цел прозорец", +syntax_selection: "--Синтакса--", +close_tab: "Избери датотека" +}; ADDED applications/admin/static/edit_area/langs/nl.js Index: applications/admin/static/edit_area/langs/nl.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/nl.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["nl"]={ +new_document: "nieuw leeg document", +search_button: "zoek en vervang", +search_command: "zoek volgende / zoekscherm openen", +search: "zoek", +replace: "vervang", +replace_command: "vervang / zoekscherm openen", +find_next: "volgende vinden", +replace_all: "alles vervangen", +reg_exp: "reguliere expressies", +match_case: "hoofdletter gevoelig", +not_found: "niet gevonden.", +occurrence_replaced: "object vervangen.", +search_field_empty: "Zoek veld leeg", +restart_search_at_begin: "Niet meer instanties gevonden, begin opnieuw", +move_popup: "versleep zoek scherm", 
+font_size: "--Letter grootte--", +go_to_line: "Ga naar regel", +go_to_line_prompt: "Ga naar regel nummer:", +undo: "Ongedaan maken", +redo: "Opnieuw doen", +change_smooth_selection: "zet wat schermopties aan/uit (kan langzamer zijn)", +highlight: "zet syntax highlight aan/uit", +reset_highlight: "reset highlight (indien gedesynchronizeerd)", +word_wrap: "toggle word wrapping mode", +help: "informatie", +save: "opslaan", +load: "laden", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Positie", +total: "Totaal", +close_popup: "Popup sluiten", +shortcuts: "Snelkoppelingen", +add_tab: "voeg tabs toe in tekst", +remove_tab: "verwijder tabs uit tekst", +about_notice: "Notitie: syntax highlight functie is alleen voor kleine tekst", +toggle: "geavanceerde bewerkingsopties", +accesskey: "Accessknop", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Verwerken...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/pl.js Index: applications/admin/static/edit_area/langs/pl.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/pl.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["pl"]={ +new_document: "nowy dokument", +search_button: "znajdź i zamień", +search_command: "znajdź następny", +search: "znajdź", +replace: "zamień", +replace_command: "zamień", +find_next: "następny", +replace_all: "zamień wszystko", +reg_exp: "wyrażenie regularne", +match_case: "uwzględnij wielkość liter
", +not_found: "nie znaleziono.", +occurrence_replaced: "wystąpień zamieniono.", +search_field_empty: "Nie wprowadzono tekstu", +restart_search_at_begin: "Koniec dokumentu. Wyszukiwanie od początku.", +move_popup: "przesuń okienko wyszukiwania", +font_size: "Rozmiar", +go_to_line: "idź do linii", +go_to_line_prompt: "numer linii:", +undo: "cofnij", +redo: "przywróć", +change_smooth_selection: "włącz/wyłącz niektóre opcje wyglądu (zaawansowane opcje wyglądu obciążają procesor)", +highlight: "włącz/wyłącz podświetlanie składni", +reset_highlight: "odśwież podświetlanie składni (jeśli rozsynchronizowało się z tekstem)", +word_wrap: "toggle word wrapping mode", +help: "o programie", +save: "zapisz", +load: "otwórz", +line_abbr: "Ln", +char_abbr: "Zn", +position: "Pozycja", +total: "W sumie", +close_popup: "zamknij okienko", +shortcuts: "Skróty klawiaturowe", +add_tab: "dodaj wcięcie do zaznaczonego tekstu", +remove_tab: "usuń wcięcie", +about_notice: "Uwaga: podświetlanie składni nie jest zalecane dla długich tekstów", +toggle: "Włącz/wyłącz edytor", +accesskey: "Alt+", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Przetwarzanie...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/pt.js Index: applications/admin/static/edit_area/langs/pt.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/pt.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["pt"]={ +new_document: "Novo documento", +search_button: "Localizar e substituir", +search_command: "Localizar próximo", +search: "Localizar", +replace: "Substituir", +replace_command: "Substituir", +find_next: "Localizar", +replace_all: "Subst. 
tudo", +reg_exp: "Expressões regulares", +match_case: "Diferenciar maiúsculas e minúsculas", +not_found: "Não encontrado.", +occurrence_replaced: "Ocorrências substituidas", +search_field_empty: "Campo localizar vazio.", +restart_search_at_begin: "Fim das ocorrências. Recomeçar do inicio.", +move_popup: "Mover janela", +font_size: "--Tamanho da fonte--", +go_to_line: "Ir para linha", +go_to_line_prompt: "Ir para a linha:", +undo: "Desfazer", +redo: "Refazer", +change_smooth_selection: "Opções visuais", +highlight: "Cores de sintaxe", +reset_highlight: "Resetar cores (se não sincronizado)", +word_wrap: "toggle word wrapping mode", +help: "Sobre", +save: "Salvar", +load: "Carregar", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Posição", +total: "Total", +close_popup: "Fechar", +shortcuts: "Shortcuts", +add_tab: "Adicionar tabulação", +remove_tab: "Remover tabulação", +about_notice: "Atenção: Cores de sintaxe são indicados somente para textos pequenos", +toggle: "Exibir editor", +accesskey: "Accesskey", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Processando...", +fullscreen: "fullscreen", +syntax_selection: "--Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/ru.js Index: applications/admin/static/edit_area/langs/ru.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/ru.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["ru"]={ +new_document: "новый пустой документ", +search_button: "поиск и замена", +search_command: "искать следующий / открыть панель поиска", +search: "поиск", +replace: "замена", +replace_command: "заменить / открыть панель поиска", +find_next: "найти следующее", +replace_all: "заменить все", +reg_exp: "регулярное выражение", +match_case: "учитывать регистр", +not_found: "не найдено.", +occurrence_replaced: "вхождение заменено.", +search_field_empty: "Поле поиска пустое", +restart_search_at_begin: "Достигнут конец 
документа. Начинаю с начала.", +move_popup: "переместить окно поиска", +font_size: "--Размер шрифта--", +go_to_line: "перейти к строке", +go_to_line_prompt: "перейти к строке номер:", +undo: "отменить", +redo: "вернуть", +change_smooth_selection: "включить/отключить некоторые функции просмотра (более красиво, но больше использует процессор)", +highlight: "переключить подсветку синтаксиса включена/выключена", +reset_highlight: "восстановить подсветку (если разсинхронизирована от текста)", +word_wrap: "toggle word wrapping mode", +help: "о программе", +save: "сохранить", +load: "загрузить", +line_abbr: "Стр", +char_abbr: "Стлб", +position: "Позиция", +total: "Всего", +close_popup: "закрыть всплывающее окно", +shortcuts: "Горячие клавиши", +add_tab: "добавить табуляцию в текст", +remove_tab: "убрать табуляцию из текста", +about_notice: "Внимание: функция подсветки синтаксиса только для небольших текстов", +toggle: "Переключить редактор", +accesskey: "Горячая клавиша", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Обработка...", +fullscreen: "полный экран", +syntax_selection: "--Синтакс--", +close_tab: "Закрыть файл" +}; ADDED applications/admin/static/edit_area/langs/sk.js Index: applications/admin/static/edit_area/langs/sk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/sk.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["sk"]={ +new_document: "nový prázdy dokument", +search_button: "vyhľadaj a nahraď", +search_command: "hľadaj ďalsšie / otvor vyhľadávacie pole", +search: "hľadaj", +replace: "nahraď", +replace_command: "nahraď / otvor vyhľadávacie pole", +find_next: "nájdi ďalšie", +replace_all: "nahraď všetko", +reg_exp: "platné výrazy", +match_case: "zhodujúce sa výrazy", +not_found: "nenájdené.", +occurrence_replaced: "výskyty nahradené.", +search_field_empty: "Pole vyhľadávanie je prádzne", +restart_search_at_begin: "End of area reached. 
Restart at begin.", +move_popup: "presuň vyhľadávacie okno", +font_size: "--Veľkosť textu--", +go_to_line: "prejdi na riadok", +go_to_line_prompt: "prejdi na riadok:", +undo: "krok späť", +redo: "prepracovať", +change_smooth_selection: "povoliť/zamietnúť niektoré zo zobrazených funkcií (účelnejšie zobrazenie vyžaduje väčšie zaťaženie procesora CPU)", +highlight: "prepnúť zvýrazňovanie syntaxe zap/vyp", +reset_highlight: "zrušiť zvýrazňovanie (ak je nesynchronizované s textom)", +word_wrap: "toggle word wrapping mode", +help: "o programe", +save: "uložiť", +load: "načítať", +line_abbr: "Ln", +char_abbr: "Ch", +position: "Pozícia", +total: "Spolu", +close_popup: "zavrieť okno", +shortcuts: "Skratky", +add_tab: "pridať tabulovanie textu", +remove_tab: "odstrániť tabulovanie textu", +about_notice: "Upozornenie: funkcia zvýrazňovania syntaxe je dostupná iba pre malý text", +toggle: "Prepnúť editor", +accesskey: "Accesskey", +tab: "Záložka", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "Spracúvam...", +fullscreen: "cel=a obrazovka", +syntax_selection: "--Vyber Syntax--", +close_tab: "Close file" +}; ADDED applications/admin/static/edit_area/langs/zh.js Index: applications/admin/static/edit_area/langs/zh.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/langs/zh.js @@ -0,0 +1,48 @@ +editAreaLoader.lang["zh"]={ +new_document: "新建空白文档", +search_button: "查找与替换", +search_command: "查找下一个 / 打开查找框", +search: "查找", +replace: "替换", +replace_command: "替换 / 打开查找框", +find_next: "查找下一个", +replace_all: "全部替换", +reg_exp: "正则表达式", +match_case: "匹配大小写", +not_found: "未找到.", +occurrence_replaced: "处被替换.", +search_field_empty: "查找框没有内容", +restart_search_at_begin: "已到到文档末尾. 
从头重新查找.", +move_popup: "移动查找对话框", +font_size: "--字体大小--", +go_to_line: "转到行", +go_to_line_prompt: "转到行:", +undo: "恢复", +redo: "重做", +change_smooth_selection: "启用/禁止一些显示特性(更好看但更耗费资源)", +highlight: "启用/禁止语法高亮", +reset_highlight: "重置语法高亮(当文本显示不同步时)", +word_wrap: "toggle word wrapping mode", +help: "关于", +save: "保存", +load: "加载", +line_abbr: "行", +char_abbr: "字符", +position: "位置", +total: "总计", +close_popup: "关闭对话框", +shortcuts: "快捷键", +add_tab: "添加制表符(Tab)", +remove_tab: "移除制表符(Tab)", +about_notice: "注意:语法高亮功能仅用于较少内容的文本(文件内容太大会导致浏览器反应慢)", +toggle: "切换编辑器", +accesskey: "快捷键", +tab: "Tab", +shift: "Shift", +ctrl: "Ctrl", +esc: "Esc", +processing: "正在处理中...", +fullscreen: "全屏编辑", +syntax_selection: "--语法--", +close_tab: "关闭文件" +}; ADDED applications/admin/static/edit_area/license.txt Index: applications/admin/static/edit_area/license.txt ================================================================== --- /dev/null +++ applications/admin/static/edit_area/license.txt ADDED applications/admin/static/edit_area/license_apache.txt Index: applications/admin/static/edit_area/license_apache.txt ================================================================== --- /dev/null +++ applications/admin/static/edit_area/license_apache.txt @@ -0,0 +1,7 @@ +Copyright 2008 Christophe Dolivet + +Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
ADDED applications/admin/static/edit_area/license_bsd.txt Index: applications/admin/static/edit_area/license_bsd.txt ================================================================== --- /dev/null +++ applications/admin/static/edit_area/license_bsd.txt @@ -0,0 +1,10 @@ +Copyright (c) 2008, Christophe Dolivet +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of EditArea nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
ADDED applications/admin/static/edit_area/license_lgpl.txt Index: applications/admin/static/edit_area/license_lgpl.txt ================================================================== --- /dev/null +++ applications/admin/static/edit_area/license_lgpl.txt @@ -0,0 +1,458 @@ + GNU LESSER GENERAL PUBLIC LICENSE + Version 2.1, February 1999 + + Copyright (C) 1991, 1999 Free Software Foundation, Inc. + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + +[This is the first released version of the Lesser GPL. It also counts + as the successor of the GNU Library Public License, version 2, hence + the version number 2.1.] + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +Licenses are intended to guarantee your freedom to share and change +free software--to make sure the software is free for all its users. + + This license, the Lesser General Public License, applies to some +specially designated software packages--typically libraries--of the +Free Software Foundation and other authors who decide to use it. You +can use it too, but we suggest you first think carefully about whether +this license or the ordinary General Public License is the better +strategy to use in any particular case, based on the explanations below. + + When we speak of free software, we are referring to freedom of use, +not price. Our General Public Licenses are designed to make sure that +you have the freedom to distribute copies of free software (and charge +for this service if you wish); that you receive source code or can get +it if you want it; that you can change the software and use pieces of +it in new free programs; and that you are informed that you can do +these things. 
+ + To protect your rights, we need to make restrictions that forbid +distributors to deny you these rights or to ask you to surrender these +rights. These restrictions translate to certain responsibilities for +you if you distribute copies of the library or if you modify it. + + For example, if you distribute copies of the library, whether gratis +or for a fee, you must give the recipients all the rights that we gave +you. You must make sure that they, too, receive or can get the source +code. If you link other code with the library, you must provide +complete object files to the recipients, so that they can relink them +with the library after making changes to the library and recompiling +it. And you must show them these terms so they know their rights. + + We protect your rights with a two-step method: (1) we copyright the +library, and (2) we offer you this license, which gives you legal +permission to copy, distribute and/or modify the library. + + To protect each distributor, we want to make it very clear that +there is no warranty for the free library. Also, if the library is +modified by someone else and passed on, the recipients should know +that what they have is not the original version, so that the original +author's reputation will not be affected by problems that might be +introduced by others. + + Finally, software patents pose a constant threat to the existence of +any free program. We wish to make sure that a company cannot +effectively restrict the users of a free program by obtaining a +restrictive license from a patent holder. Therefore, we insist that +any patent license obtained for a version of the library must be +consistent with the full freedom of use specified in this license. + + Most GNU software, including some libraries, is covered by the +ordinary GNU General Public License. This license, the GNU Lesser +General Public License, applies to certain designated libraries, and +is quite different from the ordinary General Public License. 
We use +this license for certain libraries in order to permit linking those +libraries into non-free programs. + + When a program is linked with a library, whether statically or using +a shared library, the combination of the two is legally speaking a +combined work, a derivative of the original library. The ordinary +General Public License therefore permits such linking only if the +entire combination fits its criteria of freedom. The Lesser General +Public License permits more lax criteria for linking other code with +the library. + + We call this license the "Lesser" General Public License because it +does Less to protect the user's freedom than the ordinary General +Public License. It also provides other free software developers Less +of an advantage over competing non-free programs. These disadvantages +are the reason we use the ordinary General Public License for many +libraries. However, the Lesser license provides advantages in certain +special circumstances. + + For example, on rare occasions, there may be a special need to +encourage the widest possible use of a certain library, so that it becomes +a de-facto standard. To achieve this, non-free programs must be +allowed to use the library. A more frequent case is that a free +library does the same job as widely used non-free libraries. In this +case, there is little to gain by limiting the free library to free +software only, so we use the Lesser General Public License. + + In other cases, permission to use a particular library in non-free +programs enables a greater number of people to use a large body of +free software. For example, permission to use the GNU C Library in +non-free programs enables many more people to use the whole GNU +operating system, as well as its variant, the GNU/Linux operating +system. 
+ + Although the Lesser General Public License is Less protective of the +users' freedom, it does ensure that the user of a program that is +linked with the Library has the freedom and the wherewithal to run +that program using a modified version of the Library. + + The precise terms and conditions for copying, distribution and +modification follow. Pay close attention to the difference between a +"work based on the library" and a "work that uses the library". The +former contains code derived from the library, whereas the latter must +be combined with the library in order to run. + + GNU LESSER GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License Agreement applies to any software library or other +program which contains a notice placed by the copyright holder or +other authorized party saying it may be distributed under the terms of +this Lesser General Public License (also called "this License"). +Each licensee is addressed as "you". + + A "library" means a collection of software functions and/or data +prepared so as to be conveniently linked with application programs +(which use some of those functions and data) to form executables. + + The "Library", below, refers to any such software library or work +which has been distributed under these terms. A "work based on the +Library" means either the Library or any derivative work under +copyright law: that is to say, a work containing the Library or a +portion of it, either verbatim or with modifications and/or translated +straightforwardly into another language. (Hereinafter, translation is +included without limitation in the term "modification".) + + "Source code" for a work means the preferred form of the work for +making modifications to it. For a library, complete source code means +all the source code for all modules it contains, plus any associated +interface definition files, plus the scripts used to control compilation +and installation of the library. 
+ + Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running a program using the Library is not restricted, and output from +such a program is covered only if its contents constitute a work based +on the Library (independent of the use of the Library in a tool for +writing it). Whether that is true depends on what the Library does +and what the program that uses the Library does. + + 1. You may copy and distribute verbatim copies of the Library's +complete source code as you receive it, in any medium, provided that +you conspicuously and appropriately publish on each copy an +appropriate copyright notice and disclaimer of warranty; keep intact +all the notices that refer to this License and to the absence of any +warranty; and distribute a copy of this License along with the +Library. + + You may charge a fee for the physical act of transferring a copy, +and you may at your option offer warranty protection in exchange for a +fee. + + 2. You may modify your copy or copies of the Library or any portion +of it, thus forming a work based on the Library, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) The modified work must itself be a software library. + + b) You must cause the files modified to carry prominent notices + stating that you changed the files and the date of any change. + + c) You must cause the whole of the work to be licensed at no + charge to all third parties under the terms of this License. 
+ + d) If a facility in the modified Library refers to a function or a + table of data to be supplied by an application program that uses + the facility, other than as an argument passed when the facility + is invoked, then you must make a good faith effort to ensure that, + in the event an application does not supply such function or + table, the facility still operates, and performs whatever part of + its purpose remains meaningful. + + (For example, a function in a library to compute square roots has + a purpose that is entirely well-defined independent of the + application. Therefore, Subsection 2d requires that any + application-supplied function or table used by this function must + be optional: if the application does not supply it, the square + root function must still compute square roots.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Library, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Library, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote +it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Library. + +In addition, mere aggregation of another work not based on the Library +with the Library (or with a work based on the Library) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may opt to apply the terms of the ordinary GNU General Public +License instead of this License to a given copy of the Library. To do +this, you must alter all the notices that refer to this License, so +that they refer to the ordinary GNU General Public License, version 2, +instead of to this License. (If a newer version than version 2 of the +ordinary GNU General Public License has appeared, then you can specify +that version instead if you wish.) Do not make any other change in +these notices. + + Once this change is made in a given copy, it is irreversible for +that copy, so the ordinary GNU General Public License applies to all +subsequent copies and derivative works made from that copy. + + This option is useful when you wish to copy part of the code of +the Library into a program that is not a library. + + 4. You may copy and distribute the Library (or a portion or +derivative of it, under Section 2) in object code or executable form +under the terms of Sections 1 and 2 above provided that you accompany +it with the complete corresponding machine-readable source code, which +must be distributed under the terms of Sections 1 and 2 above on a +medium customarily used for software interchange. + + If distribution of object code is made by offering access to copy +from a designated place, then offering equivalent access to copy the +source code from the same place satisfies the requirement to +distribute the source code, even though third parties are not +compelled to copy the source along with the object code. + + 5. A program that contains no derivative of any portion of the +Library, but is designed to work with the Library by being compiled or +linked with it, is called a "work that uses the Library". Such a +work, in isolation, is not a derivative work of the Library, and +therefore falls outside the scope of this License. 
+ + However, linking a "work that uses the Library" with the Library +creates an executable that is a derivative of the Library (because it +contains portions of the Library), rather than a "work that uses the +library". The executable is therefore covered by this License. +Section 6 states terms for distribution of such executables. + + When a "work that uses the Library" uses material from a header file +that is part of the Library, the object code for the work may be a +derivative work of the Library even though the source code is not. +Whether this is true is especially significant if the work can be +linked without the Library, or if the work is itself a library. The +threshold for this to be true is not precisely defined by law. + + If such an object file uses only numerical parameters, data +structure layouts and accessors, and small macros and small inline +functions (ten lines or less in length), then the use of the object +file is unrestricted, regardless of whether it is legally a derivative +work. (Executables containing this object code plus portions of the +Library will still fall under Section 6.) + + Otherwise, if the work is a derivative of the Library, you may +distribute the object code for the work under the terms of Section 6. +Any executables containing that work also fall under Section 6, +whether or not they are linked directly with the Library itself. + + 6. As an exception to the Sections above, you may also combine or +link a "work that uses the Library" with the Library to produce a +work containing portions of the Library, and distribute that work +under terms of your choice, provided that the terms permit +modification of the work for the customer's own use and reverse +engineering for debugging such modifications. + + You must give prominent notice with each copy of the work that the +Library is used in it and that the Library and its use are covered by +this License. You must supply a copy of this License. 
If the work +during execution displays copyright notices, you must include the +copyright notice for the Library among them, as well as a reference +directing the user to the copy of this License. Also, you must do one +of these things: + + a) Accompany the work with the complete corresponding + machine-readable source code for the Library including whatever + changes were used in the work (which must be distributed under + Sections 1 and 2 above); and, if the work is an executable linked + with the Library, with the complete machine-readable "work that + uses the Library", as object code and/or source code, so that the + user can modify the Library and then relink to produce a modified + executable containing the modified Library. (It is understood + that the user who changes the contents of definitions files in the + Library will not necessarily be able to recompile the application + to use the modified definitions.) + + b) Use a suitable shared library mechanism for linking with the + Library. A suitable mechanism is one that (1) uses at run time a + copy of the library already present on the user's computer system, + rather than copying library functions into the executable, and (2) + will operate properly with a modified version of the library, if + the user installs one, as long as the modified version is + interface-compatible with the version that the work was made with. + + c) Accompany the work with a written offer, valid for at + least three years, to give the same user the materials + specified in Subsection 6a, above, for a charge no more + than the cost of performing this distribution. + + d) If distribution of the work is made by offering access to copy + from a designated place, offer equivalent access to copy the above + specified materials from the same place. + + e) Verify that the user has already received a copy of these + materials or that you have already sent this user a copy. 
+ + For an executable, the required form of the "work that uses the +Library" must include any data and utility programs needed for +reproducing the executable from it. However, as a special exception, +the materials to be distributed need not include anything that is +normally distributed (in either source or binary form) with the major +components (compiler, kernel, and so on) of the operating system on +which the executable runs, unless that component itself accompanies +the executable. + + It may happen that this requirement contradicts the license +restrictions of other proprietary libraries that do not normally +accompany the operating system. Such a contradiction means you cannot +use both them and the Library together in an executable that you +distribute. + + 7. You may place library facilities that are a work based on the +Library side-by-side in a single library together with other library +facilities not covered by this License, and distribute such a combined +library, provided that the separate distribution of the work based on +the Library and of the other library facilities is otherwise +permitted, and provided that you do these two things: + + a) Accompany the combined library with a copy of the same work + based on the Library, uncombined with any other library + facilities. This must be distributed under the terms of the + Sections above. + + b) Give prominent notice with the combined library of the fact + that part of it is a work based on the Library, and explaining + where to find the accompanying uncombined form of the same work. + + 8. You may not copy, modify, sublicense, link with, or distribute +the Library except as expressly provided under this License. Any +attempt otherwise to copy, modify, sublicense, link with, or +distribute the Library is void, and will automatically terminate your +rights under this License. 
However, parties who have received copies, +or rights, from you under this License will not have their licenses +terminated so long as such parties remain in full compliance. + + 9. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Library or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Library (or any work based on the +Library), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Library or works based on it. + + 10. Each time you redistribute the Library (or any work based on the +Library), the recipient automatically receives a license from the +original licensor to copy, distribute, link with or modify the Library +subject to these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties with +this License. + + 11. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Library at all. For example, if a patent +license would not permit royalty-free redistribution of the Library by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Library. 
+ +If any portion of this section is held invalid or unenforceable under any +particular circumstance, the balance of the section is intended to apply, +and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 12. If the distribution and/or use of the Library is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Library under this License may add +an explicit geographical distribution limitation excluding those countries, +so that distribution is permitted only in or among countries not thus +excluded. In such case, this License incorporates the limitation as if +written in the body of this License. + + 13. The Free Software Foundation may publish revised and/or new +versions of the Lesser General Public License from time to time. +Such new versions will be similar in spirit to the present version, +but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. 
If the Library +specifies a version number of this License which applies to it and +"any later version", you have the option of following the terms and +conditions either of that version or of any later version published by +the Free Software Foundation. If the Library does not specify a +license version number, you may choose any version ever published by +the Free Software Foundation. + + 14. If you wish to incorporate parts of the Library into other free +programs whose distribution conditions are incompatible with these, +write to the author to ask for permission. For software which is +copyrighted by the Free Software Foundation, write to the Free +Software Foundation; we sometimes make exceptions for this. Our +decision will be guided by the two goals of preserving the free status +of all derivatives of our free software and of promoting the sharing +and reuse of software generally. + + NO WARRANTY + + 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO +WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. +EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR +OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY +KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE +LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME +THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN +WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY +AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU +FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR +CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE +LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING +RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A +FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF +SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGES. + + END OF TERMS AND CONDITIONS ADDED applications/admin/static/edit_area/manage_area.js Index: applications/admin/static/edit_area/manage_area.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/manage_area.js @@ -0,0 +1,623 @@ + EditArea.prototype.focus = function() { + this.textarea.focus(); + this.textareaFocused=true; + }; + + + EditArea.prototype.check_line_selection= function(timer_checkup){ + var changes, infos, new_top, new_width,i; + + var t1=t2=t2_1=t3=tLines=tend= new Date().getTime(); + // l'editeur n'existe plus => on quitte + if(!editAreas[this.id]) + return false; + + if(!this.smooth_selection && !this.do_highlight) + { + //do nothing + } + else if(this.textareaFocused && editAreas[this.id]["displayed"]==true && this.isResizing==false) + { + infos = this.get_selection_infos(); + changes = this.checkTextEvolution( typeof( this.last_selection['full_text'] ) == 'undefined' ? 
'' : this.last_selection['full_text'], infos['full_text'] ); + + t2= new Date().getTime(); + + // if selection change + if(this.last_selection["line_start"] != infos["line_start"] || this.last_selection["line_nb"] != infos["line_nb"] || infos["full_text"] != this.last_selection["full_text"] || this.reload_highlight || this.last_selection["selectionStart"] != infos["selectionStart"] || this.last_selection["selectionEnd"] != infos["selectionEnd"] || !timer_checkup ) + { + // move and adjust text selection elements + new_top = this.getLinePosTop( infos["line_start"] ); + new_width = Math.max(this.textarea.scrollWidth, this.container.clientWidth -50); + this.selection_field.style.top=this.selection_field_text.style.top=new_top+"px"; + if(!this.settings['word_wrap']){ + this.selection_field.style.width=this.selection_field_text.style.width=this.test_font_size.style.width=new_width+"px"; + } + + // usefull? => _$("cursor_pos").style.top=new_top+"px"; + + if(this.do_highlight==true) + { + // fill selection elements + var curr_text = infos["full_text"].split("\n"); + var content = ""; + //alert("length: "+curr_text.length+ " i: "+ Math.max(0,infos["line_start"]-1)+ " end: "+Math.min(curr_text.length, infos["line_start"]+infos["line_nb"]-1)+ " line: "+infos["line_start"]+" [0]: "+curr_text[0]+" [1]: "+curr_text[1]); + var start = Math.max(0,infos["line_start"]-1); + var end = Math.min(curr_text.length, infos["line_start"]+infos["line_nb"]-1); + + //curr_text[start]= curr_text[start].substr(0,infos["curr_pos"]-1) +"¤_overline_¤"+ curr_text[start].substr(infos["curr_pos"]-1); + for(i=start; i< end; i++){ + content+= curr_text[i]+"\n"; + } + + // add special chars arround selected characters + selLength = infos['selectionEnd'] - infos['selectionStart']; + content = content.substr( 0, infos["curr_pos"] - 1 ) + "\r\r" + content.substr( infos["curr_pos"] - 1, selLength ) + "\r\r" + content.substr( infos["curr_pos"] - 1 + selLength ); + content = ''+ 
content.replace(/&/g,"&").replace(//g,">").replace("\r\r", '').replace("\r\r", '') +''; + + if( this.isIE || ( this.isOpera && this.isOpera < 9.6 ) ) { + this.selection_field.innerHTML= "
" + content.replace(/^\r?\n/, "
") + "
"; + } else { + this.selection_field.innerHTML= content; + } + this.selection_field_text.innerHTML = this.selection_field.innerHTML; + t2_1 = new Date().getTime(); + // check if we need to update the highlighted background + if(this.reload_highlight || (infos["full_text"] != this.last_text_to_highlight && (this.last_selection["line_start"]!=infos["line_start"] || this.show_line_colors || this.settings['word_wrap'] || this.last_selection["line_nb"]!=infos["line_nb"] || this.last_selection["nb_line"]!=infos["nb_line"]) ) ) + { + this.maj_highlight(infos); + } + } + } + t3= new Date().getTime(); + + // manage line heights + if( this.settings['word_wrap'] && infos["full_text"] != this.last_selection["full_text"]) + { + // refresh only 1 line if text change concern only one line and that the total line number has not changed + if( changes.newText.split("\n").length == 1 && this.last_selection['nb_line'] && infos['nb_line'] == this.last_selection['nb_line'] ) + { + this.fixLinesHeight( infos['full_text'], changes.lineStart, changes.lineStart ); + } + else + { + this.fixLinesHeight( infos['full_text'], changes.lineStart, -1 ); + } + } + + tLines= new Date().getTime(); + // manage bracket finding + if( infos["line_start"] != this.last_selection["line_start"] || infos["curr_pos"] != this.last_selection["curr_pos"] || infos["full_text"].length!=this.last_selection["full_text"].length || this.reload_highlight || !timer_checkup ) + { + // move _cursor_pos + var selec_char= infos["curr_line"].charAt(infos["curr_pos"]-1); + var no_real_move=true; + if(infos["line_nb"]==1 && (this.assocBracket[selec_char] || this.revertAssocBracket[selec_char]) ){ + + no_real_move=false; + //findEndBracket(infos["line_start"], infos["curr_pos"], selec_char); + if(this.findEndBracket(infos, selec_char) === true){ + _$("end_bracket").style.visibility ="visible"; + _$("cursor_pos").style.visibility ="visible"; + _$("cursor_pos").innerHTML = selec_char; + _$("end_bracket").innerHTML = 
(this.assocBracket[selec_char] || this.revertAssocBracket[selec_char]); + }else{ + _$("end_bracket").style.visibility ="hidden"; + _$("cursor_pos").style.visibility ="hidden"; + } + }else{ + _$("cursor_pos").style.visibility ="hidden"; + _$("end_bracket").style.visibility ="hidden"; + } + //alert("move cursor"); + this.displayToCursorPosition("cursor_pos", infos["line_start"], infos["curr_pos"]-1, infos["curr_line"], no_real_move); + if(infos["line_nb"]==1 && infos["line_start"]!=this.last_selection["line_start"]) + this.scroll_to_view(); + } + this.last_selection=infos; + } + + tend= new Date().getTime(); + //if( (tend-t1) > 7 ) + // console.log( "tps total: "+ (tend-t1) + " tps get_infos: "+ (t2-t1)+ " tps selec: "+ (t2_1-t2)+ " tps highlight: "+ (t3-t2_1) +" tps lines: "+ (tLines-t3) +" tps cursor+lines: "+ (tend-tLines)+" \n" ); + + + if(timer_checkup){ + setTimeout("editArea.check_line_selection(true)", this.check_line_selection_timer); + } + }; + + + EditArea.prototype.get_selection_infos= function(){ + var sel={}, start, end, len, str; + + this.getIESelection(); + start = this.textarea.selectionStart; + end = this.textarea.selectionEnd; + + if( this.last_selection["selectionStart"] == start && this.last_selection["selectionEnd"] == end && this.last_selection["full_text"] == this.textarea.value ) + { + return this.last_selection; + } + + if(this.tabulation!="\t" && this.textarea.value.indexOf("\t")!=-1) + { // can append only after copy/paste + len = this.textarea.value.length; + this.textarea.value = this.replace_tab(this.textarea.value); + start = end = start+(this.textarea.value.length-len); + this.area_select( start, 0 ); + } + + sel["selectionStart"] = start; + sel["selectionEnd"] = end; + sel["full_text"] = this.textarea.value; + sel["line_start"] = 1; + sel["line_nb"] = 1; + sel["curr_pos"] = 0; + sel["curr_line"] = ""; + sel["indexOfCursor"] = 0; + sel["selec_direction"] = this.last_selection["selec_direction"]; + + //return sel; + var splitTab= 
sel["full_text"].split("\n"); + var nbLine = Math.max(0, splitTab.length); + var nbChar = Math.max(0, sel["full_text"].length - (nbLine - 1)); // (remove \n caracters from the count) + if( sel["full_text"].indexOf("\r") != -1 ) + nbChar = nbChar - ( nbLine - 1 ); // (remove \r caracters from the count) + sel["nb_line"] = nbLine; + sel["nb_char"] = nbChar; + + if(start>0){ + str = sel["full_text"].substr(0,start); + sel["curr_pos"] = start - str.lastIndexOf("\n"); + sel["line_start"] = Math.max(1, str.split("\n").length); + }else{ + sel["curr_pos"]=1; + } + if(end>start){ + sel["line_nb"]=sel["full_text"].substring(start,end).split("\n").length; + } + sel["indexOfCursor"]=start; + sel["curr_line"]=splitTab[Math.max(0,sel["line_start"]-1)]; + + // determine in which direction the selection grow + if(sel["selectionStart"] == this.last_selection["selectionStart"]){ + if(sel["selectionEnd"]>this.last_selection["selectionEnd"]) + sel["selec_direction"]= "down"; + else if(sel["selectionEnd"] == this.last_selection["selectionStart"]) + sel["selec_direction"]= this.last_selection["selec_direction"]; + }else if(sel["selectionStart"] == this.last_selection["selectionEnd"] && sel["selectionEnd"]>this.last_selection["selectionEnd"]){ + sel["selec_direction"]= "down"; + }else{ + sel["selec_direction"]= "up"; + } + + _$("nbLine").innerHTML = nbLine; + _$("nbChar").innerHTML = nbChar; + _$("linePos").innerHTML = sel["line_start"]; + _$("currPos").innerHTML = sel["curr_pos"]; + + return sel; + }; + + // set IE position in Firefox mode (textarea.selectionStart and textarea.selectionEnd) + EditArea.prototype.getIESelection= function(){ + var selectionStart, selectionEnd, range, stored_range; + + if( !this.isIE ) + return false; + + // make it work as nowrap mode (easier for range manipulation with lineHeight) + if( this.settings['word_wrap'] ) + this.textarea.wrap='off'; + + try{ + range = document.selection.createRange(); + stored_range = range.duplicate(); + 
stored_range.moveToElementText( this.textarea ); + stored_range.setEndPoint( 'EndToEnd', range ); + if( stored_range.parentElement() != this.textarea ) + throw "invalid focus"; + + // the range don't take care of empty lines in the end of the selection + var scrollTop = this.result.scrollTop + document.body.scrollTop; + var relative_top= range.offsetTop - parent.calculeOffsetTop(this.textarea) + scrollTop; + var line_start = Math.round((relative_top / this.lineHeight) +1); + var line_nb = Math.round( range.boundingHeight / this.lineHeight ); + + selectionStart = stored_range.text.length - range.text.length; + selectionStart += ( line_start - this.textarea.value.substr(0, selectionStart).split("\n").length)*2; // count missing empty \r to the selection + selectionStart -= ( line_start - this.textarea.value.substr(0, selectionStart).split("\n").length ) * 2; + + selectionEnd = selectionStart + range.text.length; + selectionEnd += (line_start + line_nb - 1 - this.textarea.value.substr(0, selectionEnd ).split("\n").length)*2; + + this.textarea.selectionStart = selectionStart; + this.textarea.selectionEnd = selectionEnd; + } + catch(e){} + + // restore wrap mode + if( this.settings['word_wrap'] ) + this.textarea.wrap='soft'; + }; + + // select the text for IE (and take care of \r caracters) + EditArea.prototype.setIESelection= function(){ + var a = this.textarea, nbLineStart, nbLineEnd, range; + + if( !this.isIE ) + return false; + + nbLineStart = a.value.substr(0, a.selectionStart).split("\n").length - 1; + nbLineEnd = a.value.substr(0, a.selectionEnd).split("\n").length - 1; + range = document.selection.createRange(); + range.moveToElementText( a ); + range.setEndPoint( 'EndToStart', range ); + + range.moveStart('character', a.selectionStart - nbLineStart); + range.moveEnd('character', a.selectionEnd - nbLineEnd - (a.selectionStart - nbLineStart) ); + range.select(); + }; + + + + EditArea.prototype.checkTextEvolution=function(lastText,newText){ + // ch will contain 
changes datas + var ch={},baseStep=200, cpt=0, end, step,tStart=new Date().getTime(); + + end = Math.min(newText.length, lastText.length); + step = baseStep; + // find how many chars are similar at the begin of the text + while( cpt=1 ){ + if(lastText.substr(cpt, step) == newText.substr(cpt, step)){ + cpt+= step; + }else{ + step= Math.floor(step/2); + } + } + + ch.posStart = cpt; + ch.lineStart= newText.substr(0, ch.posStart).split("\n").length -1; + + cpt_last = lastText.length; + cpt = newText.length; + step = baseStep; + // find how many chars are similar at the end of the text + while( cpt>=0 && cpt_last>=0 && step>=1 ){ + if(lastText.substr(cpt_last-step, step) == newText.substr(cpt-step, step)){ + cpt-= step; + cpt_last-= step; + }else{ + step= Math.floor(step/2); + } + } + + ch.posNewEnd = cpt; + ch.posLastEnd = cpt_last; + if(ch.posNewEnd<=ch.posStart){ + if(lastText.length < newText.length){ + ch.posNewEnd= ch.posStart + newText.length - lastText.length; + ch.posLastEnd= ch.posStart; + }else{ + ch.posLastEnd= ch.posStart + lastText.length - newText.length; + ch.posNewEnd= ch.posStart; + } + } + ch.newText = newText.substring(ch.posStart, ch.posNewEnd); + ch.lastText = lastText.substring(ch.posStart, ch.posLastEnd); + + ch.lineNewEnd = newText.substr(0, ch.posNewEnd).split("\n").length -1; + ch.lineLastEnd = lastText.substr(0, ch.posLastEnd).split("\n").length -1; + + ch.newTextLine = newText.split("\n").slice(ch.lineStart, ch.lineNewEnd+1).join("\n"); + ch.lastTextLine = lastText.split("\n").slice(ch.lineStart, ch.lineLastEnd+1).join("\n"); + //console.log( ch ); + return ch; + }; + + EditArea.prototype.tab_selection= function(){ + if(this.is_tabbing) + return; + this.is_tabbing=true; + //infos=getSelectionInfos(); + //if( document.selection ){ + this.getIESelection(); + /* Insertion du code de formatage */ + var start = this.textarea.selectionStart; + var end = this.textarea.selectionEnd; + var insText = this.textarea.value.substring(start, end); + + /* 
Insert tabulation and ajust cursor position */ + var pos_start=start; + var pos_end=end; + if (insText.length == 0) { + // if only one line selected + this.textarea.value = this.textarea.value.substr(0, start) + this.tabulation + this.textarea.value.substr(end); + pos_start = start + this.tabulation.length; + pos_end=pos_start; + } else { + start= Math.max(0, this.textarea.value.substr(0, start).lastIndexOf("\n")+1); + endText=this.textarea.value.substr(end); + startText=this.textarea.value.substr(0, start); + tmp= this.textarea.value.substring(start, end).split("\n"); + insText= this.tabulation+tmp.join("\n"+this.tabulation); + this.textarea.value = startText + insText + endText; + pos_start = start; + pos_end= this.textarea.value.indexOf("\n", startText.length + insText.length); + if(pos_end==-1) + pos_end=this.textarea.value.length; + //pos = start + repdeb.length + insText.length + ; + } + this.textarea.selectionStart = pos_start; + this.textarea.selectionEnd = pos_end; + + //if( document.selection ){ + if(this.isIE) + { + this.setIESelection(); + setTimeout("editArea.is_tabbing=false;", 100); // IE can't accept to make 2 tabulation without a little break between both + } + else + { + this.is_tabbing=false; + } + + }; + + EditArea.prototype.invert_tab_selection= function(){ + var t=this, a=this.textarea; + if(t.is_tabbing) + return; + t.is_tabbing=true; + //infos=getSelectionInfos(); + //if( document.selection ){ + t.getIESelection(); + + var start = a.selectionStart; + var end = a.selectionEnd; + var insText = a.value.substring(start, end); + + /* Tab remove and cursor seleciton adjust */ + var pos_start=start; + var pos_end=end; + if (insText.length == 0) { + if(a.value.substring(start-t.tabulation.length, start)==t.tabulation) + { + a.value = a.value.substr(0, start-t.tabulation.length) + a.value.substr(end); + pos_start = Math.max(0, start-t.tabulation.length); + pos_end = pos_start; + } + /* + a.value = a.value.substr(0, start) + t.tabulation + insText + 
a.value.substr(end); + pos_start = start + t.tabulation.length; + pos_end=pos_start;*/ + } else { + start = a.value.substr(0, start).lastIndexOf("\n")+1; + endText = a.value.substr(end); + startText = a.value.substr(0, start); + tmp = a.value.substring(start, end).split("\n"); + insText = ""; + for(i=0; i=0; ){ + if(infos["full_text"].charAt(i)==endBracket){ + nbBracketOpen--; + if(nbBracketOpen<=0){ + //i=infos["full_text"].length; + end=i; + break; + } + }else if(infos["full_text"].charAt(i)==bracket) + nbBracketOpen++; + if(normal_order) + i++; + else + i--; + } + + //end=infos["full_text"].indexOf("}", start); + if(end==-1) + return false; + var endLastLine=infos["full_text"].substr(0, end).lastIndexOf("\n"); + if(endLastLine==-1) + line=1; + else + line= infos["full_text"].substr(0, endLastLine).split("\n").length + 1; + + var curPos= end - endLastLine - 1; + var endLineLength = infos["full_text"].substring(end).split("\n")[0].length; + this.displayToCursorPosition("end_bracket", line, curPos, infos["full_text"].substring(endLastLine +1, end + endLineLength)); + return true; + }; + + EditArea.prototype.displayToCursorPosition= function(id, start_line, cur_pos, lineContent, no_real_move){ + var elem,dest,content,posLeft=0,posTop,fixPadding,topOffset,endElem; + + elem = this.test_font_size; + dest = _$(id); + content = ""+lineContent.substr(0, cur_pos).replace(/&/g,"&").replace(/"+lineContent.substr(cur_pos).replace(/&/g,"&").replace(/"; + if( this.isIE || ( this.isOpera && this.isOpera < 9.6 ) ) { + elem.innerHTML= "
" + content.replace(/^\r?\n/, "
") + "
"; + } else { + elem.innerHTML= content; + } + + + endElem = _$('endTestFont'); + topOffset = endElem.offsetTop; + fixPadding = parseInt( this.content_highlight.style.paddingLeft.replace("px", "") ); + posLeft = 45 + endElem.offsetLeft + ( !isNaN( fixPadding ) && topOffset > 0 ? fixPadding : 0 ); + posTop = this.getLinePosTop( start_line ) + topOffset;// + Math.floor( ( endElem.offsetHeight - 1 ) / this.lineHeight ) * this.lineHeight; + + // detect the case where the span start on a line but has no display on it + if( this.isIE && cur_pos > 0 && endElem.offsetLeft == 0 ) + { + posTop += this.lineHeight; + } + if(no_real_move!=true){ // when the cursor is hidden no need to move him + dest.style.top=posTop+"px"; + dest.style.left=posLeft+"px"; + } + // usefull for smarter scroll + dest.cursor_top=posTop; + dest.cursor_left=posLeft; + // _$(id).style.marginLeft=posLeft+"px"; + }; + + EditArea.prototype.getLinePosTop= function(start_line){ + var elem= _$('line_'+ start_line), posTop=0; + if( elem ) + posTop = elem.offsetTop; + else + posTop = this.lineHeight * (start_line-1); + return posTop; + }; + + + // return the dislpayed height of a text (take word-wrap into account) + EditArea.prototype.getTextHeight= function(text){ + var t=this,elem,height; + elem = t.test_font_size; + content = text.replace(/&/g,"&").replace(/") + ""; + } else { + elem.innerHTML= content; + } + height = elem.offsetHeight; + height = Math.max( 1, Math.floor( elem.offsetHeight / this.lineHeight ) ) * this.lineHeight; + return height; + }; + + /** + * Fix line height for the given lines + * @param Integer linestart + * @param Integer lineEnd End line or -1 to cover all lines + */ + EditArea.prototype.fixLinesHeight= function( textValue, lineStart,lineEnd ){ + var aText = textValue.split("\n"); + if( lineEnd == -1 ) + lineEnd = aText.length-1; + for( var i = Math.max(0, lineStart); i <= lineEnd; i++ ) + { + if( elem = _$('line_'+ ( i+1 ) ) ) + { + elem.style.height= typeof( aText[i] ) != 
"undefined" ? this.getTextHeight( aText[i] )+"px" : this.lineHeight; + } + } + }; + + EditArea.prototype.area_select= function(start, length){ + this.textarea.focus(); + + start = Math.max(0, Math.min(this.textarea.value.length, start)); + end = Math.max(start, Math.min(this.textarea.value.length, start+length)); + + if(this.isIE) + { + this.textarea.selectionStart = start; + this.textarea.selectionEnd = end; + this.setIESelection(); + } + else + { + // Opera bug when moving selection start and selection end + if(this.isOpera && this.isOpera < 9.6 ) + { + this.textarea.setSelectionRange(0, 0); + } + this.textarea.setSelectionRange(start, end); + } + this.check_line_selection(); + }; + + + EditArea.prototype.area_get_selection= function(){ + var text=""; + if( document.selection ){ + var range = document.selection.createRange(); + text=range.text; + }else{ + text= this.textarea.value.substring(this.textarea.selectionStart, this.textarea.selectionEnd); + } + return text; + }; ADDED applications/admin/static/edit_area/plugins/charmap/charmap.js Index: applications/admin/static/edit_area/plugins/charmap/charmap.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/charmap.js @@ -0,0 +1,90 @@ +/** + * Charmap plugin + * by Christophe Dolivet + * v0.1 (2006/09/22) + * + * + * This plugin allow to use a visual keyboard allowing to insert any UTF-8 characters in the text. 
+ * + * - plugin name to add to the plugin list: "charmap" + * - plugin name to add to the toolbar list: "charmap" + * - possible parameters to add to EditAreaLoader.init(): + * "charmap_default": (String) define the name of the default character range displayed on popup display + * (default: "arrows") + * + * + */ + +var EditArea_charmap= { + /** + * Get called once this file is loaded (editArea still not initialized) + * + * @return nothing + */ + init: function(){ + this.default_language="Arrows"; + } + + /** + * Returns the HTML code for a specific control string or false if this plugin doesn't have that control. + * A control can be a button, select list or any other HTML item to present in the EditArea user interface. + * Language variables such as {$lang_somekey} will also be replaced with contents from + * the language packs. + * + * @param {string} ctrl_name: the name of the control to add + * @return HTML code for a specific control or false. + * @type string or boolean + */ + ,get_control_html: function(ctrl_name){ + switch(ctrl_name){ + case "charmap": + // Control id, button img, command + return parent.editAreaLoader.get_button_html('charmap_but', 'charmap.gif', 'charmap_press', false, this.baseURL); + } + return false; + } + /** + * Get called once EditArea is fully loaded and initialised + * + * @return nothing + */ + ,onload: function(){ + if(editArea.settings["charmap_default"] && editArea.settings["charmap_default"].length>0) + this.default_language= editArea.settings["charmap_default"]; + } + + /** + * Is called each time the user touch a keyboard key. + * + * @param (event) e: the keydown event + * @return true - pass to next handler in chain, false - stop chain execution + * @type boolean + */ + ,onkeydown: function(e){ + + } + + /** + * Executes a specific command, this function handles plugin commands. 
+ * + * @param {string} cmd: the name of the command being executed + * @param {unknown} param: the parameter of the command + * @return true - pass to next handler in chain, false - stop chain execution + * @type boolean + */ + ,execCommand: function(cmd, param){ + // Handle commands + switch(cmd){ + case "charmap_press": + win= window.open(this.baseURL+"popup.html", "charmap", "width=500,height=270,scrollbars=yes,resizable=yes"); + win.focus(); + return false; + } + // Pass to next handler in chain + return true; + } + +}; + +// Adds the plugin class to the list of available EditArea plugins +editArea.add_plugin("charmap", EditArea_charmap); ADDED applications/admin/static/edit_area/plugins/charmap/css/charmap.css Index: applications/admin/static/edit_area/plugins/charmap/css/charmap.css ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/css/charmap.css @@ -0,0 +1,64 @@ +body{ + background-color: #F0F0EE; + font: 12px monospace, sans-serif; +} + +select{ + background-color: #F9F9F9; + border: solid 1px #888888; +} + +h1, h2, h3, h4, h5, h6{ + margin: 0; + padding: 0; + color: #2B6FB6; +} + +h1{ + font-size: 1.5em; +} + +div#char_list{ + height: 200px; + overflow: auto; + padding: 1px; + border: 1px solid #0A246A; + background-color: #F9F9F9; + clear: both; + margin-top: 5px; +} + +a.char{ + display: block; + float: left; + width: 20px; + height: 20px; + line-height: 20px; + margin: 1px; + border: solid 1px #888888; + text-align: center; + cursor: pointer; +} + +a.char:hover{ + background-color: #CCCCCC; +} + +.preview{ + border: solid 1px #888888; + width: 50px; + padding: 2px 5px; + height: 35px; + line-height: 35px; + text-align:center; + background-color: #CCCCCC; + font-size: 2em; + float: right; + font-weight: bold; + margin: 0 0 5px 5px; +} + +#preview_code{ + font-size: 1.1em; + width: 70px; +} ADDED applications/admin/static/edit_area/plugins/charmap/images/charmap.gif 
Index: applications/admin/static/edit_area/plugins/charmap/images/charmap.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/images/charmap.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/plugins/charmap/jscripts/map.js Index: applications/admin/static/edit_area/plugins/charmap/jscripts/map.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/jscripts/map.js @@ -0,0 +1,373 @@ +var editArea; + + +/** + * UTF-8 list taken from http://www.utf8-chartable.de/unicode-utf8-table.pl?utf8=dec + */ + + +/* +var char_range_list={ +"Basic Latin":"0021,007F", +"Latin-1 Supplement":"0080,00FF", +"Latin Extended-A":"0100,017F", +"Latin Extended-B":"0180,024F", +"IPA Extensions":"0250,02AF", +"Spacing Modifier Letters":"02B0,02FF", + +"Combining Diacritical Marks":"0300,036F", +"Greek and Coptic":"0370,03FF", +"Cyrillic":"0400,04FF", +"Cyrillic Supplement":"0500,052F", +"Armenian":"0530,058F", +"Hebrew":"0590,05FF", +"Arabic":"0600,06FF", +"Syriac":"0700,074F", +"Arabic Supplement":"0750,077F", + +"Thaana":"0780,07BF", +"Devanagari":"0900,097F", +"Bengali":"0980,09FF", +"Gurmukhi":"0A00,0A7F", +"Gujarati":"0A80,0AFF", +"Oriya":"0B00,0B7F", +"Tamil":"0B80,0BFF", +"Telugu":"0C00,0C7F", +"Kannada":"0C80,0CFF", + +"Malayalam":"0D00,0D7F", +"Sinhala":"0D80,0DFF", +"Thai":"0E00,0E7F", +"Lao":"0E80,0EFF", +"Tibetan":"0F00,0FFF", +"Myanmar":"1000,109F", +"Georgian":"10A0,10FF", +"Hangul Jamo":"1100,11FF", +"Ethiopic":"1200,137F", + +"Ethiopic Supplement":"1380,139F", +"Cherokee":"13A0,13FF", +"Unified Canadian Aboriginal Syllabics":"1400,167F", +"Ogham":"1680,169F", +"Runic":"16A0,16FF", +"Tagalog":"1700,171F", +"Hanunoo":"1720,173F", +"Buhid":"1740,175F", +"Tagbanwa":"1760,177F", + +"Khmer":"1780,17FF", +"Mongolian":"1800,18AF", +"Limbu":"1900,194F", +"Tai 
Le":"1950,197F", +"New Tai Lue":"1980,19DF", +"Khmer Symbols":"19E0,19FF", +"Buginese":"1A00,1A1F", +"Phonetic Extensions":"1D00,1D7F", +"Phonetic Extensions Supplement":"1D80,1DBF", + +"Combining Diacritical Marks Supplement":"1DC0,1DFF", +"Latin Extended Additional":"1E00,1EFF", +"Greek Extended":"1F00,1FFF", +"General Punctuation":"2000,206F", +"Superscripts and Subscripts":"2070,209F", +"Currency Symbols":"20A0,20CF", +"Combining Diacritical Marks for Symbols":"20D0,20FF", +"Letterlike Symbols":"2100,214F", +"Number Forms":"2150,218F", + +"Arrows":"2190,21FF", +"Mathematical Operators":"2200,22FF", +"Miscellaneous Technical":"2300,23FF", +"Control Pictures":"2400,243F", +"Optical Character Recognition":"2440,245F", +"Enclosed Alphanumerics":"2460,24FF", +"Box Drawing":"2500,257F", +"Block Elements":"2580,259F", +"Geometric Shapes":"25A0,25FF", + +"Miscellaneous Symbols":"2600,26FF", +"Dingbats":"2700,27BF", +"Miscellaneous Mathematical Symbols-A":"27C0,27EF", +"Supplemental Arrows-A":"27F0,27FF", +"Braille Patterns":"2800,28FF", +"Supplemental Arrows-B":"2900,297F", +"Miscellaneous Mathematical Symbols-B":"2980,29FF", +"Supplemental Mathematical Operators":"2A00,2AFF", +"Miscellaneous Symbols and Arrows":"2B00,2BFF", + +"Glagolitic":"2C00,2C5F", +"Coptic":"2C80,2CFF", +"Georgian Supplement":"2D00,2D2F", +"Tifinagh":"2D30,2D7F", +"Ethiopic Extended":"2D80,2DDF", +"Supplemental Punctuation":"2E00,2E7F", +"CJK Radicals Supplement":"2E80,2EFF", +"Kangxi Radicals":"2F00,2FDF", +"Ideographic Description Characters":"2FF0,2FFF", + +"CJK Symbols and Punctuation":"3000,303F", +"Hiragana":"3040,309F", +"Katakana":"30A0,30FF", +"Bopomofo":"3100,312F", +"Hangul Compatibility Jamo":"3130,318F", +"Kanbun":"3190,319F", +"Bopomofo Extended":"31A0,31BF", +"CJK Strokes":"31C0,31EF", +"Katakana Phonetic Extensions":"31F0,31FF", + +"Enclosed CJK Letters and Months":"3200,32FF", +"CJK Compatibility":"3300,33FF", +"CJK Unified Ideographs Extension A":"3400,4DBF", +"Yijing Hexagram 
Symbols":"4DC0,4DFF", +"CJK Unified Ideographs":"4E00,9FFF", +"Yi Syllables":"A000,A48F", +"Yi Radicals":"A490,A4CF", +"Modifier Tone Letters":"A700,A71F", +"Syloti Nagri":"A800,A82F", + +"Hangul Syllables":"AC00,D7AF", +"High Surrogates":"D800,DB7F", +"High Private Use Surrogates":"DB80,DBFF", +"Low Surrogates":"DC00,DFFF", +"Private Use Area":"E000,F8FF", +"CJK Compatibility Ideographs":"F900,FAFF", +"Alphabetic Presentation Forms":"FB00,FB4F", +"Arabic Presentation Forms-A":"FB50,FDFF", +"Variation Selectors":"FE00,FE0F", + +"Vertical Forms":"FE10,FE1F", +"Combining Half Marks":"FE20,FE2F", +"CJK Compatibility Forms":"FE30,FE4F", +"Small Form Variants":"FE50,FE6F", +"Arabic Presentation Forms-B":"FE70,FEFF", +"Halfwidth and Fullwidth Forms":"FF00,FFEF", +"Specials":"FFF0,FFFF", +"Linear B Syllabary":"10000,1007F", +"Linear B Ideograms":"10080,100FF", + +"Aegean Numbers":"10100,1013F", +"Ancient Greek Numbers":"10140,1018F", +"Old Italic":"10300,1032F", +"Gothic":"10330,1034F", +"Ugaritic":"10380,1039F", +"Old Persian":"103A0,103DF", +"Deseret":"10400,1044F", +"Shavian":"10450,1047F", +"Osmanya":"10480,104AF", + +"Cypriot Syllabary":"10800,1083F", +"Kharoshthi":"10A00,10A5F", +"Byzantine Musical Symbols":"1D000,1D0FF", +"Musical Symbols":"1D100,1D1FF", +"Ancient Greek Musical Notation":"1D200,1D24F", +"Tai Xuan Jing Symbols":"1D300,1D35F", +"Mathematical Alphanumeric Symbols":"1D400,1D7FF", +"CJK Unified Ideographs Extension B":"20000,2A6DF", +"CJK Compatibility Ideographs Supplement":"2F800,2FA1F", +"Tags":"E0000,E007F", +"Variation Selectors Supplement":"E0100,E01EF" +}; +*/ +var char_range_list={ +"Aegean Numbers":"10100,1013F", +"Alphabetic Presentation Forms":"FB00,FB4F", +"Ancient Greek Musical Notation":"1D200,1D24F", +"Ancient Greek Numbers":"10140,1018F", +"Arabic":"0600,06FF", +"Arabic Presentation Forms-A":"FB50,FDFF", +"Arabic Presentation Forms-B":"FE70,FEFF", +"Arabic Supplement":"0750,077F", +"Armenian":"0530,058F", +"Arrows":"2190,21FF", +"Basic 
Latin":"0020,007F", +"Bengali":"0980,09FF", +"Block Elements":"2580,259F", +"Bopomofo Extended":"31A0,31BF", +"Bopomofo":"3100,312F", +"Box Drawing":"2500,257F", +"Braille Patterns":"2800,28FF", +"Buginese":"1A00,1A1F", +"Buhid":"1740,175F", +"Byzantine Musical Symbols":"1D000,1D0FF", +"CJK Compatibility Forms":"FE30,FE4F", +"CJK Compatibility Ideographs Supplement":"2F800,2FA1F", +"CJK Compatibility Ideographs":"F900,FAFF", +"CJK Compatibility":"3300,33FF", +"CJK Radicals Supplement":"2E80,2EFF", +"CJK Strokes":"31C0,31EF", +"CJK Symbols and Punctuation":"3000,303F", +"CJK Unified Ideographs Extension A":"3400,4DBF", +"CJK Unified Ideographs Extension B":"20000,2A6DF", +"CJK Unified Ideographs":"4E00,9FFF", +"Cherokee":"13A0,13FF", +"Combining Diacritical Marks Supplement":"1DC0,1DFF", +"Combining Diacritical Marks for Symbols":"20D0,20FF", +"Combining Diacritical Marks":"0300,036F", +"Combining Half Marks":"FE20,FE2F", +"Control Pictures":"2400,243F", +"Coptic":"2C80,2CFF", +"Currency Symbols":"20A0,20CF", +"Cypriot Syllabary":"10800,1083F", +"Cyrillic Supplement":"0500,052F", +"Cyrillic":"0400,04FF", +"Deseret":"10400,1044F", +"Devanagari":"0900,097F", +"Dingbats":"2700,27BF", +"Enclosed Alphanumerics":"2460,24FF", +"Enclosed CJK Letters and Months":"3200,32FF", +"Ethiopic Extended":"2D80,2DDF", +"Ethiopic Supplement":"1380,139F", +"Ethiopic":"1200,137F", +"General Punctuation":"2000,206F", +"Geometric Shapes":"25A0,25FF", +"Georgian Supplement":"2D00,2D2F", +"Georgian":"10A0,10FF", +"Glagolitic":"2C00,2C5F", +"Gothic":"10330,1034F", +"Greek Extended":"1F00,1FFF", +"Greek and Coptic":"0370,03FF", +"Gujarati":"0A80,0AFF", +"Gurmukhi":"0A00,0A7F", +"Halfwidth and Fullwidth Forms":"FF00,FFEF", +"Hangul Compatibility Jamo":"3130,318F", +"Hangul Jamo":"1100,11FF", +"Hangul Syllables":"AC00,D7AF", +"Hanunoo":"1720,173F", +"Hebrew":"0590,05FF", +"High Private Use Surrogates":"DB80,DBFF", +"High Surrogates":"D800,DB7F", +"Hiragana":"3040,309F", +"IPA 
Extensions":"0250,02AF", +"Ideographic Description Characters":"2FF0,2FFF", +"Kanbun":"3190,319F", +"Kangxi Radicals":"2F00,2FDF", +"Kannada":"0C80,0CFF", +"Katakana Phonetic Extensions":"31F0,31FF", +"Katakana":"30A0,30FF", +"Kharoshthi":"10A00,10A5F", +"Khmer Symbols":"19E0,19FF", +"Khmer":"1780,17FF", +"Lao":"0E80,0EFF", +"Latin Extended Additional":"1E00,1EFF", +"Latin Extended-A":"0100,017F", +"Latin Extended-B":"0180,024F", +"Latin-1 Supplement":"0080,00FF", +"Letterlike Symbols":"2100,214F", +"Limbu":"1900,194F", +"Linear B Ideograms":"10080,100FF", +"Linear B Syllabary":"10000,1007F", +"Low Surrogates":"DC00,DFFF", +"Malayalam":"0D00,0D7F", +"Mathematical Alphanumeric Symbols":"1D400,1D7FF", +"Mathematical Operators":"2200,22FF", +"Miscellaneous Mathematical Symbols-A":"27C0,27EF", +"Miscellaneous Mathematical Symbols-B":"2980,29FF", +"Miscellaneous Symbols and Arrows":"2B00,2BFF", +"Miscellaneous Symbols":"2600,26FF", +"Miscellaneous Technical":"2300,23FF", +"Modifier Tone Letters":"A700,A71F", +"Mongolian":"1800,18AF", +"Musical Symbols":"1D100,1D1FF", +"Myanmar":"1000,109F", +"New Tai Lue":"1980,19DF", +"Number Forms":"2150,218F", +"Ogham":"1680,169F", +"Old Italic":"10300,1032F", +"Old Persian":"103A0,103DF", +"Optical Character Recognition":"2440,245F", +"Oriya":"0B00,0B7F", +"Osmanya":"10480,104AF", +"Phonetic Extensions Supplement":"1D80,1DBF", +"Phonetic Extensions":"1D00,1D7F", +"Private Use Area":"E000,F8FF", +"Runic":"16A0,16FF", +"Shavian":"10450,1047F", +"Sinhala":"0D80,0DFF", +"Small Form Variants":"FE50,FE6F", +"Spacing Modifier Letters":"02B0,02FF", +"Specials":"FFF0,FFFF", +"Superscripts and Subscripts":"2070,209F", +"Supplemental Arrows-A":"27F0,27FF", +"Supplemental Arrows-B":"2900,297F", +"Supplemental Mathematical Operators":"2A00,2AFF", +"Supplemental Punctuation":"2E00,2E7F", +"Syloti Nagri":"A800,A82F", +"Syriac":"0700,074F", +"Tagalog":"1700,171F", +"Tagbanwa":"1760,177F", +"Tags":"E0000,E007F", +"Tai Le":"1950,197F", +"Tai Xuan 
Jing Symbols":"1D300,1D35F", +"Tamil":"0B80,0BFF", +"Telugu":"0C00,0C7F", +"Thaana":"0780,07BF", +"Thai":"0E00,0E7F", +"Tibetan":"0F00,0FFF", +"Tifinagh":"2D30,2D7F", +"Ugaritic":"10380,1039F", +"Unified Canadian Aboriginal Syllabics":"1400,167F", +"Variation Selectors Supplement":"E0100,E01EF", +"Variation Selectors":"FE00,FE0F", +"Vertical Forms":"FE10,FE1F", +"Yi Radicals":"A490,A4CF", +"Yi Syllables":"A000,A48F", +"Yijing Hexagram Symbols":"4DC0,4DFF" +}; + +var insert="charmap_insert"; + +function map_load(){ + editArea=opener.editArea; + // translate the document + insert= editArea.get_translation(insert, "word"); + //alert(document.title); + document.title= editArea.get_translation(document.title, "template"); + document.body.innerHTML= editArea.get_translation(document.body.innerHTML, "template"); + //document.title= editArea.get_translation(document.getElementBytitle, "template"); + + var selected_lang=opener.EditArea_charmap.default_language.toLowerCase(); + var selected=0; + + var select= document.getElementById("select_range") + for(var i in char_range_list){ + if(i.toLowerCase()==selected_lang) + selected=select.options.length; + select.options[select.options.length]=new Option(i, char_range_list[i]); + } + select.options[selected].selected=true; +/* start=0; + end=127; + content=""; + for(var i=start; i"+ String.fromCharCode(i) +""; + } + document.getElementById("char_list").innerHTML= html; + document.getElementById("preview_char").innerHTML=""; +} + +function previewChar(i){ + document.getElementById("preview_char").innerHTML= String.fromCharCode(i); + document.getElementById("preview_code").innerHTML= "&#"+ i +";"; +} + +function insertChar(i){ + opener.parent.editAreaLoader.setSelectedText(editArea.id, String.fromCharCode( i)); + range= opener.parent.editAreaLoader.getSelectionRange(editArea.id); + opener.parent.editAreaLoader.setSelectionRange(editArea.id, range["end"], range["end"]); + window.focus(); +} ADDED 
applications/admin/static/edit_area/plugins/charmap/langs/bg.js Index: applications/admin/static/edit_area/plugins/charmap/langs/bg.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/bg.js @@ -0,0 +1,12 @@ +/* + * Bulgarian translation + * Author: Valentin Hristov + * Company: SOFTKIT Bulgarian + * Site: http://www.softkit-bg.com + */ +editArea.add_lang("bg",{ +charmap_but: "Виртуална клавиатура", +charmap_title: "Виртуална клавиатура", +charmap_choose_block: "избери езиков блок", +charmap_insert:"постави този символ" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/cs.js Index: applications/admin/static/edit_area/plugins/charmap/langs/cs.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/cs.js @@ -0,0 +1,6 @@ +editArea.add_lang("cs",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/de.js Index: applications/admin/static/edit_area/plugins/charmap/langs/de.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/de.js @@ -0,0 +1,6 @@ +editArea.add_lang("de",{ +charmap_but: "Sonderzeichen", +charmap_title: "Sonderzeichen", +charmap_choose_block: "Bereich auswählen", +charmap_insert: "dieses Zeichen einfügen" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/dk.js Index: applications/admin/static/edit_area/plugins/charmap/langs/dk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/dk.js @@ -0,0 +1,6 @@ +editArea.add_lang("dk",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual 
keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/en.js Index: applications/admin/static/edit_area/plugins/charmap/langs/en.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/en.js @@ -0,0 +1,6 @@ +editArea.add_lang("en",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/eo.js Index: applications/admin/static/edit_area/plugins/charmap/langs/eo.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/eo.js @@ -0,0 +1,6 @@ +editArea.add_lang("eo",{ +charmap_but: "Ekranklavaro", +charmap_title: "Ekranklavaro", +charmap_choose_block: "Elekto de lingvo", +charmap_insert:"enmeti tiun signaron" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/es.js Index: applications/admin/static/edit_area/plugins/charmap/langs/es.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/es.js @@ -0,0 +1,6 @@ +editArea.add_lang("es",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/fr.js Index: applications/admin/static/edit_area/plugins/charmap/langs/fr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/fr.js @@ -0,0 +1,6 @@ +editArea.add_lang("fr",{ +charmap_but: "Clavier visuel", +charmap_title: "Clavier visuel", +charmap_choose_block: "choix 
du language", +charmap_insert:"insérer ce caractère" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/hr.js Index: applications/admin/static/edit_area/plugins/charmap/langs/hr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/hr.js @@ -0,0 +1,6 @@ +editArea.add_lang("hr",{ +charmap_but: "Virtualna tipkovnica", +charmap_title: "Virtualna tipkovnica", +charmap_choose_block: "Odaberi blok s jezikom", +charmap_insert:"Ubaci taj znak" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/it.js Index: applications/admin/static/edit_area/plugins/charmap/langs/it.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/it.js @@ -0,0 +1,6 @@ +editArea.add_lang("it",{ +charmap_but: "Tastiera visuale", +charmap_title: "Tastiera visuale", +charmap_choose_block: "seleziona blocco", +charmap_insert:"inserisci questo carattere" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/ja.js Index: applications/admin/static/edit_area/plugins/charmap/langs/ja.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/ja.js @@ -0,0 +1,6 @@ +editArea.add_lang("ja",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/mk.js Index: applications/admin/static/edit_area/plugins/charmap/langs/mk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/mk.js @@ -0,0 +1,6 @@ +editArea.add_lang("mkn",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", 
+charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/nl.js Index: applications/admin/static/edit_area/plugins/charmap/langs/nl.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/nl.js @@ -0,0 +1,6 @@ +editArea.add_lang("nl",{ +charmap_but: "Visueel toetsenbord", +charmap_title: "Visueel toetsenbord", +charmap_choose_block: "Kies een taal blok", +charmap_insert:"Voeg dit symbool in" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/pl.js Index: applications/admin/static/edit_area/plugins/charmap/langs/pl.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/pl.js @@ -0,0 +1,6 @@ +editArea.add_lang("pl",{ +charmap_but: "Klawiatura ekranowa", +charmap_title: "Klawiatura ekranowa", +charmap_choose_block: "wybierz grupę znaków", +charmap_insert:"wstaw ten znak" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/pt.js Index: applications/admin/static/edit_area/plugins/charmap/langs/pt.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/pt.js @@ -0,0 +1,6 @@ +editArea.add_lang("pt",{ +charmap_but: "Visual keyboard", +charmap_title: "Visual keyboard", +charmap_choose_block: "select language block", +charmap_insert:"insert this character" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/ru.js Index: applications/admin/static/edit_area/plugins/charmap/langs/ru.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/ru.js @@ -0,0 +1,6 @@ +editArea.add_lang("ru",{ +charmap_but: "Визуальная клавиатура", +charmap_title: "Визуальная клавиатура", +charmap_choose_block: "выбрать языковой блок", 
+charmap_insert:"вставить этот символ" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/sk.js Index: applications/admin/static/edit_area/plugins/charmap/langs/sk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/sk.js @@ -0,0 +1,6 @@ +editArea.add_lang("sk",{ +charmap_but: "Vizuálna klávesnica", +charmap_title: "Vizuálna klávesnica", +charmap_choose_block: "vyber jazykový blok", +charmap_insert: "vlož tento znak" +}); ADDED applications/admin/static/edit_area/plugins/charmap/langs/zh.js Index: applications/admin/static/edit_area/plugins/charmap/langs/zh.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/langs/zh.js @@ -0,0 +1,6 @@ +editArea.add_lang("zh",{ +charmap_but: "软键盘", +charmap_title: "软键盘", +charmap_choose_block: "选择一个语言块", +charmap_insert:"插入此字符" +}); ADDED applications/admin/static/edit_area/plugins/charmap/popup.html Index: applications/admin/static/edit_area/plugins/charmap/popup.html ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/charmap/popup.html @@ -0,0 +1,24 @@ + + + + +{$charmap_title} + + + + + +
+
+

{$charmap_title}:

+ +
+ +
+ + + + + ADDED applications/admin/static/edit_area/plugins/test/css/test.css Index: applications/admin/static/edit_area/plugins/test/css/test.css ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/css/test.css @@ -0,0 +1,3 @@ +select#test_select{ + background-color: #FF0000; +} ADDED applications/admin/static/edit_area/plugins/test/images/test.gif Index: applications/admin/static/edit_area/plugins/test/images/test.gif ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/images/test.gif cannot compute difference between binary files ADDED applications/admin/static/edit_area/plugins/test/langs/bg.js Index: applications/admin/static/edit_area/plugins/test/langs/bg.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/bg.js @@ -0,0 +1,10 @@ +/* + * Bulgarian translation + * Author: Valentin Hristov + * Company: SOFTKIT Bulgarian + * Site: http://www.softkit-bg.com + */ +editArea.add_lang("bg",{ +test_select: "избери таг", +test_but: "тествай копието" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/cs.js Index: applications/admin/static/edit_area/plugins/test/langs/cs.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/cs.js @@ -0,0 +1,4 @@ +editArea.add_lang("cs",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/de.js Index: applications/admin/static/edit_area/plugins/test/langs/de.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/de.js @@ -0,0 +1,4 @@ +editArea.add_lang("de",{ +test_select: "Tag auswählen", +test_but: "Test Button" +}); ADDED 
applications/admin/static/edit_area/plugins/test/langs/dk.js Index: applications/admin/static/edit_area/plugins/test/langs/dk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/dk.js @@ -0,0 +1,4 @@ +editArea.add_lang("dk",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/en.js Index: applications/admin/static/edit_area/plugins/test/langs/en.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/en.js @@ -0,0 +1,4 @@ +editArea.add_lang("en",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/eo.js Index: applications/admin/static/edit_area/plugins/test/langs/eo.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/eo.js @@ -0,0 +1,4 @@ +editArea.add_lang("eo",{ +test_select:"elekto de marko", +test_but: "provo-butono" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/es.js Index: applications/admin/static/edit_area/plugins/test/langs/es.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/es.js @@ -0,0 +1,4 @@ +editArea.add_lang("es",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/fr.js Index: applications/admin/static/edit_area/plugins/test/langs/fr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/fr.js @@ -0,0 +1,4 @@ +editArea.add_lang("fr",{ +test_select:"choix balise", +test_but: "bouton de test" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/hr.js Index: 
applications/admin/static/edit_area/plugins/test/langs/hr.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/hr.js @@ -0,0 +1,4 @@ +editArea.add_lang("hr",{ +test_select: "Odaberi tag", +test_but: "Probna tipka" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/it.js Index: applications/admin/static/edit_area/plugins/test/langs/it.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/it.js @@ -0,0 +1,4 @@ +editArea.add_lang("it",{ +test_select: "seleziona tag", +test_but: "pulsante di test" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/ja.js Index: applications/admin/static/edit_area/plugins/test/langs/ja.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/ja.js @@ -0,0 +1,4 @@ +editArea.add_lang("ja",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/mk.js Index: applications/admin/static/edit_area/plugins/test/langs/mk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/mk.js @@ -0,0 +1,4 @@ +editArea.add_lang("mk",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/nl.js Index: applications/admin/static/edit_area/plugins/test/langs/nl.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/nl.js @@ -0,0 +1,4 @@ +editArea.add_lang("nl",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/pl.js Index: applications/admin/static/edit_area/plugins/test/langs/pl.js 
================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/pl.js @@ -0,0 +1,4 @@ +editArea.add_lang("pl",{ +test_select: "wybierz tag", +test_but: "test" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/pt.js Index: applications/admin/static/edit_area/plugins/test/langs/pt.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/pt.js @@ -0,0 +1,4 @@ +editArea.add_lang("pt",{ +test_select: "select tag", +test_but: "test button" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/ru.js Index: applications/admin/static/edit_area/plugins/test/langs/ru.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/ru.js @@ -0,0 +1,4 @@ +editArea.add_lang("ru",{ +test_select: "выбрать тэг", +test_but: "тестировать кнопку" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/sk.js Index: applications/admin/static/edit_area/plugins/test/langs/sk.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/sk.js @@ -0,0 +1,4 @@ +editArea.add_lang("sk",{ +test_select: "vyber tag", +test_but: "testovacie tlačidlo" +}); ADDED applications/admin/static/edit_area/plugins/test/langs/zh.js Index: applications/admin/static/edit_area/plugins/test/langs/zh.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/langs/zh.js @@ -0,0 +1,4 @@ +editArea.add_lang("zh",{ +test_select: "选择标签", +test_but: "测试按钮" +}); ADDED applications/admin/static/edit_area/plugins/test/test.js Index: applications/admin/static/edit_area/plugins/test/test.js ================================================================== --- /dev/null +++ 
applications/admin/static/edit_area/plugins/test/test.js @@ -0,0 +1,110 @@ +/** + * Plugin designed for test prupose. It add a button (that manage an alert) and a select (that allow to insert tags) in the toolbar. + * This plugin also disable the "f" key in the editarea, and load a CSS and a JS file + */ +var EditArea_test= { + /** + * Get called once this file is loaded (editArea still not initialized) + * + * @return nothing + */ + init: function(){ + // alert("test init: "+ this._someInternalFunction(2, 3)); + editArea.load_css(this.baseURL+"css/test.css"); + editArea.load_script(this.baseURL+"test2.js"); + } + /** + * Returns the HTML code for a specific control string or false if this plugin doesn't have that control. + * A control can be a button, select list or any other HTML item to present in the EditArea user interface. + * Language variables such as {$lang_somekey} will also be replaced with contents from + * the language packs. + * + * @param {string} ctrl_name: the name of the control to add + * @return HTML code for a specific control or false. + * @type string or boolean + */ + ,get_control_html: function(ctrl_name){ + switch(ctrl_name){ + case "test_but": + // Control id, button img, command + return parent.editAreaLoader.get_button_html('test_but', 'test.gif', 'test_cmd', false, this.baseURL); + case "test_select": + html= ""; + return html; + } + return false; + } + /** + * Get called once EditArea is fully loaded and initialised + * + * @return nothing + */ + ,onload: function(){ + alert("test load"); + } + + /** + * Is called each time the user touch a keyboard key. 
+ * + * @param (event) e: the keydown event + * @return true - pass to next handler in chain, false - stop chain execution + * @type boolean + */ + ,onkeydown: function(e){ + var str= String.fromCharCode(e.keyCode); + // desactivate the "f" character + if(str.toLowerCase()=="f"){ + return true; + } + return false; + } + + /** + * Executes a specific command, this function handles plugin commands. + * + * @param {string} cmd: the name of the command being executed + * @param {unknown} param: the parameter of the command + * @return true - pass to next handler in chain, false - stop chain execution + * @type boolean + */ + ,execCommand: function(cmd, param){ + // Handle commands + switch(cmd){ + case "test_select_change": + var val= document.getElementById("test_select").value; + if(val!=-1) + parent.editAreaLoader.insertTags(editArea.id, "<"+val+">", ""); + document.getElementById("test_select").options[0].selected=true; + return false; + case "test_cmd": + alert("user clicked on test_cmd"); + return false; + } + // Pass to next handler in chain + return true; + } + + /** + * This is just an internal plugin method, prefix all internal methods with a _ character. + * The prefix is needed so they doesn't collide with future EditArea callback functions. + * + * @param {string} a Some arg1. + * @param {string} b Some arg2. + * @return Some return. 
+ * @type unknown + */ + ,_someInternalFunction : function(a, b) { + return a+b; + } +}; + +// Adds the plugin class to the list of available EditArea plugins +editArea.add_plugin("test", EditArea_test); ADDED applications/admin/static/edit_area/plugins/test/test2.js Index: applications/admin/static/edit_area/plugins/test/test2.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/test/test2.js @@ -0,0 +1,1 @@ +alert("test2.js is loaded from test plugin"); ADDED applications/admin/static/edit_area/plugins/zencoding/core.js Index: applications/admin/static/edit_area/plugins/zencoding/core.js ================================================================== --- /dev/null +++ applications/admin/static/edit_area/plugins/zencoding/core.js @@ -0,0 +1,4 @@ +var zen_settings={"variables":{"lang":"en","locale":"en-US","charset":"UTF-8","profile":"xhtml","indentation":"\t"},"css":{"snippets":{"@i":"@import url(|);","@m":"@media print {\n\t|\n}","@f":"@font-face 
{\n\tfont-family:|;\n\tsrc:url(|);\n}","!":"!important","pos":"position:|;","pos:s":"position:static;","pos:a":"position:absolute;","pos:r":"position:relative;","pos:f":"position:fixed;","t":"top:|;","t:a":"top:auto;","r":"right:|;","r:a":"right:auto;","b":"bottom:|;","b:a":"bottom:auto;","l":"left:|;","l:a":"left:auto;","z":"z-index:|;","z:a":"z-index:auto;","fl":"float:|;","fl:n":"float:none;","fl:l":"float:left;","fl:r":"float:right;","cl":"clear:|;","cl:n":"clear:none;","cl:l":"clear:left;","cl:r":"clear:right;","cl:b":"clear:both;","d":"display:|;","d:n":"display:none;","d:b":"display:block;","d:ib":"display:inline;","d:li":"display:list-item;","d:ri":"display:run-in;","d:cp":"display:compact;","d:tb":"display:table;","d:itb":"display:inline-table;","d:tbcp":"display:table-caption;","d:tbcl":"display:table-column;","d:tbclg":"display:table-column-group;","d:tbhg":"display:table-header-group;","d:tbfg":"display:table-footer-group;","d:tbr":"display:table-row;","d:tbrg":"display:table-row-group;","d:tbc":"display:table-cell;","d:rb":"display:ruby;","d:rbb":"display:ruby-base;","d:rbbg":"display:ruby-base-group;","d:rbt":"display:ruby-text;","d:rbtg":"display:ruby-text-group;","v":"visibility:|;","v:v":"visibility:visible;","v:h":"visibility:hidden;","v:c":"visibility:collapse;","ov":"overflow:|;","ov:v":"overflow:visible;","ov:h":"overflow:hidden;","ov:s":"overflow:scroll;","ov:a":"overflow:auto;","ovx":"overflow-x:|;","ovx:v":"overflow-x:visible;","ovx:h":"overflow-x:hidden;","ovx:s":"overflow-x:scroll;","ovx:a":"overflow-x:auto;","ovy":"overflow-y:|;","ovy:v":"overflow-y:visible;","ovy:h":"overflow-y:hidden;","ovy:s":"overflow-y:scroll;","ovy:a":"overflow-y:auto;","ovs":"overflow-style:|;","ovs:a":"overflow-style:auto;","ovs:s":"overflow-style:scrollbar;","ovs:p":"overflow-style:panner;","ovs:m":"overflow-style:move;","ovs:mq":"overflow-style:marquee;","zoo":"zoom:1;","cp":"clip:|;","cp:a":"clip:auto;","cp:r":"clip:rect(|);","bxz":"box-sizing:|;","bxz:cb":"box-
sizing:content-box;","bxz:bb":"box-sizing:border-box;","bxsh":"box-shadow:|;","bxsh:n":"box-shadow:none;","bxsh:w":"-webkit-box-shadow:0 0 0 #000;","bxsh:m":"-moz-box-shadow:0 0 0 0 #000;","m":"margin:|;","m:a":"margin:auto;","m:0":"margin:0;","m:2":"margin:0 0;","m:3":"margin:0 0 0;","m:4":"margin:0 0 0 0;","mt":"margin-top:|;","mt:a":"margin-top:auto;","mr":"margin-right:|;","mr:a":"margin-right:auto;","mb":"margin-bottom:|;","mb:a":"margin-bottom:auto;","ml":"margin-left:|;","ml:a":"margin-left:auto;","p":"padding:|;","p:0":"padding:0;","p:2":"padding:0 0;","p:3":"padding:0 0 0;","p:4":"padding:0 0 0 0;","pt":"padding-top:|;","pr":"padding-right:|;","pb":"padding-bottom:|;","pl":"padding-left:|;","w":"width:|;","w:a":"width:auto;","h":"height:|;","h:a":"height:auto;","maw":"max-width:|;","maw:n":"max-width:none;","mah":"max-height:|;","mah:n":"max-height:none;","miw":"min-width:|;","mih":"min-height:|;","o":"outline:|;","o:n":"outline:none;","oo":"outline-offset:|;","ow":"outline-width:|;","os":"outline-style:|;","oc":"outline-color:#000;","oc:i":"outline-color:invert;","bd":"border:|;","bd+":"border:1px solid #000;","bd:n":"border:none;","bdbk":"border-break:|;","bdbk:c":"border-break:close;","bdcl":"border-collapse:|;","bdcl:c":"border-collapse:collapse;","bdcl:s":"border-collapse:separate;","bdc":"border-color:#000;","bdi":"border-image:url(|);","bdi:n":"border-image:none;","bdi:w":"-webkit-border-image:url(|) 0 0 0 0 stretch stretch;","bdi:m":"-moz-border-image:url(|) 0 0 0 0 stretch 
stretch;","bdti":"border-top-image:url(|);","bdti:n":"border-top-image:none;","bdri":"border-right-image:url(|);","bdri:n":"border-right-image:none;","bdbi":"border-bottom-image:url(|);","bdbi:n":"border-bottom-image:none;","bdli":"border-left-image:url(|);","bdli:n":"border-left-image:none;","bdci":"border-corner-image:url(|);","bdci:n":"border-corner-image:none;","bdci:c":"border-corner-image:continue;","bdtli":"border-top-left-image:url(|);","bdtli:n":"border-top-left-image:none;","bdtli:c":"border-top-left-image:continue;","bdtri":"border-top-right-image:url(|);","bdtri:n":"border-top-right-image:none;","bdtri:c":"border-top-right-image:continue;","bdbri":"border-bottom-right-image:url(|);","bdbri:n":"border-bottom-right-image:none;","bdbri:c":"border-bottom-right-image:continue;","bdbli":"border-bottom-left-image:url(|);","bdbli:n":"border-bottom-left-image:none;","bdbli:c":"border-bottom-left-image:continue;","bdf":"border-fit:|;","bdf:c":"border-fit:clip;","bdf:r":"border-fit:repeat;","bdf:sc":"border-fit:scale;","bdf:st":"border-fit:stretch;","bdf:ow":"border-fit:overwrite;","bdf:of":"border-fit:overflow;","bdf:sp":"border-fit:space;","bdl":"border-length:|;","bdl:a":"border-length:auto;","bdsp":"border-spacing:|;","bds":"border-style:|;","bds:n":"border-style:none;","bds:h":"border-style:hidden;","bds:dt":"border-style:dotted;","bds:ds":"border-style:dashed;","bds:s":"border-style:solid;","bds:db":"border-style:double;","bds:dtds":"border-style:dot-dash;","bds:dtdtds":"border-style:dot-dot-dash;","bds:w":"border-style:wave;","bds:g":"border-style:groove;","bds:r":"border-style:ridge;","bds:i":"border-style:inset;","bds:o":"border-style:outset;","bdw":"border-width:|;","bdt":"border-top:|;","bdt+":"border-top:1px solid #000;","bdt:n":"border-top:none;","bdtw":"border-top-width:|;","bdts":"border-top-style:|;","bdts:n":"border-top-style:none;","bdtc":"border-top-color:#000;","bdr":"border-right:|;","bdr+":"border-right:1px solid 
#000;","bdr:n":"border-right:none;","bdrw":"border-right-width:|;","bdrs":"border-right-style:|;","bdrs:n":"border-right-style:none;","bdrc":"border-right-color:#000;","bdb":"border-bottom:|;","bdb+":"border-bottom:1px solid #000;","bdb:n":"border-bottom:none;","bdbw":"border-bottom-width:|;","bdbs":"border-bottom-style:|;","bdbs:n":"border-bottom-style:none;","bdbc":"border-bottom-color:#000;","bdl":"border-left:|;","bdl+":"border-left:1px solid #000;","bdl:n":"border-left:none;","bdlw":"border-left-width:|;","bdls":"border-left-style:|;","bdls:n":"border-left-style:none;","bdlc":"border-left-color:#000;","bdrs":"border-radius:|;","bdtrrs":"border-top-right-radius:|;","bdtlrs":"border-top-left-radius:|;","bdbrrs":"border-bottom-right-radius:|;","bdblrs":"border-bottom-left-radius:|;","bg":"background:|;","bg+":"background:#FFF url(|) 0 0 no-repeat;","bg:n":"background:none;","bg:ie":"filter:progid:DXImageTransform.Microsoft.AlphaImageLoader(src='|x.png');","bgc":"background-color:#FFF;","bgi":"background-image:url(|);","bgi:n":"background-image:none;","bgr":"background-repeat:|;","bgr:n":"background-repeat:no-repeat;","bgr:x":"background-repeat:repeat-x;","bgr:y":"background-repeat:repeat-y;","bga":"background-attachment:|;","bga:f":"background-attachment:fixed;","bga:s":"background-attachment:scroll;","bgp":"background-position:0 
0;","bgpx":"background-position-x:|;","bgpy":"background-position-y:|;","bgbk":"background-break:|;","bgbk:bb":"background-break:bounding-box;","bgbk:eb":"background-break:each-box;","bgbk:c":"background-break:continuous;","bgcp":"background-clip:|;","bgcp:bb":"background-clip:border-box;","bgcp:pb":"background-clip:padding-box;","bgcp:cb":"background-clip:content-box;","bgcp:nc":"background-clip:no-clip;","bgo":"background-origin:|;","bgo:pb":"background-origin:padding-box;","bgo:bb":"background-origin:border-box;","bgo:cb":"background-origin:content-box;","bgz":"background-size:|;","bgz:a":"background-size:auto;","bgz:ct":"background-size:contain;","bgz:cv":"background-size:cover;","c":"color:#000;","tbl":"table-layout:|;","tbl:a":"table-layout:auto;","tbl:f":"table-layout:fixed;","cps":"caption-side:|;","cps:t":"caption-side:top;","cps:b":"caption-side:bottom;","ec":"empty-cells:|;","ec:s":"empty-cells:show;","ec:h":"empty-cells:hide;","lis":"list-style:|;","lis:n":"list-style:none;","lisp":"list-style-position:|;","lisp:i":"list-style-position:inside;","lisp:o":"list-style-position:outside;","list":"list-style-type:|;","list:n":"list-style-type:none;","list:d":"list-style-type:disc;","list:c":"list-style-type:circle;","list:s":"list-style-type:square;","list:dc":"list-style-type:decimal;","list:dclz":"list-style-type:decimal-leading-zero;","list:lr":"list-style-type:lower-roman;","list:ur":"list-style-type:upper-roman;","lisi":"list-style-image:|;","lisi:n":"list-style-image:none;","q":"quotes:|;","q:n":"quotes:none;","q:ru":"quotes:'\00AB' '\00BB' '\201E' '\201C';","q:en":"quotes:'\201C' '\201D' '\2018' 
'\2019';","ct":"content:|;","ct:n":"content:normal;","ct:oq":"content:open-quote;","ct:noq":"content:no-open-quote;","ct:cq":"content:close-quote;","ct:ncq":"content:no-close-quote;","ct:a":"content:attr(|);","ct:c":"content:counter(|);","ct:cs":"content:counters(|);","coi":"counter-increment:|;","cor":"counter-reset:|;","va":"vertical-align:|;","va:sup":"vertical-align:super;","va:t":"vertical-align:top;","va:tt":"vertical-align:text-top;","va:m":"vertical-align:middle;","va:bl":"vertical-align:baseline;","va:b":"vertical-align:bottom;","va:tb":"vertical-align:text-bottom;","va:sub":"vertical-align:sub;","ta":"text-align:|;","ta:l":"text-align:left;","ta:c":"text-align:center;","ta:r":"text-align:right;","tal":"text-align-last:|;","tal:a":"text-align-last:auto;","tal:l":"text-align-last:left;","tal:c":"text-align-last:center;","tal:r":"text-align-last:right;","td":"text-decoration:|;","td:n":"text-decoration:none;","td:u":"text-decoration:underline;","td:o":"text-decoration:overline;","td:l":"text-decoration:line-through;","te":"text-emphasis:|;","te:n":"text-emphasis:none;","te:ac":"text-emphasis:accent;","te:dt":"text-emphasis:dot;","te:c":"text-emphasis:circle;","te:ds":"text-emphasis:disc;","te:b":"text-emphasis:before;","te:a":"text-emphasis:after;","th":"text-height:|;","th:a":"text-height:auto;","th:f":"text-height:font-size;","th:t":"text-height:text-size;","th:m":"text-height:max-size;","ti":"text-indent:|;","ti:-":"text-indent:-9999px;","tj":"text-justify:|;","tj:a":"text-justify:auto;","tj:iw":"text-justify:inter-word;","tj:ii":"text-justify:inter-ideograph;","tj:ic":"text-justify:inter-cluster;","tj:d":"text-justify:distribute;","tj:k":"text-justify:kashida;","tj:t":"text-justify:tibetan;","to":"text-outline:|;","to+":"text-outline:0 0 
#000;","to:n":"text-outline:none;","tr":"text-replace:|;","tr:n":"text-replace:none;","tt":"text-transform:|;","tt:n":"text-transform:none;","tt:c":"text-transform:capitalize;","tt:u":"text-transform:uppercase;","tt:l":"text-transform:lowercase;","tw":"text-wrap:|;","tw:n":"text-wrap:normal;","tw:no":"text-wrap:none;","tw:u":"text-wrap:unrestricted;","tw:s":"text-wrap:suppress;","tsh":"text-shadow:|;","tsh+":"text-shadow:0 0 0 #000;","tsh:n":"text-shadow:none;","lh":"line-height:|;","whs":"white-space:|;","whs:n":"white-space:normal;","whs:p":"white-space:pre;","whs:nw":"white-space:nowrap;","whs:pw":"white-space:pre-wrap;","whs:pl":"white-space:pre-line;","whsc":"white-space-collapse:|;","whsc:n":"white-space-collapse:normal;","whsc:k":"white-space-collapse:keep-all;","whsc:l":"white-space-collapse:loose;","whsc:bs":"white-space-collapse:break-strict;","whsc:ba":"white-space-collapse:break-all;","wob":"word-break:|;","wob:n":"word-break:normal;","wob:k":"word-break:keep-all;","wob:l":"word-break:loose;","wob:bs":"word-break:break-strict;","wob:ba":"word-break:break-all;","wos":"word-spacing:|;","wow":"word-wrap:|;","wow:nm":"word-wrap:normal;","wow:n":"word-wrap:none;","wow:u":"word-wrap:unrestricted;","wow:s":"word-wrap:suppress;","lts":"letter-spacing:|;","f":"font:|;","f+":"font:1em 
Arial,sans-serif;","fw":"font-weight:|;","fw:n":"font-weight:normal;","fw:b":"font-weight:bold;","fw:br":"font-weight:bolder;","fw:lr":"font-weight:lighter;","fs":"font-style:|;","fs:n":"font-style:normal;","fs:i":"font-style:italic;","fs:o":"font-style:oblique;","fv":"font-variant:|;","fv:n":"font-variant:normal;","fv:sc":"font-variant:small-caps;","fz":"font-size:|;","fza":"font-size-adjust:|;","fza:n":"font-size-adjust:none;","ff":"font-family:|;","ff:s":"font-family:serif;","ff:ss":"font-family:sans-serif;","ff:c":"font-family:cursive;","ff:f":"font-family:fantasy;","ff:m":"font-family:monospace;","fef":"font-effect:|;","fef:n":"font-effect:none;","fef:eg":"font-effect:engrave;","fef:eb":"font-effect:emboss;","fef:o":"font-effect:outline;","fem":"font-emphasize:|;","femp":"font-emphasize-position:|;","femp:b":"font-emphasize-position:before;","femp:a":"font-emphasize-position:after;","fems":"font-emphasize-style:|;","fems:n":"font-emphasize-style:none;","fems:ac":"font-emphasize-style:accent;","fems:dt":"font-emphasize-style:dot;","fems:c":"font-emphasize-style:circle;","fems:ds":"font-emphasize-style:disc;","fsm":"font-smooth:|;","fsm:a":"font-smooth:auto;","fsm:n":"font-smooth:never;","fsm:aw":"font-smooth:always;","fst":"font-stretch:|;","fst:n":"font-stretch:normal;","fst:uc":"font-stretch:ultra-condensed;","fst:ec":"font-stretch:extra-condensed;","fst:c":"font-stretch:condensed;","fst:sc":"font-stretch:semi-condensed;","fst:se":"font-stretch:semi-expanded;","fst:e":"font-stretch:expanded;","fst:ee":"font-stretch:extra-expanded;","fst:ue":"font-stretch:ultra-expanded;","op":"opacity:|;","op:ie":"filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100);","op:ms":"-ms-filter:'progid:DXImageTransform.Microsoft.Alpha(Opacity=100)';","rz":"resize:|;","rz:n":"resize:none;","rz:b":"resize:both;","rz:h":"resize:horizontal;","rz:v":"resize:vertical;","cur":"cursor:|;","cur:a":"cursor:auto;","cur:d":"cursor:default;","cur:c":"cursor:crosshair;","cur:ha":"cursor:han
d;","cur:he":"cursor:help;","cur:m":"cursor:move;","cur:p":"cursor:pointer;","cur:t":"cursor:text;","pgbb":"page-break-before:|;","pgbb:au":"page-break-before:auto;","pgbb:al":"page-break-before:always;","pgbb:l":"page-break-before:left;","pgbb:r":"page-break-before:right;","pgbi":"page-break-inside:|;","pgbi:au":"page-break-inside:auto;","pgbi:av":"page-break-inside:avoid;","pgba":"page-break-after:|;","pgba:au":"page-break-after:auto;","pgba:al":"page-break-after:always;","pgba:l":"page-break-after:left;","pgba:r":"page-break-after:right;","orp":"orphans:|;","wid":"widows:|;"}},"html":{"snippets":{"cc:ie6":"","cc:ie":"","cc:noie":"\n\t${child}|\n","html:4t":'\n'+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+"","html:4s":'\n'+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+"","html:xt":'\n'+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+"","html:xs":'\n'+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+"","html:xxs":'\n'+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+"","html:5":"\n"+'\n'+"\n"+" \n"+' \n'+"\n"+"\n\t${child}|\n\n"+""},"abbreviations":{"a":'',"a:link":'',"a:mail":'',"abbr":'',"acronym":'',"base":'',"bdo":'',"bdo:r":'',"bdo:l":'',"link:css":'',"link:print":'',"link:favicon":'',"link:touch":'',"link:rss":'',"link:atom":'',"meta:utf":'',"meta:win":'',"meta:compat":'',"style":'',"script":' + +{{if request.function=='index':}} +

{{=T("Available databases and tables")}}

+ {{if not databases:}}{{=T("No databases in this application")}}{{pass}} + {{for db in sorted(databases):}} + {{for table in databases[db].tables:}} + {{qry='%s.%s.id>0'%(db,table)}} + {{tbl=databases[db][table]}} + {{if hasattr(tbl,'_primarykey'):}} + {{if tbl._primarykey:}} + {{firstkey=tbl[tbl._primarykey[0]]}} + {{if firstkey.type in ['string','text']:}} + {{qry='%s.%s.%s!=""'%(db,table,firstkey.name)}} + {{else:}} + {{qry='%s.%s.%s>0'%(db,table,firstkey.name)}} + {{pass}} + {{else:}} + {{qry=''}} + {{pass}} + {{pass}} +

{{=A("%s.%s" % (db,table),_href=URL('select',args=[db],vars=dict(query=qry)))}} +

+ [ {{=A(str(T('insert new'))+' '+table,_href=URL('insert',args=[db,table]))}} ] +

+ {{pass}} + {{pass}} + +{{elif request.function=='select':}} +

{{=XML(str(T("database %s select"))%A(request.args[0],_href=URL('index'))) }} +

+ {{if table:}} + [ {{=A(str(T('insert new %s'))%table,_href=URL('insert',args=[request.args[0],table]))}} ]

+

{{=T("Rows in table")}}


+ {{else:}} +

{{=T("Rows selected")}}


+ {{pass}} + {{=form}} +

{{=T('The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.')}}
+ {{=T('Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.')}}
+ {{=T('"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN')}}

+

+

{{=nrows}} {{=T("selected")}}

+ {{if start>0:}}[ {{=A(T('previous 100 rows'),_href=URL('select',args=request.args[0],vars=dict(start=start-100)))}} ]{{pass}} + {{if stop + {{linkto=URL('update',args=request.args[0])}} + {{upload=URL('download',args=request.args[0])}} + {{=SQLTABLE(rows,linkto,upload,orderby=True,_class='sortable')}} + + {{pass}} +

{{=T("Import/Export")}}


+ [ {{=T("export as csv file")}} ] + {{if table:}} + {{=FORM(str(T('or import from csv file'))+" ",INPUT(_type='file',_name='csvfile'),INPUT(_type='hidden',_value=table,_name='table'),INPUT(_type='submit',_value='import'))}} + {{pass}} + + +{{elif request.function=='insert':}} +

{{=T("database")}} {{=A(request.args[0],_href=URL('index'))}} + {{if hasattr(table,'_primarykey'):}} + {{fieldname=table._primarykey[0]}} + {{dbname=request.args[0]}} + {{tablename=request.args[1]}} + {{cond = table[fieldname].type in ['string','text'] and '!=""' or '>0'}} + {{=T("table")}} {{=A(tablename,_href=URL('select',args=dbname,vars=dict(query='%s.%s.%s%s'%(dbname,tablename,fieldname,cond))))}} + {{else:}} + {{=T("table")}} {{=A(request.args[1],_href=URL('select',args=request.args[0],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2]))))}} + {{pass}} +

+

{{=T("New Record")}}


+ {{=form}} + + + +{{elif request.function=='update':}} +

{{=T("database")}} {{=A(request.args[0],_href=URL('index'))}} + {{if hasattr(table,'_primarykey'):}} + {{fieldname=request.vars.keys()[0]}} + {{dbname=request.args[0]}} + {{tablename=request.args[1]}} + {{cond = table[fieldname].type in ['string','text'] and '!=""' or '>0'}} + {{=T("table")}} {{=A(tablename,_href=URL('select',args=dbname,vars=dict(query='%s.%s.%s%s'%(dbname,tablename,fieldname,cond))))}} + {{=T("record")}} {{=A('%s=%s'%request.vars.items()[0],_href=URL('update',args=request.args[:2],vars=request.vars))}} + {{else:}} + {{=T("table")}} {{=A(request.args[1],_href=URL('select',args=request.args[0],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2]))))}} + {{=T("record id")}} {{=A(request.args[2],_href=URL('update',args=request.args[:3]))}} + {{pass}} +

+

{{=T("Edit current record")}}



{{=form}} + + + +{{elif request.function=='state':}} +

{{=T("Internal State")}}

+

{{=T("Current request")}}

+ {{=BEAUTIFY(request)}} +

{{=T("Current response")}}

+ {{=BEAUTIFY(response)}} +

{{=T("Current session")}}

+ {{=BEAUTIFY(session)}} + + +{{elif request.function == 'ccache':}} +

Cache

+
+
+
+ Statistics +
+
+

Overview

+

+ Hit Ratio: + {{=total['ratio']}}% + ({{=total['hits']}} hits + and {{=total['misses']}} misses) +

+

+ Size of cache: + {{=total['objects']}} items, + {{=total['bytes']}} bytes + {{if total['bytes'] > 524287:}} + ({{="%.0d" % (total['bytes'] / 1048576)}} MB) + {{pass}} +

+

+ Cache contains items up to + {{="%02d" % total['oldest'][0]}} hours + {{="%02d" % total['oldest'][1]}} minutes + {{="%02d" % total['oldest'][2]}} seconds old. +

+

RAM

+

+ Hit Ratio: + {{=ram['ratio']}}% + ({{=ram['hits']}} hits + and {{=ram['misses']}} misses) +

+

+ Size of cache: + {{=ram['objects']}} items, + {{=ram['bytes']}} bytes + {{if ram['bytes'] > 524287:}} + ({{=ram['bytes'] / 1048576}} MB) + {{pass}} +

+

+ RAM contains items up to + {{="%02d" % ram['oldest'][0]}} hours + {{="%02d" % ram['oldest'][1]}} minutes + {{="%02d" % ram['oldest'][2]}} seconds old. +

+

DISK

+

+ Hit Ratio: + {{=disk['ratio']}}% + ({{=disk['hits']}} hits + and {{=disk['misses']}} misses) +

+

+ Size of cache: + {{=disk['objects']}} items, + {{=disk['bytes']}} bytes + {{if disk['bytes'] > 524287:}} + ({{=disk['bytes'] / 1048576}} MB) + {{pass}} +

+

+ DISK contains items up to + {{="%02d" % disk['oldest'][0]}} hours + {{="%02d" % disk['oldest'][1]}} minutes + {{="%02d" % disk['oldest'][2]}} seconds old. +

+
+ +
+ Manage Cache +
+
+

+ {{=form}} +

+
+
+
+
+{{pass}} ADDED applications/admin/views/debug/index.html Index: applications/admin/views/debug/index.html ================================================================== --- /dev/null +++ applications/admin/views/debug/index.html @@ -0,0 +1,159 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}shell{{end}} + + +
+ + +
+
+
>>>
+
+ + Type PDB debugger command in here and hit Return (Enter) to execute it. +
+
+
+
+ +
+
    +
  • Using the shell may lock the database to other users of this app.
  • +
+
+ + ADDED applications/admin/views/default/about.html Index: applications/admin/views/default/about.html ================================================================== --- /dev/null +++ applications/admin/views/default/about.html @@ -0,0 +1,12 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}about{{end}} + +

{{=T("About application")}} "{{=app}}"

+

{{=T("About")}} {{=app}}

+

{{=button(URL('edit/%s/ABOUT' % (app)), T('Edit'))}}

+
{{=about}}
+

{{=T('License for')}} {{=app}}

+

{{=button(URL('edit/%s/LICENSE' % (app)), T('Edit'))}}

+
{{=license}}
+ ADDED applications/admin/views/default/amy_ajax.html Index: applications/admin/views/default/amy_ajax.html ================================================================== --- /dev/null +++ applications/admin/views/default/amy_ajax.html @@ -0,0 +1,76 @@ + + + +{{if request.args[1]=="views":}} + +{{else:}} + +{{pass}} + + + ADDED applications/admin/views/default/change_password.html Index: applications/admin/views/default/change_password.html ================================================================== --- /dev/null +++ applications/admin/views/default/change_password.html @@ -0,0 +1,9 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}change_password{{end}} + +

Change Admin Password

+ +
+ {{=form}} +
ADDED applications/admin/views/default/delete.html Index: applications/admin/views/default/delete.html ================================================================== --- /dev/null +++ applications/admin/views/default/delete.html @@ -0,0 +1,8 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}delete{{end}} + +
+

{{=T('Are you sure you want to delete file "%s"?', filename)}}

+

{{=FORM(INPUT(_type='submit',_name='nodelete',_value=T('Abort')),INPUT(_type='hidden',_name='sender',_value=sender), _class="inline")}}{{=FORM(INPUT(_type='submit',_name='delete',_value=T('Delete')),INPUT(_type='hidden',_name='sender',_value=sender), _class="inline")}}

+
ADDED applications/admin/views/default/delete_plugin.html Index: applications/admin/views/default/delete_plugin.html ================================================================== --- /dev/null +++ applications/admin/views/default/delete_plugin.html @@ -0,0 +1,9 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}delete_plugin{{end}} + +
+

{{=T('Are you sure you want to delete plugin "%s"?', plugin)}}

+

{{=FORM(INPUT(_type='submit',_name='nodelete',_value=T('NO')))}}

+

{{=FORM(INPUT(_type='submit',_name='delete',_value=T('YES')))}}

+
ADDED applications/admin/views/default/design.html Index: applications/admin/views/default/design.html ================================================================== --- /dev/null +++ applications/admin/views/default/design.html @@ -0,0 +1,313 @@ +{{extend 'layout.html'}} +{{ +def all(items): + return reduce(lambda a,b:a and b,items,True) +def peekfile(path,file): + return A(file.replace('\\\\','/'),_href=URL('peek', args=(app, path, file))) +def editfile(path,file): + return A(SPAN(T('Edit')),_class='button editbutton',_href=URL('edit', args=(app, path, file))) +def testfile(path,file): + return A(TAG[''](IMG(_src=URL('static', 'images/test_icon.png'), _alt=T('test')), SPAN(T("Run tests in this file (to run all files, you may also use the button labelled 'test')"))), _class='icon test tooltip',_href=URL('test', args=(app, file))) +def editlanguagefile(path,file): + return A(SPAN(T('Edit')),_class='button editbutton',_href=URL('edit_language', args=(app, path, file))) +def file_upload_form(location): + form=FORM(T("upload file:")," ", + INPUT(_type="file",_name="file")," ",T("and rename it:")," ", + INPUT(_type="text",_name="filename",requires=IS_NOT_EMPTY), + INPUT(_type="hidden",_name="location",_value=location), + INPUT(_type="hidden",_name="sender",_value=URL('design',args=app)), + INPUT(_type="submit",_value=T("upload")),_action=URL('upload_file')) + return form +def file_create_form(location): + form=FORM(T("create file with filename:")," ", + INPUT(_type="text",_name="filename",requires=IS_NOT_EMPTY), + INPUT(_type="hidden",_name="location",_value=location), + INPUT(_type="hidden",_name="sender",_value=URL('design',args=app)), + INPUT(_type="submit",_value=T("Create")),_action=URL('create_file')) + return form +def upload_plugin_form(app): + form=FORM(T("upload plugin file:")," ", + INPUT(_type="file",_name="pluginfile"), + INPUT(_type="submit",_value=T("upload"))) + return form +def deletefile(arglist): + return A(TAG[''](IMG(_src=URL('static', 
'images/delete_icon.png')), SPAN(T('Delete this file (you will be asked to confirm deletion)'))), _class='icon delete tooltip', _href=URL('delete',args=arglist,vars=dict(sender=request.function+'/'+app))) +}} + +{{block sectionclass}}design{{end}} + +

{{=T("Edit application")}} "{{=A(app,_href=URL(app,'default','index'),_target="_blank")}}"

+ + +
+

+ {{=searchbox('search')}} + {{=T("collapse/expand all")}} + + {{=button('#models', T("models"))}} + {{=button('#controllers', T("controllers"))}} + {{=button('#views', T("views"))}} + {{=button('#languages', T("languages"))}} + {{=button('#static', T("static"))}} + {{=button('#modules', T("modules"))}} + {{=button('#plugins', T("plugins"))}} + +

+
+ + + +

+ {{=T("Models")}} + {{=helpicon()}} {{=T("The data representation, define database tables and sets")}} +

+
+ {{if not models:}}

{{=T("There are no models")}}

{{else:}} +
+ {{=button(URL(a=app,c='appadmin',f='index'), T('database administration'))}} + {{if os.access(os.path.join(request.folder,'..',app,'databases','sql.log'),os.R_OK):}} + {{=button(URL('peek/%s/databases/sql.log'%app), 'sql.log')}} + {{pass}} +
+ {{pass}} + +
    + {{for m in models:}} +
  • + + {{=editfile('models',m)}} + {{=deletefile([app, 'models', m])}} + + + {{=peekfile('models',m)}} + + + {{if len(defines[m]):}}{{=T("defines tables")}} {{pass}}{{=XML(', '.join([B(table).xml() for table in defines[m]]))}} + +
  • + {{pass}} +
+
{{=file_create_form('%s/models/' % app)}}
+
+ + +{{ +controller_functions=[] +for c in controllers: controller_functions+=[c[:-3]+'/%s.html'%x for x in functions[c]] +}} + + + +

+ {{=T("Controllers")}} + {{=helpicon()}} {{=T("The application logic, each URL path is mapped in one exposed function in the controller")}} +

+
+ {{if not controllers:}}

{{=T("There are no controllers")}}

{{else:}} +
+ {{=button(URL(r=request,c='shell',f='index',args=app), T("shell"))}} + {{=button(URL('test',args=app), T("test"))}} + {{=button(URL('edit',args=[app,'cron','crontab']), T("crontab"))}} +
+ {{pass}} +
    + {{for c in controllers:}} +
  • + + {{=editfile('controllers',c)}} + {{=deletefile([app, 'controllers', c])}} + {{=testfile('controllers',c)}} + + + {{=peekfile('controllers',c)}} + + + {{if functions[c]:}}{{=T("exposes")}} {{pass}}{{=XML(', '.join([A(f,_href=URL(a=app,c=c[:-3],f=f)).xml() for f in functions[c]]))}} + +
  • + {{pass}} +
+
{{=file_create_form('%s/controllers/' % app)}}
+
+ + + +

+ {{=T("Views")}} + {{=helpicon()}} {{=T("The presentations layer, views are also known as templates")}} +

+
+
+ {{=button(LAYOUTS_APP, T("download layouts"))}} +
+ {{if not views:}}

{{=T("There are no views")}}

{{pass}} +
    + {{for c in views:}} +
  • + + {{=editfile('views',c)}} + {{=deletefile([app, 'views', c])}} + + + {{=peekfile('views',c)}} + + + {{if extend.has_key(c):}}{{=T("extends")}} {{=extend[c]}} {{pass}} + {{if include[c]:}}{{=T("includes")}} {{pass}}{{=XML(', '.join([B(f).xml() for f in include[c]]))}} + +
  • + {{pass}} +
+
{{=file_create_form('%s/views/' % app)}}
+
+ + + +

+ {{=T("Languages")}} + {{=helpicon()}} {{=T("Translation strings for the application")}} +

+
+
+ {{=button(URL('update_languages/'+app), T('update all languages'))}} +
+ {{if not languages:}}

{{=T("There are no translators, only default language is supported")}}

{{pass}} +
    + {{for file in languages:}} +
  • + + {{=editlanguagefile('languages',file)}} + {{=deletefile([app, 'languages', file])}} + + + {{=peekfile('languages',file)}} + +
  • + {{pass}} +
+
{{=file_create_form('%s/languages/' % app)}}{{=T('(something like "it-it")')}}
+
+ + + +

+ {{=T("Static files")}} + {{=helpicon()}} {{=T("These files are served without processing, your images go here")}} +

+
+
+
+ {{if not statics:}}

{{=T("There are no static files")}}

{{pass}} +
    + {{ + path=[] + for file in statics+['']: + items=file.split('/') + file_path=items[:-1] + filename=items[-1] + while path!=file_path: + if len(file_path)>=len(path) and all([v==file_path[k] for k,v in enumerate(path)]): + path.append(file_path[len(path)]) + thispath='static__'+'__'.join(path) + }} +
  • + {{=path[-1]}}/ +
  • + {{ + pass + pass + if filename: + }}
  • + + {{=editfile('static',file)}} {{=deletefile([app,'static',file])}} + + + {{=filename}} + +
  • {{ + pass + pass + }} + {{pass}} +
+
{{=file_create_form('%s/static/' % app)}} + {{=file_upload_form('%s/static/' % app)}}
+
+ + + +

+ {{=T("Modules")}} + {{=helpicon()}} {{=T("Additional code for your application")}} +

+
+
+
+ {{if not modules:}}

{{=T("There are no modules")}}

{{pass}} +
    + {{for m in modules:}} +
  • + + {{=editfile('modules',m)}} + {{if m!='__init__.py':}}{{=deletefile([app, 'modules', m])}}{{pass}} + + + {{=peekfile('modules',m)}} + +
  • + {{pass}} +
+
{{=file_create_form('%s/modules/' % app)}} + {{=file_upload_form('%s/modules/' % app)}}
+
+ + + +

+ {{=T("Plugins")}} + {{=helpicon()}} {{=T("To create a plugin, name a file/folder plugin_[name]")}} +

+
+
+ {{=button(PLUGINS_APP, T('download plugins'))}} +
+
+
+ {{if plugins:}} +
    + {{for plugin in plugins:}} +
  • + {{=A('plugin_%s' % plugin, _class='file', _href=URL('plugin', args=[app, plugin]))}} +
  • + {{pass}} +
+ {{else:}} +

{{=T('There are no plugins')}}

+ {{pass}} +
{{=upload_plugin_form(app)}}
+
+ + + ADDED applications/admin/views/default/downgrade_web2py.html Index: applications/admin/views/default/downgrade_web2py.html ================================================================== --- /dev/null +++ applications/admin/views/default/downgrade_web2py.html @@ -0,0 +1,13 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}upgrade{{end}} + +

{{=T('web2py downgrade')}}

+ +

{{=T('ATTENTION:')}} {{=T('This is an experimental feature and it needs more testing. If you decide to downgrade you do it at your own risk')}}
+{{=T('If start the downgrade, be patient, it may take a while to rollback')}}

+ +
+{{=FORM(INPUT(_type='submit',_name='nodowngrade',_value=T('Cancel')), _class='inline')}} +{{=FORM(INPUT(_type='submit',_name='downgrade',_value=T('Downgrade')), _class='inline')}} +
ADDED applications/admin/views/default/edit.html Index: applications/admin/views/default/edit.html ================================================================== --- /dev/null +++ applications/admin/views/default/edit.html @@ -0,0 +1,92 @@ +{{extend 'layout.html'}} + +{{ + def shortcut(combo, description): + return XML('
  • %s %s
  • ' % (combo, description)) +}} + +{{if TEXT_EDITOR == 'amy':}} +{{include 'default/amy_ajax.html'}} +{{else:}} + + + +{{pass}} + +{{block sectionclass}}edit{{end}} + +

    {{=T('Editing file "%s"',filename)}}

    + +{{if functions:}} +

    + + {{=B(T('exposes:'))}}{{=XML(', '.join([A(f,_href=URL(a=app,c=controller,f=f)).xml() for f in functions]))}} + + {{if editviewlinks:}}
    + {{=B(T('edit views:'))}} + {{=XML(', '.join([v.xml() for v in editviewlinks]))}} + {{pass}} +

    +{{pass}} + +

    + {{=button(URL('design',args=request.args[0]), T('back'))}} + {{if edit_controller:}} + {{=button(edit_controller, T('edit controller'))}} + {{pass}} + {{if view_link:}} + {{=button(view_link, T('try view'))}} + {{pass}} + {{if request.args[1]=='models':}} + {{=T('online designer')}} + {{pass}} + {{=T('docs')}} +

    + +
    +
    + + {{=IMG(_src=URL('static', 'images/save_icon.png'), _alt=T('Save'))}} + + {{=T('Saved file hash:')}} + + {{=T('Last saved on:')}} + {{if TEXT_EDITOR=='amy':}} + + {{else:}} + + {{pass}} + {{=T('currently saved or')}} + {{=T('to previous version.')}} +
    +
    +
    + +{{if filetype=='html':}} +
    +

    Key bindings for ZenCoding Plugin

    +
      + {{=shortcut('Ctrl+S', 'Save via Ajax')}} + {{=shortcut('Ctrl+,', 'Expand Abbreviation')}} + {{=shortcut('Ctrl+M', 'Match Pair')}} + {{=shortcut('Ctrl+H', 'Wrap with Abbreviation')}} + {{=shortcut('Shift+Ctrl+M', 'Merge Lines')}} + {{=shortcut('Ctrl+Shift+←', 'Previous Edit Point')}} + {{=shortcut('Ctrl+Shift+→', 'Next Edit Point')}} + {{=shortcut('Ctrl+Shift+↑', 'Go to Matching Pair')}} +
    +
    +{{else:}} +
    +

    Key bindings

    +
      + {{=shortcut('Ctrl+S', 'Save via Ajax')}} +
    +
    +{{pass}} ADDED applications/admin/views/default/edit_language.html Index: applications/admin/views/default/edit_language.html ================================================================== --- /dev/null +++ applications/admin/views/default/edit_language.html @@ -0,0 +1,18 @@ +{{extend 'layout.html'}} + + +{{block sectionclass}}edit_language{{end}} + +

    {{=T('Editing Language file')}} "{{=filename}}"

    + +
    + {{=form}} +
    + ADDED applications/admin/views/default/index.html Index: applications/admin/views/default/index.html ================================================================== --- /dev/null +++ applications/admin/views/default/index.html @@ -0,0 +1,21 @@ +{{extend 'layout.html'}} + + +{{block sectionclass}}login{{end}} + +

    web2py™ {{=T('Web Framework')}}

    +

    {{=T('Login to the Administrative Interface')}}

    +

    {{=T('ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.')}}

    +
    +
    +
    + + + +
    {{=T('Administrator Password:')}}
    +
    +
    ADDED applications/admin/views/default/peek.html Index: applications/admin/views/default/peek.html ================================================================== --- /dev/null +++ applications/admin/views/default/peek.html @@ -0,0 +1,16 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}peek{{end}} + +

    {{=T("Peeking at file")}} "{{=filename}}"

    + +

    +{{=button(URL('design',args=request.args[0]), T('back'))}} +{{=button(URL('edit',args=request.args), T('Edit'))}} +

    + +{{ +if filename[-3:]=='.py': language='python' +else: language='html' +}} +{{=CODE(data,language=language,link='/examples/global/vars/')}} ADDED applications/admin/views/default/plugin.html Index: applications/admin/views/default/plugin.html ================================================================== --- /dev/null +++ applications/admin/views/default/plugin.html @@ -0,0 +1,221 @@ +{{extend 'layout.html'}} +{{ +import os +def all(items): + return reduce(lambda a,b:a and b,items,True) +def peekfile(path,file): + return A(file.replace('\\\\','/'),_href=URL('peek', args=(app, path, file))) +def editfile(path,file): + return A(SPAN(T('Edit')),_class='button editbutton',_href=URL('edit', args=(app, path, file))) +def testfile(path,file): + return A(TAG[''](IMG(_src=URL('static', 'images/test_icon.png'), _alt=T('test')), SPAN(T("Run tests in this file"))), _class='icon test tooltip',_href=URL('test', args=(app, file))) +def editlanguagefile(path,file): + return A(SPAN(T('Edit')),_class='button editbutton',_href=URL('edit_language', args=(app, path, file))) +def file_upload_form(location): + form=FORM(T("upload file:")," ", + INPUT(_type="file",_name="file")," ",T("and rename it:")," ", + INPUT(_type="text",_name="filename",requires=IS_NOT_EMPTY), + INPUT(_type="hidden",_name="location",_value=location), + INPUT(_type="hidden",_name="sender",_value=URL('design/'+app)), + INPUT(_type="submit",_value=T("submit")),_action=URL('upload_file')) + return form +def file_create_form(location): + form=FORM(T("create file with filename:")," ", + INPUT(_type="text",_name="filename",requires=IS_NOT_EMPTY), + INPUT(_type="hidden",_name="location",_value=location), + INPUT(_type="hidden",_name="sender",_value=URL('design/'+app)), + INPUT(_type="submit",_value=T("submit")),_action=URL('create_file')) + return form +def upload_plugin_form(app): + form=FORM(T("upload plugin file:")," ", + INPUT(_type="file",_name="pluginfile"), + INPUT(_type="submit",_value=T("submit"))) + 
return form +def deletefile(arglist): + return A(TAG[''](IMG(_src=URL('static', 'images/delete_icon.png')), SPAN(T('Delete this file (you will be asked to confirm deletion)'))), _class='icon delete tooltip', _href=URL('delete',args=arglist,vars=dict(sender=request.function+'/'+app))) +}} + +{{block sectionclass}}plugin{{end}} + +

    + {{=T('Plugin "%s" in application', request.args(1))}} "{{=app}}" +

    +
    + {{=T("collapse/expand all")}} + + {{=button("#models", T("models"))}} + {{=button("#controllers", T("controllers"))}} + {{=button("#views", T("views"))}} + {{=button("#static", T("static"))}} + {{=button("#modules", T("modules"))}} + + + {{=sp_button(URL('design',args=app), T("back"))}} + {{=sp_button(URL('delete_plugin',args=request.args), T("delete plugin"))}} + {{=sp_button(URL('pack_plugin',args=request.args), T("pack plugin"))}} + +
    + + + +

    + {{=T("Models")}} +

    +
    + {{if not models:}} +

    {{=T("There are no models")}}

    + {{pass}} +
    +
    +
      + {{for m in models:}} +
    • + + {{=editfile('models',m)}} + {{=deletefile([app, 'models', m])}} + + + {{=peekfile('models',m)}} + + + {{if len(defines[m]):}}{{=T("defines tables")}} {{pass}}{{=XML(', '.join([B(table).xml() for table in defines[m]]))}} + +
    • + {{pass}} +
    +
    + + +{{ +controller_functions=[] +for c in controllers: controller_functions+=[c[:-3]+'/%s.html'%x for x in functions[c]] +}} + + + +

    + {{=T("Controllers")}} +

    +
    + {{if not controllers:}} +

    {{=T("There are no controllers")}}

    + {{pass}} +
    +
    +
      + {{for c in controllers:}} +
    • + + {{=editfile('controllers',c)}} + {{=deletefile([app,'controllers',c])}} + {{=testfile('controllers',c)}} + + + {{=peekfile('controllers',c)}} + + + {{if functions[c]:}}{{=T("exposes")}} {{pass}}{{=XML(', '.join([A(f,_href=URL(a=app,c=c[:-3],f=f)).xml() for f in functions[c]]))}} + +
    • + {{pass}} +
    +
    + + + +

    + {{=T("Views")}} +

    +
    + {{if not views:}} +

    {{=T("There are no views")}}

    + {{pass}} +
    +
    +
      + {{for c in views:}} +
    • + + {{=editfile('views',c)}} + {{=deletefile([app,'views',c])}} + + + {{=peekfile('views',c)}} + + + {{if extend.has_key(c):}}{{=T("extends")}} {{=extend[c]}} {{pass}} + {{if include[c]:}}{{=T("includes")}} {{pass}}{{=XML(', '.join([B(f).xml() for f in include[c]]))}} + +
    • + {{pass}} +
    +
    + + + +

    + {{=T("Static files")}} +

    +
    + {{if not statics:}}

    {{=T("There are no static files")}}

    {{pass}} +
      + {{ + path=[] + for file in statics+['']: + items=file.split('/') + file_path=items[:-1] + filename=items[-1] + while path!=file_path: + if len(file_path)>=len(path) and all([v==file_path[k] for k,v in enumerate(path)]): + path.append(file_path[len(path)]) + thispath='static__'+'__'.join(path) + }} +
    • + {{=path[-1]}}/ +
    • + {{ + pass + pass + if filename: + }}
    • + + {{=editfile('static',file)}} {{=deletefile([app,'static',file])}} + + + {{=filename}} + +
    • {{ + pass + pass + }} + {{pass}} +
    +
    + + + +

    + {{=T("Modules")}} +

    +
    + {{if not modules:}} +

    {{=T("There are no modules")}}

    + {{pass}} +
    +
    +
      + {{for m in modules:}} +
    • + {{=editfile('modules',m)}} + {{if m!='__init__.py':}}{{=T("delete")}}{{pass}} + {{=peekfile('modules',m)}} +
    • + {{pass}} +
    +
    + + ADDED applications/admin/views/default/resolve.html Index: applications/admin/views/default/resolve.html ================================================================== --- /dev/null +++ applications/admin/views/default/resolve.html @@ -0,0 +1,24 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}resolve{{end}} + +

    {{=T('Resolve Conflict file')}} "{{=filename}}"

    + + + +
    + + + +
    + +
    +
    + {{=diff}} +
    +
    +
    ADDED applications/admin/views/default/site.html Index: applications/admin/views/default/site.html ================================================================== --- /dev/null +++ applications/admin/views/default/site.html @@ -0,0 +1,157 @@ +{{extend 'layout.html'}} +{{import os, glob}} + +{{block sectionclass}}site{{end}} + +
    +
    +

    {{=T("Installed applications")}}

    +
      + {{for a in apps:}} +
    • + {{if a==request.application:}} +

      {{=a}} ({{=T('currently running')}})

      +

      + {{else:}} +

      {{=A(a,_href=URL(a,'default','index'))}}

      + {{if MULTI_USER_MODE and db.app(name=a):}}(created by {{="%(first_name)s %(last_name)s" % db.auth_user[db.app(name=a).owner]}}){{pass}} +

      + {{if not os.path.exists('applications/%s/compiled' % a):}} + {{=sp_button(URL('design',args=a), T("Edit"))}} + {{else:}} + {{=button(URL(a,'appadmin','index'), T("appadmin"))}} + {{pass}} + {{=button(URL('about',args=a), T("About"))}} + {{pass}} + {{=button(URL('errors',args=a), T("Errors"))}} + {{=button(URL('cleanup',args=a), T("Clean"))}} + {{=button(URL('pack',args=a), T("Pack all"))}} + {{if not os.path.exists('applications/%s/compiled' % a):}} + {{=button(URL('compile_app',args=a), T("Compile"))}} + {{else:}} + {{=button(URL('pack',args=(a, 'compiled')), T("Pack compiled"))}} + {{if glob.glob('applications/%s/controllers/*.py' % a):}} + {{=button(URL('remove_compiled_app',args=a), T("Remove compiled"))}} + {{pass}} + {{pass}} + {{if a!=request.application:}} + {{=button(URL('uninstall',args=a), T("Uninstall"))}} + {{pass}} +

      +
    • + {{pass}} +
    +
    +
    + + ADDED applications/admin/views/default/test.html Index: applications/admin/views/default/test.html ================================================================== --- /dev/null +++ applications/admin/views/default/test.html @@ -0,0 +1,20 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}test{{end}} + +

    {{=T('Testing application')}} "{{=app}}"

    + +{{for controller in sorted(controllers):}} +
    +

    Testing controller "{{=controller}}"... please wait!

    +
    + + +{{pass}} + +

    {{=T("""If the report above contains a ticket number it indicates a failure in executing the controller, before any attempt to execute the doctests. This is usually due to an indentation error or an error outside function code. +A green title indicates that all tests (if defined) passed. In this case test results are not shown.""")}}

    +

    {{=T('Functions with no doctests will result in [passed] tests.')}}

    +

    {{=T('ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.')}}

    ADDED applications/admin/views/default/ticket.html Index: applications/admin/views/default/ticket.html ================================================================== --- /dev/null +++ applications/admin/views/default/ticket.html @@ -0,0 +1,132 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}ticket{{end}} + +

    {{=T('Error ticket')}} for "{{=app}}"

    +

    {{=T('Ticket ID')}}

    +

    {{=ticket}}

    +{{if output:}}

    {{=output}}

    {{pass}} +

    {{=T('Version')}}

    + + + + + + + + + + + +
    web2py™{{=myversion}}
    Python{{=snapshot.get('pyver','')}}
    +

    {{=T('Traceback')}}

    +
    +{{=traceback}} +
    + +{{if snapshot:}} +{{try:}} + + + +

    + {{=T('Error snapshot')}} + {{=helpicon()}} {{=T('Detailed traceback description')}} +

    + + + +
    + +

    + {{=snapshot['etype']}}({{=snapshot['evalue']}}) +

    +

    + {{=T('inspect attributes')}} +

    +
    +
    +
    {{=T("Exception instance attributes")}}
    + + + {{for k,v in snapshot['exception'].items():}} + + + + + {{pass}} + +
    {{=k}}{{=v}}
    +
    +
    +
    + + +
    +

    {{=T('Frames')}}

    +
      + {{for i, frame in enumerate(snapshot['frames']):}} +
    • + {{is_hidden = (i != len(snapshot['frames'])-1 and 'hide' or 'inspect')}} +
      +

      + File {{="%s in %s at line %s" % (frame['file'], frame['func'], frame['lnum'])}} + {{=T("code")}} + {{=T("arguments")}} + {{=T("variables")}} +

      +
      +
      Function argument list
      +

      {{=frame['call']}}

      +
      +
      +
      Code listing
      + {{if frame['lines']:}} +
      {{=CODE('\n'.join([x[1] for x in sorted(frame['lines'].items(),key=lambda x: x[0])]), 
      +                    language='python', link=None, counter=min(frame['lines'].keys()), highlight_line=frame['lnum'])}}
      + {{pass}} +
      +
      +
      Variables
      + + + {{for k,v in frame['dump'].items():}} + + + + + {{pass}} + +
      {{=k}}{{=v}}
      +
      +
      +
    • + {{pass}} +
    +
    + + +
    +

    Context

    +

    + {{=T('locals')}} + {{=T('request')}} + {{=T('session')}} + {{=T('response')}} +

    +
    locals
    {{=BEAUTIFY(snapshot['locals'])}}
    +
    request
    {{=BEAUTIFY(snapshot['request'])}}
    +
    session
    {{=BEAUTIFY(snapshot['session'])}}
    +
    response
    {{=BEAUTIFY(snapshot['response'])}}
    +
    +{{except Exception, e:}} + + {{import traceback;tb=traceback.format_exc().replace("\n","\\n") }} + +{{pass}} +{{pass}} + +
    +

    In file: {{=layer}}

    + {{=CODE(code.replace('\r',''),language='python',link='/examples/global/vars/')}} +
    ADDED applications/admin/views/default/uninstall.html Index: applications/admin/views/default/uninstall.html ================================================================== --- /dev/null +++ applications/admin/views/default/uninstall.html @@ -0,0 +1,9 @@ +{{extend 'layout.html'}} + +
    +

    {{=T('Are you sure you want to uninstall application "%s"?', app)}}

    + + + +
    {{=FORM(INPUT(_type='submit',_name='nodelete',_value=T('Abort')))}}{{=FORM(INPUT(_type='submit',_name='delete',_value=T('Uninstall')))}}
    +
    ADDED applications/admin/views/default/upgrade_web2py.html Index: applications/admin/views/default/upgrade_web2py.html ================================================================== --- /dev/null +++ applications/admin/views/default/upgrade_web2py.html @@ -0,0 +1,13 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}upgrade{{end}} + +

    {{=T('web2py upgrade')}}

    + +

    {{=T('ATTENTION:')}} {{=T('This is an experimental feature and it needs more testing. If you decide to upgrade you do it at your own risk')}}
    +{{=T('If start the upgrade, be patient, it may take a while to download')}}

    + +
    +{{=FORM(INPUT(_type='submit',_name='noupgrade',_value=T('Cancel')), _class='inline')}} +{{=FORM(INPUT(_type='submit',_name='upgrade',_value=T('Upgrade')), _class='inline')}} +
    ADDED applications/admin/views/default/user.html Index: applications/admin/views/default/user.html ================================================================== --- /dev/null +++ applications/admin/views/default/user.html @@ -0,0 +1,19 @@ +{{extend 'layout.html'}} +

    {{=T( request.args(0).replace('_',' ').capitalize() )}}

    +
    +{{=form}} +{{if request.args(0)=='login':}} +{{if not 'register' in auth.settings.actions_disabled:}} +
    register +{{pass}} +{{if not 'request_reset_password' in auth.settings.actions_disabled:}} +
    lost password +{{pass}} +{{pass}} +
    + + ADDED applications/admin/views/gae/deploy.html Index: applications/admin/views/gae/deploy.html ================================================================== --- /dev/null +++ applications/admin/views/gae/deploy.html @@ -0,0 +1,30 @@ +{{extend 'layout.html'}} + + + + + +

    Google App Engine Deployment Interface

    + +

    {{=T("This page can upload your application to the Google App Engine computing cloud. Mind that you must first create indexes locally and this is done by installing the Google appserver and running the app locally with it once, or there will be errors when selecting records. Attention: deployment may take long time, depending on the network speed. Attention: it will overwrite your app.yaml. DO NOT SUBMIT TWICE.")}}

    + +{{if command:}} +

    Command

    + +{{=CODE(command)}} +

    GAE Output

    +
    
    +{{else:}}
    +

    Deployment form

    +
    +{{=form}} +
    +{{pass}} ADDED applications/admin/views/generic.html Index: applications/admin/views/generic.html ================================================================== --- /dev/null +++ applications/admin/views/generic.html @@ -0,0 +1,18 @@ +{{extend 'layout.html'}} +{{""" + +You should not modify this file. +It is used as default when a view is not provided for your controllers + +"""}} + +{{=BEAUTIFY(response._vars)}} + + + + + + + + + ADDED applications/admin/views/layout.html Index: applications/admin/views/layout.html ================================================================== --- /dev/null +++ applications/admin/views/layout.html @@ -0,0 +1,44 @@ + + + + + + {{=response.title or URL()}} + {{response.files.append(URL('static','css/styles.css'))}} + {{include 'web2py_ajax.html'}} + + + + +
    +
    +
    {{=response.flash or ''}}
    + {{include}} +
    +
    + + + ADDED applications/admin/views/mercurial/commit.html Index: applications/admin/views/mercurial/commit.html ================================================================== --- /dev/null +++ applications/admin/views/mercurial/commit.html @@ -0,0 +1,33 @@ +{{extend 'layout.html'}} +{{import time}} + +

    Mercurial Version Control System Interface
    +for application "{{=request.args[0]}}"

    + +

    Commit form

    +{{=form}} + + +{{if repo['.'].rev()>=0:}} +

    Last Revision

    + + + + + + +
    Revision:{{=repo['.'].rev()}}
    Node:{{=repo['.']}}
    Created by:{{=repo['.'].user()}}
    Created on:{{=time.ctime(repo['.'].date()[0])}}
    Description:{{=repo['.'].description()}}
    + +

    Past revisions

    +
    + {{=changes}} +
    + +{{if files:}} +

    Committed files

    +
    +{{=files}} +
    +{{pass}} + +{{pass}} ADDED applications/admin/views/mercurial/revision.html Index: applications/admin/views/mercurial/revision.html ================================================================== --- /dev/null +++ applications/admin/views/mercurial/revision.html @@ -0,0 +1,17 @@ +{{extend 'layout.html'}} + +

    Revision {{=rev}}

    + +{{=form}} + +

    +

    Changelog

    + +{{=desc}} + +

    +

    Files added

    + +{{=TABLE(*[TR(f) for f in files])}} + + ADDED applications/admin/views/shell/index.html Index: applications/admin/views/shell/index.html ================================================================== --- /dev/null +++ applications/admin/views/shell/index.html @@ -0,0 +1,162 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}shell{{end}} + + +
    + + +
    +
    +
    >>>
    +
    + + Type some Python code in here and hit Return (Enter) to execute it. +
    +
    +
    +
    + +
    +
      +
    • Using the shell may lock the database to other users of this app.
    • +
    • Each db statement is automatically committed.
    • +
    • Creating new tables dynamically is not allowed.
    • +
    • Models are automatically imported in the shell.
    • +
    +
    + + ADDED applications/admin/views/toolbar/index.html Index: applications/admin/views/toolbar/index.html ================================================================== --- /dev/null +++ applications/admin/views/toolbar/index.html @@ -0,0 +1,17 @@ + + + {{response.files.append(URL('static','js/jquery.js'))}} + {{include 'web2py_ajax.html'}} + + +
    + URL: {{=URL(app,'default','index')}} + + +
    + + + ADDED applications/admin/views/web2py_ajax.html Index: applications/admin/views/web2py_ajax.html ================================================================== --- /dev/null +++ applications/admin/views/web2py_ajax.html @@ -0,0 +1,27 @@ +{{ +response.files.insert(0,URL('static','js/jquery.js')) +response.files.insert(1,URL('static','css/calendar.css')) +response.files.insert(2,URL('static','js/calendar.js')) +for _item in response.meta or []:}} + {{ +pass +for _k,_file in enumerate(response.files or []): + if _file in response.files[:_k]: + continue + _file0=_file.lower().split('?')[0] + if _file0.endswith('.css'):}} + {{ + elif _file0.endswith('.js'):}} + {{ + pass +pass +}} + + + + ADDED applications/admin/views/wizard/generated.html Index: applications/admin/views/wizard/generated.html ================================================================== --- /dev/null +++ applications/admin/views/wizard/generated.html @@ -0,0 +1,13 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}generated{{end}} + +Open new app in new window +Back to wizard +Admin design page +{{if have_mercurial:}} +Admin versioning page +{{pass}} +Database administration +

    + ADDED applications/admin/views/wizard/step.html Index: applications/admin/views/wizard/step.html ================================================================== --- /dev/null +++ applications/admin/views/wizard/step.html @@ -0,0 +1,140 @@ +{{extend 'layout.html'}} + +{{block sectionclass}}step{{end}} + +

    {{=T('New Application Wizard')}}

    + +
    +{{if request.function=='index':}} +

    {{=T('Start a new app')}}

    +{{else:}} +
    +

    {{=T('Basics')}}

    +

    {{=button(URL('index'), T('restart'))}}

    +

    App Name: {{=session.app['name']}}

    +
    +
    +

    Current settings

    +

    {{=button(URL('step1'), T('Edit'))}}

    +
      + {{for key,value in session.app['params']:}} +
    • {{=key}}: {{=value}}
    • + {{pass}} +
    +
    +
    +

    Tables

    +

    {{=button(URL('step2'), T('edit all'))}}

    +
      + {{for i,table in enumerate(session.app['tables']):}} +
    • {{=button(URL('step3',args=i), T('Edit'))}} {{=table}}
    • + {{pass}} +
    +
    +
    +

    Pages

    +

    {{=button(URL('step4'), T('edit all'))}}

    +
      + {{for i,page in enumerate(session.app['pages']):}} +
    • {{=button(URL('step5',args=i), T('Edit'))}} {{=page}}
    • + {{pass}} +
    +
    +
    +

    {{=T('Generate')}}

    +

    {{=button(URL('step6'), T('go!'))}}

    +
    +{{pass}} +
    + +
    + + + + {{if 'step' in request.function:}} +

    {{=T('Step')}} {{=step}}

    + {{if request.function!='step1':}} + {{=button(URL('step' + str(int(request.function[-1])-1)), T('back'))}} + {{pass}} + {{else:}} +

    {{=T('Begin')}}

    + {{pass}} + {{if request.function in ('step1','step2','step3','step4','step5'):}} + {{=button(URL('step6'), T('skip to generate'))}} + {{pass}} +

    + {{if request.function in ('step1','step6'):}} + + {{pass}} +
    + {{=form}} +
    + + + +
    +

    Instructions

    +
    + {{if request.function=='index':}} +
      +
    • Insert the name of a new app.
    • +
    • If the app exists and was created with the wizard, you will be able to edit it, but any manual editing to the app will be lost.
    • +
    + {{elif request.function=='step1':}} +
      +
    • This Wizard will help you build a new web2py app.
    • +
    • You can create an app with a name that already exists.
    • +
    • If you do not have an email server set email server to "logging".
    • +
    • If you want to use Janrain Engage for login: 1) Sign up for a Janrain Engage account; 2) Register you hostname, domain, and obtain an api key; 3) Set Login Config above to "domain:api_key".
    • +
    • ATTENTION: you can use the wizard to download plugins BUT we cannot guarantee the stability or backward compatibility of plugins. Moreover plugins may conflict with each other. Anyway, we do recommend installing plugin "wiki" with adds CMS like capabilities to your app.
    • +
    + {{elif request.function=='step2':}} +
      +
    • List the names of table that you need.
    • +
    • If you do not need authentication remove the table "auth_user".
    • +
    • Press enter to create a new input row.
    • +
    • Empty rows are ignored.
    • +
    • Other tables for role based access control will be created automatically, and do not need to be listed.
    • +
    • You will be able to add fields later.
    • +
    + {{elif request.function=='step3':}} +
      +
    • List the fields for this table (do not include an id field, it is automatic), for example "name unique" or "birthday date" or "image upload" or "comments multiple" or "description wiki required"
    • +
    • The first keyword(s) for each entry will be used to generate a name for the table field and its label. You can use spaces an other unicode characters.
    • +
    • Keywords "string" (default), "text", "integer", "boolean", "float", "double", "file", "date", "time", "datetime", "file", "upload" will be used to determine the field type and they will not be made part of the field name. +
    • For a reference field use a field name, followed by the name of the referenced table.
    • +
    • Other special keywords are "required", "notnull" or "notempty", "unique". They map into equivalent validators but (at this time) should only be used with string and text types.
    • +
    • The keywords "html" and "wiki" force a text type and set a representation for the field value as sanitized HTML and MARKMIN resepectively.
    • +
    • string, integer and reference fields can be "multiple", i.e. multiple values will be allowed
    • +
    • For the "auth_user" table do not add attributes to the default fields (username, first_name, last_name, password and email). They are handled automatically.
    • +
    • Some fields will be added automatically upon creation and handled automatically: "created_by", "created_on", "modified_by", "modified_on", "active" (only active fields can be selected).
    • +
    • For every table "table" another table "table_archive" is created and it contains the previous versions of each record. This is only accessible via appadmin or programmatically.
    • +
    + {{elif request.function=='step4':}} +
      +
    • List the names of the pages you want to create.
    • +
    • Some pages are listed automatically because they expose Create/Read/Update/Delete for each tables you have created.
    • +
    • All pages, except "error" and those with name starting in underscore willbe listed in the menu. You will be able to edit the menu later.
    • +
    • You should have page "index", the starting point of your app, and page "error", where web2py will redirect to in case of error.
    • +
    + {{elif request.function=='step5':}} +
      +
    • Use the markmin syntax to add text to your pages.
    • +
    + {{elif request.function=='step6':}} +
      +
    • Almost done. Click on the button above to create your new app.
    • +
    • Once done you will be able to edit it as any normal web2py app.
    • +
    + {{pass}} +
    +
    +
    + ADDED applications/mobileblur/ABOUT Index: applications/mobileblur/ABOUT ================================================================== --- /dev/null +++ applications/mobileblur/ABOUT @@ -0,0 +1,2 @@ +Write something about this app. +Developed with web2py. ADDED applications/mobileblur/LICENSE Index: applications/mobileblur/LICENSE ================================================================== --- /dev/null +++ applications/mobileblur/LICENSE @@ -0,0 +1,4 @@ +The web2py welcome app is licensed under public domain +(except for the css and js files that it includes, which have their own third party licenses). + +You can modify this license when you add your own code. ADDED applications/mobileblur/__init__.py Index: applications/mobileblur/__init__.py ================================================================== --- /dev/null +++ applications/mobileblur/__init__.py ADDED applications/mobileblur/controllers/appadmin.py Index: applications/mobileblur/controllers/appadmin.py ================================================================== --- /dev/null +++ applications/mobileblur/controllers/appadmin.py @@ -0,0 +1,408 @@ +# -*- coding: utf-8 -*- + +# ########################################################## +# ## make sure administrator is on localhost +# ########################################################### + +import os +import socket +import datetime +import copy +import gluon.contenttype +import gluon.fileutils + +# ## critical --- make a copy of the environment + +global_env = copy.copy(globals()) +global_env['datetime'] = datetime + +http_host = request.env.http_host.split(':')[0] +remote_addr = request.env.remote_addr +try: + hosts = (http_host, socket.gethostname(), + socket.gethostbyname(http_host), + '::1','127.0.0.1','::ffff:127.0.0.1') +except: + hosts = (http_host, ) + +if request.env.http_x_forwarded_for or request.env.wsgi_url_scheme\ + in ['https', 'HTTPS']: + session.secure() +elif (remote_addr not in hosts) and (remote_addr != 
"127.0.0.1"): + raise HTTP(200, T('appadmin is disabled because insecure channel')) + +if (request.application=='admin' and not session.authorized) or \ + (request.application!='admin' and not gluon.fileutils.check_credentials(request)): + redirect(URL('admin', 'default', 'index')) + +ignore_rw = True +response.view = 'appadmin.html' +response.menu = [[T('design'), False, URL('admin', 'default', 'design', + args=[request.application])], [T('db'), False, + URL('index')], [T('state'), False, + URL('state')], [T('cache'), False, + URL('ccache')]] + +# ########################################################## +# ## auxiliary functions +# ########################################################### + + +def get_databases(request): + dbs = {} + for (key, value) in global_env.items(): + cond = False + try: + cond = isinstance(value, GQLDB) + except: + cond = isinstance(value, SQLDB) + if cond: + dbs[key] = value + return dbs + + +databases = get_databases(None) + + +def eval_in_global_env(text): + exec ('_ret=%s' % text, {}, global_env) + return global_env['_ret'] + + +def get_database(request): + if request.args and request.args[0] in databases: + return eval_in_global_env(request.args[0]) + else: + session.flash = T('invalid request') + redirect(URL('index')) + + +def get_table(request): + db = get_database(request) + if len(request.args) > 1 and request.args[1] in db.tables: + return (db, request.args[1]) + else: + session.flash = T('invalid request') + redirect(URL('index')) + + +def get_query(request): + try: + return eval_in_global_env(request.vars.query) + except Exception: + return None + + +def query_by_table_type(tablename,db,request=request): + keyed = hasattr(db[tablename],'_primarykey') + if keyed: + firstkey = db[tablename][db[tablename]._primarykey[0]] + cond = '>0' + if firstkey.type in ['string', 'text']: + cond = '!=""' + qry = '%s.%s.%s%s' % (request.args[0], request.args[1], firstkey.name, cond) + else: + qry = '%s.%s.id>0' % tuple(request.args[:2]) + 
return qry + + + +# ########################################################## +# ## list all databases and tables +# ########################################################### + + +def index(): + return dict(databases=databases) + + +# ########################################################## +# ## insert a new record +# ########################################################### + + +def insert(): + (db, table) = get_table(request) + form = SQLFORM(db[table], ignore_rw=ignore_rw) + if form.accepts(request.vars, session): + response.flash = T('new record inserted') + return dict(form=form,table=db[table]) + + +# ########################################################## +# ## list all records in table and insert new record +# ########################################################### + + +def download(): + import os + db = get_database(request) + return response.download(request,db) + +def csv(): + import gluon.contenttype + response.headers['Content-Type'] = \ + gluon.contenttype.contenttype('.csv') + db = get_database(request) + query = get_query(request) + if not query: + return None + response.headers['Content-disposition'] = 'attachment; filename=%s_%s.csv'\ + % tuple(request.vars.query.split('.')[:2]) + return str(db(query).select()) + + +def import_csv(table, file): + table.import_from_csv_file(file) + +def select(): + import re + db = get_database(request) + dbname = request.args[0] + regex = re.compile('(?P\w+)\.(?P\w+)=(?P\d+)') + if len(request.args)>1 and hasattr(db[request.args[1]],'_primarykey'): + regex = re.compile('(?P
    \w+)\.(?P\w+)=(?P.+)') + if request.vars.query: + match = regex.match(request.vars.query) + if match: + request.vars.query = '%s.%s.%s==%s' % (request.args[0], + match.group('table'), match.group('field'), + match.group('value')) + else: + request.vars.query = session.last_query + query = get_query(request) + if request.vars.start: + start = int(request.vars.start) + else: + start = 0 + nrows = 0 + stop = start + 100 + table = None + rows = [] + orderby = request.vars.orderby + if orderby: + orderby = dbname + '.' + orderby + if orderby == session.last_orderby: + if orderby[0] == '~': + orderby = orderby[1:] + else: + orderby = '~' + orderby + session.last_orderby = orderby + session.last_query = request.vars.query + form = FORM(TABLE(TR(T('Query:'), '', INPUT(_style='width:400px', + _name='query', _value=request.vars.query or '', + requires=IS_NOT_EMPTY(error_message=T("Cannot be empty")))), TR(T('Update:'), + INPUT(_name='update_check', _type='checkbox', + value=False), INPUT(_style='width:400px', + _name='update_fields', _value=request.vars.update_fields + or '')), TR(T('Delete:'), INPUT(_name='delete_check', + _class='delete', _type='checkbox', value=False), ''), + TR('', '', INPUT(_type='submit', _value='submit'))), + _action=URL(r=request,args=request.args)) + if request.vars.csvfile != None: + try: + import_csv(db[request.vars.table], + request.vars.csvfile.file) + response.flash = T('data uploaded') + except Exception, e: + response.flash = DIV(T('unable to parse csv file'),PRE(str(e))) + if form.accepts(request.vars, formname=None): +# regex = re.compile(request.args[0] + '\.(?P
    \w+)\.id\>0') + regex = re.compile(request.args[0] + '\.(?P
    \w+)\..+') + + match = regex.match(form.vars.query.strip()) + if match: + table = match.group('table') + try: + nrows = db(query).count() + if form.vars.update_check and form.vars.update_fields: + db(query).update(**eval_in_global_env('dict(%s)' + % form.vars.update_fields)) + response.flash = T('%s rows updated', nrows) + elif form.vars.delete_check: + db(query).delete() + response.flash = T('%s rows deleted', nrows) + nrows = db(query).count() + if orderby: + rows = db(query).select(limitby=(start, stop), + orderby=eval_in_global_env(orderby)) + else: + rows = db(query).select(limitby=(start, stop)) + except Exception, e: + (rows, nrows) = ([], 0) + response.flash = DIV(T('Invalid Query'),PRE(str(e))) + return dict( + form=form, + table=table, + start=start, + stop=stop, + nrows=nrows, + rows=rows, + query=request.vars.query, + ) + + +# ########################################################## +# ## edit delete one record +# ########################################################### + + +def update(): + (db, table) = get_table(request) + keyed = hasattr(db[table],'_primarykey') + record = None + if keyed: + key = [f for f in request.vars if f in db[table]._primarykey] + if key: + record = db(db[table][key[0]] == request.vars[key[0]]).select().first() + else: + record = db(db[table].id == request.args(2)).select().first() + + if not record: + qry = query_by_table_type(table, db) + session.flash = T('record does not exist') + redirect(URL('select', args=request.args[:1], + vars=dict(query=qry))) + + if keyed: + for k in db[table]._primarykey: + db[table][k].writable=False + + form = SQLFORM(db[table], record, deletable=True, delete_label=T('Check to delete'), + ignore_rw=ignore_rw and not keyed, + linkto=URL('select', + args=request.args[:1]), upload=URL(r=request, + f='download', args=request.args[:1])) + + if form.accepts(request.vars, session): + session.flash = T('done!') + qry = query_by_table_type(table, db) + redirect(URL('select', 
args=request.args[:1], + vars=dict(query=qry))) + return dict(form=form,table=db[table]) + + +# ########################################################## +# ## get global variables +# ########################################################### + + +def state(): + return dict() + +def ccache(): + form = FORM( + P(TAG.BUTTON("Clear CACHE?", _type="submit", _name="yes", _value="yes")), + P(TAG.BUTTON("Clear RAM", _type="submit", _name="ram", _value="ram")), + P(TAG.BUTTON("Clear DISK", _type="submit", _name="disk", _value="disk")), + ) + + if form.accepts(request.vars, session): + clear_ram = False + clear_disk = False + session.flash = "" + if request.vars.yes: + clear_ram = clear_disk = True + if request.vars.ram: + clear_ram = True + if request.vars.disk: + clear_disk = True + + if clear_ram: + cache.ram.clear() + session.flash += "Ram Cleared " + if clear_disk: + cache.disk.clear() + session.flash += "Disk Cleared" + + redirect(URL(r=request)) + + try: + from guppy import hpy; hp=hpy() + except ImportError: + hp = False + + import shelve, os, copy, time, math + from gluon import portalocker + + ram = { + 'bytes': 0, + 'objects': 0, + 'hits': 0, + 'misses': 0, + 'ratio': 0, + 'oldest': time.time() + } + disk = copy.copy(ram) + total = copy.copy(ram) + + for key, value in cache.ram.storage.items(): + if isinstance(value, dict): + ram['hits'] = value['hit_total'] - value['misses'] + ram['misses'] = value['misses'] + try: + ram['ratio'] = ram['hits'] * 100 / value['hit_total'] + except (KeyError, ZeroDivisionError): + ram['ratio'] = 0 + else: + if hp: + ram['bytes'] += hp.iso(value[1]).size + ram['objects'] += hp.iso(value[1]).count + + if value[0] < ram['oldest']: + ram['oldest'] = value[0] + + locker = open(os.path.join(request.folder, + 'cache/cache.lock'), 'a') + portalocker.lock(locker, portalocker.LOCK_EX) + disk_storage = shelve.open(os.path.join(request.folder, 'cache/cache.shelve')) + try: + for key, value in disk_storage.items(): + if isinstance(value, 
dict): + disk['hits'] = value['hit_total'] - value['misses'] + disk['misses'] = value['misses'] + try: + disk['ratio'] = disk['hits'] * 100 / value['hit_total'] + except (KeyError, ZeroDivisionError): + disk['ratio'] = 0 + else: + if hp: + disk['bytes'] += hp.iso(value[1]).size + disk['objects'] += hp.iso(value[1]).count + if value[0] < disk['oldest']: + disk['oldest'] = value[0] + finally: + portalocker.unlock(locker) + locker.close() + disk_storage.close() + + total['bytes'] = ram['bytes'] + disk['bytes'] + total['objects'] = ram['objects'] + disk['objects'] + total['hits'] = ram['hits'] + disk['hits'] + total['misses'] = ram['misses'] + disk['misses'] + try: + total['ratio'] = total['hits'] * 100 / (total['hits'] + total['misses']) + except (KeyError, ZeroDivisionError): + total['ratio'] = 0 + + if disk['oldest'] < ram['oldest']: + total['oldest'] = disk['oldest'] + else: + total['oldest'] = ram['oldest'] + + def GetInHMS(seconds): + hours = math.floor(seconds / 3600) + seconds -= hours * 3600 + minutes = math.floor(seconds / 60) + seconds -= minutes * 60 + seconds = math.floor(seconds) + + return (hours, minutes, seconds) + + ram['oldest'] = GetInHMS(time.time() - ram['oldest']) + disk['oldest'] = GetInHMS(time.time() - disk['oldest']) + total['oldest'] = GetInHMS(time.time() - total['oldest']) + + return dict(form=form, total=total, + ram=ram, disk=disk) + ADDED applications/mobileblur/controllers/default.py Index: applications/mobileblur/controllers/default.py ================================================================== --- /dev/null +++ applications/mobileblur/controllers/default.py @@ -0,0 +1,37 @@ +from pprint import pprint +import simplejson + +def index(): + raw_feeds = newsblur.feeds(flat=True)["feeds"] + feeds = {} + for feed in raw_feeds.itervalues(): + for i in range(threshold, 2): + if feed[thresholds[i]] > 0: + feeds[feed["feed_title"]] = feed + break + + return dict(feeds=feeds, threshold=threshold) + + +def login(): + login_form = 
SQLFORM.factory( + Field("username", requires=IS_NOT_EMPTY()), + Field("password", "password", requires=IS_NOT_EMPTY()) + ) + if login_form.accepts(request): + try: + results = newsblur.login(login_form.vars["username"], login_form.vars["password"]) + response.cookies["nb_cookie"] = newsblur.cookies["newsblur_sessionid"] + response.cookies["nb_cookie"]["path"] = "/" + redirect(URL("index")) + except Exception as ex: + login_form.insert(-1, ex.message) + + return dict(login_form=login_form) + + +def logout(): + response.cookies["nb_cookie"] = "" + response.cookies["nb_cookie"]["expires"] = -10 + response.cookies["nb_cookie"]["path"] = "/" + redirect(URL("index")) ADDED applications/mobileblur/controllers/default.py~ Index: applications/mobileblur/controllers/default.py~ ================================================================== --- /dev/null +++ applications/mobileblur/controllers/default.py~ @@ -0,0 +1,30 @@ +# -*- coding: utf-8 -*- +# this file is released under public domain and you can use without limitations + +from pprint import pprint +import simplejson +import urllib + +base = "http://newsblur.com/" +username = "spiffytech" +password = "JYRKJM9UuQg9" +threshold = 0 + +def index(): + login() + data = urllib.urlencode({"flat": "true"}) + u = urllib.urlopen(base + "reader/feeds?" 
% data) + print u.read() + raw_feeds = simplejson.loads(u.read()) + pprint(raw_feeds) + feeds = {} + for feed in raw_feeds: + if not (feed["ng"] == 0 and feed["nt"] == 0 and feed["ps"] == 0): + feeds[feed["feed_title"]] = feed + + return feeds + +def login(): + data = urllib.urlencode({"login_username": username, "login_password": password}) + u = urllib.urlopen(base + "api/login", data) + print u.read() ADDED applications/mobileblur/controllers/feeds.py Index: applications/mobileblur/controllers/feeds.py ================================================================== --- /dev/null +++ applications/mobileblur/controllers/feeds.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +from pprint import pprint + +def view(): + stories = newsblur.feed(request.args[0])["stories"] + feeds = newsblur.feeds(flat=True)["feeds"] + feed = [feed for feed in feeds.itervalues() if feed["id"]==int(request.args[0])][0] + return dict(stories=stories, feed=feed) + +def mark_read(): + if len(request.args) > 0: + newsblur.mark_feed_as_read(request.args[0]) + redirect(URL("default", "index")) ADDED applications/mobileblur/controllers/stories.py Index: applications/mobileblur/controllers/stories.py ================================================================== --- /dev/null +++ applications/mobileblur/controllers/stories.py @@ -0,0 +1,12 @@ +# -*- coding: utf-8 -*- + +from pprint import pprint + +def view(): + stories = newsblur.feed(request.vars["feed_id"])["stories"] + story = [story for story in stories if story["id"]==request.vars["story"]][0] + return dict(story=story, feed_id=request.vars["feed_id"]) + +def mark_read(): + results = newsblur.mark_story_as_read(request.vars["story_id"], request.vars["feed_id"]) + redirect(URL("default", "index")) ADDED applications/mobileblur/cron/crontab Index: applications/mobileblur/cron/crontab ================================================================== --- /dev/null +++ applications/mobileblur/cron/crontab @@ -0,0 +1,1 @@ +#crontab ADDED 
applications/mobileblur/languages/es-es.py Index: applications/mobileblur/languages/es-es.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/es-es.py @@ -0,0 +1,259 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"actualice" es una expresión opcional como "campo1=\'nuevo_valor\'". No se puede actualizar o eliminar resultados de un JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s filas eliminadas', +'%s rows updated': '%s filas actualizadas', +'(something like "it-it")': '(algo como "it-it")', +'A new version of web2py is available': 'Hay una nueva versión de web2py disponible', +'A new version of web2py is available: %s': 'Hay una nueva versión de web2py disponible: %s', +'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': 'ATENCION: Inicio de sesión requiere una conexión segura (HTTPS) o localhost.', +'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': 'ATENCION: NO EJECUTE VARIAS PRUEBAS SIMULTANEAMENTE, NO SON THREAD SAFE.', +'ATTENTION: you cannot edit the running application!': 'ATENCION: no puede modificar la aplicación que se ejecuta!', +'About': 'Acerca de', +'About application': 'Acerca de la aplicación', +'Admin is disabled because insecure channel': 'Admin deshabilitado, el canal no es seguro', +'Admin is disabled because unsecure channel': 'Admin deshabilitado, el canal no es seguro', +'Administrator Password:': 'Contraseña del Administrador:', +'Are you sure you want to delete file "%s"?': '¿Está seguro que desea eliminar el archivo "%s"?', +'Are you sure you want to uninstall application "%s"': '¿Está seguro que desea desinstalar la aplicación "%s"', +'Are you sure you want to uninstall application "%s"?': '¿Está seguro que desea desinstalar la aplicación "%s"?', 
+'Authentication': 'Autenticación', +'Available databases and tables': 'Bases de datos y tablas disponibles', +'Cannot be empty': 'No puede estar vacío', +'Cannot compile: there are errors in your app. Debug it, correct errors and try again.': 'No se puede compilar: hay errores en su aplicación. Depure, corrija errores y vuelva a intentarlo.', +'Change Password': 'Cambie Contraseña', +'Check to delete': 'Marque para eliminar', +'Client IP': 'IP del Cliente', +'Controller': 'Controlador', +'Controllers': 'Controladores', +'Copyright': 'Derechos de autor', +'Create new application': 'Cree una nueva aplicación', +'Current request': 'Solicitud en curso', +'Current response': 'Respuesta en curso', +'Current session': 'Sesión en curso', +'DB Model': 'Modelo "db"', +'DESIGN': 'DISEÑO', +'Database': 'Base de datos', +'Date and Time': 'Fecha y Hora', +'Delete': 'Elimine', +'Delete:': 'Elimine:', +'Deploy on Google App Engine': 'Instale en Google App Engine', +'Description': 'Descripción', +'Design for': 'Diseño para', +'E-mail': 'Correo electrónico', +'EDIT': 'EDITAR', +'Edit': 'Editar', +'Edit Profile': 'Editar Perfil', +'Edit This App': 'Edite esta App', +'Edit application': 'Editar aplicación', +'Edit current record': 'Edite el registro actual', +'Editing file': 'Editando archivo', +'Editing file "%s"': 'Editando archivo "%s"', +'Error logs for "%(app)s"': 'Bitácora de errores en "%(app)s"', +'First name': 'Nombre', +'Functions with no doctests will result in [passed] tests.': 'Funciones sin doctests equivalen a pruebas [aceptadas].', +'Group ID': 'ID de Grupo', +'Hello World': 'Hola Mundo', +'Import/Export': 'Importar/Exportar', +'Index': 'Indice', +'Installed applications': 'Aplicaciones instaladas', +'Internal State': 'Estado Interno', +'Invalid Query': 'Consulta inválida', +'Invalid action': 'Acción inválida', +'Invalid email': 'Correo inválido', +'Language files (static strings) updated': 'Archivos de lenguaje (cadenas estáticas) actualizados', +'Languages': 
'Lenguajes', +'Last name': 'Apellido', +'Last saved on:': 'Guardado en:', +'Layout': 'Diseño de página', +'License for': 'Licencia para', +'Login': 'Inicio de sesión', +'Login to the Administrative Interface': 'Inicio de sesión para la Interfaz Administrativa', +'Logout': 'Fin de sesión', +'Lost Password': 'Contraseña perdida', +'Main Menu': 'Menú principal', +'Menu Model': 'Modelo "menu"', +'Models': 'Modelos', +'Modules': 'Módulos', +'NO': 'NO', +'Name': 'Nombre', +'New Record': 'Registro nuevo', +'No databases in this application': 'No hay bases de datos en esta aplicación', +'Origin': 'Origen', +'Original/Translation': 'Original/Traducción', +'Password': 'Contraseña', +'Peeking at file': 'Visualizando archivo', +'Powered by': 'Este sitio usa', +'Query:': 'Consulta:', +'Record ID': 'ID de Registro', +'Register': 'Registrese', +'Registration key': 'Contraseña de Registro', +'Reset Password key': 'Reset Password key', +'Resolve Conflict file': 'archivo Resolución de Conflicto', +'Role': 'Rol', +'Rows in table': 'Filas en la tabla', +'Rows selected': 'Filas seleccionadas', +'Saved file hash:': 'Hash del archivo guardado:', +'Static files': 'Archivos estáticos', +'Stylesheet': 'Hoja de estilo', +'Sure you want to delete this object?': '¿Está seguro que desea eliminar este objeto?', +'Table name': 'Nombre de la tabla', +'Testing application': 'Probando aplicación', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "consulta" es una condición como "db.tabla1.campo1==\'valor\'". 
Algo como "db.tabla1.campo1==db.tabla2.campo2" resulta en un JOIN SQL.', +'The output of the file is a dictionary that was rendered by the view': 'La salida del archivo es un diccionario escenificado por la vista', +'There are no controllers': 'No hay controladores', +'There are no models': 'No hay modelos', +'There are no modules': 'No hay módulos', +'There are no static files': 'No hay archivos estáticos', +'There are no translators, only default language is supported': 'No hay traductores, sólo el lenguaje por defecto es soportado', +'There are no views': 'No hay vistas', +'This is a copy of the scaffolding application': 'Esta es una copia de la aplicación de andamiaje', +'This is the %(filename)s template': 'Esta es la plantilla %(filename)s', +'Ticket': 'Tiquete', +'Timestamp': 'Timestamp', +'Unable to check for upgrades': 'No es posible verificar la existencia de actualizaciones', +'Unable to download': 'No es posible la descarga', +'Unable to download app': 'No es posible descarga la aplicación', +'Update:': 'Actualice:', +'Upload existing application': 'Suba esta aplicación', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, y ~(...) 
para NOT, para crear consultas más complejas.', +'User ID': 'ID de Usuario', +'View': 'Vista', +'Views': 'Vistas', +'Welcome': 'Welcome', +'Welcome %s': 'Bienvenido %s', +'Welcome to web2py': 'Bienvenido a web2py', +'Which called the function': 'La cual llamó la función', +'YES': 'SI', +'You are successfully running web2py': 'Usted está ejecutando web2py exitosamente', +'You can modify this application and adapt it to your needs': 'Usted puede modificar esta aplicación y adaptarla a sus necesidades', +'You visited the url': 'Usted visitó la url', +'about': 'acerca de', +'additional code for your application': 'código adicional para su aplicación', +'admin disabled because no admin password': ' por falta de contraseña', +'admin disabled because not supported on google app engine': 'admin deshabilitado, no es soportado en GAE', +'admin disabled because unable to access password file': 'admin deshabilitado, imposible acceder al archivo con la contraseña', +'and rename it (required):': 'y renombrela (requerido):', +'and rename it:': ' y renombrelo:', +'appadmin': 'appadmin', +'appadmin is disabled because insecure channel': 'admin deshabilitado, el canal no es seguro', +'application "%s" uninstalled': 'aplicación "%s" desinstalada', +'application compiled': 'aplicación compilada', +'application is compiled and cannot be designed': 'la aplicación está compilada y no puede ser modificada', +'cache': 'cache', +'cache, errors and sessions cleaned': 'cache, errores y sesiones eliminados', +'cannot create file': 'no es posible crear archivo', +'cannot upload file "%(filename)s"': 'no es posible subir archivo "%(filename)s"', +'change password': 'cambie contraseña', +'check all': 'marcar todos', +'clean': 'limpiar', +'Online examples': 'Ejemplos en línea', +'Administrative interface': 'Interfaz administrativa', +'click to check for upgrades': 'haga clic para buscar actualizaciones', +'compile': 'compilar', +'compiled application removed': 'aplicación compilada removida', 
+'controllers': 'controladores', +'create file with filename:': 'cree archivo con nombre:', +'create new application:': 'nombre de la nueva aplicación:', +'crontab': 'crontab', +'currently saved or': 'actualmente guardado o', +'customize me!': 'Adaptame!', +'data uploaded': 'datos subidos', +'database': 'base de datos', +'database %s select': 'selección en base de datos %s', +'database administration': 'administración base de datos', +'db': 'db', +'defines tables': 'define tablas', +'delete': 'eliminar', +'delete all checked': 'eliminar marcados', +'design': 'modificar', +'Documentation': 'Documentación', +'done!': 'listo!', +'edit': 'editar', +'edit controller': 'editar controlador', +'edit profile': 'editar perfil', +'errors': 'errores', +'export as csv file': 'exportar como archivo CSV', +'exposes': 'expone', +'extends': 'extiende', +'failed to reload module': 'recarga del módulo ha fallado', +'file "%(filename)s" created': 'archivo "%(filename)s" creado', +'file "%(filename)s" deleted': 'archivo "%(filename)s" eliminado', +'file "%(filename)s" uploaded': 'archivo "%(filename)s" subido', +'file "%(filename)s" was not deleted': 'archivo "%(filename)s" no fué eliminado', +'file "%s" of %s restored': 'archivo "%s" de %s restaurado', +'file changed on disk': 'archivo modificado en el disco', +'file does not exist': 'archivo no existe', +'file saved on %(time)s': 'archivo guardado %(time)s', +'file saved on %s': 'archivo guardado %s', +'help': 'ayuda', +'htmledit': 'htmledit', +'includes': 'incluye', +'insert new': 'inserte nuevo', +'insert new %s': 'inserte nuevo %s', +'internal error': 'error interno', +'invalid password': 'contraseña inválida', +'invalid request': 'solicitud inválida', +'invalid ticket': 'tiquete inválido', +'language file "%(filename)s" created/updated': 'archivo de lenguaje "%(filename)s" creado/actualizado', +'languages': 'lenguajes', +'languages updated': 'lenguajes actualizados', +'loading...': 'cargando...', +'located in the file': 
'localizada en el archivo', +'login': 'inicio de sesión', +'logout': 'fin de sesión', +'lost password?': '¿olvido la contraseña?', +'merge': 'combinar', +'models': 'modelos', +'modules': 'módulos', +'new application "%s" created': 'nueva aplicación "%s" creada', +'new record inserted': 'nuevo registro insertado', +'next 100 rows': '100 filas siguientes', +'or import from csv file': 'o importar desde archivo CSV', +'or provide application url:': 'o provea URL de la aplicación:', +'pack all': 'empaquetar todo', +'pack compiled': 'empaquete compiladas', +'previous 100 rows': '100 filas anteriores', +'record': 'registro', +'record does not exist': 'el registro no existe', +'record id': 'id de registro', +'register': 'registrese', +'remove compiled': 'eliminar compiladas', +'restore': 'restaurar', +'revert': 'revertir', +'save': 'guardar', +'selected': 'seleccionado(s)', +'session expired': 'sesión expirada', +'shell': 'shell', +'site': 'sitio', +'some files could not be removed': 'algunos archivos no pudieron ser removidos', +'state': 'estado', +'static': 'estáticos', +'table': 'tabla', +'test': 'probar', +'the application logic, each URL path is mapped in one exposed function in the controller': 'la lógica de la aplicación, cada ruta URL se mapea en una función expuesta en el controlador', +'the data representation, define database tables and sets': 'la representación de datos, define tablas y conjuntos de base de datos', +'the presentations layer, views are also known as templates': 'la capa de presentación, las vistas también son llamadas plantillas', +'these files are served without processing, your images go here': 'estos archivos son servidos sin procesar, sus imágenes van aquí', +'to previous version.': 'a la versión previa.', +'translation strings for the application': 'cadenas de caracteres de traducción para la aplicación', +'try': 'intente', +'try something like': 'intente algo como', +'unable to create application "%s"': 'no es posible crear la aplicación 
"%s"', +'unable to delete file "%(filename)s"': 'no es posible eliminar el archivo "%(filename)s"', +'unable to parse csv file': 'no es posible analizar el archivo CSV', +'unable to uninstall "%s"': 'no es posible desinstalar "%s"', +'uncheck all': 'desmarcar todos', +'uninstall': 'desinstalar', +'update': 'actualizar', +'update all languages': 'actualizar todos los lenguajes', +'upload application:': 'subir aplicación:', +'upload file:': 'suba archivo:', +'versioning': 'versiones', +'view': 'vista', +'views': 'vistas', +'web2py Recent Tweets': 'Tweets Recientes de web2py', +'web2py is up to date': 'web2py está actualizado', +} ADDED applications/mobileblur/languages/fr-ca.py Index: applications/mobileblur/languages/fr-ca.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/fr-ca.py @@ -0,0 +1,167 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". 
Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s rangées supprimées', +'%s rows updated': '%s rangées mises à jour', +'About': 'À propos', +'Access Control': "Contrôle d'accès", +'Administrative interface': "Interface d'administration", +'Ajax Recipes': 'Recettes Ajax', +'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', +'Authentication': 'Authentification', +'Available databases and tables': 'Bases de données et tables disponibles', +'Buy this book': 'Acheter ce livre', +'Cannot be empty': 'Ne peut pas être vide', +'Check to delete': 'Cliquez pour supprimer', +'Check to delete:': 'Cliquez pour supprimer:', +'Client IP': 'IP client', +'Community': 'Communauté', +'Controller': 'Contrôleur', +'Copyright': "Droit d'auteur", +'Current request': 'Demande actuelle', +'Current response': 'Réponse actuelle', +'Current session': 'Session en cours', +'DB Model': 'Modèle DB', +'Database': 'Base de données', +'Delete:': 'Supprimer:', +'Demo': 'Démo', +'Deployment Recipes': 'Recettes de déploiement ', +'Description': 'Descriptif', +'Documentation': 'Documentation', +'Download': 'Téléchargement', +'E-mail': 'Courriel', +'Edit': 'Éditer', +'Edit This App': 'Modifier cette application', +'Edit current record': "Modifier l'enregistrement courant", +'Errors': 'Erreurs', +'FAQ': 'faq', +'First name': 'Prénom', +'Forms and Validators': 'Formulaires et Validateurs', +'Free Applications': 'Applications gratuites', +'Function disabled': 'Fonction désactivée', +'Group %(group_id)s created': '%(group_id)s groupe créé', +'Group ID': 'Groupe ID', +'Group uniquely assigned to user %(id)s': "Groupe unique attribué à l'utilisateur %(id)s", +'Groups': 'Groupes', +'Hello World': 'Bonjour le monde', +'Home': 'Accueil', +'Import/Export': 'Importer/Exporter', +'Index': 'Index', +'Internal State': 'État interne', +'Introduction': 
'Présentation', +'Invalid Query': 'Requête Invalide', +'Invalid email': 'Courriel invalide', +'Last name': 'Nom', +'Layout': 'Mise en page', +'Layouts': 'layouts', +'Live chat': 'Clavardage en direct', +'Logged in': 'Connecté', +'Login': 'Connectez-vous', +'Lost Password': 'Mot de passe perdu', +'Main Menu': 'Menu principal', +'Menu Model': 'Menu modèle', +'Name': 'Nom', +'New Record': 'Nouvel enregistrement', +'No databases in this application': "Cette application n'a pas de bases de données", +'Online examples': 'Exemples en ligne', +'Origin': 'Origine', +'Other Recipes': 'Autres recettes', +'Overview': 'Présentation', +'Password': 'Mot de passe', +"Password fields don't match": 'Les mots de passe ne correspondent pas', +'Plugins': 'Plugiciels', +'Powered by': 'Alimenté par', +'Preface': 'Préface', +'Python': 'Python', +'Query:': 'Requête:', +'Quick Examples': 'Examples Rapides', +'Readme': 'Lisez-moi', +'Recipes': 'Recettes', +'Record %(id)s created': 'Record %(id)s created', +'Record %(id)s updated': 'Record %(id)s updated', +'Record Created': 'Record Created', +'Record ID': "ID d'enregistrement", +'Record Updated': 'Record Updated', +'Register': "S'inscrire", +'Registration key': "Clé d'enregistrement", +'Registration successful': 'Inscription réussie', +'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)', +'Request reset password': 'Demande de réinitialiser le mot clé', +'Reset Password key': 'Réinitialiser le mot clé', +'Resources': 'Ressources', +'Role': 'Rôle', +'Rows in table': 'Lignes du tableau', +'Rows selected': 'Lignes sélectionnées', +'Semantic': 'Sémantique', +'Services': 'Services', +'Stylesheet': 'Feuille de style', +'Submit': 'Soumettre', +'Support': 'Soutien', +'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', +'Table name': 'Nom du tableau', +'The "query" is a condition like "db.table1.field1==\'value\'". 
Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.', +'The Core': 'Le noyau', +'The Views': 'Les Vues', +'The output of the file is a dictionary that was rendered by the view': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue', +'This App': 'Cette Appli', +'This is a copy of the scaffolding application': "Ceci est une copie de l'application échafaudage", +'Timestamp': 'Horodatage', +'Twitter': 'Twitter', +'Update:': 'Mise à jour:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.', +'User %(id)s Logged-in': 'Utilisateur %(id)s connecté', +'User %(id)s Registered': 'Utilisateur %(id)s enregistré', +'User ID': 'ID utilisateur', +'User Voice': 'User Voice', +'Verify Password': 'Vérifiez le mot de passe', +'Videos': 'Vidéos', +'View': 'Présentation', +'Web2py': 'Web2py', +'Welcome': 'Bienvenu', +'Welcome %s': 'Bienvenue %s', +'Welcome to web2py': 'Bienvenue à web2py', +'Which called the function': 'Qui a appelé la fonction', +'You are successfully running web2py': 'Vous roulez avec succès web2py', +'You can modify this application and adapt it to your needs': "Vous pouvez modifier cette application et l'adapter à vos besoins", +'You visited the url': "Vous avez visité l'URL", +'about': 'à propos', +'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé", +'cache': 'cache', +'change password': 'changer le mot de passe', +'customize me!': 'personnalisez-moi!', +'data uploaded': 'données téléchargées', +'database': 'base de données', +'database %s select': 'base de données %s select', +'db': 'db', +'design': 'design', +'done!': 'fait!', +'edit profile': 
'modifier le profil', +'enter an integer between %(min)g and %(max)g': 'entrer un entier compris entre %(min)g et %(max)g', +'export as csv file': 'exporter sous forme de fichier csv', +'insert new': 'insérer un nouveau', +'insert new %s': 'insérer un nouveau %s', +'invalid request': 'requête invalide', +'located in the file': 'se trouvant dans le fichier', +'login': 'connectez-vous', +'logout': 'déconnectez-vous', +'lost password': 'mot de passe perdu', +'lost password?': 'mot de passe perdu?', +'new record inserted': 'nouvel enregistrement inséré', +'next 100 rows': '100 prochaines lignes', +'or import from csv file': "ou importer d'un fichier CSV", +'password': 'mot de passe', +'please input your password again': "S'il vous plaît entrer votre mot de passe", +'previous 100 rows': '100 lignes précédentes', +'profile': 'profile', +'record': 'enregistrement', +'record does not exist': "l'archive n'existe pas", +'record id': "id d'enregistrement", +'register': "s'inscrire", +'selected': 'sélectionné', +'state': 'état', +'table': 'tableau', +'unable to parse csv file': "incapable d'analyser le fichier cvs", +'value already in database or empty': 'valeur déjà dans la base ou vide', +} ADDED applications/mobileblur/languages/fr-fr.py Index: applications/mobileblur/languages/fr-fr.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/fr-fr.py @@ -0,0 +1,155 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" est une expression optionnelle comme "champ1=\'nouvellevaleur\'". 
Vous ne pouvez mettre à jour ou supprimer les résultats d\'un JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s rangées supprimées', +'%s rows updated': '%s rangées mises à jour', +'About': 'À propos', +'Access Control': 'Contrôle d\'accès', +'Ajax Recipes': 'Recettes Ajax', +'Are you sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', +'Authentication': 'Authentification', +'Available databases and tables': 'Bases de données et tables disponibles', +'Buy this book': 'Acheter ce livre', +'Cannot be empty': 'Ne peut pas être vide', +'Check to delete': 'Cliquez pour supprimer', +'Check to delete:': 'Cliquez pour supprimer:', +'Client IP': 'IP client', +'Community': 'Communauté', +'Controller': 'Contrôleur', +'Copyright': 'Copyright', +'Current request': 'Demande actuelle', +'Current response': 'Réponse actuelle', +'Current session': 'Session en cours', +'DB Model': 'Modèle DB', +'Database': 'Base de données', +'Delete:': 'Supprimer:', +'Demo': 'Démo', +'Deployment Recipes': 'Recettes de déploiement', +'Description': 'Description', +'Documentation': 'Documentation', +'Download': 'Téléchargement', +'E-mail': 'E-mail', +'Edit': 'Éditer', +'Edit This App': 'Modifier cette application', +'Edit current record': "Modifier l'enregistrement courant", +'Errors': 'Erreurs', +'FAQ': 'FAQ', +'First name': 'Prénom', +'Forms and Validators': 'Formulaires et Validateurs', +'Free Applications': 'Applications gratuites', +'Function disabled': 'Fonction désactivée', +'Group ID': 'Groupe ID', +'Groups': 'Groups', +'Hello World': 'Bonjour le monde', +'Home': 'Accueil', +'Import/Export': 'Importer/Exporter', +'Index': 'Index', +'Internal State': 'État interne', +'Introduction': 'Introduction', +'Invalid Query': 'Requête Invalide', +'Invalid email': 'E-mail invalide', +'Last name': 'Nom', +'Layout': 'Mise en page', +'Layouts': 'Layouts', +'Live chat': 'Chat live', +'Login': 'Connectez-vous', +'Lost 
Password': 'Mot de passe perdu', +'Main Menu': 'Menu principal', +'Menu Model': 'Menu modèle', +'Name': 'Nom', +'New Record': 'Nouvel enregistrement', +'No databases in this application': "Cette application n'a pas de bases de données", +'Origin': 'Origine', +'Other Recipes': 'Autres recettes', +'Overview': 'Présentation', +'Password': 'Mot de passe', +"Password fields don't match": 'Les mots de passe ne correspondent pas', +'Plugins': 'Plugiciels', +'Powered by': 'Alimenté par', +'Preface': 'Préface', +'Python': 'Python', +'Query:': 'Requête:', +'Quick Examples': 'Examples Rapides', +'Recipes': 'Recettes', +'Record ID': 'ID d\'enregistrement', +'Register': "S'inscrire", +'Registration key': "Clé d'enregistrement", +'Remember me (for 30 days)': 'Se souvenir de moi (pendant 30 jours)', +'Request reset password': 'Demande de réinitialiser le mot clé', +'Reset Password key': 'Réinitialiser le mot clé', +'Resources': 'Ressources', +'Role': 'Rôle', +'Rows in table': 'Lignes du tableau', +'Rows selected': 'Lignes sélectionnées', +'Semantic': 'Sémantique', +'Services': 'Services', +'Stylesheet': 'Feuille de style', +'Submit': 'Soumettre', +'Support': 'Support', +'Sure you want to delete this object?': 'Êtes-vous sûr de vouloir supprimer cet objet?', +'Table name': 'Nom du tableau', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La "query" est une condition comme "db.table1.champ1==\'valeur\'". 
Quelque chose comme "db.table1.champ1==db.table2.champ2" résulte en un JOIN SQL.', +'The Core': 'Le noyau', +'The Views': 'Les Vues', +'The output of the file is a dictionary that was rendered by the view': 'La sortie de ce fichier est un dictionnaire qui été restitué par la vue', +'This App': 'Cette Appli', +'This is a copy of the scaffolding application': 'Ceci est une copie de l\'application échafaudage', +'Timestamp': 'Horodatage', +'Twitter': 'Twitter', +'Update:': 'Mise à jour:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Employez (...)&(...) pour AND, (...)|(...) pour OR, and ~(...) pour NOT pour construire des requêtes plus complexes.', +'User %(id)s Logged-in': 'Utilisateur %(id)s connecté', +'User %(id)s Registered': 'Utilisateur %(id)s enregistré', +'User ID': 'ID utilisateur', +'User Voice': 'User Voice', +'Verify Password': 'Vérifiez le mot de passe', +'Videos': 'Vidéos', +'View': 'Présentation', +'Web2py': 'Web2py', +'Welcome': 'Bienvenu', +'Welcome %s': 'Bienvenue %s', +'Welcome to web2py': 'Bienvenue à web2py', +'Which called the function': 'Qui a appelé la fonction', +'You are successfully running web2py': 'Vous roulez avec succès web2py', +'You can modify this application and adapt it to your needs': 'Vous pouvez modifier cette application et l\'adapter à vos besoins', +'You visited the url': 'Vous avez visité l\'URL', +'appadmin is disabled because insecure channel': "appadmin est désactivée parce que le canal n'est pas sécurisé", +'cache': 'cache', +'change password': 'changer le mot de passe', +'Online examples': 'Exemples en ligne', +'Administrative interface': "Interface d'administration", +'customize me!': 'personnalisez-moi!', +'data uploaded': 'données téléchargées', +'database': 'base de données', +'database %s select': 'base de données %s select', +'db': 'db', +'design': 'design', +'Documentation': 'Documentation', +'done!': 'fait!', +'edit profile': 'modifier le profil', +'enter 
an integer between %(min)g and %(max)g': 'enter an integer between %(min)g and %(max)g', +'export as csv file': 'exporter sous forme de fichier csv', +'insert new': 'insérer un nouveau', +'insert new %s': 'insérer un nouveau %s', +'invalid request': 'requête invalide', +'located in the file': 'se trouvant dans le fichier', +'login': 'connectez-vous', +'logout': 'déconnectez-vous', +'lost password': 'mot de passe perdu', +'lost password?': 'mot de passe perdu?', +'new record inserted': 'nouvel enregistrement inséré', +'next 100 rows': '100 prochaines lignes', +'or import from csv file': "ou importer d'un fichier CSV", +'previous 100 rows': '100 lignes précédentes', +'record': 'enregistrement', +'record does not exist': "l'archive n'existe pas", +'record id': "id d'enregistrement", +'register': "s'inscrire", +'selected': 'sélectionné', +'state': 'état', +'table': 'tableau', +'unable to parse csv file': "incapable d'analyser le fichier cvs", +'Readme': "Lisez-moi", +} ADDED applications/mobileblur/languages/hi-hi.py Index: applications/mobileblur/languages/hi-hi.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/hi-hi.py @@ -0,0 +1,82 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". 
You cannot update or delete the results of a JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81 \xe0\xa4\xae\xe0\xa4\xbf\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\x8f\xe0\xa4\x81', +'%s rows updated': '%s \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81 \xe0\xa4\x85\xe0\xa4\xa6\xe0\xa5\x8d\xe0\xa4\xaf\xe0\xa4\xa4\xe0\xa4\xa8', +'Available databases and tables': '\xe0\xa4\x89\xe0\xa4\xaa\xe0\xa4\xb2\xe0\xa4\xac\xe0\xa5\x8d\xe0\xa4\xa7 \xe0\xa4\xa1\xe0\xa5\x87\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xac\xe0\xa5\x87\xe0\xa4\xb8 \xe0\xa4\x94\xe0\xa4\xb0 \xe0\xa4\xa4\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa4\xbe', +'Cannot be empty': '\xe0\xa4\x96\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa5\x80 \xe0\xa4\xa8\xe0\xa4\xb9\xe0\xa5\x80\xe0\xa4\x82 \xe0\xa4\xb9\xe0\xa5\x8b \xe0\xa4\xb8\xe0\xa4\x95\xe0\xa4\xa4\xe0\xa4\xbe', +'Change Password': '\xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\xb8\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\xac\xe0\xa4\xa6\xe0\xa4\xb2\xe0\xa5\x87\xe0\xa4\x82', +'Check to delete': '\xe0\xa4\xb9\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xa8\xe0\xa5\x87 \xe0\xa4\x95\xe0\xa5\x87 \xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x8f \xe0\xa4\x9a\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa5\x87\xe0\xa4\x82', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': '\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\xa8 \xe0\xa4\x85\xe0\xa4\xa8\xe0\xa5\x81\xe0\xa4\xb0\xe0\xa5\x8b\xe0\xa4\xa7', +'Current response': '\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\xa8 \xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe', +'Current session': '\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\xa8 
\xe0\xa4\xb8\xe0\xa5\x87\xe0\xa4\xb6\xe0\xa4\xa8', +'DB Model': 'DB Model', +'Database': 'Database', +'Delete:': '\xe0\xa4\xae\xe0\xa4\xbf\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xa8\xe0\xa4\xbe:', +'Edit': 'Edit', +'Edit Profile': '\xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa5\x8b\xe0\xa4\xab\xe0\xa4\xbc\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\xb2 \xe0\xa4\xb8\xe0\xa4\x82\xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\xa6\xe0\xa4\xbf\xe0\xa4\xa4 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82', +'Edit This App': 'Edit This App', +'Edit current record': '\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\xa8 \xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x95\xe0\xa5\x89\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\xb8\xe0\xa4\x82\xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\xa6\xe0\xa4\xbf\xe0\xa4\xa4 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82 ', +'Hello World': 'Hello World', +'Hello from MyApp': 'Hello from MyApp', +'Import/Export': '\xe0\xa4\x86\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\xa4 / \xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\xa4', +'Index': 'Index', +'Internal State': '\xe0\xa4\x86\xe0\xa4\x82\xe0\xa4\xa4\xe0\xa4\xb0\xe0\xa4\xbf\xe0\xa4\x95 \xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa5\xe0\xa4\xbf\xe0\xa4\xa4\xe0\xa4\xbf', +'Invalid Query': '\xe0\xa4\x85\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\xa8\xe0\xa5\x8d\xe0\xa4\xaf \xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xb6\xe0\xa5\x8d\xe0\xa4\xa8', +'Layout': 'Layout', +'Login': '\xe0\xa4\xb2\xe0\xa5\x89\xe0\xa4\x97 \xe0\xa4\x87\xe0\xa4\xa8', +'Logout': '\xe0\xa4\xb2\xe0\xa5\x89\xe0\xa4\x97 \xe0\xa4\x86\xe0\xa4\x89\xe0\xa4\x9f', +'Lost Password': '\xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\xb8\xe0\xa4\xb5\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\x96\xe0\xa5\x8b \xe0\xa4\x97\xe0\xa4\xaf\xe0\xa4\xbe', +'Main Menu': 'Main Menu', +'Menu Model': 'Menu Model', +'New Record': '\xe0\xa4\xa8\xe0\xa4\xaf\xe0\xa4\xbe \xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x95\xe0\xa5\x89\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1', +'No databases in this 
application': '\xe0\xa4\x87\xe0\xa4\xb8 \xe0\xa4\x85\xe0\xa4\xa8\xe0\xa5\x81\xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xaf\xe0\xa5\x8b\xe0\xa4\x97 \xe0\xa4\xae\xe0\xa5\x87\xe0\xa4\x82 \xe0\xa4\x95\xe0\xa5\x8b\xe0\xa4\x88 \xe0\xa4\xa1\xe0\xa5\x87\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xac\xe0\xa5\x87\xe0\xa4\xb8 \xe0\xa4\xa8\xe0\xa4\xb9\xe0\xa5\x80\xe0\xa4\x82 \xe0\xa4\xb9\xe0\xa5\x88\xe0\xa4\x82', +'Powered by': 'Powered by', +'Query:': '\xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xb6\xe0\xa5\x8d\xe0\xa4\xa8:', +'Register': '\xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x9c\xe0\xa5\x80\xe0\xa4\x95\xe0\xa5\x83\xe0\xa4\xa4 (\xe0\xa4\xb0\xe0\xa4\x9c\xe0\xa4\xbf\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\x9f\xe0\xa4\xb0) \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa4\xa8\xe0\xa4\xbe ', +'Rows in table': '\xe0\xa4\xa4\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa4\xbe \xe0\xa4\xae\xe0\xa5\x87\xe0\xa4\x82 \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81 ', +'Rows selected': '\xe0\xa4\x9a\xe0\xa4\xaf\xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xa4 (\xe0\xa4\x9a\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa5\x87 \xe0\xa4\x97\xe0\xa4\xaf\xe0\xa5\x87) \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81 ', +'Stylesheet': 'Stylesheet', +'Sure you want to delete this object?': '\xe0\xa4\xb8\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xb6\xe0\xa5\x8d\xe0\xa4\x9a\xe0\xa4\xbf\xe0\xa4\xa4 \xe0\xa4\xb9\xe0\xa5\x88\xe0\xa4\x82 \xe0\xa4\x95\xe0\xa4\xbf \xe0\xa4\x86\xe0\xa4\xaa \xe0\xa4\x87\xe0\xa4\xb8 \xe0\xa4\xb5\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa5\x81 \xe0\xa4\x95\xe0\xa5\x8b \xe0\xa4\xb9\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xa8\xe0\xa4\xbe \xe0\xa4\x9a\xe0\xa4\xbe\xe0\xa4\xb9\xe0\xa4\xa4\xe0\xa5\x87 \xe0\xa4\xb9\xe0\xa5\x88\xe0\xa4\x82?', +'The "query" is a condition like "db.table1.field1==\'value\'". 
Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.', +'Update:': '\xe0\xa4\x85\xe0\xa4\xa6\xe0\xa5\x8d\xe0\xa4\xaf\xe0\xa4\xa4\xe0\xa4\xa8 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa4\xa8\xe0\xa4\xbe:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.', +'View': 'View', +'Welcome %s': 'Welcome %s', +'Welcome to web2py': '\xe0\xa4\xb5\xe0\xa5\x87\xe0\xa4\xac\xe0\xa5\xa8\xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\x87 (web2py) \xe0\xa4\xae\xe0\xa5\x87\xe0\xa4\x82 \xe0\xa4\x86\xe0\xa4\xaa\xe0\xa4\x95\xe0\xa4\xbe \xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xb5\xe0\xa4\xbe\xe0\xa4\x97\xe0\xa4\xa4 \xe0\xa4\xb9\xe0\xa5\x88', +'appadmin is disabled because insecure channel': '\xe0\xa4\x85\xe0\xa4\xaa \xe0\xa4\x86\xe0\xa4\xa1\xe0\xa4\xae\xe0\xa4\xbf\xe0\xa4\xa8 (appadmin) \xe0\xa4\x85\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb7\xe0\xa4\xae \xe0\xa4\xb9\xe0\xa5\x88 \xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xaf\xe0\xa5\x8b\xe0\xa4\x82\xe0\xa4\x95\xe0\xa4\xbf \xe0\xa4\x85\xe0\xa4\xb8\xe0\xa5\x81\xe0\xa4\xb0\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb7\xe0\xa4\xbf\xe0\xa4\xa4 \xe0\xa4\x9a\xe0\xa5\x88\xe0\xa4\xa8\xe0\xa4\xb2', +'cache': 'cache', +'change password': 'change password', +'Online examples': '\xe0\xa4\x91\xe0\xa4\xa8\xe0\xa4\xb2\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\xa8 \xe0\xa4\x89\xe0\xa4\xa6\xe0\xa4\xbe\xe0\xa4\xb9\xe0\xa4\xb0\xe0\xa4\xa3 \xe0\xa4\x95\xe0\xa5\x87 \xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x8f \xe0\xa4\xaf\xe0\xa4\xb9\xe0\xa4\xbe\xe0\xa4\x81 \xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x95 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82', +'Administrative interface': '\xe0\xa4\xaa\xe0\xa5\x8d\xe0\xa4\xb0\xe0\xa4\xb6\xe0\xa4\xbe\xe0\xa4\xb8\xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\x95 
\xe0\xa4\x87\xe0\xa4\x82\xe0\xa4\x9f\xe0\xa4\xb0\xe0\xa4\xab\xe0\xa5\x87\xe0\xa4\xb8 \xe0\xa4\x95\xe0\xa5\x87 \xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x8f \xe0\xa4\xaf\xe0\xa4\xb9\xe0\xa4\xbe\xe0\xa4\x81 \xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x95 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82', +'customize me!': '\xe0\xa4\xae\xe0\xa5\x81\xe0\xa4\x9d\xe0\xa5\x87 \xe0\xa4\x85\xe0\xa4\xa8\xe0\xa5\x81\xe0\xa4\x95\xe0\xa5\x82\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\xa4 (\xe0\xa4\x95\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\x9f\xe0\xa4\xae\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\x9c\xe0\xa4\xbc) \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82!', +'data uploaded': '\xe0\xa4\xa1\xe0\xa4\xbe\xe0\xa4\x9f\xe0\xa4\xbe \xe0\xa4\x85\xe0\xa4\xaa\xe0\xa4\xb2\xe0\xa5\x8b\xe0\xa4\xa1 \xe0\xa4\xb8\xe0\xa4\xae\xe0\xa5\x8d\xe0\xa4\xaa\xe0\xa4\xa8\xe0\xa5\x8d\xe0\xa4\xa8 ', +'database': '\xe0\xa4\xa1\xe0\xa5\x87\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xac\xe0\xa5\x87\xe0\xa4\xb8', +'database %s select': '\xe0\xa4\xa1\xe0\xa5\x87\xe0\xa4\x9f\xe0\xa4\xbe\xe0\xa4\xac\xe0\xa5\x87\xe0\xa4\xb8 %s \xe0\xa4\x9a\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa5\x80 \xe0\xa4\xb9\xe0\xa5\x81\xe0\xa4\x88', +'db': 'db', +'design': '\xe0\xa4\xb0\xe0\xa4\x9a\xe0\xa4\xa8\xe0\xa4\xbe \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x82', +'done!': '\xe0\xa4\xb9\xe0\xa5\x8b \xe0\xa4\x97\xe0\xa4\xaf\xe0\xa4\xbe!', +'edit profile': 'edit profile', +'export as csv file': 'csv \xe0\xa4\xab\xe0\xa4\xbc\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\xb2 \xe0\xa4\x95\xe0\xa5\x87 \xe0\xa4\xb0\xe0\xa5\x82\xe0\xa4\xaa \xe0\xa4\xae\xe0\xa5\x87\xe0\xa4\x82 \xe0\xa4\xa8\xe0\xa4\xbf\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\xa4', +'insert new': '\xe0\xa4\xa8\xe0\xa4\xaf\xe0\xa4\xbe \xe0\xa4\xa1\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa5\x87\xe0\xa4\x82', +'insert new %s': '\xe0\xa4\xa8\xe0\xa4\xaf\xe0\xa4\xbe %s \xe0\xa4\xa1\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa5\x87\xe0\xa4\x82', +'invalid request': '\xe0\xa4\x85\xe0\xa4\xb5\xe0\xa5\x88\xe0\xa4\xa7 
\xe0\xa4\x85\xe0\xa4\xa8\xe0\xa5\x81\xe0\xa4\xb0\xe0\xa5\x8b\xe0\xa4\xa7', +'login': 'login', +'logout': 'logout', +'new record inserted': '\xe0\xa4\xa8\xe0\xa4\xaf\xe0\xa4\xbe \xe0\xa4\xb0\xe0\xa5\x87\xe0\xa4\x95\xe0\xa5\x89\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\xa1\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa4\xbe', +'next 100 rows': '\xe0\xa4\x85\xe0\xa4\x97\xe0\xa4\xb2\xe0\xa5\x87 100 \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81', +'or import from csv file': '\xe0\xa4\xaf\xe0\xa4\xbe csv \xe0\xa4\xab\xe0\xa4\xbc\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\xb2 \xe0\xa4\xb8\xe0\xa5\x87 \xe0\xa4\x86\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\xa4', +'previous 100 rows': '\xe0\xa4\xaa\xe0\xa4\xbf\xe0\xa4\x9b\xe0\xa4\xb2\xe0\xa5\x87 100 \xe0\xa4\xaa\xe0\xa4\x82\xe0\xa4\x95\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbf\xe0\xa4\xaf\xe0\xa4\xbe\xe0\xa4\x81', +'record': 'record', +'record does not exist': '\xe0\xa4\xb0\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa5\x89\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\xae\xe0\xa5\x8c\xe0\xa4\x9c\xe0\xa5\x82\xe0\xa4\xa6 \xe0\xa4\xa8\xe0\xa4\xb9\xe0\xa5\x80\xe0\xa4\x82 \xe0\xa4\xb9\xe0\xa5\x88', +'record id': '\xe0\xa4\xb0\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa5\x89\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa1 \xe0\xa4\xaa\xe0\xa4\xb9\xe0\xa4\x9a\xe0\xa4\xbe\xe0\xa4\xa8\xe0\xa4\x95\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa4\xe0\xa4\xbe (\xe0\xa4\x86\xe0\xa4\x88\xe0\xa4\xa1\xe0\xa5\x80)', +'register': 'register', +'selected': '\xe0\xa4\x9a\xe0\xa5\x81\xe0\xa4\xa8\xe0\xa4\xbe \xe0\xa4\xb9\xe0\xa5\x81\xe0\xa4\x86', +'state': '\xe0\xa4\xb8\xe0\xa5\x8d\xe0\xa4\xa5\xe0\xa4\xbf\xe0\xa4\xa4\xe0\xa4\xbf', +'table': '\xe0\xa4\xa4\xe0\xa4\xbe\xe0\xa4\xb2\xe0\xa4\xbf\xe0\xa4\x95\xe0\xa4\xbe', +'unable to parse csv file': 'csv \xe0\xa4\xab\xe0\xa4\xbc\xe0\xa4\xbe\xe0\xa4\x87\xe0\xa4\xb2 \xe0\xa4\xaa\xe0\xa4\xbe\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xb8 \xe0\xa4\x95\xe0\xa4\xb0\xe0\xa4\xa8\xe0\xa5\x87 \xe0\xa4\xae\xe0\xa5\x87\xe0\xa4\x82 
\xe0\xa4\x85\xe0\xa4\xb8\xe0\xa4\xae\xe0\xa4\xb0\xe0\xa5\x8d\xe0\xa4\xa5', +} ADDED applications/mobileblur/languages/hu-hu.py Index: applications/mobileblur/languages/hu-hu.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/hu-hu.py @@ -0,0 +1,93 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN', +'%Y-%m-%d': '%Y.%m.%d.', +'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S', +'%s rows deleted': '%s sorok t\xc3\xb6rl\xc5\x91dtek', +'%s rows updated': '%s sorok friss\xc3\xadt\xc5\x91dtek', +'Available databases and tables': 'El\xc3\xa9rhet\xc5\x91 adatb\xc3\xa1zisok \xc3\xa9s t\xc3\xa1bl\xc3\xa1k', +'Cannot be empty': 'Nem lehet \xc3\xbcres', +'Check to delete': 'T\xc3\xb6rl\xc3\xa9shez v\xc3\xa1laszd ki', +'Client IP': 'Client IP', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Jelenlegi lek\xc3\xa9rdez\xc3\xa9s', +'Current response': 'Jelenlegi v\xc3\xa1lasz', +'Current session': 'Jelenlegi folyamat', +'DB Model': 'DB Model', +'Database': 'Adatb\xc3\xa1zis', +'Delete:': 'T\xc3\xb6r\xc3\xb6l:', +'Description': 'Description', +'E-mail': 'E-mail', +'Edit': 'Szerkeszt', +'Edit This App': 'Alkalmaz\xc3\xa1st szerkeszt', +'Edit current record': 'Aktu\xc3\xa1lis bejegyz\xc3\xa9s szerkeszt\xc3\xa9se', +'First name': 'First name', +'Group ID': 'Group ID', +'Hello World': 'Hello Vil\xc3\xa1g', +'Import/Export': 'Import/Export', +'Index': 'Index', +'Internal State': 'Internal State', +'Invalid Query': 'Hib\xc3\xa1s lek\xc3\xa9rdez\xc3\xa9s', +'Invalid email': 'Invalid email', +'Last name': 'Last name', +'Layout': 'Szerkezet', +'Main Menu': 'F\xc5\x91men\xc3\xbc', +'Menu Model': 'Men\xc3\xbc model', +'Name': 'Name', +'New Record': '\xc3\x9aj bejegyz\xc3\xa9s', +'No databases in this 
application': 'Nincs adatb\xc3\xa1zis ebben az alkalmaz\xc3\xa1sban', +'Origin': 'Origin', +'Password': 'Password', +'Powered by': 'Powered by', +'Query:': 'Lek\xc3\xa9rdez\xc3\xa9s:', +'Record ID': 'Record ID', +'Registration key': 'Registration key', +'Reset Password key': 'Reset Password key', +'Role': 'Role', +'Rows in table': 'Sorok a t\xc3\xa1bl\xc3\xa1ban', +'Rows selected': 'Kiv\xc3\xa1lasztott sorok', +'Stylesheet': 'Stylesheet', +'Sure you want to delete this object?': 'Biztos t\xc3\xb6rli ezt az objektumot?', +'Table name': 'Table name', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.', +'Timestamp': 'Timestamp', +'Update:': 'Friss\xc3\xadt:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) 
for NOT to build more complex queries.', +'User ID': 'User ID', +'View': 'N\xc3\xa9zet', +'Welcome %s': 'Welcome %s', +'Welcome to web2py': 'Isten hozott a web2py-ban', +'appadmin is disabled because insecure channel': 'az appadmin a biztons\xc3\xa1gtalan csatorna miatt letiltva', +'cache': 'gyors\xc3\xadt\xc3\xb3t\xc3\xa1r', +'change password': 'jelsz\xc3\xb3 megv\xc3\xa1ltoztat\xc3\xa1sa', +'Online examples': 'online p\xc3\xa9ld\xc3\xa1k\xc3\xa9rt kattints ide', +'Administrative interface': 'az adminisztr\xc3\xa1ci\xc3\xb3s fel\xc3\xbclet\xc3\xa9rt kattints ide', +'customize me!': 'v\xc3\xa1ltoztass meg!', +'data uploaded': 'adat felt\xc3\xb6ltve', +'database': 'adatb\xc3\xa1zis', +'database %s select': 'adatb\xc3\xa1zis %s kiv\xc3\xa1laszt\xc3\xa1s', +'db': 'db', +'design': 'design', +'done!': 'k\xc3\xa9sz!', +'edit profile': 'profil szerkeszt\xc3\xa9se', +'export as csv file': 'export\xc3\xa1l csv f\xc3\xa1jlba', +'insert new': '\xc3\xbaj beilleszt\xc3\xa9se', +'insert new %s': '\xc3\xbaj beilleszt\xc3\xa9se %s', +'invalid request': 'hib\xc3\xa1s k\xc3\xa9r\xc3\xa9s', +'login': 'bel\xc3\xa9p', +'logout': 'kil\xc3\xa9p', +'lost password': 'elveszett jelsz\xc3\xb3', +'new record inserted': '\xc3\xbaj bejegyz\xc3\xa9s felv\xc3\xa9ve', +'next 100 rows': 'k\xc3\xb6vetkez\xc5\x91 100 sor', +'or import from csv file': 'vagy bet\xc3\xb6lt\xc3\xa9s csv f\xc3\xa1jlb\xc3\xb3l', +'previous 100 rows': 'el\xc5\x91z\xc5\x91 100 sor', +'record': 'bejegyz\xc3\xa9s', +'record does not exist': 'bejegyz\xc3\xa9s nem l\xc3\xa9tezik', +'record id': 'bejegyz\xc3\xa9s id', +'register': 'regisztr\xc3\xa1ci\xc3\xb3', +'selected': 'kiv\xc3\xa1lasztott', +'state': '\xc3\xa1llapot', +'table': 't\xc3\xa1bla', +'unable to parse csv file': 'nem lehet a csv f\xc3\xa1jlt beolvasni', +} ADDED applications/mobileblur/languages/hu.py Index: applications/mobileblur/languages/hu.py ================================================================== --- /dev/null +++ 
applications/mobileblur/languages/hu.py @@ -0,0 +1,93 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN', +'%Y-%m-%d': '%Y.%m.%d.', +'%Y-%m-%d %H:%M:%S': '%Y.%m.%d. %H:%M:%S', +'%s rows deleted': '%s sorok t\xc3\xb6rl\xc5\x91dtek', +'%s rows updated': '%s sorok friss\xc3\xadt\xc5\x91dtek', +'Available databases and tables': 'El\xc3\xa9rhet\xc5\x91 adatb\xc3\xa1zisok \xc3\xa9s t\xc3\xa1bl\xc3\xa1k', +'Cannot be empty': 'Nem lehet \xc3\xbcres', +'Check to delete': 'T\xc3\xb6rl\xc3\xa9shez v\xc3\xa1laszd ki', +'Client IP': 'Client IP', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Jelenlegi lek\xc3\xa9rdez\xc3\xa9s', +'Current response': 'Jelenlegi v\xc3\xa1lasz', +'Current session': 'Jelenlegi folyamat', +'DB Model': 'DB Model', +'Database': 'Adatb\xc3\xa1zis', +'Delete:': 'T\xc3\xb6r\xc3\xb6l:', +'Description': 'Description', +'E-mail': 'E-mail', +'Edit': 'Szerkeszt', +'Edit This App': 'Alkalmaz\xc3\xa1st szerkeszt', +'Edit current record': 'Aktu\xc3\xa1lis bejegyz\xc3\xa9s szerkeszt\xc3\xa9se', +'First name': 'First name', +'Group ID': 'Group ID', +'Hello World': 'Hello Vil\xc3\xa1g', +'Import/Export': 'Import/Export', +'Index': 'Index', +'Internal State': 'Internal State', +'Invalid Query': 'Hib\xc3\xa1s lek\xc3\xa9rdez\xc3\xa9s', +'Invalid email': 'Invalid email', +'Last name': 'Last name', +'Layout': 'Szerkezet', +'Main Menu': 'F\xc5\x91men\xc3\xbc', +'Menu Model': 'Men\xc3\xbc model', +'Name': 'Name', +'New Record': '\xc3\x9aj bejegyz\xc3\xa9s', +'No databases in this application': 'Nincs adatb\xc3\xa1zis ebben az alkalmaz\xc3\xa1sban', +'Origin': 'Origin', +'Password': 'Password', +'Powered by': 'Powered by', +'Query:': 'Lek\xc3\xa9rdez\xc3\xa9s:', +'Record ID': 'Record ID', +'Registration key': 'Registration key', +'Reset 
Password key': 'Reset Password key', +'Role': 'Role', +'Rows in table': 'Sorok a t\xc3\xa1bl\xc3\xa1ban', +'Rows selected': 'Kiv\xc3\xa1lasztott sorok', +'Stylesheet': 'Stylesheet', +'Sure you want to delete this object?': 'Biztos t\xc3\xb6rli ezt az objektumot?', +'Table name': 'Table name', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.', +'Timestamp': 'Timestamp', +'Update:': 'Friss\xc3\xadt:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.', +'User ID': 'User ID', +'View': 'N\xc3\xa9zet', +'Welcome %s': 'Welcome %s', +'Welcome to web2py': 'Isten hozott a web2py-ban', +'appadmin is disabled because insecure channel': 'az appadmin a biztons\xc3\xa1gtalan csatorna miatt letiltva', +'cache': 'gyors\xc3\xadt\xc3\xb3t\xc3\xa1r', +'change password': 'jelsz\xc3\xb3 megv\xc3\xa1ltoztat\xc3\xa1sa', +'Online examples': 'online p\xc3\xa9ld\xc3\xa1k\xc3\xa9rt kattints ide', +'Administrative interface': 'az adminisztr\xc3\xa1ci\xc3\xb3s fel\xc3\xbclet\xc3\xa9rt kattints ide', +'customize me!': 'v\xc3\xa1ltoztass meg!', +'data uploaded': 'adat felt\xc3\xb6ltve', +'database': 'adatb\xc3\xa1zis', +'database %s select': 'adatb\xc3\xa1zis %s kiv\xc3\xa1laszt\xc3\xa1s', +'db': 'db', +'design': 'design', +'done!': 'k\xc3\xa9sz!', +'edit profile': 'profil szerkeszt\xc3\xa9se', +'export as csv file': 'export\xc3\xa1l csv f\xc3\xa1jlba', +'insert new': '\xc3\xbaj beilleszt\xc3\xa9se', +'insert new %s': '\xc3\xbaj beilleszt\xc3\xa9se %s', +'invalid request': 'hib\xc3\xa1s k\xc3\xa9r\xc3\xa9s', +'login': 'bel\xc3\xa9p', +'logout': 'kil\xc3\xa9p', +'lost password': 'elveszett jelsz\xc3\xb3', +'new record 
inserted': '\xc3\xbaj bejegyz\xc3\xa9s felv\xc3\xa9ve', +'next 100 rows': 'k\xc3\xb6vetkez\xc5\x91 100 sor', +'or import from csv file': 'vagy bet\xc3\xb6lt\xc3\xa9s csv f\xc3\xa1jlb\xc3\xb3l', +'previous 100 rows': 'el\xc5\x91z\xc5\x91 100 sor', +'record': 'bejegyz\xc3\xa9s', +'record does not exist': 'bejegyz\xc3\xa9s nem l\xc3\xa9tezik', +'record id': 'bejegyz\xc3\xa9s id', +'register': 'regisztr\xc3\xa1ci\xc3\xb3', +'selected': 'kiv\xc3\xa1lasztott', +'state': '\xc3\xa1llapot', +'table': 't\xc3\xa1bla', +'unable to parse csv file': 'nem lehet a csv f\xc3\xa1jlt beolvasni', +} ADDED applications/mobileblur/languages/it-it.py Index: applications/mobileblur/languages/it-it.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/it-it.py @@ -0,0 +1,104 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". 
Non si può fare "update" o "delete" dei risultati di un JOIN ', +'%Y-%m-%d': '%d/%m/%Y', +'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S', +'%s rows deleted': '%s righe ("record") cancellate', +'%s rows updated': '%s righe ("record") modificate', +'Available databases and tables': 'Database e tabelle disponibili', +'Cannot be empty': 'Non può essere vuoto', +'Check to delete': 'Seleziona per cancellare', +'Client IP': 'Client IP', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Richiesta (request) corrente', +'Current response': 'Risposta (response) corrente', +'Current session': 'Sessione (session) corrente', +'DB Model': 'Modello di DB', +'Database': 'Database', +'Delete:': 'Cancella:', +'Description': 'Descrizione', +'E-mail': 'E-mail', +'Edit': 'Modifica', +'Edit This App': 'Modifica questa applicazione', +'Edit current record': 'Modifica record corrente', +'First name': 'Nome', +'Group ID': 'ID Gruppo', +'Hello World': 'Salve Mondo', +'Hello World in a flash!': 'Salve Mondo in un flash!', +'Import/Export': 'Importa/Esporta', +'Index': 'Indice', +'Internal State': 'Stato interno', +'Invalid Query': 'Richiesta (query) non valida', +'Invalid email': 'Email non valida', +'Last name': 'Cognome', +'Layout': 'Layout', +'Main Menu': 'Menu principale', +'Menu Model': 'Menu Modelli', +'Name': 'Nome', +'New Record': 'Nuovo elemento (record)', +'No databases in this application': 'Nessun database presente in questa applicazione', +'Origin': 'Origine', +'Password': 'Password', +'Powered by': 'Powered by', +'Query:': 'Richiesta (query):', +'Record ID': 'Record ID', +'Registration key': 'Chiave di Registazione', +'Reset Password key': 'Resetta chiave Password ', +'Role': 'Ruolo', +'Rows in table': 'Righe nella tabella', +'Rows selected': 'Righe selezionate', +'Stylesheet': 'Foglio di stile (stylesheet)', +'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?', +'Table name': 'Nome tabella', +'The "query" is a condition like 
"db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.', +'The output of the file is a dictionary that was rendered by the view': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista', +'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)", +'Timestamp': 'Ora (timestamp)', +'Update:': 'Aggiorna:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).', +'User ID': 'ID Utente', +'View': 'Vista', +'Welcome %s': 'Benvenuto %s', +'Welcome to web2py': 'Benvenuto su web2py', +'Which called the function': 'che ha chiamato la funzione', +'You are successfully running web2py': 'Stai eseguendo web2py con successo', +'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità', +'You visited the url': "Hai visitato l'URL", +'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura', +'cache': 'cache', +'change password': 'Cambia password', +'Online examples': 'Vedere gli esempi', +'Administrative interface': "Interfaccia amministrativa", +'customize me!': 'Personalizzami!', +'data uploaded': 'dati caricati', +'database': 'database', +'database %s select': 'database %s select', +'db': 'db', +'design': 'progetta', +'Documentation': 'Documentazione', +'done!': 'fatto!', +'edit profile': 'modifica profilo', +'export as csv file': 'esporta come file CSV', +'hello world': 'salve mondo', +'insert new': 'inserisci nuovo', +'insert new %s': 'inserisci nuovo %s', +'invalid 
request': 'richiesta non valida', +'located in the file': 'presente nel file', +'login': 'accesso', +'logout': 'uscita', +'lost password?': 'dimenticato la password?', +'new record inserted': 'nuovo record inserito', +'next 100 rows': 'prossime 100 righe', +'not authorized': 'non autorizzato', +'or import from csv file': 'oppure importa da file CSV', +'previous 100 rows': '100 righe precedenti', +'record': 'record', +'record does not exist': 'il record non esiste', +'record id': 'record id', +'register': 'registrazione', +'selected': 'selezionato', +'state': 'stato', +'table': 'tabella', +'unable to parse csv file': 'non riesco a decodificare questo file CSV', +} ADDED applications/mobileblur/languages/it.py Index: applications/mobileblur/languages/it.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/it.py @@ -0,0 +1,104 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" è un\'espressione opzionale come "campo1=\'nuovo valore\'". 
Non si può fare "update" o "delete" dei risultati di un JOIN ', +'%Y-%m-%d': '%d/%m/%Y', +'%Y-%m-%d %H:%M:%S': '%d/%m/%Y %H:%M:%S', +'%s rows deleted': '%s righe ("record") cancellate', +'%s rows updated': '%s righe ("record") modificate', +'Available databases and tables': 'Database e tabelle disponibili', +'Cannot be empty': 'Non può essere vuoto', +'Check to delete': 'Seleziona per cancellare', +'Client IP': 'Client IP', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Richiesta (request) corrente', +'Current response': 'Risposta (response) corrente', +'Current session': 'Sessione (session) corrente', +'DB Model': 'Modello di DB', +'Database': 'Database', +'Delete:': 'Cancella:', +'Description': 'Descrizione', +'E-mail': 'E-mail', +'Edit': 'Modifica', +'Edit This App': 'Modifica questa applicazione', +'Edit current record': 'Modifica record corrente', +'First name': 'Nome', +'Group ID': 'ID Gruppo', +'Hello World': 'Salve Mondo', +'Hello World in a flash!': 'Salve Mondo in un flash!', +'Import/Export': 'Importa/Esporta', +'Index': 'Indice', +'Internal State': 'Stato interno', +'Invalid Query': 'Richiesta (query) non valida', +'Invalid email': 'Email non valida', +'Last name': 'Cognome', +'Layout': 'Layout', +'Main Menu': 'Menu principale', +'Menu Model': 'Menu Modelli', +'Name': 'Nome', +'New Record': 'Nuovo elemento (record)', +'No databases in this application': 'Nessun database presente in questa applicazione', +'Origin': 'Origine', +'Password': 'Password', +'Powered by': 'Powered by', +'Query:': 'Richiesta (query):', +'Record ID': 'Record ID', +'Registration key': 'Chiave di Registazione', +'Reset Password key': 'Resetta chiave Password ', +'Role': 'Ruolo', +'Rows in table': 'Righe nella tabella', +'Rows selected': 'Righe selezionate', +'Stylesheet': 'Foglio di stile (stylesheet)', +'Sure you want to delete this object?': 'Vuoi veramente cancellare questo oggetto?', +'Table name': 'Nome tabella', +'The "query" is a condition like 
"db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'La richiesta (query) è una condizione come ad esempio "db.tabella1.campo1==\'valore\'". Una condizione come "db.tabella1.campo1==db.tabella2.campo2" produce un "JOIN" SQL.', +'The output of the file is a dictionary that was rendered by the view': 'L\'output del file è un "dictionary" che è stato visualizzato dalla vista', +'This is a copy of the scaffolding application': "Questa è una copia dell'applicazione di base (scaffold)", +'Timestamp': 'Ora (timestamp)', +'Update:': 'Aggiorna:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Per costruire richieste (query) più complesse si usano (...)&(...) come "e" (AND), (...)|(...) come "o" (OR), e ~(...) come negazione (NOT).', +'User ID': 'ID Utente', +'View': 'Vista', +'Welcome %s': 'Benvenuto %s', +'Welcome to web2py': 'Benvenuto su web2py', +'Which called the function': 'che ha chiamato la funzione', +'You are successfully running web2py': 'Stai eseguendo web2py con successo', +'You can modify this application and adapt it to your needs': 'Puoi modificare questa applicazione adattandola alle tue necessità', +'You visited the url': "Hai visitato l'URL", +'appadmin is disabled because insecure channel': 'Amministrazione (appadmin) disabilitata: comunicazione non sicura', +'cache': 'cache', +'change password': 'Cambia password', +'Online examples': 'Vedere gli esempi', +'Administrative interface': "Interfaccia amministrativa", +'customize me!': 'Personalizzami!', +'data uploaded': 'dati caricati', +'database': 'database', +'database %s select': 'database %s select', +'db': 'db', +'design': 'progetta', +'Documentation': 'Documentazione', +'done!': 'fatto!', +'edit profile': 'modifica profilo', +'export as csv file': 'esporta come file CSV', +'hello world': 'salve mondo', +'insert new': 'inserisci nuovo', +'insert new %s': 'inserisci nuovo %s', +'invalid 
request': 'richiesta non valida', +'located in the file': 'presente nel file', +'login': 'accesso', +'logout': 'uscita', +'lost password?': 'dimenticato la password?', +'new record inserted': 'nuovo record inserito', +'next 100 rows': 'prossime 100 righe', +'not authorized': 'non autorizzato', +'or import from csv file': 'oppure importa da file CSV', +'previous 100 rows': '100 righe precedenti', +'record': 'record', +'record does not exist': 'il record non esiste', +'record id': 'record id', +'register': 'registrazione', +'selected': 'selezionato', +'state': 'stato', +'table': 'tabella', +'unable to parse csv file': 'non riesco a decodificare questo file CSV', +} ADDED applications/mobileblur/languages/pl-pl.py Index: applications/mobileblur/languages/pl-pl.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/pl-pl.py @@ -0,0 +1,81 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". 
Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s', +'%s rows updated': 'Wierszy uaktualnionych: %s', +'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele', +'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste', +'Change Password': 'Change Password', +'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Aktualne \xc5\xbc\xc4\x85danie', +'Current response': 'Aktualna odpowied\xc5\xba', +'Current session': 'Aktualna sesja', +'DB Model': 'DB Model', +'Database': 'Database', +'Delete:': 'Usu\xc5\x84:', +'Edit': 'Edit', +'Edit Profile': 'Edit Profile', +'Edit This App': 'Edit This App', +'Edit current record': 'Edytuj aktualny rekord', +'Hello World': 'Witaj \xc5\x9awiecie', +'Import/Export': 'Importuj/eksportuj', +'Index': 'Index', +'Internal State': 'Stan wewn\xc4\x99trzny', +'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie', +'Layout': 'Layout', +'Login': 'Zaloguj', +'Logout': 'Logout', +'Lost Password': 'Przypomnij has\xc5\x82o', +'Main Menu': 'Main Menu', +'Menu Model': 'Menu Model', +'New Record': 'Nowy rekord', +'No databases in this application': 'Brak baz danych w tej aplikacji', +'Powered by': 'Powered by', +'Query:': 'Zapytanie:', +'Register': 'Zarejestruj', +'Rows in table': 'Wiersze w tabeli', +'Rows selected': 'Wybrane wiersze', +'Stylesheet': 'Stylesheet', +'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.', +'Update:': 'Uaktualnij:', +'Use (...)&(...) 
for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.', +'View': 'View', +'Welcome %s': 'Welcome %s', +'Welcome to web2py': 'Witaj w web2py', +'appadmin is disabled because insecure channel': 'appadmin is disabled because insecure channel', +'cache': 'cache', +'change password': 'change password', +'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w', +'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego', +'customize me!': 'dostosuj mnie!', +'data uploaded': 'dane wys\xc5\x82ane', +'database': 'baza danych', +'database %s select': 'wyb\xc3\xb3r z bazy danych %s', +'db': 'baza danych', +'design': 'projektuj', +'done!': 'zrobione!', +'edit profile': 'edit profile', +'export as csv file': 'eksportuj jako plik csv', +'insert new': 'wstaw nowy rekord tabeli', +'insert new %s': 'wstaw nowy rekord do tabeli %s', +'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie', +'login': 'login', +'logout': 'logout', +'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony', +'next 100 rows': 'nast\xc4\x99pne 100 wierszy', +'or import from csv file': 'lub zaimportuj z pliku csv', +'previous 100 rows': 'poprzednie 100 wierszy', +'record': 'record', +'record does not exist': 'rekord nie istnieje', +'record id': 'id rekordu', +'register': 'register', +'selected': 'wybranych', +'state': 'stan', +'table': 'tabela', +'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv', +} ADDED applications/mobileblur/languages/pl.py Index: applications/mobileblur/languages/pl.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/pl.py @@ -0,0 +1,104 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". 
You cannot update or delete the results of a JOIN': '"Uaktualnij" jest dodatkowym wyra\xc5\xbceniem postaci "pole1=\'nowawarto\xc5\x9b\xc4\x87\'". Nie mo\xc5\xbcesz uaktualni\xc4\x87 lub usun\xc4\x85\xc4\x87 wynik\xc3\xb3w z JOIN:', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': 'Wierszy usuni\xc4\x99tych: %s', +'%s rows updated': 'Wierszy uaktualnionych: %s', +'Authentication': 'Uwierzytelnienie', +'Available databases and tables': 'Dost\xc4\x99pne bazy danych i tabele', +'Cannot be empty': 'Nie mo\xc5\xbce by\xc4\x87 puste', +'Change Password': 'Zmie\xc5\x84 has\xc5\x82o', +'Check to delete': 'Zaznacz aby usun\xc4\x85\xc4\x87', +'Check to delete:': 'Zaznacz aby usun\xc4\x85\xc4\x87:', +'Client IP': 'IP klienta', +'Controller': 'Kontroler', +'Copyright': 'Copyright', +'Current request': 'Aktualne \xc5\xbc\xc4\x85danie', +'Current response': 'Aktualna odpowied\xc5\xba', +'Current session': 'Aktualna sesja', +'DB Model': 'Model bazy danych', +'Database': 'Baza danych', +'Delete:': 'Usu\xc5\x84:', +'Description': 'Opis', +'E-mail': 'Adres e-mail', +'Edit': 'Edycja', +'Edit Profile': 'Edytuj profil', +'Edit This App': 'Edytuj t\xc4\x99 aplikacj\xc4\x99', +'Edit current record': 'Edytuj obecny rekord', +'First name': 'Imi\xc4\x99', +'Function disabled': 'Funkcja wy\xc5\x82\xc4\x85czona', +'Group ID': 'ID grupy', +'Hello World': 'Witaj \xc5\x9awiecie', +'Import/Export': 'Importuj/eksportuj', +'Index': 'Indeks', +'Internal State': 'Stan wewn\xc4\x99trzny', +'Invalid Query': 'B\xc5\x82\xc4\x99dne zapytanie', +'Invalid email': 'B\xc5\x82\xc4\x99dny adres email', +'Last name': 'Nazwisko', +'Layout': 'Uk\xc5\x82ad', +'Login': 'Zaloguj', +'Logout': 'Wyloguj', +'Lost Password': 'Przypomnij has\xc5\x82o', +'Main Menu': 'Menu g\xc5\x82\xc3\xb3wne', +'Menu Model': 'Model menu', +'Name': 'Nazwa', +'New Record': 'Nowy rekord', +'No databases in this application': 'Brak baz danych w tej aplikacji', +'Origin': '\xc5\xb9r\xc3\xb3d\xc5\x82o', 
+'Password': 'Has\xc5\x82o', +"Password fields don't match": 'Pola has\xc5\x82a nie s\xc4\x85 zgodne ze sob\xc4\x85', +'Powered by': 'Zasilane przez', +'Query:': 'Zapytanie:', +'Record ID': 'ID rekordu', +'Register': 'Zarejestruj', +'Registration key': 'Klucz rejestracji', +'Role': 'Rola', +'Rows in table': 'Wiersze w tabeli', +'Rows selected': 'Wybrane wiersze', +'Stylesheet': 'Arkusz styl\xc3\xb3w', +'Submit': 'Wy\xc5\x9blij', +'Sure you want to delete this object?': 'Czy na pewno chcesz usun\xc4\x85\xc4\x87 ten obiekt?', +'Table name': 'Nazwa tabeli', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Zapytanie" jest warunkiem postaci "db.tabela1.pole1==\'warto\xc5\x9b\xc4\x87\'". Takie co\xc5\x9b jak "db.tabela1.pole1==db.tabela2.pole2" oznacza SQL JOIN.', +'Timestamp': 'Znacznik czasu', +'Update:': 'Uaktualnij:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'U\xc5\xbcyj (...)&(...) jako AND, (...)|(...) jako OR oraz ~(...) 
jako NOT do tworzenia bardziej skomplikowanych zapyta\xc5\x84.', +'User %(id)s Registered': 'U\xc5\xbcytkownik %(id)s zosta\xc5\x82 zarejestrowany', +'User ID': 'ID u\xc5\xbcytkownika', +'Verify Password': 'Potwierd\xc5\xba has\xc5\x82o', +'View': 'Widok', +'Welcome %s': 'Welcome %s', +'Welcome to web2py': 'Witaj w web2py', +'appadmin is disabled because insecure channel': 'administracja aplikacji wy\xc5\x82\xc4\x85czona z powodu braku bezpiecznego po\xc5\x82\xc4\x85czenia', +'cache': 'cache', +'change password': 'change password', +'Online examples': 'Kliknij aby przej\xc5\x9b\xc4\x87 do interaktywnych przyk\xc5\x82ad\xc3\xb3w', +'Administrative interface': 'Kliknij aby przej\xc5\x9b\xc4\x87 do panelu administracyjnego', +'customize me!': 'dostosuj mnie!', +'data uploaded': 'dane wys\xc5\x82ane', +'database': 'baza danych', +'database %s select': 'wyb\xc3\xb3r z bazy danych %s', +'db': 'baza danych', +'design': 'projektuj', +'done!': 'zrobione!', +'edit profile': 'edit profile', +'export as csv file': 'eksportuj jako plik csv', +'insert new': 'wstaw nowy rekord tabeli', +'insert new %s': 'wstaw nowy rekord do tabeli %s', +'invalid request': 'B\xc5\x82\xc4\x99dne \xc5\xbc\xc4\x85danie', +'login': 'login', +'logout': 'logout', +'new record inserted': 'nowy rekord zosta\xc5\x82 wstawiony', +'next 100 rows': 'nast\xc4\x99pne 100 wierszy', +'or import from csv file': 'lub zaimportuj z pliku csv', +'previous 100 rows': 'poprzednie 100 wierszy', +'record': 'rekord', +'record does not exist': 'rekord nie istnieje', +'record id': 'id rekordu', +'register': 'register', +'selected': 'wybranych', +'state': 'stan', +'table': 'tabela', +'unable to parse csv file': 'nie mo\xc5\xbcna sparsowa\xc4\x87 pliku csv', +} ADDED applications/mobileblur/languages/pt-br.py Index: applications/mobileblur/languages/pt-br.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/pt-br.py @@ -0,0 +1,142 @@ +# coding: utf8 +{ 
+'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "campo1=\'novovalor\'". Você não pode atualizar ou apagar os resultados de um JOIN', +'%Y-%m-%d': '%d-%m-%Y', +'%Y-%m-%d %H:%M:%S': '%d-%m-%Y %H:%M:%S', +'%s rows deleted': '%s linhas apagadas', +'%s rows updated': '%s linhas atualizadas', +'About': 'About', +'Access Control': 'Access Control', +'Ajax Recipes': 'Ajax Recipes', +'Available databases and tables': 'Bancos de dados e tabelas disponíveis', +'Buy this book': 'Buy this book', +'Cannot be empty': 'Não pode ser vazio', +'Check to delete': 'Marque para apagar', +'Client IP': 'Client IP', +'Community': 'Community', +'Controller': 'Controlador', +'Copyright': 'Copyright', +'Current request': 'Requisição atual', +'Current response': 'Resposta atual', +'Current session': 'Sessão atual', +'DB Model': 'Modelo BD', +'Database': 'Banco de dados', +'Delete:': 'Apagar:', +'Demo': 'Demo', +'Deployment Recipes': 'Deployment Recipes', +'Description': 'Description', +'Documentation': 'Documentation', +'Download': 'Download', +'E-mail': 'E-mail', +'Edit': 'Editar', +'Edit This App': 'Edit This App', +'Edit current record': 'Editar o registro atual', +'Errors': 'Errors', +'FAQ': 'FAQ', +'First name': 'First name', +'Forms and Validators': 'Forms and Validators', +'Free Applications': 'Free Applications', +'Group ID': 'Group ID', +'Groups': 'Groups', +'Hello World': 'Olá Mundo', +'Home': 'Home', +'Import/Export': 'Importar/Exportar', +'Index': 'Início', +'Internal State': 'Estado Interno', +'Introduction': 'Introduction', +'Invalid Query': 'Consulta Inválida', +'Invalid email': 'Invalid email', +'Last name': 'Last name', +'Layout': 'Layout', +'Layouts': 'Layouts', +'Live chat': 'Live chat', +'Login': 'Autentique-se', +'Lost Password': 'Esqueceu sua senha?', +'Main Menu': 'Menu Principal', +'Menu Model': 'Modelo de Menu', +'Name': 'Name', +'New Record': 'Novo Registro', 
+'No databases in this application': 'Sem bancos de dados nesta aplicação', +'Origin': 'Origin', +'Other Recipes': 'Other Recipes', +'Overview': 'Overview', +'Password': 'Password', +'Plugins': 'Plugins', +'Powered by': 'Powered by', +'Preface': 'Preface', +'Python': 'Python', +'Query:': 'Consulta:', +'Quick Examples': 'Quick Examples', +'Recipes': 'Recipes', +'Record ID': 'Record ID', +'Register': 'Registre-se', +'Registration key': 'Registration key', +'Reset Password key': 'Reset Password key', +'Resources': 'Resources', +'Role': 'Role', +'Rows in table': 'Linhas na tabela', +'Rows selected': 'Linhas selecionadas', +'Semantic': 'Semantic', +'Services': 'Services', +'Stylesheet': 'Stylesheet', +'Support': 'Support', +'Sure you want to delete this object?': 'Está certo(a) que deseja apagar esse objeto ?', +'Table name': 'Table name', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'Uma "consulta" é uma condição como "db.tabela1.campo1==\'valor\'". Expressões como "db.tabela1.campo1==db.tabela2.campo2" resultam em um JOIN SQL.', +'The Core': 'The Core', +'The Views': 'The Views', +'The output of the file is a dictionary that was rendered by the view': 'The output of the file is a dictionary that was rendered by the view', +'This App': 'This App', +'This is a copy of the scaffolding application': 'This is a copy of the scaffolding application', +'Timestamp': 'Timestamp', +'Twitter': 'Twitter', +'Update:': 'Atualizar:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Use (...)&(...) para AND, (...)|(...) para OR, e ~(...) 
para NOT para construir consultas mais complexas.', +'User ID': 'User ID', +'User Voice': 'User Voice', +'Videos': 'Videos', +'View': 'Visualização', +'Web2py': 'Web2py', +'Welcome': 'Welcome', +'Welcome %s': 'Bem vindo %s', +'Welcome to web2py': 'Bem vindo ao web2py', +'Which called the function': 'Which called the function', +'You are successfully running web2py': 'You are successfully running web2py', +'You are successfully running web2py.': 'You are successfully running web2py.', +'You can modify this application and adapt it to your needs': 'You can modify this application and adapt it to your needs', +'You visited the url': 'You visited the url', +'appadmin is disabled because insecure channel': 'Administração desativada devido ao canal inseguro', +'cache': 'cache', +'change password': 'modificar senha', +'Online examples': 'Alguns exemplos', +'Administrative interface': 'Interface administrativa', +'customize me!': 'Personalize-me!', +'data uploaded': 'dados enviados', +'database': 'banco de dados', +'database %s select': 'Selecionar banco de dados %s', +'db': 'bd', +'design': 'design', +'Documentation': 'Documentation', +'done!': 'concluído!', +'edit profile': 'editar perfil', +'export as csv file': 'exportar como um arquivo csv', +'insert new': 'inserir novo', +'insert new %s': 'inserir novo %s', +'invalid request': 'requisição inválida', +'located in the file': 'located in the file', +'login': 'Entrar', +'logout': 'Sair', +'lost password?': 'lost password?', +'new record inserted': 'novo registro inserido', +'next 100 rows': 'próximas 100 linhas', +'or import from csv file': 'ou importar de um arquivo csv', +'previous 100 rows': '100 linhas anteriores', +'record': 'registro', +'record does not exist': 'registro não existe', +'record id': 'id do registro', +'register': 'Registre-se', +'selected': 'selecionado', +'state': 'estado', +'table': 'tabela', +'unable to parse csv file': 'não foi possível analisar arquivo csv', +} ADDED 
applications/mobileblur/languages/pt-pt.py Index: applications/mobileblur/languages/pt-pt.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/pt-pt.py @@ -0,0 +1,116 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "field1=\'newvalue\'". Não pode actualizar ou eliminar os resultados de um JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s linhas eliminadas', +'%s rows updated': '%s linhas actualizadas', +'About': 'About', +'Author Reference Auth User': 'Author Reference Auth User', +'Author Reference Auth User.username': 'Author Reference Auth User.username', +'Available databases and tables': 'bases de dados e tabelas disponíveis', +'Cannot be empty': 'não pode ser vazio', +'Category Create': 'Category Create', +'Category Select': 'Category Select', +'Check to delete': 'seleccione para eliminar', +'Comment Create': 'Comment Create', +'Comment Select': 'Comment Select', +'Content': 'Content', +'Controller': 'Controlador', +'Copyright': 'Direitos de cópia', +'Created By': 'Created By', +'Created On': 'Created On', +'Current request': 'pedido currente', +'Current response': 'resposta currente', +'Current session': 'sessão currente', +'DB Model': 'Modelo de BD', +'Database': 'Base de dados', +'Delete:': 'Eliminar:', +'Edit': 'Editar', +'Edit This App': 'Edite esta aplicação', +'Edit current record': 'Edição de registo currente', +'Email': 'Email', +'First Name': 'First Name', +'For %s #%s': 'For %s #%s', +'Hello World': 'Olá Mundo', +'Import/Export': 'Importar/Exportar', +'Index': 'Índice', +'Internal State': 'Estado interno', +'Invalid Query': 'Consulta Inválida', +'Last Name': 'Last Name', +'Layout': 'Esboço', +'Main Menu': 'Menu Principal', +'Menu Model': 'Menu do Modelo', +'Modified By': 'Modified By', +'Modified On': 
'Modified On', +'Name': 'Name', +'New Record': 'Novo Registo', +'No Data': 'No Data', +'No databases in this application': 'Não há bases de dados nesta aplicação', +'Password': 'Password', +'Post Create': 'Post Create', +'Post Select': 'Post Select', +'Powered by': 'Suportado por', +'Query:': 'Interrogação:', +'Replyto Reference Post': 'Replyto Reference Post', +'Rows in table': 'Linhas numa tabela', +'Rows selected': 'Linhas seleccionadas', +'Stylesheet': 'Folha de estilo', +'Sure you want to delete this object?': 'Tem a certeza que deseja eliminar este objecto?', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'A "query" é uma condição do tipo "db.table1.field1==\'value\'". Algo como "db.table1.field1==db.table2.field2" resultaria num SQL JOIN.', +'Title': 'Title', +'Update:': 'Actualização:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilize (...)&(...) para AND, (...)|(...) para OR, e ~(...) 
para NOT para construir interrogações mais complexas.', +'Username': 'Username', +'View': 'Vista', +'Welcome %s': 'Bem-vindo(a) %s', +'Welcome to Gluonization': 'Bem vindo ao Web2py', +'Welcome to web2py': 'Bem-vindo(a) ao web2py', +'When': 'When', +'appadmin is disabled because insecure channel': 'appadmin está desactivada pois o canal é inseguro', +'cache': 'cache', +'change password': 'alterar palavra-chave', +'Online examples': 'Exemplos online', +'Administrative interface': 'Painel administrativo', +'create new category': 'create new category', +'create new comment': 'create new comment', +'create new post': 'create new post', +'customize me!': 'Personaliza-me!', +'data uploaded': 'informação enviada', +'database': 'base de dados', +'database %s select': 'selecção de base de dados %s', +'db': 'bd', +'design': 'design', +'done!': 'concluído!', +'edit category': 'edit category', +'edit comment': 'edit comment', +'edit post': 'edit post', +'edit profile': 'Editar perfil', +'export as csv file': 'exportar como ficheiro csv', +'insert new': 'inserir novo', +'insert new %s': 'inserir novo %s', +'invalid request': 'Pedido Inválido', +'login': 'login', +'logout': 'logout', +'new record inserted': 'novo registo inserido', +'next 100 rows': 'próximas 100 linhas', +'or import from csv file': 'ou importe a partir de ficheiro csv', +'previous 100 rows': '100 linhas anteriores', +'record': 'registo', +'record does not exist': 'registo inexistente', +'record id': 'id de registo', +'register': 'register', +'search category': 'search category', +'search comment': 'search comment', +'search post': 'search post', +'select category': 'select category', +'select comment': 'select comment', +'select post': 'select post', +'selected': 'seleccionado(s)', +'show category': 'show category', +'show comment': 'show comment', +'show post': 'show post', +'state': 'estado', +'table': 'tabela', +'unable to parse csv file': 'não foi possível carregar ficheiro csv', +} ADDED 
applications/mobileblur/languages/pt.py Index: applications/mobileblur/languages/pt.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/pt.py @@ -0,0 +1,116 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" é uma expressão opcional como "field1=\'newvalue\'". Não pode actualizar ou eliminar os resultados de um JOIN', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s linhas eliminadas', +'%s rows updated': '%s linhas actualizadas', +'About': 'About', +'Author Reference Auth User': 'Author Reference Auth User', +'Author Reference Auth User.username': 'Author Reference Auth User.username', +'Available databases and tables': 'bases de dados e tabelas disponíveis', +'Cannot be empty': 'não pode ser vazio', +'Category Create': 'Category Create', +'Category Select': 'Category Select', +'Check to delete': 'seleccione para eliminar', +'Comment Create': 'Comment Create', +'Comment Select': 'Comment Select', +'Content': 'Content', +'Controller': 'Controlador', +'Copyright': 'Direitos de cópia', +'Created By': 'Created By', +'Created On': 'Created On', +'Current request': 'pedido currente', +'Current response': 'resposta currente', +'Current session': 'sessão currente', +'DB Model': 'Modelo de BD', +'Database': 'Base de dados', +'Delete:': 'Eliminar:', +'Edit': 'Editar', +'Edit This App': 'Edite esta aplicação', +'Edit current record': 'Edição de registo currente', +'Email': 'Email', +'First Name': 'First Name', +'For %s #%s': 'For %s #%s', +'Hello World': 'Olá Mundo', +'Import/Export': 'Importar/Exportar', +'Index': 'Índice', +'Internal State': 'Estado interno', +'Invalid Query': 'Consulta Inválida', +'Last Name': 'Last Name', +'Layout': 'Esboço', +'Main Menu': 'Menu Principal', +'Menu Model': 'Menu do Modelo', +'Modified By': 'Modified By', +'Modified On': 'Modified 
On', +'Name': 'Name', +'New Record': 'Novo Registo', +'No Data': 'No Data', +'No databases in this application': 'Não há bases de dados nesta aplicação', +'Password': 'Password', +'Post Create': 'Post Create', +'Post Select': 'Post Select', +'Powered by': 'Suportado por', +'Query:': 'Interrogação:', +'Replyto Reference Post': 'Replyto Reference Post', +'Rows in table': 'Linhas numa tabela', +'Rows selected': 'Linhas seleccionadas', +'Stylesheet': 'Folha de estilo', +'Sure you want to delete this object?': 'Tem a certeza que deseja eliminar este objecto?', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': 'A "query" é uma condição do tipo "db.table1.field1==\'value\'". Algo como "db.table1.field1==db.table2.field2" resultaria num SQL JOIN.', +'Title': 'Title', +'Update:': 'Actualização:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Utilize (...)&(...) para AND, (...)|(...) para OR, e ~(...) 
para NOT para construir interrogações mais complexas.', +'Username': 'Username', +'View': 'Vista', +'Welcome %s': 'Bem-vindo(a) %s', +'Welcome to Gluonization': 'Bem vindo ao Web2py', +'Welcome to web2py': 'Bem-vindo(a) ao web2py', +'When': 'When', +'appadmin is disabled because insecure channel': 'appadmin está desactivada pois o canal é inseguro', +'cache': 'cache', +'change password': 'alterar palavra-chave', +'Online examples': 'Exemplos online', +'Administrative interface': 'Painel administrativo', +'create new category': 'create new category', +'create new comment': 'create new comment', +'create new post': 'create new post', +'customize me!': 'Personaliza-me!', +'data uploaded': 'informação enviada', +'database': 'base de dados', +'database %s select': 'selecção de base de dados %s', +'db': 'bd', +'design': 'design', +'done!': 'concluído!', +'edit category': 'edit category', +'edit comment': 'edit comment', +'edit post': 'edit post', +'edit profile': 'Editar perfil', +'export as csv file': 'exportar como ficheiro csv', +'insert new': 'inserir novo', +'insert new %s': 'inserir novo %s', +'invalid request': 'Pedido Inválido', +'login': 'login', +'logout': 'logout', +'new record inserted': 'novo registo inserido', +'next 100 rows': 'próximas 100 linhas', +'or import from csv file': 'ou importe a partir de ficheiro csv', +'previous 100 rows': '100 linhas anteriores', +'record': 'registo', +'record does not exist': 'registo inexistente', +'record id': 'id de registo', +'register': 'register', +'search category': 'search category', +'search comment': 'search comment', +'search post': 'search post', +'select category': 'select category', +'select comment': 'select comment', +'select post': 'select post', +'selected': 'seleccionado(s)', +'show category': 'show category', +'show comment': 'show comment', +'show post': 'show post', +'state': 'estado', +'table': 'tabela', +'unable to parse csv file': 'não foi possível carregar ficheiro csv', +} ADDED 
applications/mobileblur/languages/ru-ru.py Index: applications/mobileblur/languages/ru-ru.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/ru-ru.py @@ -0,0 +1,96 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"Изменить" - необязательное выражение вида "field1=\'новое значение\'". Результаты операции JOIN нельзя изменить или удалить.', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '%s строк удалено', +'%s rows updated': '%s строк изменено', +'Available databases and tables': 'Базы данных и таблицы', +'Cannot be empty': 'Пустое значение недопустимо', +'Change Password': 'Смените пароль', +'Check to delete': 'Удалить', +'Check to delete:': 'Удалить:', +'Client IP': 'Client IP', +'Current request': 'Текущий запрос', +'Current response': 'Текущий ответ', +'Current session': 'Текущая сессия', +'Delete:': 'Удалить:', +'Description': 'Описание', +'E-mail': 'E-mail', +'Edit Profile': 'Редактировать профиль', +'Edit current record': 'Редактировать текущую запись', +'First name': 'Имя', +'Group ID': 'Group ID', +'Hello World': 'Заработало!', +'Import/Export': 'Импорт/экспорт', +'Internal State': 'Внутренне состояние', +'Invalid Query': 'Неверный запрос', +'Invalid email': 'Неверный email', +'Invalid login': 'Неверный логин', +'Invalid password': 'Неверный пароль', +'Last name': 'Фамилия', +'Logged in': 'Вход выполнен', +'Logged out': 'Выход выполнен', +'Login': 'Вход', +'Logout': 'Выход', +'Lost Password': 'Забыли пароль?', +'Name': 'Name', +'New Record': 'Новая запись', +'New password': 'Новый пароль', +'No databases in this application': 'В приложении нет баз данных', +'Old password': 'Старый пароль', +'Origin': 'Происхождение', +'Password': 'Пароль', +"Password fields don't match": 'Пароли не совпадают', +'Query:': 'Запрос:', +'Record ID': 'ID записи', +'Register': 
'Зарегистрироваться', +'Registration key': 'Ключ регистрации', +'Remember me (for 30 days)': 'Запомнить меня (на 30 дней)', +'Reset Password key': 'Сбросить ключ пароля', +'Role': 'Роль', +'Rows in table': 'Строк в таблице', +'Rows selected': 'Выделено строк', +'Submit': 'Отправить', +'Sure you want to delete this object?': 'Подтвердите удаление объекта', +'Table name': 'Имя таблицы', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"Запрос" - это условие вида "db.table1.field1==\'значение\'". Выражение вида "db.table1.field1==db.table2.field2" формирует SQL JOIN.', +'Timestamp': 'Отметка времени', +'Update:': 'Изменить:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Для построение сложных запросов используйте операторы "И": (...)&(...), "ИЛИ": (...)|(...), "НЕ": ~(...).', +'User %(id)s Logged-in': 'Пользователь %(id)s вошёл', +'User %(id)s Logged-out': 'Пользователь %(id)s вышел', +'User %(id)s Password changed': 'Пользователь %(id)s сменил пароль', +'User %(id)s Profile updated': 'Пользователь %(id)s обновил профиль', +'User %(id)s Registered': 'Пользователь %(id)s зарегистрировался', +'User ID': 'ID пользователя', +'Verify Password': 'Повторите пароль', +'Welcome to web2py': 'Добро пожаловать в web2py', +'Online examples': 'примеры он-лайн', +'Administrative interface': 'административный интерфейс', +'customize me!': 'настройте внешний вид!', +'data uploaded': 'данные загружены', +'database': 'база данных', +'database %s select': 'выбор базы данных %s', +'db': 'БД', +'design': 'дизайн', +'done!': 'готово!', +'export as csv file': 'экспорт в csv-файл', +'insert new': 'добавить', +'insert new %s': 'добавить %s', +'invalid request': 'неверный запрос', +'login': 'вход', +'logout': 'выход', +'new record inserted': 'новая запись добавлена', +'next 100 rows': 'следующие 100 строк', +'or import from csv file': 'или импорт 
из csv-файла', +'password': 'пароль', +'previous 100 rows': 'предыдущие 100 строк', +'profile': 'профиль', +'record does not exist': 'запись не найдена', +'record id': 'id записи', +'selected': 'выбрано', +'state': 'состояние', +'table': 'таблица', +'unable to parse csv file': 'нечитаемый csv-файл', +} ADDED applications/mobileblur/languages/sk-sk.py Index: applications/mobileblur/languages/sk-sk.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/sk-sk.py @@ -0,0 +1,111 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"update" je voliteľný výraz ako "field1=\'newvalue\'". Nemôžete upravovať alebo zmazať výsledky JOINu', +'%Y-%m-%d': '%d.%m.%Y', +'%Y-%m-%d %H:%M:%S': '%d.%m.%Y %H:%M:%S', +'%s rows deleted': '%s zmazaných záznamov', +'%s rows updated': '%s upravených záznamov', +'Available databases and tables': 'Dostupné databázy a tabuľky', +'Cannot be empty': 'Nemôže byť prázdne', +'Check to delete': 'Označiť na zmazanie', +'Controller': 'Controller', +'Copyright': 'Copyright', +'Current request': 'Aktuálna požiadavka', +'Current response': 'Aktuálna odpoveď', +'Current session': 'Aktuálne sedenie', +'DB Model': 'DB Model', +'Database': 'Databáza', +'Delete:': 'Zmazať:', +'Description': 'Popis', +'Edit': 'Upraviť', +'Edit Profile': 'Upraviť profil', +'Edit current record': 'Upraviť aktuálny záznam', +'First name': 'Krstné meno', +'Group ID': 'ID skupiny', +'Hello World': 'Ahoj svet', +'Import/Export': 'Import/Export', +'Index': 'Index', +'Internal State': 'Vnútorný stav', +'Invalid email': 'Neplatný email', +'Invalid Query': 'Neplatná otázka', +'Invalid password': 'Nesprávne heslo', +'Last name': 'Priezvisko', +'Layout': 'Layout', +'Logged in': 'Prihlásený', +'Logged out': 'Odhlásený', +'Lost Password': 'Stratené heslo?', +'Menu Model': 'Menu Model', +'Name': 'Meno', +'New Record': 'Nový záznam', +'New 
password': 'Nové heslo', +'No databases in this application': 'V tejto aplikácii nie sú databázy', +'Old password': 'Staré heslo', +'Origin': 'Pôvod', +'Password': 'Heslo', +'Powered by': 'Powered by', +'Query:': 'Otázka:', +'Record ID': 'ID záznamu', +'Register': 'Zaregistrovať sa', +'Registration key': 'Registračný kľúč', +'Remember me (for 30 days)': 'Zapamätaj si ma (na 30 dní)', +'Reset Password key': 'Nastaviť registračný kľúč', +'Role': 'Rola', +'Rows in table': 'riadkov v tabuľke', +'Rows selected': 'označených riadkov', +'Submit': 'Odoslať', +'Stylesheet': 'Stylesheet', +'Sure you want to delete this object?': 'Ste si istí, že chcete zmazať tento objekt?', +'Table name': 'Názov tabuľky', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"query" je podmienka ako "db.table1.field1==\'value\'". Niečo ako "db.table1.field1==db.table2.field2" má za výsledok SQL JOIN.', +'The output of the file is a dictionary that was rendered by the view': 'Výstup zo súboru je slovník, ktorý bol zobrazený vo view', +'This is a copy of the scaffolding application': 'Toto je kópia skeletu aplikácie', +'Timestamp': 'Časová pečiatka', +'Update:': 'Upraviť:', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': 'Použite (...)&(...) pre AND, (...)|(...) pre OR a ~(...) 
pre NOT na poskladanie komplexnejších otázok.', +'User %(id)s Logged-in': 'Používateľ %(id)s prihlásený', +'User %(id)s Logged-out': 'Používateľ %(id)s odhlásený', +'User %(id)s Password changed': 'Používateľ %(id)s zmenil heslo', +'User %(id)s Profile updated': 'Používateľ %(id)s upravil profil', +'User %(id)s Registered': 'Používateľ %(id)s sa zaregistroval', +'User ID': 'ID používateľa', +'Verify Password': 'Zopakujte heslo', +'View': 'Zobraziť', +'Welcome to web2py': 'Vitajte vo web2py', +'Which called the function': 'Ktorý zavolal funkciu', +'You are successfully running web2py': 'Úspešne ste spustili web2py', +'You can modify this application and adapt it to your needs': 'Môžete upraviť túto aplikáciu a prispôsobiť ju svojim potrebám', +'You visited the url': 'Navštívili ste URL', +'appadmin is disabled because insecure channel': 'appadmin je zakázaný bez zabezpečeného spojenia', +'cache': 'cache', +'Online examples': 'pre online príklady kliknite sem', +'Administrative interface': 'pre administrátorské rozhranie kliknite sem', +'customize me!': 'prispôsob ma!', +'data uploaded': 'údaje naplnené', +'database': 'databáza', +'database %s select': 'databáza %s výber', +'db': 'db', +'design': 'návrh', +'Documentation': 'Dokumentácia', +'done!': 'hotovo!', +'export as csv file': 'exportovať do csv súboru', +'insert new': 'vložiť nový záznam ', +'insert new %s': 'vložiť nový záznam %s', +'invalid request': 'Neplatná požiadavka', +'located in the file': 'nachádzajúci sa v súbore ', +'login': 'prihlásiť', +'logout': 'odhlásiť', +'lost password?': 'stratené heslo?', +'new record inserted': 'nový záznam bol vložený', +'next 100 rows': 'ďalších 100 riadkov', +'or import from csv file': 'alebo naimportovať z csv súboru', +'password': 'heslo', +'previous 100 rows': 'predchádzajúcich 100 riadkov', +'record': 'záznam', +'record does not exist': 'záznam neexistuje', +'record id': 'id záznamu', +'register': 'registrovať', +'selected': 'označených', +'state': 'stav', +'table': 
'tabuľka', +'unable to parse csv file': 'nedá sa načítať csv súbor', +} ADDED applications/mobileblur/languages/zh-tw.py Index: applications/mobileblur/languages/zh-tw.py ================================================================== --- /dev/null +++ applications/mobileblur/languages/zh-tw.py @@ -0,0 +1,165 @@ +# coding: utf8 +{ +'"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN': '"更新" 是選擇性的條件式, 格式就像 "欄位1=\'值\'". 但是 JOIN 的資料不可以使用 update 或是 delete"', +'%Y-%m-%d': '%Y-%m-%d', +'%Y-%m-%d %H:%M:%S': '%Y-%m-%d %H:%M:%S', +'%s rows deleted': '已刪除 %s 筆', +'%s rows updated': '已更新 %s 筆', +'(something like "it-it")': '(格式類似 "zh-tw")', +'A new version of web2py is available': '新版的 web2py 已發行', +'A new version of web2py is available: %s': '新版的 web2py 已發行: %s', +'ATTENTION: Login requires a secure (HTTPS) connection or running on localhost.': '注意: 登入管理帳號需要安全連線(HTTPS)或是在本機連線(localhost).', +'ATTENTION: TESTING IS NOT THREAD SAFE SO DO NOT PERFORM MULTIPLE TESTS CONCURRENTLY.': '注意: 因為在測試模式不保證多執行緒安全性,也就是說不可以同時執行多個測試案例', +'ATTENTION: you cannot edit the running application!': '注意:不可編輯正在執行的應用程式!', +'About': '關於', +'About application': '關於本應用程式', +'Admin is disabled because insecure channel': '管理功能(Admin)在不安全連線環境下自動關閉', +'Admin is disabled because unsecure channel': '管理功能(Admin)在不安全連線環境下自動關閉', +'Administrator Password:': '管理員密碼:', +'Are you sure you want to delete file "%s"?': '確定要刪除檔案"%s"?', +'Are you sure you want to uninstall application "%s"': '確定要移除應用程式 "%s"', +'Are you sure you want to uninstall application "%s"?': '確定要移除應用程式 "%s"', +'Authentication': '驗證', +'Available databases and tables': '可提供的資料庫和資料表', +'Cannot be empty': '不可空白', +'Cannot compile: there are errors in your app. 
Debug it, correct errors and try again.': '無法編譯:應用程式中含有錯誤,請除錯後再試一次.', +'Change Password': '變更密碼', +'Check to delete': '打勾代表刪除', +'Check to delete:': '點選以示刪除:', +'Client IP': '客戶端網址(IP)', +'Controller': '控件', +'Controllers': '控件', +'Copyright': '版權所有', +'Create new application': '創建應用程式', +'Current request': '目前網路資料要求(request)', +'Current response': '目前網路資料回應(response)', +'Current session': '目前網路連線資訊(session)', +'DB Model': '資料庫模組', +'DESIGN': '設計', +'Database': '資料庫', +'Date and Time': '日期和時間', +'Delete': '刪除', +'Delete:': '刪除:', +'Deploy on Google App Engine': '配置到 Google App Engine', +'Description': '描述', +'Design for': '設計為了', +'E-mail': '電子郵件', +'EDIT': '編輯', +'Edit': '編輯', +'Edit Profile': '編輯設定檔', +'Edit This App': '編輯本應用程式', +'Edit application': '編輯應用程式', +'Edit current record': '編輯當前紀錄', +'Editing file': '編輯檔案', +'Editing file "%s"': '編輯檔案"%s"', +'Error logs for "%(app)s"': '"%(app)s"的錯誤紀錄', +'First name': '名', +'Functions with no doctests will result in [passed] tests.': '沒有 doctests 的函式會顯示 [passed].', +'Group ID': '群組編號', +'Hello World': '嗨! 
世界', +'Import/Export': '匯入/匯出', +'Index': '索引', +'Installed applications': '已安裝應用程式', +'Internal State': '內部狀態', +'Invalid Query': '不合法的查詢', +'Invalid action': '不合法的動作(action)', +'Invalid email': '不合法的電子郵件', +'Language files (static strings) updated': '語言檔已更新', +'Languages': '各國語言', +'Last name': '姓', +'Last saved on:': '最後儲存時間:', +'Layout': '網頁配置', +'License for': '軟體版權為', +'Login': '登入', +'Login to the Administrative Interface': '登入到管理員介面', +'Logout': '登出', +'Lost Password': '密碼遺忘', +'Main Menu': '主選單', +'Menu Model': '選單模組(menu)', +'Models': '資料模組', +'Modules': '程式模組', +'NO': '否', +'Name': '名字', +'New Record': '新紀錄', +'No databases in this application': '這應用程式不含資料庫', +'Origin': '原文', +'Original/Translation': '原文/翻譯', +'Password': '密碼', +"Password fields don't match": '密碼欄不匹配', +'Peeking at file': '選擇檔案', +'Powered by': '基於以下技術構建:', +'Query:': '查詢:', +'Record ID': '紀錄編號', +'Register': '註冊', +'Registration key': '註冊金鑰', +'Remember me (for 30 days)': '記住我(30 天)', +'Reset Password key': '重設密碼', +'Resolve Conflict file': '解決衝突檔案', +'Role': '角色', +'Rows in table': '在資料表裏的資料', +'Rows selected': '筆資料被選擇', +'Saved file hash:': '檔案雜湊值已紀錄:', +'Static files': '靜態檔案', +'Stylesheet': '網頁風格檔', +'Submit': '傳送', +'Sure you want to delete this object?': '確定要刪除此物件?', +'Table name': '資料表名稱', +'Testing application': '測試中的應用程式', +'The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.': '"查詢"是一個像 "db.表1.欄位1==\'值\'" 的條件式. 
以"db.表1.欄位1==db.表2.欄位2"方式則相當於執行 JOIN SQL.', +'There are no controllers': '沒有控件(controllers)', +'There are no models': '沒有資料庫模組(models)', +'There are no modules': '沒有程式模組(modules)', +'There are no static files': '沒有靜態檔案', +'There are no translators, only default language is supported': '沒有翻譯檔,只支援原始語言', +'There are no views': '沒有視圖', +'This is the %(filename)s template': '這是%(filename)s檔案的樣板(template)', +'Ticket': '問題單', +'Timestamp': '時間標記', +'Unable to check for upgrades': '無法做升級檢查', +'Unable to download': '無法下載', +'Unable to download app': '無法下載應用程式', +'Update:': '更新:', +'Upload existing application': '更新存在的應用程式', +'Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.': '使用下列方式來組合更複雜的條件式, (...)&(...) 代表同時存在的條件, (...)|(...) 代表擇一的條件, ~(...)則代表反向條件.', +'User %(id)s Logged-in': '使用者 %(id)s 已登入', +'User %(id)s Registered': '使用者 %(id)s 已註冊', +'User ID': '使用者編號', +'Verify Password': '驗證密碼', +'View': '視圖', +'Views': '視圖', +'Welcome %s': '歡迎 %s', +'Welcome to web2py': '歡迎使用 web2py', +'YES': '是', +'about': '關於', +'appadmin is disabled because insecure channel': '因為來自非安全通道,管理介面關閉', +'cache': '快取記憶體', +'change password': '變更密碼', +'Online examples': '點此處進入線上範例', +'Administrative interface': '點此處進入管理介面', +'customize me!': '請調整我!', +'data uploaded': '資料已上傳', +'database': '資料庫', +'database %s select': '已選擇 %s 資料庫', +'db': 'db', +'design': '設計', +'done!': '完成!', +'edit profile': '編輯設定檔', +'export as csv file': '以逗號分隔檔(csv)格式匯出', +'insert new': '插入新資料', +'insert new %s': '插入新資料 %s', +'invalid request': '不合法的網路要求(request)', +'login': '登入', +'logout': '登出', +'new record inserted': '已插入新紀錄', +'next 100 rows': '往後 100 筆', +'or import from csv file': '或是從逗號分隔檔(CSV)匯入', +'previous 100 rows': '往前 100 筆', +'record': '紀錄', +'record does not exist': '紀錄不存在', +'record id': '紀錄編號', +'register': '註冊', +'selected': '已選擇', +'state': '狀態', +'table': '資料表', +'unable to parse csv file': '無法解析逗號分隔檔(csv)', +} ADDED applications/mobileblur/models/0_helpers.py 
Index: applications/mobileblur/models/0_helpers.py ================================================================== --- /dev/null +++ applications/mobileblur/models/0_helpers.py @@ -0,0 +1,12 @@ +newsblur = local_import("newsblur") +newsblur = newsblur.NewsBlur() + +threshold = 0 +thresholds = ["nt", "ps", "ng"] # indices -1, 0, 1 for negative, neutral, and positive intelligence filters + +print request.cookies +if [request.application, request.controller, request.function] != [request.application, "default", "login"]: + if "nb_cookie" not in request.cookies.keys(): + redirect(URL("default", "login")) + else: + newsblur.cookies["newsblur_sessionid"] = request.cookies["nb_cookie"].value ADDED applications/mobileblur/models/db.py Index: applications/mobileblur/models/db.py ================================================================== --- /dev/null +++ applications/mobileblur/models/db.py @@ -0,0 +1,89 @@ +# -*- coding: utf-8 -*- +# this file is released under public domain and you can use without limitations +from gluon.custom_import import track_changes; track_changes(True) + +######################################################################### +## This scaffolding model makes your app work on Google App Engine too +######################################################################### + +if request.env.web2py_runtime_gae: # if running on Google App Engine + db = DAL('google:datastore') # connect to Google BigTable + # optional DAL('gae://namespace') + session.connect(request, response, db = db) # and store sessions and tickets there + ### or use the following lines to store sessions in Memcache + # from gluon.contrib.memdb import MEMDB + # from google.appengine.api.memcache import Client + # session.connect(request, response, db = MEMDB(Client())) +else: # else use a normal relational database + db = DAL('sqlite://storage.sqlite') # if not, use SQLite or other DB + +# by default give a view/generic.extension to all actions from localhost +# none 
otherwise. a pattern can be 'controller/function.extension' +response.generic_patterns = ['*'] if request.is_local else [] + +######################################################################### +## Here is sample code if you need for +## - email capabilities +## - authentication (registration, login, logout, ... ) +## - authorization (role based authorization) +## - services (xml, csv, json, xmlrpc, jsonrpc, amf, rss) +## - crud actions +## (more options discussed in gluon/tools.py) +######################################################################### + +from gluon.tools import Mail, Auth, Crud, Service, PluginManager, prettydate +mail = Mail() # mailer +auth = Auth(db) # authentication/authorization + +crud = Crud(db) # for CRUD helpers using auth +service = Service() # for json, xml, jsonrpc, xmlrpc, amfrpc +plugins = PluginManager() # for configuring plugins + +mail.settings.server = 'logging' or 'smtp.gmail.com:587' # your SMTP server +mail.settings.sender = 'you@gmail.com' # your email +mail.settings.login = 'username:password' # your credentials or None + +auth.settings.hmac_key = '' # before define_tables() +auth.define_tables(username=True) # creates all needed tables +auth.settings.mailer = mail # for user email verification +auth.settings.registration_requires_verification = False +auth.settings.registration_requires_approval = False +auth.messages.verify_email = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['verify_email'])+'/%(key)s to verify your email' +auth.settings.reset_password_requires_verification = True +auth.messages.reset_password = 'Click on the link http://'+request.env.http_host+URL('default','user',args=['reset_password'])+'/%(key)s to reset your password' + +######################################################################### +## If you need to use OpenID, Facebook, MySpace, Twitter, Linkedin, etc. 
+## register with janrain.com, uncomment and customize following +# from gluon.contrib.login_methods.rpx_account import RPXAccount +# auth.settings.actions_disabled = \ +# ['register','change_password','request_reset_password'] +# auth.settings.login_form = RPXAccount(request, api_key='...',domain='...', +# url = "http://localhost:8000/%s/default/user/login" % request.application) +## other login methods are in gluon/contrib/login_methods +######################################################################### + +crud.settings.auth = None # =auth to enforce authorization on crud + +######################################################################### +## Define your tables below (or better in another model file) for example +## +## >>> db.define_table('mytable',Field('myfield','string')) +## +## Fields can be 'string','text','password','integer','double','boolean' +## 'date','time','datetime','blob','upload', 'reference TABLENAME' +## There is an implicit 'id integer autoincrement' field +## Consult manual for more options, validators, etc. 
+## +## More API examples for controllers: +## +## >>> db.mytable.insert(myfield='value') +## >>> rows=db(db.mytable.myfield=='value').select(db.mytable.ALL) +## >>> for row in rows: print row.id, row.myfield +######################################################################### + +db.define_table("users", + Field("username"), + Field("password"), + Field("cookie") +) ADDED applications/mobileblur/models/menu.py Index: applications/mobileblur/models/menu.py ================================================================== --- /dev/null +++ applications/mobileblur/models/menu.py @@ -0,0 +1,126 @@ +# -*- coding: utf-8 -*- +# this file is released under public domain and you can use without limitations +######################################################################### +## Customize your APP title, subtitle and menus here +######################################################################### + +response.title = request.application +response.subtitle = T('customize me!') + +#http://dev.w3.org/html5/markup/meta.name.html +response.meta.author = 'you' +response.meta.description = 'Free and open source full-stack enterprise framework for agile development of fast, scalable, secure and portable database-driven web-based applications. Written and programmable in Python' +response.meta.keywords = 'web2py, python, framework' +response.meta.generator = 'Web2py Enterprise Framework' +response.meta.copyright = 'Copyright 2007-2010' + + +########################################## +## this is the main application menu +## add/remove items as required +########################################## + +response.menu = [ + (T('Home'), False, URL('default','index'), []) + ] + +########################################## +## this is here to provide shortcuts +## during development. 
remove in production +## +## mind that plugins may also affect menu +########################################## + +######################################### +## Make your own menus +########################################## + +response.menu+=[ + (T('This App'), False, URL('admin', 'default', 'design/%s' % request.application), + [ + (T('Controller'), False, + URL('admin', 'default', 'edit/%s/controllers/%s.py' \ + % (request.application,request.controller=='appadmin' and + 'default' or request.controller))), + (T('View'), False, + URL('admin', 'default', 'edit/%s/views/%s' \ + % (request.application,response.view))), + (T('Layout'), False, + URL('admin', 'default', 'edit/%s/views/layout.html' \ + % request.application)), + (T('Stylesheet'), False, + URL('admin', 'default', 'edit/%s/static/base.css' \ + % request.application)), + (T('DB Model'), False, + URL('admin', 'default', 'edit/%s/models/db.py' \ + % request.application)), + (T('Menu Model'), False, + URL('admin', 'default', 'edit/%s/models/menu.py' \ + % request.application)), + (T('Database'), False, + URL(request.application, 'appadmin', 'index')), + + (T('Errors'), False, + URL('admin', 'default', 'errors/%s' \ + % request.application)), + + (T('About'), False, + URL('admin', 'default', 'about/%s' \ + % request.application)), + + ] + )] + + +########################################## +## this is here to provide shortcuts to some resources +## during development. 
remove in production +## +## mind that plugins may also affect menu +########################################## + + +response.menu+=[(T('Resources'), False, None, + [ + (T('Documentation'), False, 'http://www.web2py.com/book', + [ + (T('Preface'), False, 'http://www.web2py.com/book/default/chapter/00'), + (T('Introduction'), False, 'http://www.web2py.com/book/default/chapter/01'), + (T('Python'), False, 'http://www.web2py.com/book/default/chapter/02'), + (T('Overview'), False, 'http://www.web2py.com/book/default/chapter/03'), + (T('The Core'), False, 'http://www.web2py.com/book/default/chapter/04'), + (T('The Views'), False, 'http://www.web2py.com/book/default/chapter/05'), + (T('Database'), False, 'http://www.web2py.com/book/default/chapter/06'), + (T('Forms and Validators'), False, 'http://www.web2py.com/book/default/chapter/07'), + (T('Access Control'), False, 'http://www.web2py.com/book/default/chapter/08'), + (T('Services'), False, 'http://www.web2py.com/book/default/chapter/09'), + (T('Ajax Recipes'), False, 'http://www.web2py.com/book/default/chapter/10'), + (T('Deployment Recipes'), False, 'http://www.web2py.com/book/default/chapter/11'), + (T('Other Recipes'), False, 'http://www.web2py.com/book/default/chapter/12'), + (T('Buy this book'), False, 'http://stores.lulu.com/web2py'), + ]), + + (T('Community'), False, None, + [ + (T('Groups'), False, 'http://www.web2py.com/examples/default/usergroups'), + (T('Twitter'), False, 'http://twitter.com/web2py'), + (T('Live chat'), False, 'http://mibbit.com/?channel=%23web2py&server=irc.mibbit.net'), + (T('User Voice'), False, 'http://web2py.uservoice.com/'), + ]), + + (T('Web2py'), False, 'http://www.web2py.com', + [ + (T('Download'), False, 'http://www.web2py.com/examples/default/download'), + (T('Support'), False, 'http://www.web2py.com/examples/default/support'), + (T('Quick Examples'), False, 'http://web2py.com/examples/default/examples'), + (T('FAQ'), False, 'http://web2py.com/AlterEgo'), + (T('Free 
Applications'), False, 'http://web2py.com/appliances'), + (T('Plugins'), False, 'http://web2py.com/plugins'), + (T('Recipes'), False, 'http://web2pyslices.com/'), + (T('Demo'), False, 'http://web2py.com/demo_admin'), + (T('Semantic'), False, 'http://web2py.com/semantic'), + (T('Layouts'), False, 'http://web2py.com/layouts'), + (T('Videos'), False, 'http://www.web2py.com/examples/default/videos/'), + ]), + ] + )] ADDED applications/mobileblur/modules/__init__.py Index: applications/mobileblur/modules/__init__.py ================================================================== --- /dev/null +++ applications/mobileblur/modules/__init__.py ADDED applications/mobileblur/modules/newsblur.py Index: applications/mobileblur/modules/newsblur.py ================================================================== --- /dev/null +++ applications/mobileblur/modules/newsblur.py @@ -0,0 +1,340 @@ +#!/usr/bin/python + +"""newsblur.py - An API wrapper library for newsblur.com""" + +import simplejson +import requests + +__author__ = 'Dananjaya Ramanayake , spiffytech ' +__version__ = "0.1" + +nb_url = "http://www.newsblur.com/" + +class NewsBlur(): + def __init__(self): + self.cookies = {} + + def login(self, username,password): + ''' + Login as an existing user. + If a user has no password set, you cannot just send any old password. + Required parameters, username and password, must be of string type. + ''' + + url = nb_url + 'api/login' + results = requests.post(url, data={"username": username, "password": password}) + self.cookies = results.cookies + results = simplejson.loads(results.content) + if results["authenticated"] is False: + raise Exception("The newsblur credentials you provided are invalid") + return results + + def logout(self, ): + ''' + Logout the currently logged in user. 
+ ''' + + url = nb_url + 'api/logout' + results = requests.get(url, cookies=self.cookies) + return simplejson.loads(results.content) + + def signup(self, username,password,email): + ''' + Create a new user. + All three required parameters must be of type string. + ''' + + url = nb_url + 'api/signup' + payload = {'signup_username':username,'signup_password':password,'signup_email':email} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def search_feed(self, address,offset=1): + ''' + + Retrieve information about a feed from its website or RSS address. + Parameter address must be of type string while parameter offset must be an integer. + Will return a feed. + + ''' + + url = nb_url + 'rss_feeds/search_feed' + payload = {'address':address,'offset':offset} + results = results.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def feeds(self, include_favicons=True,flat=False): + ''' + Retrieve a list of feeds to which a user is actively subscribed. + Includes the 3 unread counts (positive, neutral, negative), as well as optional favicons. + ''' + + url = nb_url + 'reader/feeds' + payload = {'include_favicons':include_favicons,'flat':flat} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def favicons(self, feeds=[1,2,3]): + ''' + Retrieve a list of favicons for a list of feeds. + Used when combined with /reader/feeds and include_favicons=false, so the feeds request contains far less data. + Useful for mobile devices, but requires a second request. + ''' + + url = nb_url + 'reader/favicons' + payload = {'feeds':feeds} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def id(self, id_no): + ''' + Retrieve the original page from a single feed. 
+ ''' + + url = nb_url + 'reader/page/' % id_no + payload = {} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def refresh_feeds(self, ): + ''' + Up-to-the-second unread counts for each active feed. + Poll for these counts no more than once a minute. + ''' + + url = nb_url + 'reader/refresh_feeds' + results = requests.get(url, cookies=self.cookies) + return simplejson.loads(results.content) + + def feeds_trainer(self, feed_id): + ''' + Retrieves all popular and known intelligence classifiers. + Also includes user's own classifiers. + ''' + + url = nb_url + 'reader/feeds_trainer' + payload = {'feed_id':feed_id} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def statistics(self, id_no): + ''' + If you only want a user's classifiers, use /classifiers/:id. + Omit the feed_id to get all classifiers for all subscriptions. + ''' + + url = nb_url + 'rss_feeds/statistics/%d' % id_no + results = requests.get(url, cookies=self.cookies) + return simplejson.loads(results.content) + + def feed_autocomplete(self, term): + ''' + Get a list of feeds that contain a search phrase. + Searches by feed address, feed url, and feed title, in that order. + Will only show sites with 2+ subscribers. + ''' + + url = nb_url + 'rss_feeds/feed_autocomplete?%' + payload = {'term':term} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def feed(self, id): + ''' + Retrieve stories from a single feed. + ''' + + url = nb_url + 'reader/feed/%s' % id + results = requests.get(url, cookies=self.cookies) + return simplejson.loads(results.content) + + def starred_stories(self, page=1): + ''' + Retrieve a user's starred stories. 
+ ''' + + url = nb_url + 'reader/starred_stories' + payload = {'page':page} + results = requests.get(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def river_stories(self, feeds,page=1,read_stories_count=0): + ''' + Retrieve stories from a collection of feeds. This is known as the River of News. + Stories are ordered in reverse chronological order. + ''' + + url = nb_url + 'reader/river_stories' + payload = {'feeds':feeds,'page':page,'read_stories_count':read_stories_count} + results = urllib2.urlopen(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def mark_story_as_read(self, story_id,feed_id): + ''' + Mark stories as read. + Multiple story ids can be sent at once. + Each story must be from the same feed. + ''' + + url = nb_url + 'reader/mark_story_as_read' + payload = {'story_id':story_id,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def mark_story_as_starred(self, story_id,feed_id): + ''' + Mark a story as starred (saved). + ''' + + url = nb_url + 'reader/mark_story_as_starred' + payload = {'story_id':story_id,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def mark_all_as_read(self, days=0): + ''' + Mark all stories in *all* feeds read. + ''' + + url = nb_url + 'reader/mark_all_as_read' + payload = {'days':days} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def add_url(self, url,folder='[Top Level]'): + ''' + Add a feed by its URL. + Can be either the RSS feed or the website itself. 
+ ''' + + url = nb_url + 'reader/add_url' + payload = {'url':url,'folder':folder} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def add_folder(self, folder,parent_folder='[Top Level]'): + ''' + Add a new folder. + ''' + + url = nb_url + 'reader/add_folder' + payload = {'folder':folder,'parent_folder':parent_folder} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def rename_feed(self, feed_title,feed_id): + ''' + Rename a feed title. Only the current user will see the new title. + ''' + + url = nb_url + 'reader/rename_feed' + payload = {'feed_title':feed_title,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def delete_feed(self, feed_id,in_folder): + ''' + Unsubscribe from a feed. Removes it from the folder. + Set the in_folder parameter to remove a feed from the correct folder, in case the user is subscribed to the feed in multiple folders. + ''' + + url = nb_url + 'reader/delete_feed' + payload = {'feed_id':feed_id,'in_folder':in_folder} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def rename_folder(self, folder_to_rename,new_folder_name,in_folder): + ''' + Rename a folder. + ''' + + url = nb_url + 'reader/rename_folder' + payload = {'folder_to_rename':folder_to_rename,'new_folder_name':new_folder_name,'in_folder':in_folder} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + def delete_folder(self, folder_to_delete,in_folder,feed_id): + ''' + Delete a folder and unsubscribe from all feeds inside. 
+ ''' + + url = nb_url + 'reader/delete_folder' + payload = {'folder_to_delete':folder_to_delete,'in_folder':in_folder,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def mark_feed_as_read(self, feed_id): + ''' + Mark a list of feeds as read. + ''' + + url = nb_url + 'reader/mark_feed_as_read' + payload = {'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def save_feed_order(self, folders): + ''' + Reorder feeds and move them around between folders. + The entire folder structure needs to be serialized. + ''' + + url = nb_url + 'reader/save_feed_order' + payload = {'folders':folders} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def classifier(self, id_no): + ''' + Get the intelligence classifiers for a user's site. + Only includes the user's own classifiers. + Use /reader/feeds_trainer for popular classifiers. + ''' + + url = nb_url + 'classifier/%d' % id_no + results = requests.get(url) + return simplejson.loads(results.content) + + + def classifier_save(self, like_type,dislike_type,remove_like_type,remove_dislike_type): + ''' + Save intelligence classifiers (tags, titles, authors, and the feed) for a feed. + ''' + + url = nb_url + 'classifier/save' + payload = {'like_[TYPE]':like_type, + 'dislike_[TYPE]':dislike_type, + 'remove_like_[TYPE]':remove_like_type, + 'remove_dislike_[TYPE]':remove_dislike_type} + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) + + + def opml_export(self, ): + ''' + Download a backup of feeds and folders as an OPML file. + Contains folders and feeds in XML; useful for importing in another RSS reader. 
+ ''' + + url = nb_url + 'import/opml_export' + results = requests.get(url) + return simplejson.loads(results.content) + + + + def opml_upload(self, opml_file): + ''' + Upload an OPML file. + ''' + + url = nb_url + 'import/opml_upload' + f = open(opml_file) + payload = {'file':f} + f.close() + results = requests.post(url, data=payload, cookies=self.cookies) + return simplejson.loads(results.content) ADDED applications/mobileblur/modules/python-newsblur/LICENSE Index: applications/mobileblur/modules/python-newsblur/LICENSE ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/LICENSE @@ -0,0 +1,10 @@ +The MIT License + +Copyright (c) 2010-2011 Dananjaya Ramanayake dananjaya86@gmail.com. + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ ADDED applications/mobileblur/modules/python-newsblur/MANIFEST Index: applications/mobileblur/modules/python-newsblur/MANIFEST ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/MANIFEST @@ -0,0 +1,5 @@ +LICENSE +README.rst +documentation.rst +newsblur.py +setup.py ADDED applications/mobileblur/modules/python-newsblur/README.rst Index: applications/mobileblur/modules/python-newsblur/README.rst ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/README.rst @@ -0,0 +1,23 @@ +=============== +python-newsblur +=============== + +A Python wrapper library for accessing API of newsblur.com. + +Copyright Dananjaya Ramanayake , released under the MIT license. + +Source: https://github.com/dananjayavr/python-newsblur + +For more information please check the newsblur.com API documentation at, http://www.newsblur.com/api + + +Installation +============ +To install run + + ``python setup.py install`` + +Usage: +====== + +Please read the documentation.rst file. 
ADDED applications/mobileblur/modules/python-newsblur/documentation.rst Index: applications/mobileblur/modules/python-newsblur/documentation.rst ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/documentation.rst @@ -0,0 +1,78 @@ +Login: + + ``import newsblur`` + + ``newsblur.login('samuelclay','new$blur')`` + +Output: + + ``{"code": 1, "result": "ok"}`` + + + +Logout: + + ``newsblur.logout()`` + +Output: + + ``{"code": 1, "result": "ok"}`` + + + +Signup: + + ``newsblur.signup('samuelclay','new$blur','samuel@ofbrooklyn.com')`` + +Output (Successful): + + ``{"code": 1, "result": "ok"}`` + +Output (Unsuccessful): + + ``{"code": -1, "result": "ok"}`` + + + +Search Feed: + + ``newsblur.search_feed('techcrunch.com')`` + +Output: + + ``{"feed_address": "http://www.techcrunch.com/author/mg/", "updated": "5077 hours", "subs": 1, "feed_link": "[]", "favicon_fetching": true, "feed_title": "TechCrunch \xc2\xbb MG Siegler", "exception_type": "feed", "exception_code": 404, "result": "ok", "has_exception": true, "id": 237525, "favicon_color": null}`` + + + +Feeds: + + ``newsblur.feeds()`` + +Output: + + ``{"flat_folders": {"Blogs": [{"ps": 0, "feed_link": "http://kottke.org/", "feed_title": "kottke.org", "ng": 0, "nt": 0, "id": 39}, {"ps": 0, "feed_link": "http://blog.newsblur.com/", "feed_title": "The NewsBlur Blog", "ng": 0, "nt": 0, "id": 558041}, {"ps": 0, "feed_link": "http://www.waxy.org/links/", "feed_title": "Waxy.org Links", "ng": 0, "nt": 0, "id": 3581}, {"ps": 0, "feed_link": "http://xkcd.com/", "feed_title": "xkcd.com", "ng": 0, "nt": 1, "id": 169}], "Cooking": [{"ps": 0, "feed_link": "http://americandrink.net/", "feed_title": "American Drink", "ng": 0, "nt": 0, "id": 64313}, {"ps": 0, "feed_link": "http://saltandfat.com/", "feed_title": "Salt & Fat", "ng": 0, "nt": 0, "id": 48}, {"ps": 1, "feed_link": "http://savorysweetlife.com", "feed_title": "Savory Sweet Life", "ng": 0, "nt": 0, 
"id": 45}, {"ps": 0, "feed_link": "http://smittenkitchen.com", "feed_title": "smitten kitchen", "ng": 0, "nt": 0, "id": 47}], "Blogs - Photoblogs": [{"ps": 0, "feed_link": "http://iconicphotos.wordpress.com", "feed_title": "Iconic Photos", "ng": 0, "nt": 0, "id": 50}, {"ps": 0, "feed_link": "http://blog.pictorymag.com/", "feed_title": "Pictory Blog", "ng": 0, "nt": 0, "id": 551953}, {"ps": 0, "feed_link": "http://theimpossiblecool.tumblr.com/", "feed_title": "the impossible cool.", "ng": 0, "nt": 1, "id": 34}], "New York": [{"ps": 2, "feed_link": "http://gothamist.com/", "feed_title": "Gothamist", "ng": 4, "nt": 17, "id": 23}, {"ps": 0, "feed_link": "http://www.scoutingny.com", "feed_title": "Scouting NY", "ng": 0, "nt": 0, "id": 27}], "Tech": [{"ps": 0, "feed_link": "http://www.codinghorror.com/blog/", "feed_title": "Coding Horror", "ng": 0, "nt": 0, "id": 2}, {"ps": 2, "feed_link": "http://news.ycombinator.com/", "feed_title": "Hacker News", "ng": 0, "nt": 133, "id": 6}, {"ps": 0, "feed_link": "http://www.macrumors.com", "feed_title": "MacRumors: Mac News and Rumors - Front Page", "ng": 0, "nt": 1, "id": 11}, {"ps": 2, "feed_link": "http://techcrunch.com", "feed_title": "TechCrunch", "ng": 0, "nt": 7, "id": 12}], "Absolute Reads": [{"ps": 0, "feed_link": "http://daringfireball.net/", "feed_title": "Daring Fireball", "ng": 0, "nt": 0, "id": 3}, {"ps": 1, "feed_link": "http://www.avc.com/a_vc/", "feed_title": "Fred Wilson: A VC", "ng": 0, "nt": 0, "id": 159}, {"ps": 0, "feed_link": "http://blog.louisgray.com/", "feed_title": "louisgray.com", "ng": 0, "nt": 0, "id": 172}, {"ps": 0, "feed_link": "http://www.marco.org/", "feed_title": "Marco.org", "ng": 0, "nt": 0, "id": 76}, {"ps": 0, "feed_link": "http://www.randsinrepose.com/", "feed_title": "Rands In Repose", "ng": 0, "nt": 0, "id": 38}]}, "user": "conesus", "result": "ok"}`` + + + +Favicons: + + ``newsblur.favicons()`` + +Output: + + Will return a list of favicons + + + +Retrieve a Page from the Feed: + + 
``newsblur.id(32)`` + +Output: + + Will return the HTML data of the story. + + + + + ADDED applications/mobileblur/modules/python-newsblur/newsblur.py Index: applications/mobileblur/modules/python-newsblur/newsblur.py ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/newsblur.py @@ -0,0 +1,336 @@ +#!/usr/bin/python + +"""newsblur.py - An API wrapper library for newsblur.com""" + +import simplejson + +import requests + +__author__ = 'Dananjaya Ramanayake , spiffytech ' +__version__ = "0.1" + +nb_url = "http://www.newsblur.com/" +cookies = None + +def login(username,password): + ''' + Login as an existing user. + If a user has no password set, you cannot just send any old password. + Required parameters, username and password, must be of string type. + ''' + + url = nb_url + 'api/login' + results = requests.post(url, data={"username": username, "password": password}) + global cookies + cookies = results.cookies + return simplejson.loads(results.content) + +def logout(): + ''' + Logout the currently logged in user. + ''' + + url = nb_url + 'api/logout' + results = requests.get(url, cookies=cookies) + return simplejson.loads(results.content) + +def signup(username,password,email): + ''' + Create a new user. + All three required parameters must be of type string. + ''' + + url = nb_url + 'api/signup' + payload = {'signup_username':username,'signup_password':password,'signup_email':email} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def search_feed(address,offset=1): + ''' + + Retrieve information about a feed from its website or RSS address. + Parameter address must be of type string while parameter offset must be an integer. + Will return a feed. 
+ + ''' + + url = nb_url + 'rss_feeds/search_feed' + payload = {'address':address,'offset':offset} + results = results.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def feeds(include_favicons=True,flat=False): + ''' + Retrieve a list of feeds to which a user is actively subscribed. + Includes the 3 unread counts (positive, neutral, negative), as well as optional favicons. + ''' + + url = nb_url + 'reader/feeds' + payload = {'include_favicons':include_favicons,'flat':flat} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def favicons(feeds=[1,2,3]): + ''' + Retrieve a list of favicons for a list of feeds. + Used when combined with /reader/feeds and include_favicons=false, so the feeds request contains far less data. + Useful for mobile devices, but requires a second request. + ''' + + url = nb_url + 'reader/favicons' + payload = {'feeds':feeds} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def id(id_no): + ''' + Retrieve the original page from a single feed. + ''' + + url = nb_url + 'reader/page/' % id_no + payload = {} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def refresh_feeds(): + ''' + Up-to-the-second unread counts for each active feed. + Poll for these counts no more than once a minute. + ''' + + url = nb_url + 'reader/refresh_feeds' + results = requests.get(url, cookies=cookies) + return simplejson.loads(results.content) + +def feeds_trainer(feed_id): + ''' + Retrieves all popular and known intelligence classifiers. + Also includes user's own classifiers. + ''' + + url = nb_url + 'reader/feeds_trainer' + payload = {'feed_id':feed_id} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def statistics(id_no): + ''' + If you only want a user's classifiers, use /classifiers/:id. 
+ Omit the feed_id to get all classifiers for all subscriptions. + ''' + + url = nb_url + 'rss_feeds/statistics/%d' % id_no + results = requests.get(url, cookies=cookies) + return simplejson.loads(results.content) + +def feed_autocomplete(term): + ''' + Get a list of feeds that contain a search phrase. + Searches by feed address, feed url, and feed title, in that order. + Will only show sites with 2+ subscribers. + ''' + + url = nb_url + 'rss_feeds/feed_autocomplete?%' + payload = {'term':term} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def feed(id): + ''' + Retrieve stories from a single feed. + ''' + + url = nb_url + 'reader/feed/%s' % id + results = requests.get(url, cookies=cookies) + return simplejson.loads(results.content) + +def starred_stories(page=1): + ''' + Retrieve a user's starred stories. + ''' + + url = nb_url + 'reader/starred_stories' + payload = {'page':page} + results = requests.get(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def river_stories(feeds,page=1,read_stories_count=0): + ''' + Retrieve stories from a collection of feeds. This is known as the River of News. + Stories are ordered in reverse chronological order. + ''' + + url = nb_url + 'reader/river_stories' + payload = {'feeds':feeds,'page':page,'read_stories_count':read_stories_count} + results = urllib2.urlopen(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def mark_story_as_read(story_id,feed_id): + ''' + Mark stories as read. + Multiple story ids can be sent at once. + Each story must be from the same feed. + ''' + + url = nb_url + 'reader/mark_story_as_read' + payload = {'story_id':story_id,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def mark_story_as_starred(story_id,feed_id): + ''' + Mark a story as starred (saved). 
+ ''' + + url = nb_url + 'reader/mark_story_as_starred' + payload = {'story_id':story_id,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def mark_all_as_read(days=0): + ''' + Mark all stories in *all* feeds read. + ''' + + url = nb_url + 'reader/mark_all_as_read' + payload = {'days':days} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def add_url(url,folder='[Top Level]'): + ''' + Add a feed by its URL. + Can be either the RSS feed or the website itself. + ''' + + url = nb_url + 'reader/add_url' + payload = {'url':url,'folder':folder} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def add_folder(folder,parent_folder='[Top Level]'): + ''' + Add a new folder. + ''' + + url = nb_url + 'reader/add_folder' + payload = {'folder':folder,'parent_folder':parent_folder} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def rename_feed(feed_title,feed_id): + ''' + Rename a feed title. Only the current user will see the new title. + ''' + + url = nb_url + 'reader/rename_feed' + payload = {'feed_title':feed_title,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def delete_feed(feed_id,in_folder): + ''' + Unsubscribe from a feed. Removes it from the folder. + Set the in_folder parameter to remove a feed from the correct folder, in case the user is subscribed to the feed in multiple folders. + ''' + + url = nb_url + 'reader/delete_feed' + payload = {'feed_id':feed_id,'in_folder':in_folder} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def rename_folder(folder_to_rename,new_folder_name,in_folder): + ''' + Rename a folder. 
+ ''' + + url = nb_url + 'reader/rename_folder' + payload = {'folder_to_rename':folder_to_rename,'new_folder_name':new_folder_name,'in_folder':in_folder} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + +def delete_folder(folder_to_delete,in_folder,feed_id): + ''' + Delete a folder and unsubscribe from all feeds inside. + ''' + + url = nb_url + 'reader/delete_folder' + payload = {'folder_to_delete':folder_to_delete,'in_folder':in_folder,'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def mark_feed_as_read(feed_id): + ''' + Mark a list of feeds as read. + ''' + + url = nb_url + 'reader/mark_feed_as_read' + payload = {'feed_id':feed_id} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def save_feed_order(folders): + ''' + Reorder feeds and move them around between folders. + The entire folder structure needs to be serialized. + ''' + + url = nb_url + 'reader/save_feed_order' + payload = {'folders':folders} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def classifier(id_no): + ''' + Get the intelligence classifiers for a user's site. + Only includes the user's own classifiers. + Use /reader/feeds_trainer for popular classifiers. + ''' + + url = nb_url + 'classifier/%d' % id_no + results = requests.get(url) + return simplejson.loads(results.content) + + +def classifier_save(like_type,dislike_type,remove_like_type,remove_dislike_type): + ''' + Save intelligence classifiers (tags, titles, authors, and the feed) for a feed. 
+ ''' + + url = nb_url + 'classifier/save' + payload = {'like_[TYPE]':like_type, + 'dislike_[TYPE]':dislike_type, + 'remove_like_[TYPE]':remove_like_type, + 'remove_dislike_[TYPE]':remove_dislike_type} + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) + + +def opml_export(): + ''' + Download a backup of feeds and folders as an OPML file. + Contains folders and feeds in XML; useful for importing in another RSS reader. + ''' + + url = nb_url + 'import/opml_export' + results = requests.get(url) + return simplejson.loads(results.content) + + + +def opml_upload(opml_file): + ''' + Upload an OPML file. + ''' + + url = nb_url + 'import/opml_upload' + f = open(opml_file) + payload = {'file':f} + f.close() + results = requests.post(url, data=payload, cookies=cookies) + return simplejson.loads(results.content) ADDED applications/mobileblur/modules/python-newsblur/setup.py Index: applications/mobileblur/modules/python-newsblur/setup.py ================================================================== --- /dev/null +++ applications/mobileblur/modules/python-newsblur/setup.py @@ -0,0 +1,23 @@ +from setuptools import setup +from newsblur import __version__ + +long_description = open('README.rst').read() + +setup(name='newsblur', + version=__version__, + py_modules=['newsblur'], + description='API Wrapper library for newsblur.com', + author='Dananjaya Ramanayake', + author_email='dananjaya86@gmail.com', + license='MIT', + url='', + long_description=long_description, + platforms=['any'], + classifiers=['Development Status :: 1 - Beta', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', + 'Programming Language :: Python', + 'Topic :: Internet :: WWW/HTTP :: News/RSS', + ], + ) ADDED applications/mobileblur/static/css/base.css Index: applications/mobileblur/static/css/base.css ================================================================== --- /dev/null 
+++ applications/mobileblur/static/css/base.css @@ -0,0 +1,563 @@ +@charset "UTF-8"; + +/* This file is contains the following sections: + +Update: Revision: 20101102 by Martin Mulone + +- The new revision contains: +- Html5, good practice and normalization support. +- Diferent hacks. +- The normalization and some tags come from + diferent sites so i keep the credits and comments. + but the base of support html5 come from: + http://html5boilerplate.com/ + +- ez.css (http://www.ez-css.org/layouts) +- reset common tags +- choose default fonts +- choose link style +- add bottom line to table rows +- labels bold and occasionally centered +- make all input fields the same size +- add proper separation between h1-h6 and text +- always indent the first line and add space below paragraphs +- bullets and numbers style and indent +- form and table padding +- code blocks +- left and right padding to quoted text +- page layout alignment, width and padding (change this for spaces) +- column widths (change this to use left_sidebar and right_sidebar) +- backrgound images and colors (change this for colors) +- web2py specific (.flash, .error) + +Notice: +- even if you use a different layout/css you may need classes .flash and .error +- this is all color neutral except for #349C01 (header, links, lines) +- there are two backrgound images: images/background.png and images/header.png + +License: This file is released under BSD and MIT + +*/ + +/* + credit is left where credit is due. 
+ additionally, much inspiration was taken from these projects: + yui.yahooapis.com/2.8.1/build/base/base.css + camendesign.com/design/ + praegnanz.de/weblog/htmlcssjs-kickstart +*/ + +/* + html5doctor.com Reset Stylesheet (Eric Meyer's Reset Reloaded + HTML5 baseline) + v1.4 2009-07-27 | Authors: Eric Meyer & Richard Clark + html5doctor.com/html-5-reset-stylesheet/ +*/ + +html, body, div, span, object, iframe, +h1, h2, h3, h4, h5, h6, p, blockquote, pre, +abbr, address, cite, code, +del, dfn, em, img, ins, kbd, q, samp, +small, strong, sub, sup, var, +b, i, +dl, dt, dd, ol, ul, li, +fieldset, form, label, legend, +table, caption, tbody, tfoot, thead, tr, th, td, +article, aside, canvas, details, figcaption, figure, +footer, header, hgroup, menu, nav, section, summary, +time, mark, audio, video { + margin:0; + padding:0; + border:0; + outline:0; + font-size:100%; + vertical-align:baseline; + background:transparent; +} + +article, aside, details, figcaption, figure, +footer, header, hgroup, menu, nav, section { + display:block; +} + +nav ul { list-style:none; } + +blockquote, q { quotes:none; } + +blockquote:before, blockquote:after, +q:before, q:after { content:''; content:none; } + +a { margin:0; padding:0; font-size:100%; vertical-align:baseline; background:transparent; } +a:hover { text-decoration: underline } + +ins { background-color:#ff9; color:#000; text-decoration:none; } + +mark { background-color:#ff9; color:#000; font-style:italic; font-weight:bold; } + +del { text-decoration: line-through; } + +abbr[title], dfn[title] { border-bottom:1px dotted #000; cursor:help; } + +/* tables still need cellspacing="0" in the markup */ +table { border-collapse:collapse; border-spacing:0; } + +hr { display:block; height:1px; border:0; border-top:1px solid #ccc; margin:1em 0; padding:0; } + +input, select { vertical-align:middle; } +/* END RESET CSS */ + + +/* +fonts.css from the YUI Library: developer.yahoo.com/yui/ + Please refer to developer.yahoo.com/yui/fonts/ for 
font sizing percentages + +There are three custom edits: + * remove arial, helvetica from explicit font stack + * make the line-height relative and unit-less + * remove the pre, code styles +*/ +body { font:13px/1.231 sans-serif; *font-size:small; } /* hack retained to preserve specificity */ + +/*table { font-size:inherit; font:100%; }*/ + +select, input, textarea, button { font:99% sans-serif; } + + +/* normalize monospace sizing + * en.wikipedia.org/wiki/MediaWiki_talk:Common.css/Archive_11#Teletype_style_fix_for_Chrome + */ +pre, code, kbd, samp { font-family: monospace, sans-serif; } + +/* + * minimal base styles + */ + +/* #444 looks better than black: twitter.com/H_FJ/statuses/11800719859 */ +body, select, input, textarea { color:#444; } + +/* Headers (h1,h2,etc) have no default font-size or margin, + you'll want to define those yourself. */ + +/* www.aestheticallyloyal.com/public/optimize-legibility/ */ +h1,h2,h3,h4,h5,h6 { font-weight: bold; } + +/* always force a scrollbar in non-IE */ +html { overflow-y: scroll; } + +/* Accessible focus treatment: people.opera.com/patrickl/experiments/keyboard/test */ +a:hover, a:active { outline: none; } +/* +a, a:active, a:visited { color:#607890; } +a:hover { color:#036; } +*/ + +ul, ol { margin-left: 1.8em; } +ol { list-style-type: decimal; } + +/* Remove margins for navigation lists */ +nav ul, nav li { margin: 0; } + +small { font-size:85%; } +strong, th { font-weight: bold; } + +td, td img { vertical-align:top; } + +sub { vertical-align: sub; font-size: smaller; } +sup { vertical-align: super; font-size: smaller; } + +pre { + padding: 15px; + + /* www.pathf.com/blogs/2008/05/formatting-quoted-code-in-blog-posts-css21-white-space-pre-wrap/ */ + white-space: pre; /* CSS2 */ + white-space: pre-wrap; /* CSS 2.1 */ + white-space: pre-line; /* CSS 3 (and 2.1 as well, actually) */ + word-wrap: break-word; /* IE */ +} + +textarea { overflow: auto; } /* thnx ivannikolic! 
www.sitepoint.com/blogs/2010/08/20/ie-remove-textarea-scrollbars/ */ + +.ie6 legend, .ie7 legend { margin-left: -7px; } /* thnx ivannikolic! */ + +/* align checkboxes, radios, text inputs with their label + by: Thierry Koblentz tjkdesign.com/ez-css/css/base.css */ +input[type="radio"] { vertical-align: text-bottom; } +input[type="checkbox"] { vertical-align: bottom; } +.ie7 input[type="checkbox"] { vertical-align: baseline; } +.ie6 input { vertical-align: text-bottom; } + +/* hand cursor on clickable input elements */ +label, input[type=button], input[type=submit], button { cursor: pointer; } + +/* webkit browsers add a 2px margin outside the chrome of form elements */ +button, input, select, textarea { margin: 0; } + +/* colors for form validity */ +input:valid, textarea:valid { } +input:invalid, textarea:invalid { + border-radius: 1px; + -moz-box-shadow: 0px 0px 5px red; + -webkit-box-shadow: 0px 0px 5px red; + box-shadow: 0px 0px 5px red; +} +.no-boxshadow input:invalid, +.no-boxshadow textarea:invalid { background-color: #f0dddd; } + + +/* These selection declarations have to be separate. + No text-shadow: twitter.com/miketaylr/status/12228805301 + Also: hot pink. 
*/ +::-moz-selection{ background: #555; color:#fff; text-shadow: none; } +::selection { background:#555; color:#fff; text-shadow: none; } + +/* j.mp/webkit-tap-highlight-color */ +a:link { -webkit-tap-highlight-color: #555; } + + +/* make buttons play nice in IE: + www.viget.com/inspire/styling-the-button-element-in-internet-explorer/ */ +button { width: auto; overflow: visible; } + +/* bicubic resizing for non-native sized IMG: + code.flickr.com/blog/2008/11/12/on-ui-quality-the-little-things-client-side-image-resizing/ */ +.ie7 img { -ms-interpolation-mode: bicubic; } + +/* + * Non-semantic helper classes + */ + +/* for image replacement */ +.ir { display:block; text-indent:-999em; overflow:hidden; background-repeat: no-repeat; } + +/* Hide for both screenreaders and browsers + css-discuss.incutio.com/wiki/Screenreader_Visibility */ +.hidden { display:none; } + +/* Hide only visually, but have it available for screenreaders + www.webaim.org/techniques/css/invisiblecontent/ + Solution from: j.mp/visuallyhidden - Thanks Jonathan Neal! 
*/ +.visuallyhidden { position:absolute !important; + clip: rect(1px 1px 1px 1px); /* IE6, IE7 */ + clip: rect(1px, 1px, 1px, 1px); } + +/* Hide visually and from screenreaders, but maintain layout */ +.invisible { visibility: hidden; } + +/* >> The Magnificent CLEARFIX: Updated to prevent margin-collapsing on child elements << j.mp/bestclearfix */ +.clearfix:before, .clearfix:after { + content: "\0020"; display: block; height: 0; visibility: hidden; +} + +.clearfix:after { clear: both; } +/* Fix clearfix: blueprintcss.lighthouseapp.com/projects/15318/tickets/5-extra-margin-padding-bottom-of-page */ +.clearfix { zoom: 1; } + + +/*********** layout info (ez.css) ***********/ +/* 2009 -2010 (c) | ez-css.org + * ez-plug-min.css :: version 1.1 :: 01182010 + */ +.ez-wr:after,.ez-box:after{content:".";display:block;height:0;clear:both;visibility:hidden}.ez-wr,.ez-box,.ez-last{display:inline-block;min-height:0}/* \*/ * html .ez-wr,* html .ez-box,* html .ez-last{height:1%}.ez-wr,.ez-box,.ez-last{display:block}/* */.ez-oh{overflow:hidden}* html .ez-oh{overflow:visible}.ez-oa{overflow:auto}.ez-dt{display:table}.ez-it{display:inline-table}.ez-tc{display:table-cell}.ez-ib{display:inline-block}.ez-fl{float:left}* html .ez-fl{margin-right:-3px}.ez-fr{float:right}* html .ez-fr{margin-left:-3px}.ez-25{width:25%}.ez-33{width:33.33%}.ez-50{width:50%}.ez-66{width:66.66%}.ez-75{width:75%}.ez-negmr{margin-right:-1px}* html .ez-negmr{margin-right:-4px}.ez-negmx{margin-right:-1px}.ez-negml{margin-left:-1px}* html .ez-negml{margin-left:-4px} + + +/*********** add bottom line to table rows ***********/ +th, td { padding: 0.1em 0.5em 0.1em 0.5em;} + +/*********** labels bold and occasionally centered ***********/ +label { + white-space: nowrap; +} +label, b, th { + font-weight: bold; +} +thead th { + text-align: center; + border-bottom: 1px solid #444; +} +/*********** forms and table padding ***********/ +form, table { + padding: 5px 10px 5px 10px; +} + +/*********** code blocks 
***********/ +code { + padding: 3px 5px; + font-family: Andale Mono, monospace; + font-size: 0.9em; +} + +/*********** left and right padding to quoted text ***********/ +blockquote { + background: #cccccc; + border-left: 30px transparent; + border-right: 30px transparent; + /*padding: 5px;*/ +} + +input[type=text], input[type=password], textarea, select { + margin: 2px 15px 2px 5px; + width: 280px; + background: #fff; + color: #555; + border: 1px solid #dedede; + -moz-border-radius: 2px; + -webkit-border-radius: 2px; + border-radius: 2px; + font-size: 12px; +} + +input[type=text], input[type=password] { + height: 16px; +} + +select[multiple=multiple] { + height: 90px; +} + +input[type=submit], input[type=button], button { + margin: 0px; + /*width: 85px;*/ + height: 22px; + background: #eaeaea; + color: #555; + border: 1px solid #dedede; + -moz-border-radius: 2px; + -webkit-border-radius: 2px; + border-radius: 2px; +} + +fieldset { border: 1px solid #dedede; padding: 6px; } +legend { font-weight: bold; } + +input:focus, textarea:focus { background: #fafafa; } + +p {text-indent:30px;} + +p, blockquote { + margin-bottom: 10px; +} + +h1,h2,h3,h4,h5,h6 { line-height: 170%; } +h1 {font-size: 2.0em;} +h2 {font-size: 1.8em;} +h3 {font-size: 1.4em;} +h4 {font-size: 1.2em;} +h5 {font-size: 1.0em;} +h6 {font-size: 0.8em;} + +/*********** page layout alignment, width and padding ***********/ +/*body {background-color: #000;}*/ +#container, #header, #page, #content, #statusbar, +#footer, #wrapper { display:block; line-height: 170%; } +#wrapper {width: 900px;} +#container { + margin: 0 auto; + padding: 0; +} +#wrapper {margin: 0 auto;} +#wrapper {background-color: #fff; padding: 5px;} +#statusbar { margin: 5px 0px 20px 0px;} +#footer { + margin-top: 30px; + padding: 5px; +} +#statusbar, #footer { + background: #eaeaea; + border-top: 1px #aaa solid; +} +#logo { + width: 68px; + height: 62px; + background: url(../images/logo.png); +} +#appname { + color: #cccccc; +} + 
+#right_sidebar { width: 160px; float:right; display: none; } +#left_sidebar { width: 160px; float:left; display: none; } +#content { float: left; /*width: 740px;*//*width: 63%;*/ /*width: 640px; float:left;*/ } /* uncomment this if you are going to use sidebars */ + +.auth_navbar { + top: 0px; + float: right; + padding: 3px 10px 3px 10px; +} + +/*********** web2py specific ***********/ +div.flash { + font-weight: bold; + display: none; + position: fixed; + padding: 10px; + top: 40px; + right: 10px; + min-width: 280px; + opacity: 0.85; + margin: 0px 0px 10px 10px; + color: #fff; + vertical-align: middle; + cursor: pointer; + background: #000; + border: 2px solid #fff; + -moz-border-radius: 5px; + -webkit-border-radius: 5px; + z-index: 2; +} +div.error { + background-color: red; + color: white; + padding: 3px; +} + +/*************************** + * CSS 3 Buttons + * http://github.com/michenriksen/css3buttons + * created by Michael Henriksen + * License: Unlicense + * + * *******************/ + +a.button { display: inline-block; padding: 3px 5px 3px 5px; font-family: 'lucida grande', tahoma, verdana, arial, sans-serif; font-size: 12px; color: #3C3C3D; text-shadow: 1px 1px 0 #FFFFFF; background: #ECECEC url('../images/css3buttons_backgrounds.png') 0 0 no-repeat; white-space: nowrap; overflow: visible; cursor: pointer; text-decoration: none; border: 1px solid #CACACA; -webkit-border-radius: 2px; -moz-border-radius: 2px; -webkit-background-clip: padding-box; border-radius: 2px; outline: none; position: relative; zoom: 1; *display: inline; } +a.button.primary { font-weight: bold } +a.button:hover { color: #FFFFFF; border-color: #388AD4; text-decoration: none; text-shadow: -1px -1px 0 rgba(0,0,0,0.3); background-position: 0 -40px; background-color: #2D7DC5; } +a.button:active, +a.button.active { background-position: 0 -81px; border-color: #347BBA; background-color: #0F5EA2; color: #FFFFFF; text-shadow: none; } +a.button:active { top: 1px } +a.button.negative:hover { 
color: #FFFFFF; background-position: 0 -121px; background-color: #D84743; border-color: #911D1B; } +a.button.negative:active, +a.button.negative.active { background-position: 0 -161px; background-color: #A5211E; border-color: #911D1B; } +a.button.pill { -webkit-border-radius: 19px; -moz-border-radius: 19px; border-radius: 19px; padding: 2px 10px 2px 10px; } +a.button.left { -webkit-border-bottom-right-radius: 0px; -webkit-border-top-right-radius: 0px; -moz-border-radius-bottomright: 0px; -moz-border-radius-topright: 0px; border-bottom-right-radius: 0px; border-top-right-radius: 0px; margin-right: 0px; } +a.button.middle { margin-right: 0px; margin-left: 0px; -webkit-border-radius: 0px; -moz-border-radius: 0px; border-radius: 0px; border-right: none; border-left: none; } +a.button.right { -webkit-border-bottom-left-radius: 0px; -webkit-border-top-left-radius: 0px; -moz-border-radius-bottomleft: 0px; -moz-border-radius-topleft: 0px; border-top-left-radius: 0px; border-bottom-left-radius: 0px; margin-left: 0px; } +a.button.left:active, +a.button.middle:active, +a.button.right:active { top: 0px } +a.button.big { font-size: 16px; padding-left: 17px; padding-right: 17px; } +a.button span.icon { display: inline-block; width: 14px; height: 12px; margin: auto 7px auto auto; position: relative; top: 2px; background-image: url('../images/css3buttons_icons.png'); background-repeat: no-repeat; } +a.big.button span.icon { top: 0px } +a.button span.icon.book { background-position: 0 0 } +a.button:hover span.icon.book { background-position: 0 -15px } +a.button span.icon.calendar { background-position: 0 -30px } +a.button:hover span.icon.calendar { background-position: 0 -45px } +a.button span.icon.chat { background-position: 0 -60px } +a.button:hover span.icon.chat { background-position: 0 -75px } +a.button span.icon.check { background-position: 0 -90px } +a.button:hover span.icon.check { background-position: 0 -103px } +a.button span.icon.clock { background-position: 0 -116px } 
+a.button:hover span.icon.clock { background-position: 0 -131px } +a.button span.icon.cog { background-position: 0 -146px } +a.button:hover span.icon.cog { background-position: 0 -161px } +a.button span.icon.comment { background-position: 0 -176px } +a.button:hover span.icon.comment { background-position: 0 -190px } +a.button span.icon.cross { background-position: 0 -204px } +a.button:hover span.icon.cross { background-position: 0 -219px } +a.button span.icon.downarrow { background-position: 0 -234px } +a.button:hover span.icon.downarrow { background-position: 0 -249px } +a.button span.icon.fork { background-position: 0 -264px } +a.button:hover span.icon.fork { background-position: 0 -279px } +a.button span.icon.heart { background-position: 0 -294px } +a.button:hover span.icon.heart { background-position: 0 -308px } +a.button span.icon.home { background-position: 0 -322px } +a.button:hover span.icon.home { background-position: 0 -337px } +a.button span.icon.key { background-position: 0 -352px } +a.button:hover span.icon.key { background-position: 0 -367px } +a.button span.icon.leftarrow { background-position: 0 -382px } +a.button:hover span.icon.leftarrow { background-position: 0 -397px } +a.button span.icon.lock { background-position: 0 -412px } +a.button:hover span.icon.lock { background-position: 0 -427px } +a.button span.icon.loop { background-position: 0 -442px } +a.button:hover span.icon.loop { background-position: 0 -457px } +a.button span.icon.magnifier { background-position: 0 -472px } +a.button:hover span.icon.magnifier { background-position: 0 -487px } +a.button span.icon.mail { background-position: 0 -502px } +a.button:hover span.icon.mail { background-position: 0 -514px } +a.button span.icon.move { background-position: 0 -526px } +a.button:hover span.icon.move { background-position: 0 -541px } +a.button span.icon.pen { background-position: 0 -556px } +a.button:hover span.icon.pen { background-position: 0 -571px } +a.button span.icon.pin { 
background-position: 0 -586px } +a.button:hover span.icon.pin { background-position: 0 -601px } +a.button span.icon.plus { background-position: 0 -616px } +a.button:hover span.icon.plus { background-position: 0 -631px } +a.button span.icon.reload { background-position: 0 -646px } +a.button:hover span.icon.reload { background-position: 0 -660px } +a.button span.icon.rightarrow { background-position: 0 -674px } +a.button:hover span.icon.rightarrow { background-position: 0 -689px } +a.button span.icon.rss { background-position: 0 -704px } +a.button:hover span.icon.rss { background-position: 0 -719px } +a.button span.icon.tag { background-position: 0 -734px } +a.button:hover span.icon.tag { background-position: 0 -749px } +a.button span.icon.trash { background-position: 0 -764px } +a.button:hover span.icon.trash { background-position: 0 -779px } +a.button span.icon.unlock { background-position: 0 -794px } +a.button:hover span.icon.unlock { background-position: 0 -809px } +a.button span.icon.uparrow { background-position: 0 -824px } +a.button:hover span.icon.uparrow { background-position: 0 -839px } +a.button span.icon.user { background-position: 0 -854px } +a.button:hover span.icon.user { background-position: 0 -869px } + + + + + + +/***************************************************** + * HERE YOU CAN START TO WRITE YOUR OWN DIVS + */ + + + + + + + +/* + * Media queries for responsive design + */ + +@media all and (orientation:portrait) { + /* Style adjustments for portrait mode goes here */ + +} + +@media all and (orientation:landscape) { + /* Style adjustments for landscape mode goes here */ + +} + +/* Grade-A Mobile Browsers (Opera Mobile, iPhone Safari, Android Chrome) + Consider this: www.cloudfour.com/css-media-query-for-mobile-is-fools-gold/ */ +@media screen and (max-device-width: 480px) { + + + /* Uncomment if you don't want iOS and WinMobile to mobile-optimize the text for you + j.mp/textsizeadjust + html { -webkit-text-size-adjust:none; 
-ms-text-size-adjust:none; } */ +} + + +/* + * print styles + * inlined to avoid required HTTP connection www.phpied.com/delay-loading-your-print-css/ + */ +@media print { + * { background: transparent !important; color: #444 !important; text-shadow: none !important; } + a, a:visited { color: #444 !important; text-decoration: underline; } + a:after { content: " (" attr(href) ")"; } + abbr:after { content: " (" attr(title) ")"; } + .ir a:after { content: ""; } /* Don't show links for images */ + pre, blockquote { border: 1px solid #999; page-break-inside: avoid; } + thead { display: table-header-group; } /* css-discuss.incutio.com/wiki/Printing_Tables */ + tr, img { page-break-inside: avoid; } + @page { margin: 0.5cm; } + p, h2, h3 { orphans: 3; widows: 3; } + h2, h3{ page-break-after: avoid; } +} + +#story-list > .read > h2 { + color: #607890; +} +#story-list > .unread > h2 { + color: #306; +} ADDED applications/mobileblur/static/css/calendar.css Index: applications/mobileblur/static/css/calendar.css ================================================================== --- /dev/null +++ applications/mobileblur/static/css/calendar.css @@ -0,0 +1,1 @@ +.calendar{z-index:99;position:relative;display:none;border-top:2px solid #fff;border-right:2px solid #000;border-bottom:2px solid #000;border-left:2px solid #fff;font-size:11px;color:#000;cursor:default;background:#d4d0c8;font-family:tahoma,verdana,sans-serif;}.calendar table{border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;font-size:11px;color:#000;cursor:default;background:#d4d0c8;font-family:tahoma,verdana,sans-serif;}.calendar .button{text-align:center;padding:1px;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;}.calendar .nav{background:transparent}.calendar thead .title{font-weight:bold;padding:1px;border:1px solid #000;background:#848078;color:#fff;text-align:center;}.calendar thead 
.name{border-bottom:1px solid #000;padding:2px;text-align:center;background:#f4f0e8;}.calendar thead .weekend{color:#f00;}.calendar thead .hilite{border-top:2px solid #fff;border-right:2px solid #000;border-bottom:2px solid #000;border-left:2px solid #fff;padding:0;background-color:#e4e0d8;}.calendar thead .active{padding:2px 0 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;background-color:#c4c0b8;}.calendar tbody .day{width:2em;text-align:right;padding:2px 4px 2px 2px;}.calendar tbody .day.othermonth{font-size:80%;color:#aaa;}.calendar tbody .day.othermonth.oweekend{color:#faa;}.calendar table .wn{padding:2px 3px 2px 2px;border-right:1px solid #000;background:#f4f0e8;}.calendar tbody .rowhilite td{background:#e4e0d8;}.calendar tbody .rowhilite td.wn{background:#d4d0c8;}.calendar tbody td.hilite{padding:1px 3px 1px 1px;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;}.calendar tbody td.active{padding:2px 2px 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;}.calendar tbody td.selected{font-weight:bold;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;padding:2px 2px 0 2px;background:#e4e0d8;}.calendar tbody td.weekend{color:#f00;}.calendar tbody td.today{font-weight:bold;color:#00f;}.calendar tbody .disabled{color:#999;}.calendar tbody .emptycell{visibility:hidden;}.calendar tbody .emptyrow{display:none;}.calendar tfoot .ttip{background:#f4f0e8;padding:1px;border:1px solid #000;background:#848078;color:#fff;text-align:center;}.calendar tfoot .hilite{border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;padding:1px;background:#e4e0d8;}.calendar tfoot .active{padding:2px 0 0 2px;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid 
#fff;border-left:1px solid #000;}.calendar .combo{position:absolute;display:none;width:4em;top:0;left:0;cursor:default;border-top:1px solid #fff;border-right:1px solid #000;border-bottom:1px solid #000;border-left:1px solid #fff;background:#e4e0d8;font-size:90%;padding:1px;z-index:100;}.calendar .combo .label,.calendar .combo .label-IEfix{text-align:center;padding:1px;}.calendar .combo .label-IEfix{width:4em;}.calendar .combo .active{background:#c4c0b8;padding:0;border-top:1px solid #000;border-right:1px solid #fff;border-bottom:1px solid #fff;border-left:1px solid #000;}.calendar .combo .hilite{background:#048;color:#fea;}.calendar td.time{border-top:1px solid #000;padding:1px 0;text-align:center;background-color:#f4f0e8;}.calendar td.time .hour,.calendar td.time .minute,.calendar td.time .ampm{padding:0 3px 0 4px;border:1px solid #889;font-weight:bold;background-color:#fff;}.calendar td.time .ampm{text-align:center;}.calendar td.time .colon{padding:0 2px 0 3px;font-weight:bold;}.calendar td.time span.hilite{border-color:#000;background-color:#766;color:#fff;}.calendar td.time span.active{border-color:#f00;background-color:#000;color:#0f0;}#CP_hourcont{z-index:99;padding:0;position:absolute;border:1px dashed #666;background-color:#eee;display:none;}#CP_minutecont{z-index:99;background-color:#ddd;padding:1px;position:absolute;width:45px;display:none;}.floatleft{float:left;}.CP_hour{z-index:99;padding:1px;font-family:Arial,Helvetica,sans-serif;font-size:9px;white-space:nowrap;cursor:pointer;width:35px;}.CP_minute{z-index:99;padding:1px;font-family:Arial,Helvetica,sans-serif;font-size:9px;white-space:nowrap;cursor:pointer;width:auto;}.CP_over{background-color:#fff;z-index:99} ADDED applications/mobileblur/static/css/handheld.css Index: applications/mobileblur/static/css/handheld.css ================================================================== --- /dev/null +++ applications/mobileblur/static/css/handheld.css @@ -0,0 +1,7 @@ + +* { + float: none; /* Screens are 
not big enough to account for floats */ + font-size: 80%; /* Slightly reducing font size to reduce need to scroll */ + background: #fff; /* As much contrast as possible */ + color: #000; +} ADDED applications/mobileblur/static/css/superfish-navbar.css Index: applications/mobileblur/static/css/superfish-navbar.css ================================================================== --- /dev/null +++ applications/mobileblur/static/css/superfish-navbar.css @@ -0,0 +1,93 @@ + +/*** adding the class sf-navbar in addition to sf-menu creates an all-horizontal nav-bar menu ***/ +.sf-navbar { + background: #BDD2FF; + height: 2.5em; + padding-bottom: 2.5em; + position: relative; +} +.sf-navbar li { + background: #AABDE6; + position: static; +} +.sf-navbar a { + border-top: none; +} +.sf-navbar li ul { + width: 44em; /*IE6 soils itself without this*/ +} +.sf-navbar li li { + background: #BDD2FF; + position: relative; +} +.sf-navbar li li ul { + width: 13em; +} +.sf-navbar li li li { + width: 100%; +} +.sf-navbar ul li { + width: auto; + float: left; +} +.sf-navbar a, .sf-navbar a:visited { + border: none; +} +.sf-navbar li.current { + background: #BDD2FF; +} +.sf-navbar li:hover, +.sf-navbar li.sfHover, +.sf-navbar li li.current, +.sf-navbar a:focus, .sf-navbar a:hover, .sf-navbar a:active { + background: #BDD2FF; +} +.sf-navbar ul li:hover, +.sf-navbar ul li.sfHover, +ul.sf-navbar ul li:hover li, +ul.sf-navbar ul li.sfHover li, +.sf-navbar ul a:focus, .sf-navbar ul a:hover, .sf-navbar ul a:active { + background: #D1DFFF; +} +ul.sf-navbar li li li:hover, +ul.sf-navbar li li li.sfHover, +.sf-navbar li li.current li.current, +.sf-navbar ul li li a:focus, .sf-navbar ul li li a:hover, .sf-navbar ul li li a:active { + background: #E6EEFF; +} +ul.sf-navbar .current ul, +ul.sf-navbar ul li:hover ul, +ul.sf-navbar ul li.sfHover ul { + left: 0; + top: 2.5em; /* match top ul list item height */ +} +ul.sf-navbar .current ul ul { + top: -999em; +} + +.sf-navbar li li.current > a { + 
font-weight: bold; +} + +/*** point all arrows down ***/ +/* point right for anchors in subs */ +.sf-navbar ul .sf-sub-indicator { background-position: -10px -100px; } +.sf-navbar ul a > .sf-sub-indicator { background-position: 0 -100px; } +/* apply hovers to modern browsers */ +.sf-navbar ul a:focus > .sf-sub-indicator, +.sf-navbar ul a:hover > .sf-sub-indicator, +.sf-navbar ul a:active > .sf-sub-indicator, +.sf-navbar ul li:hover > a > .sf-sub-indicator, +.sf-navbar ul li.sfHover > a > .sf-sub-indicator { + background-position: -10px -100px; /* arrow hovers for modern browsers*/ +} + +/*** remove shadow on first submenu ***/ +.sf-navbar > li > ul { + background: transparent; + padding: 0; + -moz-border-radius-bottomleft: 0; + -moz-border-radius-topright: 0; + -webkit-border-top-right-radius: 0; + -webkit-border-bottom-left-radius: 0; +} ADDED applications/mobileblur/static/css/superfish-vertical.css Index: applications/mobileblur/static/css/superfish-vertical.css ================================================================== --- /dev/null +++ applications/mobileblur/static/css/superfish-vertical.css @@ -0,0 +1,23 @@ +/*** adding sf-vertical in addition to sf-menu creates a vertical menu ***/ +.sf-vertical, .sf-vertical li { + width: 10em; +} +/* this lacks ul at the start of the selector, so the styles from the main CSS file override it where needed */ +.sf-vertical li:hover ul, +.sf-vertical li.sfHover ul { + left: 10em; /* match ul width */ + top: 0; +} + +/*** alter arrow directions ***/ +.sf-vertical .sf-sub-indicator { background-position: -10px 0; } /* IE6 gets solid image only */ +.sf-vertical a > .sf-sub-indicator { background-position: 0 0; } /* use translucent arrow for modern browsers*/ + +/* hover arrow direction for modern browsers*/ +.sf-vertical a:focus > .sf-sub-indicator, +.sf-vertical a:hover > .sf-sub-indicator, +.sf-vertical a:active > .sf-sub-indicator, +.sf-vertical li:hover > a > .sf-sub-indicator, +.sf-vertical li.sfHover > a > 
.sf-sub-indicator { + background-position: -10px 0; /* arrow hovers for modern browsers*/ +} ADDED applications/mobileblur/static/css/superfish.css Index: applications/mobileblur/static/css/superfish.css ================================================================== --- /dev/null +++ applications/mobileblur/static/css/superfish.css @@ -0,0 +1,139 @@ + +/*** ESSENTIAL STYLES ***/ +.sf-menu, .sf-menu * { + margin: 0; + padding: 0; + list-style: none; +} +.sf-menu { + line-height: 1.0; +} +.sf-menu ul { + position: absolute; + top: -999em; + width: 10em; /* left offset of submenus need to match (see below) */ +} +.sf-menu ul li { + width: 100%; +} +.sf-menu li:hover { + visibility: inherit; /* fixes IE7 'sticky bug' */ +} +.sf-menu li { + float: left; + position: relative; +} +.sf-menu a { + display: block; + position: relative; +} +.sf-menu li:hover ul, +.sf-menu li.sfHover ul { + left: 0; + top: 2.5em; /* match top ul list item height */ + z-index: 99; +} +ul.sf-menu li:hover li ul, +ul.sf-menu li.sfHover li ul { + top: -999em; +} +ul.sf-menu li li:hover ul, +ul.sf-menu li li.sfHover ul { + left: 10em; /* match ul width */ + top: 0; +} +ul.sf-menu li li:hover li ul, +ul.sf-menu li li.sfHover li ul { + top: -999em; +} +ul.sf-menu li li li:hover ul, +ul.sf-menu li li li.sfHover ul { + left: 10em; /* match ul width */ + top: 0; +} + +/*** DEMO SKIN ***/ +.sf-menu { + float: left; + /*margin-bottom: 1em;*/ +} +.sf-menu a { + border-left: 1px solid #fff; + /*border-top: 1px solid #CFDEFF;*/ + padding: .75em 1em; + text-decoration:none; +} +.sf-menu a, .sf-menu a:visited { /* visited pseudo selector so IE6 applies text colour*/ + color: #275b90;/*#13a;*/ +} +.sf-menu li { + background: #dadada;/*#BDD2FF;*/ +} +.sf-menu li li { + background: #AABDE6; +} +.sf-menu li li a { + /*color: #13a;*/ +} +.sf-menu li li li { + background: #9AAEDB; +} +.sf-menu li:hover, .sf-menu li.sfHover, +.sf-menu a:focus, .sf-menu a:hover, .sf-menu a:active { + background: #CFDEFF; + 
outline: 0; +} + +/*** arrows **/ +.sf-menu a.sf-with-ul { + padding-right: 2.25em; + min-width: 1px; /* trigger IE7 hasLayout so spans position accurately */ +} +.sf-sub-indicator { + position: absolute; + display: block; + right: .75em; + top: 1.05em; /* IE6 only */ + width: 10px; + height: 10px; + text-indent: -999em; + overflow: hidden; + background: url('../images/arrows-ffffff.png') no-repeat -10px -100px; /* 8-bit indexed alpha png. IE6 gets solid image only */ +} +a > .sf-sub-indicator { /* give all except IE6 the correct values */ + top: .8em; + background-position: 0 -100px; /* use translucent arrow for modern browsers*/ +} +/* apply hovers to modern browsers */ +a:focus > .sf-sub-indicator, +a:hover > .sf-sub-indicator, +a:active > .sf-sub-indicator, +li:hover > a > .sf-sub-indicator, +li.sfHover > a > .sf-sub-indicator { + background-position: -10px -100px; /* arrow hovers for modern browsers*/ +} + +/* point right for anchors in subs */ +.sf-menu ul .sf-sub-indicator { background-position: -10px 0; } +.sf-menu ul a > .sf-sub-indicator { background-position: 0 0; } +/* apply hovers to modern browsers */ +.sf-menu ul a:focus > .sf-sub-indicator, +.sf-menu ul a:hover > .sf-sub-indicator, +.sf-menu ul a:active > .sf-sub-indicator, +.sf-menu ul li:hover > a > .sf-sub-indicator, +.sf-menu ul li.sfHover > a > .sf-sub-indicator { + background-position: -10px 0; /* arrow hovers for modern browsers*/ +} + +/*** shadows for all but IE6 ***/ +.sf-shadow ul { + background: url('../images/shadow.png') no-repeat bottom right; + padding: 0 8px 9px 0; + -moz-border-radius-bottomleft: 17px; + -moz-border-radius-topright: 17px; + -webkit-border-top-right-radius: 17px; + -webkit-border-bottom-left-radius: 17px; +} +.sf-shadow ul.sf-shadow-off { + background: transparent; +} ADDED applications/mobileblur/static/favicon.ico Index: applications/mobileblur/static/favicon.ico ================================================================== --- /dev/null +++ 
applications/mobileblur/static/favicon.ico cannot compute difference between binary files ADDED applications/mobileblur/static/images/arrows-ffffff.png Index: applications/mobileblur/static/images/arrows-ffffff.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/arrows-ffffff.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/css3buttons_backgrounds.png Index: applications/mobileblur/static/images/css3buttons_backgrounds.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/css3buttons_backgrounds.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/css3buttons_icons.png Index: applications/mobileblur/static/images/css3buttons_icons.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/css3buttons_icons.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/error.png Index: applications/mobileblur/static/images/error.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/error.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/ok.png Index: applications/mobileblur/static/images/ok.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/ok.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/poweredby.png Index: applications/mobileblur/static/images/poweredby.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/poweredby.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/shadow.png 
Index: applications/mobileblur/static/images/shadow.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/shadow.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/warn.png Index: applications/mobileblur/static/images/warn.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/warn.png cannot compute difference between binary files ADDED applications/mobileblur/static/images/warning.png Index: applications/mobileblur/static/images/warning.png ================================================================== --- /dev/null +++ applications/mobileblur/static/images/warning.png cannot compute difference between binary files ADDED applications/mobileblur/static/js/calendar.js Index: applications/mobileblur/static/js/calendar.js ================================================================== --- /dev/null +++ applications/mobileblur/static/js/calendar.js cannot compute difference between binary files ADDED applications/mobileblur/static/js/dd_belatedpng.js Index: applications/mobileblur/static/js/dd_belatedpng.js ================================================================== --- /dev/null +++ applications/mobileblur/static/js/dd_belatedpng.js @@ -0,0 +1,13 @@ +/** +* DD_belatedPNG: Adds IE6 support: PNG images for CSS background-image and HTML . 
+* Author: Drew Diller +* Email: drew.diller@gmail.com +* URL: http://www.dillerdesign.com/experiment/DD_belatedPNG/ +* Version: 0.0.8a +* Licensed under the MIT License: http://dillerdesign.com/experiment/DD_belatedPNG/#license +* +* Example usage: +* DD_belatedPNG.fix('.png_bg'); // argument is a CSS selector +* DD_belatedPNG.fixPng( someNode ); // argument is an HTMLDomElement +**/ +var DD_belatedPNG={ns:"DD_belatedPNG",imgSize:{},delay:10,nodesFixed:0,createVmlNameSpace:function(){if(document.namespaces&&!document.namespaces[this.ns]){document.namespaces.add(this.ns,"urn:schemas-microsoft-com:vml")}},createVmlStyleSheet:function(){var b,a;b=document.createElement("style");b.setAttribute("media","screen");document.documentElement.firstChild.insertBefore(b,document.documentElement.firstChild.firstChild);if(b.styleSheet){b=b.styleSheet;b.addRule(this.ns+"\\:*","{behavior:url(#default#VML)}");b.addRule(this.ns+"\\:shape","position:absolute;");b.addRule("img."+this.ns+"_sizeFinder","behavior:none; border:none; position:absolute; z-index:-1; top:-10000px; visibility:hidden;");this.screenStyleSheet=b;a=document.createElement("style");a.setAttribute("media","print");document.documentElement.firstChild.insertBefore(a,document.documentElement.firstChild.firstChild);a=a.styleSheet;a.addRule(this.ns+"\\:*","{display: none !important;}");a.addRule("img."+this.ns+"_sizeFinder","{display: none !important;}")}},readPropertyChange:function(){var b,c,a;b=event.srcElement;if(!b.vmlInitiated){return}if(event.propertyName.search("background")!=-1||event.propertyName.search("border")!=-1){DD_belatedPNG.applyVML(b)}if(event.propertyName=="style.display"){c=(b.currentStyle.display=="none")?"none":"block";for(a in b.vml){if(b.vml.hasOwnProperty(a)){b.vml[a].shape.style.display=c}}}if(event.propertyName.search("filter")!=-1){DD_belatedPNG.vmlOpacity(b)}},vmlOpacity:function(b){if(b.currentStyle.filter.search("lpha")!=-1){var 
a=b.currentStyle.filter;a=parseInt(a.substring(a.lastIndexOf("=")+1,a.lastIndexOf(")")),10)/100;b.vml.color.shape.style.filter=b.currentStyle.filter;b.vml.image.fill.opacity=a}},handlePseudoHover:function(a){setTimeout(function(){DD_belatedPNG.applyVML(a)},1)},fix:function(a){if(this.screenStyleSheet){var c,b;c=a.split(",");for(b=0;bn.H){i.B=n.H}d.vml.image.shape.style.clip="rect("+i.T+"px "+(i.R+a)+"px "+i.B+"px "+(i.L+a)+"px)"}else{d.vml.image.shape.style.clip="rect("+f.T+"px "+f.R+"px "+f.B+"px "+f.L+"px)"}},figurePercentage:function(d,c,f,a){var b,e;e=true;b=(f=="X");switch(a){case"left":case"top":d[f]=0;break;case"center":d[f]=0.5;break;case"right":case"bottom":d[f]=1;break;default:if(a.search("%")!=-1){d[f]=parseInt(a,10)/100}else{e=false}}d[f]=Math.ceil(e?((c[b?"W":"H"]*d[f])-(c[b?"w":"h"]*d[f])):parseInt(a,10));if(d[f]%2===0){d[f]++}return d[f]},fixPng:function(c){c.style.behavior="none";var g,b,f,a,d;if(c.nodeName=="BODY"||c.nodeName=="TD"||c.nodeName=="TR"){return}c.isImg=false;if(c.nodeName=="IMG"){if(c.src.toLowerCase().search(/\.png$/)!=-1){c.isImg=true;c.style.visibility="hidden"}else{return}}else{if(c.currentStyle.backgroundImage.toLowerCase().search(".png")==-1){return}}g=DD_belatedPNG;c.vml={color:{},image:{}};b={shape:{},fill:{}};for(a in c.vml){if(c.vml.hasOwnProperty(a)){for(d in b){if(b.hasOwnProperty(d)){f=g.ns+":"+d;c.vml[a][d]=document.createElement(f)}}c.vml[a].shape.stroked=false;c.vml[a].shape.appendChild(c.vml[a].fill);c.parentNode.insertBefore(c.vml[a].shape,c)}}c.vml.image.shape.fillcolor="none";c.vml.image.fill.type="tile";c.vml.color.fill.on=false;g.attachHandlers(c);g.giveLayout(c);g.giveLayout(c.offsetParent);c.vmlInitiated=true;g.applyVML(c)}};try{document.execCommand("BackgroundImageCache",false,true)}catch(r){}DD_belatedPNG.createVmlNameSpace();DD_belatedPNG.createVmlStyleSheet(); ADDED applications/mobileblur/static/js/jquery.js Index: applications/mobileblur/static/js/jquery.js 
================================================================== --- /dev/null +++ applications/mobileblur/static/js/jquery.js cannot compute difference between binary files ADDED applications/mobileblur/static/js/modernizr-1.7.min.js Index: applications/mobileblur/static/js/modernizr-1.7.min.js ================================================================== --- /dev/null +++ applications/mobileblur/static/js/modernizr-1.7.min.js @@ -0,0 +1,2 @@ +// Modernizr v1.7 www.modernizr.com +window.Modernizr=function(a,b,c){function G(){e.input=function(a){for(var b=0,c=a.length;b7)},r.history=function(){return !!(a.history&&history.pushState)},r.draganddrop=function(){return x("dragstart")&&x("drop")},r.websockets=function(){return"WebSocket"in a},r.rgba=function(){A("background-color:rgba(150,255,150,.5)");return D(k.backgroundColor,"rgba")},r.hsla=function(){A("background-color:hsla(120,40%,100%,.5)");return D(k.backgroundColor,"rgba")||D(k.backgroundColor,"hsla")},r.multiplebgs=function(){A("background:url(//:),url(//:),red url(//:)");return(new RegExp("(url\\s*\\(.*?){3}")).test(k.background)},r.backgroundsize=function(){return F("backgroundSize")},r.borderimage=function(){return F("borderImage")},r.borderradius=function(){return F("borderRadius","",function(a){return D(a,"orderRadius")})},r.boxshadow=function(){return F("boxShadow")},r.textshadow=function(){return b.createElement("div").style.textShadow===""},r.opacity=function(){B("opacity:.55");return/^0.55$/.test(k.opacity)},r.cssanimations=function(){return F("animationName")},r.csscolumns=function(){return F("columnCount")},r.cssgradients=function(){var a="background-image:",b="gradient(linear,left top,right bottom,from(#9f9),to(white));",c="linear-gradient(left top,#9f9, white);";A((a+o.join(b+a)+o.join(c+a)).slice(0,-a.length));return D(k.backgroundImage,"gradient")},r.cssreflections=function(){return 
F("boxReflect")},r.csstransforms=function(){return!!E(["transformProperty","WebkitTransform","MozTransform","OTransform","msTransform"])},r.csstransforms3d=function(){var a=!!E(["perspectiveProperty","WebkitPerspective","MozPerspective","OPerspective","msPerspective"]);a&&"webkitPerspective"in g.style&&(a=w("@media ("+o.join("transform-3d),(")+"modernizr)"));return a},r.csstransitions=function(){return F("transitionProperty")},r.fontface=function(){var a,c,d=h||g,e=b.createElement("style"),f=b.implementation||{hasFeature:function(){return!1}};e.type="text/css",d.insertBefore(e,d.firstChild),a=e.sheet||e.styleSheet;var i=f.hasFeature("CSS2","")?function(b){if(!a||!b)return!1;var c=!1;try{a.insertRule(b,0),c=/src/i.test(a.cssRules[0].cssText),a.deleteRule(a.cssRules.length-1)}catch(d){}return c}:function(b){if(!a||!b)return!1;a.cssText=b;return a.cssText.length!==0&&/src/i.test(a.cssText)&&a.cssText.replace(/\r+|\n+/g,"").indexOf(b.split(" ")[0])===0};c=i('@font-face { font-family: "font"; src: url(data:,); }'),d.removeChild(e);return c},r.video=function(){var a=b.createElement("video"),c=!!a.canPlayType;if(c){c=new Boolean(c),c.ogg=a.canPlayType('video/ogg; codecs="theora"');var d='video/mp4; codecs="avc1.42E01E';c.h264=a.canPlayType(d+'"')||a.canPlayType(d+', mp4a.40.2"'),c.webm=a.canPlayType('video/webm; codecs="vp8, vorbis"')}return c},r.audio=function(){var a=b.createElement("audio"),c=!!a.canPlayType;c&&(c=new Boolean(c),c.ogg=a.canPlayType('audio/ogg; codecs="vorbis"'),c.mp3=a.canPlayType("audio/mpeg;"),c.wav=a.canPlayType('audio/wav; codecs="1"'),c.m4a=a.canPlayType("audio/x-m4a;")||a.canPlayType("audio/aac;"));return 
c},r.localstorage=function(){try{return!!localStorage.getItem}catch(a){return!1}},r.sessionstorage=function(){try{return!!sessionStorage.getItem}catch(a){return!1}},r.webWorkers=function(){return!!a.Worker},r.applicationcache=function(){return!!a.applicationCache},r.svg=function(){return!!b.createElementNS&&!!b.createElementNS(q.svg,"svg").createSVGRect},r.inlinesvg=function(){var a=b.createElement("div");a.innerHTML="";return(a.firstChild&&a.firstChild.namespaceURI)==q.svg},r.smil=function(){return!!b.createElementNS&&/SVG/.test(n.call(b.createElementNS(q.svg,"animate")))},r.svgclippaths=function(){return!!b.createElementNS&&/SVG/.test(n.call(b.createElementNS(q.svg,"clipPath")))};for(var H in r)z(r,H)&&(v=H.toLowerCase(),e[v]=r[H](),u.push((e[v]?"":"no-")+v));e.input||G(),e.crosswindowmessaging=e.postmessage,e.historymanagement=e.history,e.addTest=function(a,b){a=a.toLowerCase();if(!e[a]){b=!!b(),g.className+=" "+(b?"":"no-")+a,e[a]=b;return e}},A(""),j=l=null,f&&a.attachEvent&&function(){var a=b.createElement("div");a.innerHTML="";return a.childNodes.length!==1}()&&function(a,b){function p(a,b){var c=-1,d=a.length,e,f=[];while(++c »'].join('')), + over = function(){ + var $$ = $(this), menu = getMenu($$); + clearTimeout(menu.sfTimer); + $$.showSuperfishUl().siblings().hideSuperfishUl(); + }, + out = function(){ + var $$ = $(this), menu = getMenu($$), o = sf.op; + clearTimeout(menu.sfTimer); + menu.sfTimer=setTimeout(function(){ + o.retainPath=($.inArray($$[0],o.$path)>-1); + $$.hideSuperfishUl(); + if (o.$path.length && $$.parents(['li.',o.hoverClass].join('')).length<1){over.call(o.$path);} + },o.delay); + }, + getMenu = function($menu){ + var menu = $menu.parents(['ul.',c.menuClass,':first'].join(''))[0]; + sf.op = sf.o[menu.serial]; + return menu; + }, + addArrow = function($a){ $a.addClass(c.anchorClass).append($arrow.clone()); }; + + return this.each(function() { + var s = this.serial = sf.o.length; + var o = $.extend({},sf.defaults,op); + o.$path = 
$('li.'+o.pathClass,this).slice(0,o.pathLevels).each(function(){ + $(this).addClass([o.hoverClass,c.bcClass].join(' ')) + .filter('li:has(ul)').removeClass(o.pathClass); + }); + sf.o[s] = sf.op = o; + + $('li:has(ul)',this)[($.fn.hoverIntent && !o.disableHI) ? 'hoverIntent' : 'hover'](over,out).each(function() { + if (o.autoArrows) addArrow( $('>a:first-child',this) ); + }) + .not('.'+c.bcClass) + .hideSuperfishUl(); + + var $a = $('a',this); + $a.each(function(i){ + var $li = $a.eq(i).parents('li'); + $a.eq(i).focus(function(){over.call($li);}).blur(function(){out.call($li);}); + }); + o.onInit.call(this); + + }).each(function() { + var menuClasses = [c.menuClass]; + if (sf.op.dropShadows && !($.browser.msie && $.browser.version < 7)) menuClasses.push(c.shadowClass); + $(this).addClass(menuClasses.join(' ')); + }); + }; + + var sf = $.fn.superfish; + sf.o = []; + sf.op = {}; + sf.IE7fix = function(){ + var o = sf.op; + if ($.browser.msie && $.browser.version > 6 && o.dropShadows && o.animation.opacity!=undefined) + this.toggleClass(sf.c.shadowClass+'-off'); + }; + sf.c = { + bcClass : 'sf-breadcrumb', + menuClass : 'sf-js-enabled', + anchorClass : 'sf-with-ul', + arrowClass : 'sf-sub-indicator', + shadowClass : 'sf-shadow' + }; + sf.defaults = { + hoverClass : 'sfHover', + pathClass : 'overideThisToUse', + pathLevels : 1, + delay : 800, + animation : {opacity:'show'}, + speed : 'normal', + autoArrows : true, + dropShadows : true, + disableHI : false, // true disables hoverIntent detection + onInit : function(){}, // callback functions + onBeforeShow: function(){}, + onShow : function(){}, + onHide : function(){} + }; + $.fn.extend({ + hideSuperfishUl : function(){ + var o = sf.op, + not = (o.retainPath===true) ? 
o.$path : ''; + o.retainPath = false; + var $ul = $(['li.',o.hoverClass].join(''),this).add(this).not(not).removeClass(o.hoverClass) + .find('>ul').hide().css('visibility','hidden'); + o.onHide.call($ul); + return this; + }, + showSuperfishUl : function(){ + var o = sf.op, + sh = sf.c.shadowClass+'-off', + $ul = this.addClass(o.hoverClass) + .find('>ul:hidden').css('visibility','visible'); + sf.IE7fix.call($ul); + o.onBeforeShow.call($ul); + $ul.animate(o.animation,o.speed,function(){ sf.IE7fix.call($ul); o.onShow.call($ul); }); + return this; + } + }); + +})(jQuery); ADDED applications/mobileblur/static/js/web2py_ajax.js Index: applications/mobileblur/static/js/web2py_ajax.js ================================================================== --- /dev/null +++ applications/mobileblur/static/js/web2py_ajax.js @@ -0,0 +1,97 @@ +function popup(url) { + newwindow=window.open(url,'name','height=400,width=600'); + if (window.focus) newwindow.focus(); + return false; +} +function collapse(id) { jQuery('#'+id).slideToggle(); } +function fade(id,value) { if(value>0) jQuery('#'+id).hide().fadeIn('slow'); else jQuery('#'+id).show().fadeOut('slow'); } +function ajax(u,s,t) { + query = ''; + if (typeof s == "string") { + d = jQuery(s).serialize(); + if(d){ query = d; } + } else { + pcs = []; + for(i=0; i0){query = pcs.join("&");} + } + jQuery.ajax({type: "POST", url: u, data: query, success: function(msg) { if(t) { if(t==':eval') eval(msg); else jQuery("#" + t).html(msg); } } }); +} + +String.prototype.reverse = function () { return this.split('').reverse().join('');}; +function web2py_ajax_init() { + jQuery('.hidden').hide(); + jQuery('.error').hide().slideDown('slow'); + jQuery('.flash').click(function() { jQuery(this).fadeOut('slow'); return false; }); + // jQuery('input[type=submit]').click(function(){var t=jQuery(this);t.hide();t.after('')}); + jQuery('input.integer').live('keyup', function(){this.value=this.value.reverse().replace(/[^0-9\-]|\-(?=.)/g,'').reverse();}); + 
jQuery('input.double,input.decimal').live('keyup', function(){this.value=this.value.reverse().replace(/[^0-9\-\.,]|[\-](?=.)|[\.,](?=[0-9]*[\.,])/g,'').reverse();}); + var confirm_message = (typeof w2p_ajax_confirm_message != 'undefined') ? w2p_ajax_confirm_message : "Are you sure you want to delete this object?"; + jQuery("input[type='checkbox'].delete").live('click', function(){ if(this.checked) if(!confirm(confirm_message)) this.checked=false; }); + var date_format = (typeof w2p_ajax_date_format != 'undefined') ? w2p_ajax_date_format : "%Y-%m-%d"; + try {jQuery("input.date").live('focus',function() {Calendar.setup({ + inputField:this, ifFormat:date_format, showsTime:false + }); }); } catch(e) {}; + var datetime_format = (typeof w2p_ajax_datetime_format != 'undefined') ? w2p_ajax_datetime_format : "%Y-%m-%d %H:%M:%S"; + try { jQuery("input.datetime").live('focus', function() {Calendar.setup({ + inputField:this, ifFormat:datetime_format, showsTime: true,timeFormat: "24" + }); }); } catch(e) {}; + + jQuery("input.time").live('focus', function() { var el = jQuery(this); + if (!el.hasClass('hasTimeEntry')) try { el.timeEntry(); } catch(e) {}; + }); +}; + +jQuery(function() { + var flash = jQuery('.flash'); + flash.hide(); + if(flash.html()) flash.slideDown(); + web2py_ajax_init(); +}); +function web2py_trap_form(action,target) { + jQuery('#'+target+' form').each(function(i){ + var form=jQuery(this); + if(!form.hasClass('no_trap')) + form.submit(function(obj){ + jQuery('.flash').hide().html(''); + web2py_ajax_page('post',action,form.serialize(),target); + return false; + }); + }); +} +function web2py_ajax_page(method,action,data,target) { + jQuery.ajax({'type':method,'url':action,'data':data, + 'beforeSend':function(xhr) { + xhr.setRequestHeader('web2py-component-location',document.location); + xhr.setRequestHeader('web2py-component-element',target);}, + 'complete':function(xhr,text){ + var html=xhr.responseText; + var 
content=xhr.getResponseHeader('web2py-component-content'); + var command=xhr.getResponseHeader('web2py-component-command'); + var flash=xhr.getResponseHeader('web2py-component-flash'); + var t = jQuery('#'+target); + if(content=='prepend') t.prepend(html); + else if(content=='append') t.append(html); + else if(content!='hide') t.html(html); + web2py_trap_form(action,target); + web2py_ajax_init(); + if(command) eval(command); + if(flash) jQuery('.flash').html(flash).slideDown(); + } + }); +} +function web2py_component(action,target) { + jQuery(function(){ web2py_ajax_page('get',action,null,target); }); +} +function web2py_comet(url,onmessage,onopen,onclose) { + if ("WebSocket" in window) { + var ws = new WebSocket(url); + ws.onopen = onopen?onopen:(function(){}); + ws.onmessage = onmessage; + ws.onclose = onclose?onclose:(function(){}); + return true; // supported + } else return false; // not supported +} ADDED applications/mobileblur/static/robots.txt Index: applications/mobileblur/static/robots.txt ================================================================== --- /dev/null +++ applications/mobileblur/static/robots.txt @@ -0,0 +1,2 @@ +User-agent: * +Disallow: /welcome/default/user ADDED applications/mobileblur/views/__init__.py Index: applications/mobileblur/views/__init__.py ================================================================== --- /dev/null +++ applications/mobileblur/views/__init__.py ADDED applications/mobileblur/views/appadmin.html Index: applications/mobileblur/views/appadmin.html ================================================================== --- /dev/null +++ applications/mobileblur/views/appadmin.html @@ -0,0 +1,198 @@ +{{extend 'layout.html'}} + + +{{if request.function=='index':}} +

    {{=T("Available databases and tables")}}

    + {{if not databases:}}{{=T("No databases in this application")}}{{pass}} + {{for db in sorted(databases):}} + {{for table in databases[db].tables:}} + {{qry='%s.%s.id>0'%(db,table)}} + {{tbl=databases[db][table]}} + {{if hasattr(tbl,'_primarykey'):}} + {{if tbl._primarykey:}} + {{firstkey=tbl[tbl._primarykey[0]]}} + {{if firstkey.type in ['string','text']:}} + {{qry='%s.%s.%s!=""'%(db,table,firstkey.name)}} + {{else:}} + {{qry='%s.%s.%s>0'%(db,table,firstkey.name)}} + {{pass}} + {{else:}} + {{qry=''}} + {{pass}} + {{pass}} +

    {{=A("%s.%s" % (db,table),_href=URL('select',args=[db],vars=dict(query=qry)))}} +

    + [ {{=A(str(T('insert new'))+' '+table,_href=URL('insert',args=[db,table]))}} ] +

    + {{pass}} + {{pass}} + +{{elif request.function=='select':}} +

    {{=XML(str(T("database %s select"))%A(request.args[0],_href=URL('index'))) }} +

    + {{if table:}} + [ {{=A(str(T('insert new %s'))%table,_href=URL('insert',args=[request.args[0],table]))}} ]

    +

    {{=T("Rows in table")}}


    + {{else:}} +

    {{=T("Rows selected")}}


    + {{pass}} + {{=form}} +

    {{=T('The "query" is a condition like "db.table1.field1==\'value\'". Something like "db.table1.field1==db.table2.field2" results in a SQL JOIN.')}}
    + {{=T('Use (...)&(...) for AND, (...)|(...) for OR, and ~(...) for NOT to build more complex queries.')}}
    + {{=T('"update" is an optional expression like "field1=\'newvalue\'". You cannot update or delete the results of a JOIN')}}

    +

    +

    {{=nrows}} {{=T("selected")}}

    + {{if start>0:}}[ {{=A(T('previous 100 rows'),_href=URL('select',args=request.args[0],vars=dict(start=start-100)))}} ]{{pass}} + {{if stop + {{linkto=URL('update',args=request.args[0])}} + {{upload=URL('download',args=request.args[0])}} + {{=SQLTABLE(rows,linkto,upload,orderby=True,_class='sortable')}} + + {{pass}} +

    {{=T("Import/Export")}}


    + [ {{=T("export as csv file")}} ] + {{if table:}} + {{=FORM(str(T('or import from csv file'))+" ",INPUT(_type='file',_name='csvfile'),INPUT(_type='hidden',_value=table,_name='table'),INPUT(_type='submit',_value='import'))}} + {{pass}} + + +{{elif request.function=='insert':}} +

    {{=T("database")}} {{=A(request.args[0],_href=URL('index'))}} + {{if hasattr(table,'_primarykey'):}} + {{fieldname=table._primarykey[0]}} + {{dbname=request.args[0]}} + {{tablename=request.args[1]}} + {{cond = table[fieldname].type in ['string','text'] and '!=""' or '>0'}} + {{=T("table")}} {{=A(tablename,_href=URL('select',args=dbname,vars=dict(query='%s.%s.%s%s'%(dbname,tablename,fieldname,cond))))}} + {{else:}} + {{=T("table")}} {{=A(request.args[1],_href=URL('select',args=request.args[0],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2]))))}} + {{pass}} +

    +

    {{=T("New Record")}}


    + {{=form}} + + + +{{elif request.function=='update':}} +

    {{=T("database")}} {{=A(request.args[0],_href=URL('index'))}} + {{if hasattr(table,'_primarykey'):}} + {{fieldname=request.vars.keys()[0]}} + {{dbname=request.args[0]}} + {{tablename=request.args[1]}} + {{cond = table[fieldname].type in ['string','text'] and '!=""' or '>0'}} + {{=T("table")}} {{=A(tablename,_href=URL('select',args=dbname,vars=dict(query='%s.%s.%s%s'%(dbname,tablename,fieldname,cond))))}} + {{=T("record")}} {{=A('%s=%s'%request.vars.items()[0],_href=URL('update',args=request.args[:2],vars=request.vars))}} + {{else:}} + {{=T("table")}} {{=A(request.args[1],_href=URL('select',args=request.args[0],vars=dict(query='%s.%s.id>0'%tuple(request.args[:2]))))}} + {{=T("record id")}} {{=A(request.args[2],_href=URL('update',args=request.args[:3]))}} + {{pass}} +

    +

    {{=T("Edit current record")}}



    {{=form}} + + + +{{elif request.function=='state':}} +

    {{=T("Internal State")}}

    +

    {{=T("Current request")}}

    + {{=BEAUTIFY(request)}} +

    {{=T("Current response")}}

    + {{=BEAUTIFY(response)}} +

    {{=T("Current session")}}

    + {{=BEAUTIFY(session)}} + + +{{elif request.function == 'ccache':}} +

    Cache

    +
    +
    +
    + Statistics +
    +
    +

    Overview

    +

    + Hit Ratio: + {{=total['ratio']}}% + ({{=total['hits']}} hits + and {{=total['misses']}} misses) +

    +

    + Size of cache: + {{=total['objects']}} items, + {{=total['bytes']}} bytes + {{if total['bytes'] > 524287:}} + ({{="%.0d" % (total['bytes'] / 1048576)}} MB) + {{pass}} +

    +

    + Cache contains items up to + {{="%02d" % total['oldest'][0]}} hours + {{="%02d" % total['oldest'][1]}} minutes + {{="%02d" % total['oldest'][2]}} seconds old. +

    +

    RAM

    +

    + Hit Ratio: + {{=ram['ratio']}}% + ({{=ram['hits']}} hits + and {{=ram['misses']}} misses) +

    +

    + Size of cache: + {{=ram['objects']}} items, + {{=ram['bytes']}} bytes + {{if ram['bytes'] > 524287:}} + ({{=ram['bytes'] / 1048576}} MB) + {{pass}} +

    +

    + RAM contains items up to + {{="%02d" % ram['oldest'][0]}} hours + {{="%02d" % ram['oldest'][1]}} minutes + {{="%02d" % ram['oldest'][2]}} seconds old. +

    +

    DISK

    +

    + Hit Ratio: + {{=disk['ratio']}}% + ({{=disk['hits']}} hits + and {{=disk['misses']}} misses) +

    +

    + Size of cache: + {{=disk['objects']}} items, + {{=disk['bytes']}} bytes + {{if disk['bytes'] > 524287:}} + ({{=disk['bytes'] / 1048576}} MB) + {{pass}} +

    +

    + DISK contains items up to + {{="%02d" % disk['oldest'][0]}} hours + {{="%02d" % disk['oldest'][1]}} minutes + {{="%02d" % disk['oldest'][2]}} seconds old. +

    +
    + +
    + Manage Cache +
    +
    +

    + {{=form}} +

    +
    +
    +
    +
    +{{pass}} ADDED applications/mobileblur/views/default/index.html Index: applications/mobileblur/views/default/index.html ================================================================== --- /dev/null +++ applications/mobileblur/views/default/index.html @@ -0,0 +1,12 @@ +{{left_sidebar_enabled=right_sidebar_enabled=False}} +{{extend 'layout.html'}} + +{{ for feed in feeds.itervalues(): }} + {{ if threshold == -1 and feed["ng"] > 0: }} [ {{= feed["ng"] }} ] {{ pass }} + {{ if threshold <= 0 and feed["nt"] > 0: }} [ {{= feed["nt"] }} ] {{ pass }} + {{if feed["ps"] > 0: }}[ {{= feed["ps"] }} ] {{ pass }} + {{= feed["feed_title"] }}
    +{{ pass }} + +{{block left_sidebar}}New Left Sidebar Content{{end}} +{{block right_sidebar}}New Right Sidebar Content{{end}} ADDED applications/mobileblur/views/default/login.html Index: applications/mobileblur/views/default/login.html ================================================================== --- /dev/null +++ applications/mobileblur/views/default/login.html @@ -0,0 +1,7 @@ +{{left_sidebar_enabled=right_sidebar_enabled=False}} +{{extend 'layout.html'}} + +{{= login_form }} + +{{block left_sidebar}}New Left Sidebar Content{{end}} +{{block right_sidebar}}New Right Sidebar Content{{end}} ADDED applications/mobileblur/views/default/user.html Index: applications/mobileblur/views/default/user.html ================================================================== --- /dev/null +++ applications/mobileblur/views/default/user.html @@ -0,0 +1,19 @@ +{{extend 'layout.html'}} +

    {{=T( request.args(0).replace('_',' ').capitalize() )}}

    +
    +{{=form}} +{{if request.args(0)=='login':}} +{{if not 'register' in auth.settings.actions_disabled:}} +
    register +{{pass}} +{{if not 'request_reset_password' in auth.settings.actions_disabled:}} +
    lost password +{{pass}} +{{pass}} +
    + + ADDED applications/mobileblur/views/feeds/view.html Index: applications/mobileblur/views/feeds/view.html ================================================================== --- /dev/null +++ applications/mobileblur/views/feeds/view.html @@ -0,0 +1,15 @@ +{{left_sidebar_enabled=right_sidebar_enabled=False}} +{{extend 'layout.html'}} + +

    {{= feed["feed_title"] }}

    + Mark feed as read + +
    + {{ for story in stories: }} +

    {{= story["story_title"] }}

    +

    {{= story["story_date"] }}

    + {{ pass }} +
    + +{{block left_sidebar}}New Left Sidebar Content{{end}} +{{block right_sidebar}}New Right Sidebar Content{{end}} ADDED applications/mobileblur/views/generic.html Index: applications/mobileblur/views/generic.html ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.html @@ -0,0 +1,16 @@ +{{extend 'layout.html'}} +{{""" + +You should not modify this file. +It is used as default when a view is not provided for your controllers + +"""}} +

    {{=' '.join(x.capitalize() for x in request.function.split('_'))}}

    +{{if len(response._vars)==1:}} +{{=response._vars.values()[0]}} +{{elif len(response._vars)>1:}} +{{=BEAUTIFY(response._vars)}} +{{pass}} +{{if request.is_local:}} +{{=response.toolbar()}} +{{pass}} ADDED applications/mobileblur/views/generic.json Index: applications/mobileblur/views/generic.json ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.json @@ -0,0 +1,1 @@ +{{from gluon.serializers import json}}{{=XML(json(response._vars))}} ADDED applications/mobileblur/views/generic.load Index: applications/mobileblur/views/generic.load ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.load @@ -0,0 +1,30 @@ +{{''' +# License: Public Domain +# Author: Iceberg at 21cn dot com + +With this generic.load file, you can use same function to serve two purposes. + += regular action +- ajax callback (when called with .load) + +Example modified from http://www.web2py.com/AlterEgo/default/show/252: + +def index(): + return dict( + part1='hello world', + part2=LOAD(url=URL(r=request,f='auxiliary.load'),ajax=True)) + +def auxiliary(): + form=SQLFORM.factory(Field('name')) + if form.accepts(request.vars): + response.flash = 'ok' + return dict(message="Hello %s" % form.vars.name) + return dict(form=form) + +Notice: + +- no need to set response.headers['web2py-response-flash'] +- no need to return a string +even if the function is called via ajax. 
+ +'''}}{{if len(response._vars)==1:}}{{=response._vars.values()[0]}}{{else:}}{{=BEAUTIFY(response._vars)}}{{pass}} ADDED applications/mobileblur/views/generic.pdf Index: applications/mobileblur/views/generic.pdf ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.pdf @@ -0,0 +1,11 @@ +{{ +import os +from gluon.contrib.generics import pdf_from_html +filename = '%s/%s.html' % (request.controller,request.function) +if os.path.exists(os.path.join(request.folder,'views',filename)): + html=response.render(filename) +else: + html=BODY(BEAUTIFY(response._vars)).xml() +pass +=pdf_from_html(html) +}} ADDED applications/mobileblur/views/generic.rss Index: applications/mobileblur/views/generic.rss ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.rss @@ -0,0 +1,10 @@ +{{ +### +# response._vars contains the dictionary returned by the controller action +# for this to work the action must return something like +# +# dict(title=...,link=...,description=...,created_on='...',items=...) +# +# items is a list of dictionaries each with title, link, description, pub_date. 
+### +from gluon.serializers import rss}}{{=XML(rss(response._vars))}} ADDED applications/mobileblur/views/generic.xml Index: applications/mobileblur/views/generic.xml ================================================================== --- /dev/null +++ applications/mobileblur/views/generic.xml @@ -0,0 +1,1 @@ +{{from gluon.serializers import xml}}{{=XML(xml(response._vars))}} ADDED applications/mobileblur/views/layout.html Index: applications/mobileblur/views/layout.html ================================================================== --- /dev/null +++ applications/mobileblur/views/layout.html @@ -0,0 +1,160 @@ + + + + + + + + + + + {{=response.title or request.application}} + + + + + + + + + + + + + + + + + + + + + {{#------ require CSS and JS files for this page (read info in base.css) ------}} + {{response.files.append(URL('static','css/base.css'))}} + {{response.files.append(URL('static','css/superfish.css'))}} + {{response.files.append(URL('static','js/superfish.js'))}} + {{#------ include web2py specific js code (jquery, calendar, form stuff) ------}} + {{include 'web2py_ajax.html'}} + + {{ + #using sidebars need to know what sidebar you want to use + #prior of using it, because of static width size of content, you can use + #left_sidebar, right_sidebar, both or none (False left and right) + left_sidebar_enabled = globals().get('left_sidebar_enabled',False) + right_sidebar_enabled = globals().get('right_sidebar_enabled',False) + if left_sidebar_enabled and right_sidebar_enabled: width_content='63%' + elif left_sidebar_enabled != right_sidebar_enabled: width_content='740px' + else: width_content='100%' + if left_sidebar_enabled: left_sidebar_style = 'style="display: block;"' + else: left_sidebar_style = 'style="display: none;"' + if right_sidebar_enabled: right_sidebar_style = 'style="display: block;"' + else: right_sidebar_style = 'style="display: none;"' + style_content = 'style="width: %s"' % width_content + }} + + + + + + + + + +
    {{=response.flash or ''}}
    + +
    + +
    + + + +
    + {{block statusbar}} + {{#------ superfish menu ------}} + {{=MENU(response.menu,_class='sf-menu')}} + +
    + {{end}} +
    + +
    + + {{if left_sidebar_enabled:}} + + {{pass}} + + Log out + +
    + {{include}} +
    + + + {{if right_sidebar_enabled:}} + + {{pass}} + + +
    + +
    + + +
    +
    + + + + + + + ADDED applications/mobileblur/views/stories/view.html Index: applications/mobileblur/views/stories/view.html ================================================================== --- /dev/null +++ applications/mobileblur/views/stories/view.html @@ -0,0 +1,10 @@ +{{left_sidebar_enabled=right_sidebar_enabled=False}} +{{extend 'layout.html'}} + +

    {{= story["story_title"] }}

    +Mark story as read + +{{= XML(story["story_content"]) }} + +{{block left_sidebar}}New Left Sidebar Content{{end}} +{{block right_sidebar}}New Right Sidebar Content{{end}} ADDED applications/mobileblur/views/web2py_ajax.html Index: applications/mobileblur/views/web2py_ajax.html ================================================================== --- /dev/null +++ applications/mobileblur/views/web2py_ajax.html @@ -0,0 +1,25 @@ +{{ +response.files.insert(0,URL('static','js/jquery.js')) +response.files.insert(1,URL('static','css/calendar.css')) +response.files.insert(2,URL('static','js/calendar.js')) +for _item in response.meta or []:}} + {{ +pass +for _k,_file in enumerate(response.files or []): + if _file in response.files[:_k]: + continue + _file0=_file.lower().split('?')[0] + if _file0.endswith('.css'):}} + {{ + elif _file0.endswith('.js'):}} + {{ + pass +pass +}} + + ADDED cgihandler.py Index: cgihandler.py ================================================================== --- /dev/null +++ cgihandler.py @@ -0,0 +1,21 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import os +import sys +import wsgiref.handlers + +path = os.path.dirname(os.path.abspath(__file__)) +os.chdir(path) +sys.path = [path]+[p for p in sys.path if not p==path] + +import gluon.main + +wsgiref.handlers.CGIHandler().run(gluon.main.wsgibase) + ADDED epydoc.conf Index: epydoc.conf ================================================================== --- /dev/null +++ epydoc.conf @@ -0,0 +1,150 @@ +[epydoc] # Epydoc section marker (required by ConfigParser) + +# The list of objects to document. Objects can be named using +# dotted names, module filenames, or package directory names. +# Aliases for this option include "objects" and "values". +modules: gluon/*.py + +# The type of output that should be generated. 
Should be one +# of: html, text, latex, dvi, ps, pdf. +#output: latex +output: html + +# The path to the output directory. May be relative or absolute. +target: applications/examples/static/epydoc +#target: docs + +# An integer indicating how verbose epydoc should be. The default +# value is 0; negative values will suppress warnings and errors; +# positive values will give more verbose output. +verbosity: 0 + +# A boolean value indicating that Epydoc should show a traceback +# in case of unexpected error. By default don't show tracebacks +debug: 0 + +# If True, don't try to use colors or cursor control when doing +# textual output. The default False assumes a rich text prompt +simple-term: 0 + +### Generation options + +# The default markup language for docstrings, for modules that do +# not define __docformat__. Defaults to epytext. +docformat: epytext + +# Whether or not parsing should be used to examine objects. +parse: yes + +# Whether or not introspection should be used to examine objects. +introspect: yes + +# Don't examine in any way the modules whose dotted name match this +# regular expression pattern. +#exclude: + +# Don't perform introspection on the modules whose dotted name match this +# regular expression pattern. +#exclude-introspect: + +# Don't perform parsing on the modules whose dotted name match this +# regular expression pattern. +#exclude-parse: + +# The format for showing inheritance objects. +# It should be one of: 'grouped', 'listed', 'included'. +inheritance: listed + +# Whether or not to include private variables. (Even if included, +# private variables will be hidden by default.) +private: yes + +# Whether or not to list each module's imports. +#imports: no + +# Whether or not to include syntax highlighted source code in +# the output (HTML only). +sourcecode: yes + +# Whether or not to include a page with Epydoc log, containing +# effective option at the time of generation and the reported logs. 
+include-log: no + +### Output options + +# The documented project's name. +name: web2py Web Framework + +# The CSS stylesheet for HTML output. Can be the name of a built-in +# stylesheet, or the name of a file. +css: epydoc.css + +# The documented project's URL. +url: http://www.web2py.com + +# HTML code for the project link in the navigation bar. If left +# unspecified, the project link will be generated based on the +# project's name and URL. +# link: web2py + +# The "top" page for the documentation. Can be a URL, the name +# of a module or class, or one of the special names "trees.html", +# "indices.html", or "help.html" +#top: os.path + +# An alternative help file. The named file should contain the +# body of an HTML file; navigation bars will be added to it. +#help: my_helpfile.html + +# Whether or not to include a frames-based table of contents. +frames: yes + +# Whether each class should be listed in its own section when +# generating LaTeX or PDF output. +separate-classes: no + + +### API linking options + +# Define a new API document. A new interpreted text role +# will be created +#external-api: epydoc + +# Use the records in this file to resolve objects in the API named NAME. +#external-api-file: epydoc:api-objects.txt + +# Use this URL prefix to configure the string returned for external API. +#external-api-root: epydoc:http://epydoc.sourceforge.net/api + + +### Graph options + +# The list of graph types that should be automatically included +# in the output. Graphs are generated using the Graphviz "dot" +# executable. Graph types include: "classtree", "callgraph", +# "umlclasstree". Use "all" to include all graph types +# graph: umlclasstree +# graph: + +# The path to the Graphviz "dot" executable, used to generate +# graphs. +#dotpath: C:/home/graphviz/bin/dot.exe +#dotpath: /Applications/Graphviz.app/Contents/MacOS/dot + +# The name of one or more pstat files (generated by the profile +# or hotshot module). These are used to generate call graphs. 
+#pstat: profile.out + +# Specify the font used to generate Graphviz graphs. +# (e.g., helvetica or times). +# graph-font: Bitstream Vera Sans +graph-font: Helvetica + +# Specify the font size used to generate Graphviz graphs. +graph-font-size: 10 + +### Return value options + +# The condition upon which Epydoc should exit with a non-zero +# exit status. Possible values are error, warning, docstring_warning +#fail-on: error ADDED epydoc.css Index: epydoc.css ================================================================== --- /dev/null +++ epydoc.css @@ -0,0 +1,410 @@ + + +/* Epydoc CSS Stylesheet + * + * This stylesheet can be used to customize the appearance of epydoc's + * HTML output. + * + */ + +/* Default Colors & Styles + * - Set the default foreground & background color with 'body'; and + * link colors with 'a:link' and 'a:visited'. + * - Use bold for decision list terms. + * - The heading styles defined here are used for headings *within* + * docstring descriptions. All headings used by epydoc itself use + * either class='epydoc' or class='toc' (CSS styles for both + * defined below). 
+body { background: #ffffff; color: #000000; } +a:link { color: #0000ff; } +a:visited { color: #204080; } +dt { font-weight: bold; } +h1 { font-size: +140%; font-style: italic; + font-weight: bold; } +h2 { font-size: +125%; font-style: italic; + font-weight: bold; } +h3 { font-size: +110%; font-style: italic; + font-weight: normal; } +code { font-size: 100%; } + */ + +body { background-color: #fff; color: #585858; font-size: 10pt; font-family: georgia, serif; } +a {color: #FF5C1F; } +a:hover { text-decoration: underline; } +a:visited { color: #FF5C1F;} +dt { font-weight: bold; } +h1 { font-size: +140%; font-style: italic; + font-weight: bold; } +h2 { color: #185360; font-size: +125%; font-style: italic; + font-weight: bold; } +h3 { color: #185360; font-size: +110%; font-style: italic; + font-weight: normal; } +code { font-size: 100%; } + +/* Page Header & Footer + * - The standard page header consists of a navigation bar (with + * pointers to standard pages such as 'home' and 'trees'); a + * breadcrumbs list, which can be used to navigate to containing + * classes or modules; options links, to show/hide private + * variables and to show/hide frames; and a page title (using + *

    ). The page title may be followed by a link to the + * corresponding source code (using 'span.codelink'). + * - The footer consists of a navigation bar, a timestamp, and a + * pointer to epydoc's homepage. + +h1.epydoc { margin: 0; font-size: +140%; font-weight: bold; } +h2.epydoc { font-size: +130%; font-weight: bold; } +h3.epydoc { font-size: +115%; font-weight: bold; } +td h3.epydoc { font-size: +115%; font-weight: bold; + margin-bottom: 0; } +table.navbar { background: #a0c0ff; color: #000000; + border: 2px groove #c0d0d0; } +table.navbar table { color: #000000; } +th.navbar-select { background: #70b0ff; + color: #000000; } +table.navbar a { text-decoration: none; } +table.navbar a:link { color: #0000ff; } +table.navbar a:visited { color: #204080; } +span.breadcrumbs { font-size: 85%; font-weight: bold; } +span.options { font-size: 70%; } +span.codelink { font-size: 85%; } +td.footer { font-size: 85%; } +*/ + +h1.epydoc { margin: 0; font-size: +140%; font-weight: bold; } +h2.epydoc { font-size: +130%; font-weight: bold; } +h3.epydoc { font-size: +115%; font-weight: bold; } +td h3.epydoc { font-size: +115%; font-weight: bold; + margin-bottom: 0; } +table.navbar { background: url('title.png') repeat-x; + #background: #a0c0ff; + color: #FF5C1F; + #border: 2px groove #c0d0d0; } + +table.navbar table { color: #FF5C1F; } +th.navbar-select { background: #fff; + color: #195866; } + +table.navbar a { text-decoration: none; + color: #FF5C1F;} +table.navbar a:link { color: #FF5C1F; } +table.navbar a:visited { color: #FF5C1F; } + +span.breadcrumbs { font-size: 85%; font-weight: bold; } +span.options { font-size: 70%; } +span.codelink { font-size: 85%; } +td.footer { font-size: 85%; } + + +/* Table Headers + * - Each summary table and details section begins with a 'header' + * row. This row contains a section title (marked by + * 'span.table-header') as well as a show/hide private link + * (marked by 'span.options', defined above). 
+ * - Summary tables that contain user-defined groups mark those + * groups using 'group header' rows. + +td.table-header { background: #70b0ff; color: #000000; + border: 1px solid #608090; } +td.table-header table { color: #000000; } +td.table-header table a:link { color: #0000ff; } +td.table-header table a:visited { color: #204080; } +span.table-header { font-size: 120%; font-weight: bold; } +th.group-header { background: #c0e0f8; color: #000000; + text-align: left; font-style: italic; + font-size: 115%; + border: 1px solid #608090; } +*/ +td.table-header { background: #258396; color: #000000; + border: 1px solid #608090; } + +td.table-header table { color: #fff; } +td.table-header table a:link { color: #FF5C1F; } +td.table-header table a:visited { color: #FF5C1F; } +span.table-header { font-size: 120%; font-weight: bold; } +th.group-header { background: #185360; color: #fff; + text-align: left; font-style: italic; + font-size: 115%; + border: 1px solid #608090; } + +/* Summary Tables (functions, variables, etc) + * - Each object is described by a single row of the table with + * two cells. The left cell gives the object's type, and is + * marked with 'code.summary-type'. The right cell gives the + * object's name and a summary description. + * - CSS styles for the table's header and group headers are + * defined above, under 'Table Headers' + */ +table.summary { border-collapse: collapse; + background: #e8f0f8; color: #000000; + border: 1px solid #608090; + margin-bottom: 0.5em; } +td.summary { border: 1px solid #608090; } +code.summary-type { font-size: 85%; } +table.summary a:link { color: #FF5C1F; } +table.summary a:visited { color: #FF5C1F; } + + +/* Details Tables (functions, variables, etc) + * - Each object is described in its own div. + * - A single-row summary table w/ table-header is used as + * a header for each details section (CSS style for table-header + * is defined above, under 'Table Headers'). 
+ */ +table.details { border-collapse: collapse; + background: #e8f0f8; color: #585858; + border: 1px solid #608090; + margin: .2em 0 0 0; } +table.details table { color: #fff; } +table.details a:link { color: #FF5C1F; } +table.details a:visited { color: #FF5C1F; } + +/* Fields */ +dl.fields { margin-left: 2em; margin-top: 1em; + margin-bottom: 1em; } +dl.fields dd ul { margin-left: 0em; padding-left: 0em; } +div.fields { margin-left: 2em; } +div.fields p { margin-bottom: 0.5em; } + +/* Index tables (identifier index, term index, etc) + * - link-index is used for indices containing lists of links + * (namely, the identifier index & term index). + * - index-where is used in link indices for the text indicating + * the container/source for each link. + * - metadata-index is used for indices containing metadata + * extracted from fields (namely, the bug index & todo index). + */ +table.link-index { border-collapse: collapse; + background: #e8f0f8; color: #000000; + border: 1px solid #608090; } +td.link-index { border-width: 0px; } +table.link-index a:link { color: #FF5C1F; } +table.link-index a:visited { color: #FF5C1F; } +span.index-where { font-size: 70%; } +table.metadata-index { border-collapse: collapse; + background: #e8f0f8; color: #000000; + border: 1px solid #608090; + margin: .2em 0 0 0; } +td.metadata-index { border-width: 1px; border-style: solid; } +table.metadata-index a:link { color: #FF5C1F; } +table.metadata-index a:visited { color: #FF5C1F; } + +/* Function signatures + * - sig* is used for the signature in the details section. + * - .summary-sig* is used for the signature in the summary + * table, and when listing property accessor functions. 
+.sig-name { color: #006080; } +.sig-arg { color: #008060; } +.sig-default { color: #602000; } +.summary-sig { font-family: monospace; } +.summary-sig-name { color: #006080; font-weight: bold; } +table.summary a.summary-sig-name:link + { color: #006080; font-weight: bold; } +table.summary a.summary-sig-name:visited + { color: #006080; font-weight: bold; } +.summary-sig-arg { color: #006040; } +.summary-sig-default { color: #501800; } + * */ +.sig-name { color: #FF5C1F; } +.sig-arg { color: #008060; } +.sig-default { color: #602000; } +.summary-sig { font-family: monospace; } +.summary-sig-name { color: #FF5C1F; font-weight: bold; } +table.summary a.summary-sig-name:link + { color: #FF5C1F; font-weight: bold; } +table.summary a.summary-sig-name:visited + { color: #FF5C1F; font-weight: bold; } +.summary-sig-arg { color: #006040; } +.summary-sig-default { color: #FF5C1F; } + + +/* To render variables, classes etc. like functions */ +table.summary .summary-name { color: #FF5C1F; font-weight: bold; + font-family: monospace; } +table.summary + a.summary-name:link { color: #FF5C1F; font-weight: bold; + font-family: monospace; } +table.summary + a.summary-name:visited { color: #FF5C1F; font-weight: bold; + font-family: monospace; } + +/* Variable values + * - In the 'variable details' sections, each variable's value is + * listed in a 'pre.variable' box. The width of this box is + * restricted to 80 chars; if the value's repr is longer than + * this it will be wrapped, using a backslash marked with + * class 'variable-linewrap'. If the value's repr is longer + * than 3 lines, the rest will be elided; and an ellipsis + * marker ('...' marked with 'variable-ellipsis') will be used. + * - If the value is a string, its quote marks will be marked + * with 'variable-quote'. + * - If the variable is a regexp, it is syntax-highlighted using + * the re* CSS classes. 
+ */ +pre.variable { padding: .5em; margin: 0; + background: #dce4ec; color: #000000; + border: 1px solid #708890; } +.variable-linewrap { color: #604000; font-weight: bold; } +.variable-ellipsis { color: #604000; font-weight: bold; } +.variable-quote { color: #604000; font-weight: bold; } +.variable-group { color: #008000; font-weight: bold; } +.variable-op { color: #604000; font-weight: bold; } +.variable-string { color: #006030; } +.variable-unknown { color: #a00000; font-weight: bold; } +.re { color: #000000; } +.re-char { color: #006030; } +.re-op { color: #600000; } +.re-group { color: #003060; } +.re-ref { color: #404040; } + +/* Base tree + * - Used by class pages to display the base class hierarchy. + */ +pre.base-tree { font-size: 80%; margin: 0; } + +/* Frames-based table of contents headers + * - Consists of two frames: one for selecting modules; and + * the other listing the contents of the selected module. + * - h1.toc is used for each frame's heading + * - h2.toc is used for subheadings within each frame. + */ +h1.toc { text-align: center; font-size: 105%; + margin: 0; font-weight: bold; + padding: 0; } +h2.toc { font-size: 100%; font-weight: bold; + margin: 0.5em 0 0 -0.3em; } + +/* Syntax Highlighting for Source Code + * - doctest examples are displayed in a 'pre.py-doctest' block. + * If the example is in a details table entry, then it will use + * the colors specified by the 'table pre.py-doctest' line. + * - Source code listings are displayed in a 'pre.py-src' block. + * Each line is marked with 'span.py-line' (used to draw a line + * down the left margin, separating the code from the line + * numbers). Line numbers are displayed with 'span.py-lineno'. + * The expand/collapse block toggle button is displayed with + * 'a.py-toggle' (Note: the CSS style for 'a.py-toggle' should not + * modify the font size of the text.) + * - If a source code page is opened with an anchor, then the + * corresponding code block will be highlighted. 
The code + * block's header is highlighted with 'py-highlight-hdr'; and + * the code block's body is highlighted with 'py-highlight'. + * - The remaining py-* classes are used to perform syntax + * highlighting (py-string for string literals, py-name for names, + * etc.) + +pre.py-doctest { padding: .5em; margin: 1em; + background: #e8f0f8; color: #000000; + border: 1px solid #708890; } +table pre.py-doctest { background: #dce4ec; + color: #000000; } +pre.py-src { border: 2px solid #000000; + background: #f0f0f0; color: #000000; } +.py-line { border-left: 2px solid #000000; + margin-left: .2em; padding-left: .4em; } +.py-lineno { font-style: italic; font-size: 90%; + padding-left: .5em; } +a.py-toggle { text-decoration: none; } +div.py-highlight-hdr { border-top: 2px solid #000000; + border-bottom: 2px solid #000000; + background: #d8e8e8; } +div.py-highlight { border-bottom: 2px solid #000000; + background: #d0e0e0; } +.py-prompt { color: #005050; font-weight: bold;} +.py-more { color: #005050; font-weight: bold;} +.py-string { color: #006030; } +.py-comment { color: #003060; } +.py-keyword { color: #600000; } +.py-output { color: #404040; } +.py-name { color: #000050; } +.py-name:link { color: #000050 !important; } +.py-name:visited { color: #000050 !important; } +.py-number { color: #005000; } +.py-defname { color: #000060; font-weight: bold; } +.py-def-name { color: #000060; font-weight: bold; } +.py-base-class { color: #000060; } +.py-param { color: #000060; } +.py-docstring { color: #006030; } +.py-decorator { color: #804020; } + */ +/* Use this if you don't want links to names underlined: */ +/*a.py-name { text-decoration: none; }*/ + +pre.py-doctest { padding: .5em; margin: 1em; + background: #e8f0f8; color: #000000; + border: 1px solid #708890; } +table pre.py-doctest { background: #dce4ec; + color: #000000; } +pre.py-src { border: 2px solid #000000; + background: #f0f0f0; color: #000000; } +.py-line { border-left: 2px solid #000000; + margin-left: .2em; 
padding-left: .4em; } +.py-lineno { font-style: italic; font-size: 90%; + padding-left: .5em; } +a.py-toggle { text-decoration: none; } +div.py-highlight-hdr { border-top: 2px solid #000000; + border-bottom: 2px solid #000000; + background: #d8e8e8; } +div.py-highlight { border-bottom: 2px solid #000000; + background: #d0e0e0; } +.py-prompt { color: #005050; font-weight: bold;} +.py-more { color: #005050; font-weight: bold;} +.py-string { color: green; } +.py-comment { color: green; } +.py-keyword { color: blue; } +.py-output { color: #404040; } +.py-name { color: #585858;} +.py-name:link { color: #FF5C1F !important; } +.py-name:visited { color: #FF5C1F !important; } +.py-number { color: #005000; } +.py-defname { color: #FF5C1F; font-weight: bold; } +.py-def-name { color: #FF5C1F; font-weight: bold; } +.py-base-class { color: #FF5C1F; } +.py-param { color: #000060; } +.py-docstring { color: green; } +.py-decorator { color: #804020; } + +/* Graphs & Diagrams + * - These CSS styles are used for graphs & diagrams generated using + * Graphviz dot. 'img.graph-without-title' is used for bare + * diagrams (to remove the border created by making the image + * clickable). + */ +img.graph-without-title { border: none; } +img.graph-with-title { border: 1px solid #000000; } +span.graph-title { font-weight: bold; } +span.graph-caption { } + +/* General-purpose classes + * - 'p.indent-wrapped-lines' defines a paragraph whose first line + * is not indented, but whose subsequent lines are. + * - The 'nomargin-top' class is used to remove the top margin (e.g. + * from lists). The 'nomargin' class is used to remove both the + * top and bottom margin (but not the left or right margin -- + * for lists, that would cause the bullets to disappear.) 
+ */ +p.indent-wrapped-lines { padding: 0 0 0 7em; text-indent: -7em; + margin: 0; } +.nomargin-top { margin-top: 0; } +.nomargin { margin-top: 0; margin-bottom: 0; } + +/* HTML Log */ +div.log-block { padding: 0; margin: .5em 0 .5em 0; + background: #e8f0f8; color: #000000; + border: 1px solid #000000; } +div.log-error { padding: .1em .3em .1em .3em; margin: 4px; + background: #ffb0b0; color: #000000; + border: 1px solid #000000; } +div.log-warning { padding: .1em .3em .1em .3em; margin: 4px; + background: #ffffb0; color: #000000; + border: 1px solid #000000; } +div.log-info { padding: .1em .3em .1em .3em; margin: 4px; + background: #b0ffb0; color: #000000; + border: 1px solid #000000; } +h2.log-hdr { background: #70b0ff; color: #000000; + margin: 0; padding: 0em 0.5em 0em 0.5em; + border-bottom: 1px solid #000000; font-size: 110%; } +p.log { font-weight: bold; margin: .5em 0 .5em 0; } +tr.opt-changed { color: #000000; font-weight: bold; } +tr.opt-default { color: #606060; } +pre.log { margin: 0; padding: 0; padding-left: 1em; } ADDED fcgihandler.py Index: fcgihandler.py ================================================================== --- /dev/null +++ fcgihandler.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +This is a handler for lighttpd+fastcgi +This file has to be in the PYTHONPATH +Put something like this in the lighttpd.conf file: + +server.port = 8000 +server.bind = '127.0.0.1' +server.event-handler = 'freebsd-kqueue' +server.modules = ('mod_rewrite', 'mod_fastcgi') +server.error-handler-404 = '/test.fcgi' +server.document-root = '/somewhere/web2py' +server.errorlog = '/tmp/error.log' +fastcgi.server = ('.fcgi' => + ('localhost' => + ('min-procs' => 1, + 'socket' => '/tmp/fcgi.sock' + ) + ) + ) +""" + +LOGGING = False +SOFTCRON = False + +import sys +import os + +path = 
os.path.dirname(os.path.abspath(__file__)) +os.chdir(path) +sys.path = [path]+[p for p in sys.path if not p==path] + +import gluon.main +import gluon.contrib.gateways.fcgi as fcgi + +if LOGGING: + application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, + logfilename='httpserver.log', + profilerfilename=None) +else: + application = gluon.main.wsgibase + +if SOFTCRON: + from gluon.settings import global_settings + global_settings.web2py_crontype = 'soft' + +fcgi.WSGIServer(application, bindAddress='/tmp/fcgi.sock').run() + ADDED gaehandler.py Index: gaehandler.py ================================================================== --- /dev/null +++ gaehandler.py @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +############################################################################## +# Configuration parameters for Google App Engine +############################################################################## +KEEP_CACHED = False # request a dummy url every 10secs to force caching app +LOG_STATS = False # web2py level log statistics +APPSTATS = True # GAE level usage statistics and profiling +DEBUG = False # debug mode +AUTO_RETRY = True # force gae to retry commit on failure +# +# Read more about APPSTATS here +# http://googleappengine.blogspot.com/2010/03/easy-performance-profiling-with.html +# can be accessed from: +# http://localhost:8080/_ah/stats +############################################################################## +# All tricks in this file developed by Robin Bhattacharyya +############################################################################## + + +import time +import os +import sys +import logging +import cPickle +import pickle +import wsgiref.handlers +import datetime + +path = os.path.dirname(os.path.abspath(__file__)) +sys.path = [path]+[p for p in sys.path 
if not p==path] + +sys.modules['cPickle'] = sys.modules['pickle'] + + +from gluon.settings import global_settings +from google.appengine.api.labs import taskqueue +from google.appengine.ext import webapp +from google.appengine.ext.webapp.util import run_wsgi_app + + +global_settings.web2py_runtime_gae = True +global_settings.db_sessions = True +if os.environ.get('SERVER_SOFTWARE', '').startswith('Devel'): + (global_settings.web2py_runtime, DEBUG) = \ + ('gae:development', True) +else: + (global_settings.web2py_runtime, DEBUG) = \ + ('gae:production', False) + + +import gluon.main + + +def log_stats(fun): + """Function that will act as a decorator to make logging""" + def newfun(env, res): + """Log the execution time of the passed function""" + timer = lambda t: (t.time(), t.clock()) + (t0, c0) = timer(time) + executed_function = fun(env, res) + (t1, c1) = timer(time) + log_info = """**** Request: %.2fms/%.2fms (real time/cpu time)""" + log_info = log_info % ((t1 - t0) * 1000, (c1 - c0) * 1000) + logging.info(log_info) + return executed_function + return newfun + + +logging.basicConfig(level=logging.INFO) + + +def wsgiapp(env, res): + """Return the wsgiapp""" + if env['PATH_INFO'] == '/_ah/queue/default': + if KEEP_CACHED: + delta = datetime.timedelta(seconds=10) + taskqueue.add(eta=datetime.datetime.now() + delta) + res('200 OK',[('Content-Type','text/plain')]) + return [''] + env['PATH_INFO'] = env['PATH_INFO'].encode('utf8') + + #this deals with a problem where GAE development server seems to forget + # the path between requests + if global_settings.web2py_runtime == 'gae:development': + gluon.admin.create_missing_folders() + + return gluon.main.wsgibase(env, res) + + +if LOG_STATS or DEBUG: + wsgiapp = log_stats(wsgiapp) + + +if AUTO_RETRY: + from gluon.contrib.gae_retry import autoretry_datastore_timeouts + autoretry_datastore_timeouts() + + +def main(): + """Run the wsgi app""" + if APPSTATS: + run_wsgi_app(wsgiapp) + else: + 
wsgiref.handlers.CGIHandler().run(wsgiapp) + +if __name__ == '__main__': + main() + ADDED gluon/__init__.py Index: gluon/__init__.py ================================================================== --- /dev/null +++ gluon/__init__.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + + +Web2Py framework modules +======================== +""" + +__all__ = ['A', 'B', 'BEAUTIFY', 'BODY', 'BR', 'CAT', 'CENTER', 'CLEANUP', 'CODE', 'CRYPT', 'DAL', 'DIV', 'EM', 'EMBED', 'FIELDSET', 'FORM', 'Field', 'H1', 'H2', 'H3', 'H4', 'H5', 'H6', 'HEAD', 'HR', 'HTML', 'HTTP', 'I', 'IFRAME', 'IMG', 'INPUT', 'IS_ALPHANUMERIC', 'IS_DATE', 'IS_DATETIME', 'IS_DATETIME_IN_RANGE', 'IS_DATE_IN_RANGE', 'IS_DECIMAL_IN_RANGE', 'IS_EMAIL', 'IS_EMPTY_OR', 'IS_EQUAL_TO', 'IS_EXPR', 'IS_FLOAT_IN_RANGE', 'IS_IMAGE', 'IS_INT_IN_RANGE', 'IS_IN_DB', 'IS_IN_SET', 'IS_IPV4', 'IS_LENGTH', 'IS_LIST_OF', 'IS_LOWER', 'IS_MATCH', 'IS_NOT_EMPTY', 'IS_NOT_IN_DB', 'IS_NULL_OR', 'IS_SLUG', 'IS_STRONG', 'IS_TIME', 'IS_UPLOAD_FILENAME', 'IS_UPPER', 'IS_URL', 'LABEL', 'LEGEND', 'LI', 'LINK', 'MARKMIN', 'MENU', 'META', 'OBJECT', 'OL', 'ON', 'OPTGROUP', 'OPTION', 'P', 'PRE', 'SCRIPT', 'SELECT', 'SPAN', 'SQLFORM', 'SQLTABLE', 'STYLE', 'TABLE', 'TAG', 'TBODY', 'TD', 'TEXTAREA', 'TFOOT', 'TH', 'THEAD', 'TITLE', 'TR', 'TT', 'UL', 'URL', 'XHTML', 'XML','redirect','current','embed64'] + +from globals import current +from html import * +from validators import * +from http import redirect, HTTP +from dal import DAL, Field +from sqlhtml import SQLFORM, SQLTABLE + + + + + ADDED gluon/__init__.pyc Index: gluon/__init__.pyc ================================================================== --- /dev/null +++ gluon/__init__.pyc cannot compute difference between binary files ADDED gluon/admin.py Index: gluon/admin.py 
================================================================== --- /dev/null +++ gluon/admin.py @@ -0,0 +1,455 @@ +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Utility functions for the Admin application +=========================================== +""" +import os +import sys +import traceback +import zipfile +import urllib +from shutil import rmtree +from utils import web2py_uuid +from fileutils import w2p_pack, w2p_unpack, w2p_pack_plugin, w2p_unpack_plugin +from fileutils import up, fix_newlines, abspath, recursive_unlink +from fileutils import read_file, write_file, parse_version +from restricted import RestrictedError +from settings import global_settings + +def apath(path='', r=None): + """ + Builds a path inside an application folder + + Parameters + ---------- + path: + path within the application folder + r: + the global request object + + """ + + opath = up(r.folder) + while path[:3] == '../': + (opath, path) = (up(opath), path[3:]) + return os.path.join(opath, path).replace('\\', '/') + + +def app_pack(app, request): + """ + Builds a w2p package for the application + + Parameters + ---------- + app: + application name + request: + the global request object + + Returns + ------- + filename: + filename of the w2p file or None on error + """ + try: + app_cleanup(app, request) + filename = apath('../deposit/%s.w2p' % app, request) + w2p_pack(filename, apath(app, request)) + return filename + except Exception: + return False + + +def app_pack_compiled(app, request): + """ + Builds a w2p bytecode-compiled package for the application + + Parameters + ---------- + app: + application name + request: + the global request object + + Returns + ------- + filename: + filename of the w2p file or None on error + """ + + try: + filename = apath('../deposit/%s.w2p' % app, request) + w2p_pack(filename, apath(app, request), compiled=True) + return filename + except 
Exception: + return None + +def app_cleanup(app, request): + """ + Removes session, cache and error files + + Parameters + ---------- + app: + application name + request: + the global request object + """ + r = True + + # Remove error files + path = apath('%s/errors/' % app, request) + if os.path.exists(path): + for f in os.listdir(path): + try: + if f[:1]!='.': os.unlink(os.path.join(path,f)) + except IOError: + r = False + + # Remove session files + path = apath('%s/sessions/' % app, request) + if os.path.exists(path): + for f in os.listdir(path): + try: + if f[:1]!='.': recursive_unlink(os.path.join(path,f)) + except IOError: + r = False + + # Remove cache files + path = apath('%s/sessions/' % app, request) + if os.path.exists(path): + for f in os.listdir(path): + try: + if f[:1]!='.': os.unlink(os.path.join(path,f)) + except IOError: + r = False + return r + + +def app_compile(app, request): + """ + Compiles the application + + Parameters + ---------- + app: + application name + request: + the global request object + """ + from compileapp import compile_application, remove_compiled_application + folder = apath(app, request) + try: + compile_application(folder) + return None + except (Exception, RestrictedError): + tb = traceback.format_exc(sys.exc_info) + remove_compiled_application(folder) + return tb + +def app_create(app, request,force=False,key=None): + """ + Create a copy of welcome.w2p (scaffolding) app + + Parameters + ---------- + app: + application name + request: + the global request object + + """ + try: + path = apath(app, request) + os.mkdir(path) + except: + if not force: + return False + try: + w2p_unpack('welcome.w2p', path) + for subfolder in ['models','views','controllers', 'databases', + 'modules','cron','errors','sessions', + 'languages','static','private','uploads']: + subpath = os.path.join(path,subfolder) + if not os.path.exists(subpath): + os.mkdir(subpath) + db = os.path.join(path, 'models', 'db.py') + if os.path.exists(db): + data = 
read_file(db) + data = data.replace('', + 'sha512:'+(key or web2py_uuid())) + write_file(db, data) + return True + except: + rmtree(path) + return False + + +def app_install(app, fobj, request, filename, overwrite=None): + """ + Installs an application: + + - Identifies file type by filename + - Writes `fobj` contents to the `../deposit/` folder + - Calls `w2p_unpack()` to do the job. + + Parameters + ---------- + app: + new application name + fobj: + file object containing the application to be installed + request: + the global request object + filename: + original filename of the `fobj`, required to determine extension + + Returns + ------- + upname: + name of the file where app is temporarily stored or `None` on failure + """ + did_mkdir = False + if filename[-4:] == '.w2p': + extension = 'w2p' + elif filename[-7:] == '.tar.gz': + extension = 'tar.gz' + else: + extension = 'tar' + upname = apath('../deposit/%s.%s' % (app, extension), request) + + try: + write_file(upname, fobj.read(), 'wb') + path = apath(app, request) + if not overwrite: + os.mkdir(path) + did_mkdir = True + w2p_unpack(upname, path) + if extension != 'tar': + os.unlink(upname) + fix_newlines(path) + return upname + except Exception: + if did_mkdir: + rmtree(path) + return False + + +def app_uninstall(app, request): + """ + Uninstalls the application. + + Parameters + ---------- + app: + application name + request: + the global request object + + Returns + ------- + `True` on success, `False` on failure + """ + try: + # Hey App, this is your end... 
+ path = apath(app, request) + rmtree(path) + return True + except Exception: + return False + +def plugin_pack(app, plugin_name, request): + """ + Builds a w2p package for the application + + Parameters + ---------- + app: + application name + plugin_name: + the name of the plugin without plugin_ prefix + request: + the current request app + + Returns + ------- + filename: + filename of the w2p file or None on error + """ + try: + filename = apath('../deposit/web2py.plugin.%s.w2p' % plugin_name, request) + w2p_pack_plugin(filename, apath(app, request), plugin_name) + return filename + except Exception: + return False + +def plugin_install(app, fobj, request, filename): + """ + Installs an application: + + - Identifies file type by filename + - Writes `fobj` contents to the `../deposit/` folder + - Calls `w2p_unpack()` to do the job. + + Parameters + ---------- + app: + new application name + fobj: + file object containing the application to be installed + request: + the global request object + filename: + original filename of the `fobj`, required to determine extension + + Returns + ------- + upname: + name of the file where app is temporarily stored or `None` on failure + """ + + upname = apath('../deposit/%s' % filename, request) + + try: + write_file(upname, fobj.read(), 'wb') + path = apath(app, request) + w2p_unpack_plugin(upname, path) + fix_newlines(path) + return upname + except Exception: + os.unlink(upname) + return False + +def check_new_version(myversion, version_URL): + """ + Compares current web2py's version with the latest stable web2py version. 
+ + Parameters + ---------- + myversion: + the current version as stored in file `web2py/VERSION` + version_URL: + the URL that contains the version of the latest stable release + + Returns + ------- + state: + `True` if upgrade available, `False` if current version if up-to-date, + -1 on error + version: + the most up-to-version available + """ + try: + from urllib import urlopen + version = parse_version(urlopen(version_URL).read()) + except Exception: + return -1, myversion + + if version > myversion: + return True, version + else: + return False, version + +def unzip(filename, dir, subfolder=''): + """ + Unzips filename into dir (.zip only, no .gz etc) + if subfolder!='' it unzip only files in subfolder + """ + filename = abspath(filename) + if not zipfile.is_zipfile(filename): + raise RuntimeError, 'Not a valid zipfile' + zf = zipfile.ZipFile(filename) + if not subfolder.endswith('/'): + subfolder = subfolder + '/' + n = len(subfolder) + for name in sorted(zf.namelist()): + if not name.startswith(subfolder): + continue + #print name[n:] + if name.endswith('/'): + folder = os.path.join(dir,name[n:]) + if not os.path.exists(folder): + os.mkdir(folder) + else: + write_file(os.path.join(dir, name[n:]), zf.read(name), 'wb') + + +def upgrade(request, url='http://web2py.com'): + """ + Upgrades web2py (src, osx, win) is a new version is posted. 
+ It detects whether src, osx or win is running and downloads the right one + + Parameters + ---------- + request: + the current request object, required to determine version and path + url: + the incomplete url where to locate the latest web2py + actual url is url+'/examples/static/web2py_(src|osx|win).zip' + + Returns + ------- + True on success, False on failure (network problem or old version) + """ + web2py_version = request.env.web2py_version + gluon_parent = request.env.gluon_parent + if not gluon_parent.endswith('/'): + gluon_parent = gluon_parent + '/' + (check, version) = check_new_version(web2py_version, + url+'/examples/default/version') + if not check: + return (False, 'Already latest version') + if os.path.exists(os.path.join(gluon_parent, 'web2py.exe')): + version_type = 'win' + destination = gluon_parent + subfolder = 'web2py/' + elif gluon_parent.endswith('/Contents/Resources/'): + version_type = 'osx' + destination = gluon_parent[:-len('/Contents/Resources/')] + subfolder = 'web2py/web2py.app/' + else: + version_type = 'src' + destination = gluon_parent + subfolder = 'web2py/' + + full_url = url + '/examples/static/web2py_%s.zip' % version_type + filename = abspath('web2py_%s_downloaded.zip' % version_type) + file = None + try: + write_file(filename, urllib.urlopen(full_url).read(), 'wb') + except Exception,e: + return False, e + try: + unzip(filename, destination, subfolder) + return True, None + except Exception,e: + return False, e + +def add_path_first(path): + sys.path = [path]+[p for p in sys.path if (not p==path and not p==(path+'/'))] + +def create_missing_folders(): + if not global_settings.web2py_runtime_gae: + for path in ('applications', 'deposit', 'site-packages', 'logs'): + path = abspath(path, gluon=True) + if not os.path.exists(path): + os.mkdir(path) + paths = (global_settings.gluon_parent, abspath('site-packages', gluon=True), abspath('gluon', gluon=True), '') + [add_path_first(path) for path in paths] + +def 
create_missing_app_folders(request): + if not global_settings.web2py_runtime_gae: + if request.folder not in global_settings.app_folders: + for subfolder in ('models', 'views', 'controllers', 'databases', + 'modules', 'cron', 'errors', 'sessions', + 'languages', 'static', 'private', 'uploads'): + path = os.path.join(request.folder, subfolder) + if not os.path.exists(path): + os.mkdir(path) + global_settings.app_folders.add(request.folder) + + + ADDED gluon/admin.pyc Index: gluon/admin.pyc ================================================================== --- /dev/null +++ gluon/admin.pyc cannot compute difference between binary files ADDED gluon/cache.py Index: gluon/cache.py ================================================================== --- /dev/null +++ gluon/cache.py @@ -0,0 +1,432 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Basic caching classes and methods +================================= + +- Cache - The generic caching object interfacing with the others +- CacheInRam - providing caching in ram +- CacheInDisk - provides caches on disk + +Memcache is also available via a different module (see gluon.contrib.memcache) + +When web2py is running on Google App Engine, +caching will be provided by the GAE memcache +(see gluon.contrib.gae_memcache) +""" + +import time +import portalocker +import shelve +import thread +import os +import logging +import re + +logger = logging.getLogger("web2py.cache") + +__all__ = ['Cache'] + + +DEFAULT_TIME_EXPIRE = 300 + + +class CacheAbstract(object): + """ + Abstract class for cache implementations. + Main function is now to provide referenced api documentation. + + Use CacheInRam or CacheOnDisk instead which are derived from this class. 
+ """ + + cache_stats_name = 'web2py_cache_statistics' + + def __init__(self, request=None): + """ + Paremeters + ---------- + request: + the global request object + """ + raise NotImplementedError + + def __call__(self, key, f, + time_expire = DEFAULT_TIME_EXPIRE): + """ + Tries retrieve the value corresponding to `key` from the cache of the + object exists and if it did not expire, else it called the function `f` + and stores the output in the cache corresponding to `key`. In the case + the output of the function is returned. + + :param key: the key of the object to be store or retrieved + :param f: the function, whose output is to be cached + :param time_expire: expiration of the cache in microseconds + + - `time_expire` is used to compare the current time with the time when + the requested object was last saved in cache. It does not affect + future requests. + - Setting `time_expire` to 0 or negative value forces the cache to + refresh. + + If the function `f` is `None` the cache is cleared. + """ + raise NotImplementedError + + def clear(self, regex=None): + """ + Clears the cache of all keys that match the provided regular expression. + If no regular expression is provided, it clears all entries in cache. + + Parameters + ---------- + regex: + if provided, only keys matching the regex will be cleared. + Otherwise all keys are cleared. 
+ """ + + raise NotImplementedError + + def increment(self, key, value=1): + """ + Increments the cached value for the given key by the amount in value + + Parameters + ---------- + key: + key for the cached object to be incremeneted + value: + amount of the increment (defaults to 1, can be negative) + """ + raise NotImplementedError + + def _clear(self, storage, regex): + """ + Auxiliary function called by `clear` to search and clear cache entries + """ + r = re.compile(regex) + for (key, value) in storage.items(): + if r.match(str(key)): + del storage[key] + +class CacheInRam(CacheAbstract): + """ + Ram based caching + + This is implemented as global (per process, shared by all threads) + dictionary. + A mutex-lock mechanism avoid conflicts. + """ + + locker = thread.allocate_lock() + meta_storage = {} + + def __init__(self, request=None): + self.locker.acquire() + self.request = request + if request: + app = request.application + else: + app = '' + if not app in self.meta_storage: + self.storage = self.meta_storage[app] = {CacheAbstract.cache_stats_name: { + 'hit_total': 0, + 'misses': 0, + }} + else: + self.storage = self.meta_storage[app] + self.locker.release() + + def clear(self, regex=None): + self.locker.acquire() + storage = self.storage + if regex is None: + storage.clear() + else: + self._clear(storage, regex) + + if not CacheAbstract.cache_stats_name in storage.keys(): + storage[CacheAbstract.cache_stats_name] = { + 'hit_total': 0, + 'misses': 0, + } + + self.locker.release() + + def __call__(self, key, f, + time_expire = DEFAULT_TIME_EXPIRE): + """ + Attention! cache.ram does not copy the cached object. It just stores a reference to it. + Turns out the deepcopying the object has some problems: + 1) would break backward compatibility + 2) would be limiting because people may want to cache live objects + 3) would work unless we deepcopy no storage and retrival which would make things slow. + Anyway. 
You can deepcopy explicitly in the function generating the value to be cached. + """ + + dt = time_expire + + self.locker.acquire() + item = self.storage.get(key, None) + if item and f is None: + del self.storage[key] + self.storage[CacheAbstract.cache_stats_name]['hit_total'] += 1 + self.locker.release() + + if f is None: + return None + if item and (dt is None or item[0] > time.time() - dt): + return item[1] + value = f() + + self.locker.acquire() + self.storage[key] = (time.time(), value) + self.storage[CacheAbstract.cache_stats_name]['misses'] += 1 + self.locker.release() + return value + + def increment(self, key, value=1): + self.locker.acquire() + try: + if key in self.storage: + value = self.storage[key][1] + value + self.storage[key] = (time.time(), value) + except BaseException, e: + self.locker.release() + raise e + self.locker.release() + return value + + +class CacheOnDisk(CacheAbstract): + """ + Disk based cache + + This is implemented as a shelve object and it is shared by multiple web2py + processes (and threads) as long as they share the same filesystem. + The file is locked wen accessed. + + Disk cache provides persistance when web2py is started/stopped but it slower + than `CacheInRam` + + Values stored in disk cache must be pickable. 
+ """ + + speedup_checks = set() + + def __init__(self, request, folder=None): + self.request = request + + # Lets test if the cache folder exists, if not + # we are going to create it + folder = folder or os.path.join(request.folder, 'cache') + + if not os.path.exists(folder): + os.mkdir(folder) + + ### we need this because of a possible bug in shelve that may + ### or may not lock + self.locker_name = os.path.join(folder,'cache.lock') + self.shelve_name = os.path.join(folder,'cache.shelve') + + locker, locker_locked = None, False + speedup_key = (folder,CacheAbstract.cache_stats_name) + if not speedup_key in self.speedup_checks or \ + not os.path.exists(self.shelve_name): + try: + locker = open(self.locker_name, 'a') + portalocker.lock(locker, portalocker.LOCK_EX) + locker_locked = True + storage = shelve.open(self.shelve_name) + try: + if not storage.has_key(CacheAbstract.cache_stats_name): + storage[CacheAbstract.cache_stats_name] = { + 'hit_total': 0, + 'misses': 0, + } + storage.sync() + finally: + storage.close() + self.speedup_checks.add(speedup_key) + except ImportError: + pass # no module _bsddb, ignoring exception now so it makes a ticket only if used + except: + logger.error('corrupted file %s, will try delete it!' 
\ + % self.shelve_name) + try: + os.unlink(self.shelve_name) + except IOError: + logger.warn('unable to delete file %s' % self.shelve_name) + if locker_locked: + portalocker.unlock(locker) + if locker: + locker.close() + + def clear(self, regex=None): + locker = open(self.locker_name,'a') + portalocker.lock(locker, portalocker.LOCK_EX) + storage = shelve.open(self.shelve_name) + try: + if regex is None: + storage.clear() + else: + self._clear(storage, regex) + if not CacheAbstract.cache_stats_name in storage.keys(): + storage[CacheAbstract.cache_stats_name] = { + 'hit_total': 0, + 'misses': 0, + } + storage.sync() + finally: + storage.close() + portalocker.unlock(locker) + locker.close() + + def __call__(self, key, f, + time_expire = DEFAULT_TIME_EXPIRE): + dt = time_expire + + locker = open(self.locker_name,'a') + portalocker.lock(locker, portalocker.LOCK_EX) + storage = shelve.open(self.shelve_name) + + item = storage.get(key, None) + if item and f is None: + del storage[key] + + storage[CacheAbstract.cache_stats_name] = { + 'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'] + 1, + 'misses': storage[CacheAbstract.cache_stats_name]['misses'] + } + + storage.sync() + + portalocker.unlock(locker) + locker.close() + + if f is None: + return None + if item and (dt is None or item[0] > time.time() - dt): + return item[1] + value = f() + + locker = open(self.locker_name,'a') + portalocker.lock(locker, portalocker.LOCK_EX) + storage[key] = (time.time(), value) + + storage[CacheAbstract.cache_stats_name] = { + 'hit_total': storage[CacheAbstract.cache_stats_name]['hit_total'], + 'misses': storage[CacheAbstract.cache_stats_name]['misses'] + 1 + } + + storage.sync() + + storage.close() + portalocker.unlock(locker) + locker.close() + + return value + + def increment(self, key, value=1): + locker = open(self.locker_name,'a') + portalocker.lock(locker, portalocker.LOCK_EX) + storage = shelve.open(self.shelve_name) + try: + if key in storage: + value = 
storage[key][1] + value + storage[key] = (time.time(), value) + storage.sync() + finally: + storage.close() + portalocker.unlock(locker) + locker.close() + return value + + +class Cache(object): + """ + Sets up generic caching, creating an instance of both CacheInRam and + CacheOnDisk. + In case of GAE will make use of gluon.contrib.gae_memcache. + + - self.ram is an instance of CacheInRam + - self.disk is an instance of CacheOnDisk + """ + + def __init__(self, request): + """ + Parameters + ---------- + request: + the global request object + """ + # GAE will have a special caching + import settings + if settings.global_settings.web2py_runtime_gae: + from contrib.gae_memcache import MemcacheClient + self.ram=self.disk=MemcacheClient(request) + else: + # Otherwise use ram (and try also disk) + self.ram = CacheInRam(request) + try: + self.disk = CacheOnDisk(request) + except IOError: + logger.warning('no cache.disk (IOError)') + except AttributeError: + # normally not expected anymore, as GAE has already + # been accounted for + logger.warning('no cache.disk (AttributeError)') + + def __call__(self, + key = None, + time_expire = DEFAULT_TIME_EXPIRE, + cache_model = None): + """ + Decorator function that can be used to cache any function/method. + + Example:: + + @cache('key', 5000, cache.ram) + def f(): + return time.ctime() + + When the function f is called, web2py tries to retrieve + the value corresponding to `key` from the cache of the + object exists and if it did not expire, else it calles the function `f` + and stores the output in the cache corresponding to `key`. In the case + the output of the function is returned. + + :param key: the key of the object to be store or retrieved + :param time_expire: expiration of the cache in microseconds + :param cache_model: `cache.ram`, `cache.disk`, or other + (like `cache.memcache` if defined). It defaults to `cache.ram`. 
+ + Notes + ----- + `time_expire` is used to compare the curret time with the time when the + requested object was last saved in cache. It does not affect future + requests. + Setting `time_expire` to 0 or negative value forces the cache to + refresh. + + If the function `f` is an action, we suggest using + `request.env.path_info` as key. + """ + if not cache_model: + cache_model = self.ram + + def tmp(func): + def action(): + return cache_model(key, func, time_expire) + action.__name___ = func.__name__ + action.__doc__ = func.__doc__ + return action + + return tmp + + + ADDED gluon/cache.pyc Index: gluon/cache.pyc ================================================================== --- /dev/null +++ gluon/cache.pyc cannot compute difference between binary files ADDED gluon/cfs.py Index: gluon/cfs.py ================================================================== --- /dev/null +++ gluon/cfs.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Functions required to execute app components +============================================ + +FOR INTERNAL USE ONLY +""" + +import os +import stat +import thread +from fileutils import read_file + +cfs = {} # for speed-up +cfs_lock = thread.allocate_lock() # and thread safety + + +def getcfs(key, filename, filter=None): + """ + Caches the *filtered* file `filename` with `key` until the file is + modified. + + :param key: the cache key + :param filename: the file to cache + :param filter: is the function used for filtering. Normally `filename` is a + .py file and `filter` is a function that bytecode compiles the file. + In this way the bytecode compiled file is cached. (Default = None) + + This is used on Google App Engine since pyc files cannot be saved. 
+ """ + t = os.stat(filename)[stat.ST_MTIME] + cfs_lock.acquire() + item = cfs.get(key, None) + cfs_lock.release() + if item and item[0] == t: + return item[1] + if not filter: + data = read_file(filename) + else: + data = filter() + cfs_lock.acquire() + cfs[key] = (t, data) + cfs_lock.release() + return data + + + ADDED gluon/cfs.pyc Index: gluon/cfs.pyc ================================================================== --- /dev/null +++ gluon/cfs.pyc cannot compute difference between binary files ADDED gluon/compileapp.py Index: gluon/compileapp.py ================================================================== --- /dev/null +++ gluon/compileapp.py @@ -0,0 +1,573 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Functions required to execute app components +============================================ + +FOR INTERNAL USE ONLY +""" + +import re +import sys +import fnmatch +import os +import copy +import random +import __builtin__ +from storage import Storage, List +from template import parse_template +from restricted import restricted, compile2 +from fileutils import mktree, listdir, read_file, write_file +from myregex import regex_expose +from languages import translator +from dal import BaseAdapter, SQLDB, SQLField, DAL, Field +from sqlhtml import SQLFORM, SQLTABLE +from cache import Cache +from globals import current +import settings +from cfs import getcfs +import html +import validators +from http import HTTP, redirect +import marshal +import shutil +import imp +import logging +logger = logging.getLogger("web2py") +import rewrite + +try: + import py_compile +except: + logger.warning('unable to import py_compile') + +is_gae = settings.global_settings.web2py_runtime_gae +is_jython = settings.global_settings.is_jython = 'java' in sys.platform.lower() or hasattr(sys, 'JYTHON_JAR') or 
str(sys.copyright).find('Jython') > 0 + +TEST_CODE = \ + r""" +def _TEST(): + import doctest, sys, cStringIO, types, cgi, gluon.fileutils + if not gluon.fileutils.check_credentials(request): + raise HTTP(401, web2py_error='invalid credentials') + stdout = sys.stdout + html = '

    Testing controller "%s.py" ... done.


    \n' \ + % request.controller + for key in sorted([key for key in globals() if not key in __symbols__+['_TEST']]): + eval_key = eval(key) + if type(eval_key) == types.FunctionType: + number_doctests = sum([len(ds.examples) for ds in doctest.DocTestFinder().find(eval_key)]) + if number_doctests>0: + sys.stdout = cStringIO.StringIO() + name = '%s/controllers/%s.py in %s.__doc__' \ + % (request.folder, request.controller, key) + doctest.run_docstring_examples(eval_key, + globals(), False, name=name) + report = sys.stdout.getvalue().strip() + if report: + pf = 'failed' + else: + pf = 'passed' + html += '

    Function %s [%s]

    \n' \ + % (pf, key, pf) + if report: + html += CODE(report, language='web2py', \ + link='/examples/global/vars/').xml() + html += '
    \n' + else: + html += \ + '

    Function %s [no doctests]


    \n' \ + % (key) + response._vars = html + sys.stdout = stdout +_TEST() +""" + +class mybuiltin(object): + """ + NOTE could simple use a dict and populate it, + NOTE not sure if this changes things though if monkey patching import..... + """ + #__builtins__ + def __getitem__(self, key): + try: + return getattr(__builtin__, key) + except AttributeError: + raise KeyError, key + def __setitem__(self, key, value): + setattr(self, key, value) + +class LoadFactory(object): + """ + Attention: this helper is new and experimental + """ + def __init__(self,environment): + self.environment = environment + def __call__(self, c=None, f='index', args=None, vars=None, + extension=None, target=None,ajax=False,ajax_trap=False, + url=None,user_signature=False, content='loading...',**attr): + if args is None: args = [] + vars = Storage(vars or {}) + import globals + target = target or 'c'+str(random.random())[2:] + attr['_id']=target + request = self.environment['request'] + if '.' in f: + f, extension = f.split('.',1) + if url or ajax: + url = url or html.URL(request.application, c, f, r=request, + args=args, vars=vars, extension=extension, + user_signature=user_signature) + script = html.SCRIPT('web2py_component("%s","%s")' % (url, target), + _type="text/javascript") + return html.TAG[''](script, html.DIV(content,**attr)) + else: + if not isinstance(args,(list,tuple)): + args = [args] + c = c or request.controller + + other_request = Storage() + for key, value in request.items(): + other_request[key] = value + other_request['env'] = Storage() + for key, value in request.env.items(): + other_request.env['key'] = value + other_request.controller = c + other_request.function = f + other_request.extension = extension or request.extension + other_request.args = List(args) + other_request.vars = vars + other_request.get_vars = vars + other_request.post_vars = Storage() + other_response = globals.Response() + other_request.env.path_info = '/' + \ + '/'.join([request.application,c,f] + 
\ + map(str, other_request.args)) + other_request.env.query_string = \ + vars and html.URL(vars=vars).split('?')[1] or '' + other_request.env.http_web2py_component_location = \ + request.env.path_info + other_request.cid = target + other_request.env.http_web2py_component_element = target + other_response.view = '%s/%s.%s' % (c,f, other_request.extension) + other_environment = copy.copy(self.environment) + other_response._view_environment = other_environment + other_response.generic_patterns = \ + copy.copy(current.response.generic_patterns) + other_environment['request'] = other_request + other_environment['response'] = other_response + + ## some magic here because current are thread-locals + + original_request, current.request = current.request, other_request + original_response, current.response = current.response, other_response + page = run_controller_in(c, f, other_environment) + if isinstance(page, dict): + other_response._vars = page + for key in page: + other_response._view_environment[key] = page[key] + run_view_in(other_response._view_environment) + page = other_response.body.getvalue() + current.request, current.response = original_request, original_response + js = None + if ajax_trap: + link = html.URL(request.application, c, f, r=request, + args=args, vars=vars, extension=extension, + user_signature=user_signature) + js = "web2py_trap_form('%s','%s');" % (link, target) + script = js and html.SCRIPT(js,_type="text/javascript") or '' + return html.TAG[''](html.DIV(html.XML(page),**attr),script) + + +def local_import_aux(name, force=False, app='welcome'): + """ + In apps, instead of importing a local module + (in applications/app/modules) with:: + + import a.b.c as d + + you should do:: + + d = local_import('a.b.c') + + or (to force a reload): + + d = local_import('a.b.c', reload=True) + + This prevents conflict between applications and un-necessary execs. + It can be used to import any module, including regular Python modules. 
+ """ + items = name.replace('/','.') + name = "applications.%s.modules.%s" % (app, items) + module = __import__(name) + for item in name.split(".")[1:]: + module = getattr(module, item) + if force: + reload(module) + return module + + +""" +OLD IMPLEMENTATION: + items = name.replace('/','.').split('.') + filename, modulepath = items[-1], os.path.join(apath,'modules',*items[:-1]) + imp.acquire_lock() + try: + file=None + (file,path,desc) = imp.find_module(filename,[modulepath]+sys.path) + if not path in sys.modules or reload: + if is_gae: + module={} + execfile(path,{},module) + module=Storage(module) + else: + module = imp.load_module(path,file,path,desc) + sys.modules[path] = module + else: + module = sys.modules[path] + except Exception, e: + module = None + if file: + file.close() + imp.release_lock() + if not module: + raise ImportError, "cannot find module %s in %s" % (filename, modulepath) + return module +""" + +def build_environment(request, response, session, store_current=True): + """ + Build the environment dictionary into which web2py files are executed. + """ + + environment = {} + for key in html.__all__: + environment[key] = getattr(html, key) + for key in validators.__all__: + environment[key] = getattr(validators, key) + if not request.env: + request.env = Storage() + + t = environment['T'] = translator(request) + c = environment['cache'] = Cache(request) + if store_current: + current.request = request + current.response = response + current.session = session + current.T = t + current.cache = c + + global __builtins__ + if is_jython: # jython hack + __builtins__ = mybuiltin() + else: + __builtins__['__import__'] = __builtin__.__import__ ### WHY? 
+ environment['__builtins__'] = __builtins__ + environment['HTTP'] = HTTP + environment['redirect'] = redirect + environment['request'] = request + environment['response'] = response + environment['session'] = session + environment['DAL'] = DAL + environment['Field'] = Field + environment['SQLDB'] = SQLDB # for backward compatibility + environment['SQLField'] = SQLField # for backward compatibility + environment['SQLFORM'] = SQLFORM + environment['SQLTABLE'] = SQLTABLE + environment['LOAD'] = LoadFactory(environment) + environment['local_import'] = \ + lambda name, reload=False, app=request.application:\ + local_import_aux(name,reload,app) + BaseAdapter.set_folder(os.path.join(request.folder, 'databases')) + response._view_environment = copy.copy(environment) + return environment + + +def save_pyc(filename): + """ + Bytecode compiles the file `filename` + """ + py_compile.compile(filename) + + +def read_pyc(filename): + """ + Read the code inside a bytecode compiled file if the MAGIC number is + compatible + + :returns: a code object + """ + data = read_file(filename, 'rb') + if not is_gae and data[:4] != imp.get_magic(): + raise SystemError, 'compiled code is incompatible' + return marshal.loads(data[8:]) + + +def compile_views(folder): + """ + Compiles all the views in the application specified by `folder` + """ + + path = os.path.join(folder, 'views') + for file in listdir(path, '^[\w/\-]+(\.\w+)+$'): + data = parse_template(file, path) + filename = ('views/%s.py' % file).replace('/', '_').replace('\\', '_') + filename = os.path.join(folder, 'compiled', filename) + write_file(filename, data) + save_pyc(filename) + os.unlink(filename) + + +def compile_models(folder): + """ + Compiles all the models in the application specified by `folder` + """ + + path = os.path.join(folder, 'models') + for file in listdir(path, '.+\.py$'): + data = read_file(os.path.join(path, file)) + filename = os.path.join(folder, 'compiled','models',file) + mktree(filename) + 
write_file(filename, data) + save_pyc(filename) + os.unlink(filename) + + +def compile_controllers(folder): + """ + Compiles all the controllers in the application specified by `folder` + """ + + path = os.path.join(folder, 'controllers') + for file in listdir(path, '.+\.py$'): + ### why is this here? save_pyc(os.path.join(path, file)) + data = read_file(os.path.join(path,file)) + exposed = regex_expose.findall(data) + for function in exposed: + command = data + "\nresponse._vars=response._caller(%s)\n" % \ + function + filename = os.path.join(folder, 'compiled', ('controllers/' + + file[:-3]).replace('/', '_') + + '_' + function + '.py') + write_file(filename, command) + save_pyc(filename) + os.unlink(filename) + + +def run_models_in(environment): + """ + Runs all models (in the app specified by the current folder) + It tries pre-compiled models first before compiling them. + """ + + folder = environment['request'].folder + c = environment['request'].controller + f = environment['request'].function + cpath = os.path.join(folder, 'compiled') + if os.path.exists(cpath): + for model in listdir(cpath, '^models_\w+\.pyc$', 0): + restricted(read_pyc(model), environment, layer=model) + path = os.path.join(cpath, 'models') + models = listdir(path, '^\w+\.pyc$',0,sort=False) + compiled=True + else: + path = os.path.join(folder, 'models') + models = listdir(path, '^\w+\.py$',0,sort=False) + compiled=False + paths = (path, os.path.join(path,c), os.path.join(path,c,f)) + for model in models: + if not os.path.split(model)[0] in paths and c!='appadmin': + continue + elif compiled: + code = read_pyc(model) + elif is_gae: + code = getcfs(model, model, + lambda: compile2(read_file(model), model)) + else: + code = getcfs(model, model, None) + restricted(code, environment, layer=model) + + +def run_controller_in(controller, function, environment): + """ + Runs the controller.function() (for the app specified by + the current folder). 
+ It tries pre-compiled controller_function.pyc first before compiling it. + """ + + # if compiled should run compiled! + + folder = environment['request'].folder + path = os.path.join(folder, 'compiled') + badc = 'invalid controller (%s/%s)' % (controller, function) + badf = 'invalid function (%s/%s)' % (controller, function) + if os.path.exists(path): + filename = os.path.join(path, 'controllers_%s_%s.pyc' + % (controller, function)) + if not os.path.exists(filename): + raise HTTP(404, + rewrite.thread.routes.error_message % badf, + web2py_error=badf) + restricted(read_pyc(filename), environment, layer=filename) + elif function == '_TEST': + # TESTING: adjust the path to include site packages + from settings import global_settings + from admin import abspath, add_path_first + paths = (global_settings.gluon_parent, abspath('site-packages', gluon=True), abspath('gluon', gluon=True), '') + [add_path_first(path) for path in paths] + # TESTING END + + filename = os.path.join(folder, 'controllers/%s.py' + % controller) + if not os.path.exists(filename): + raise HTTP(404, + rewrite.thread.routes.error_message % badc, + web2py_error=badc) + environment['__symbols__'] = environment.keys() + code = read_file(filename) + code += TEST_CODE + restricted(code, environment, layer=filename) + else: + filename = os.path.join(folder, 'controllers/%s.py' + % controller) + if not os.path.exists(filename): + raise HTTP(404, + rewrite.thread.routes.error_message % badc, + web2py_error=badc) + code = read_file(filename) + exposed = regex_expose.findall(code) + if not function in exposed: + raise HTTP(404, + rewrite.thread.routes.error_message % badf, + web2py_error=badf) + code = "%s\nresponse._vars=response._caller(%s)\n" % (code, function) + if is_gae: + layer = filename + ':' + function + code = getcfs(layer, filename, lambda: compile2(code,layer)) + restricted(code, environment, filename) + response = environment['response'] + vars=response._vars + if response.postprocessing: + for 
p in response.postprocessing: + vars = p(vars) + if isinstance(vars,unicode): + vars = vars.encode('utf8') + if hasattr(vars,'xml'): + vars = vars.xml() + return vars + +def run_view_in(environment): + """ + Executes the view for the requested action. + The view is the one specified in `response.view` or determined by the url + or `view/generic.extension` + It tries the pre-compiled views_controller_function.pyc before compiling it. + """ + + request = environment['request'] + response = environment['response'] + folder = request.folder + path = os.path.join(folder, 'compiled') + badv = 'invalid view (%s)' % response.view + patterns = response.generic_patterns or [] + regex = re.compile('|'.join(fnmatch.translate(r) for r in patterns)) + short_action = '%(controller)s/%(function)s.%(extension)s' % request + allow_generic = patterns and regex.search(short_action) + if not isinstance(response.view, str): + ccode = parse_template(response.view, os.path.join(folder, 'views'), + context=environment) + restricted(ccode, environment, 'file stream') + elif os.path.exists(path): + x = response.view.replace('/', '_') + files = ['views_%s.pyc' % x] + if allow_generic: + files.append('views_generic.%s.pyc' % request.extension) + # for backward compatibility + if request.extension == 'html': + files.append('views_%s.pyc' % x[:-5]) + if allow_generic: + files.append('views_generic.pyc') + # end backward compatibility code + for f in files: + filename = os.path.join(path,f) + if os.path.exists(filename): + code = read_pyc(filename) + restricted(code, environment, layer=filename) + return + raise HTTP(404, + rewrite.thread.routes.error_message % badv, + web2py_error=badv) + else: + filename = os.path.join(folder, 'views', response.view) + if not os.path.exists(filename) and allow_generic: + response.view = 'generic.' 
+ request.extension + filename = os.path.join(folder, 'views', response.view) + if not os.path.exists(filename): + raise HTTP(404, + rewrite.thread.routes.error_message % badv, + web2py_error=badv) + layer = filename + if is_gae: + ccode = getcfs(layer, filename, + lambda: compile2(parse_template(response.view, + os.path.join(folder, 'views'), + context=environment),layer)) + else: + ccode = parse_template(response.view, + os.path.join(folder, 'views'), + context=environment) + restricted(ccode, environment, layer) + +def remove_compiled_application(folder): + """ + Deletes the folder `compiled` containing the compiled application. + """ + try: + shutil.rmtree(os.path.join(folder, 'compiled')) + path = os.path.join(folder, 'controllers') + for file in listdir(path,'.*\.pyc$',drop=False): + os.unlink(file) + except OSError: + pass + + +def compile_application(folder): + """ + Compiles all models, views, controller for the application in `folder`. + """ + remove_compiled_application(folder) + os.mkdir(os.path.join(folder, 'compiled')) + compile_models(folder) + compile_controllers(folder) + compile_views(folder) + + +def test(): + """ + Example:: + + >>> import traceback, types + >>> environment={'x':1} + >>> open('a.py', 'w').write('print 1/x') + >>> save_pyc('a.py') + >>> os.unlink('a.py') + >>> if type(read_pyc('a.pyc'))==types.CodeType: print 'code' + code + >>> exec read_pyc('a.pyc') in environment + 1 + """ + + return + + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + ADDED gluon/compileapp.pyc Index: gluon/compileapp.pyc ================================================================== --- /dev/null +++ gluon/compileapp.pyc cannot compute difference between binary files ADDED gluon/contenttype.py Index: gluon/contenttype.py ================================================================== --- /dev/null +++ gluon/contenttype.py @@ -0,0 +1,721 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web 
Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +CONTENT_TYPE dictionary created against freedesktop.org' shared mime info +database version 0.70. +""" + +__all__ = ['contenttype'] + +CONTENT_TYPE = { + '.load': 'text/html', + '.123': 'application/vnd.lotus-1-2-3', + '.3ds': 'image/x-3ds', + '.3g2': 'video/3gpp', + '.3ga': 'video/3gpp', + '.3gp': 'video/3gpp', + '.3gpp': 'video/3gpp', + '.602': 'application/x-t602', + '.669': 'audio/x-mod', + '.7z': 'application/x-7z-compressed', + '.a': 'application/x-archive', + '.aac': 'audio/mp4', + '.abw': 'application/x-abiword', + '.abw.crashed': 'application/x-abiword', + '.abw.gz': 'application/x-abiword', + '.ac3': 'audio/ac3', + '.ace': 'application/x-ace', + '.adb': 'text/x-adasrc', + '.ads': 'text/x-adasrc', + '.afm': 'application/x-font-afm', + '.ag': 'image/x-applix-graphics', + '.ai': 'application/illustrator', + '.aif': 'audio/x-aiff', + '.aifc': 'audio/x-aiff', + '.aiff': 'audio/x-aiff', + '.al': 'application/x-perl', + '.alz': 'application/x-alz', + '.amr': 'audio/amr', + '.ani': 'application/x-navi-animation', + '.anim[1-9j]': 'video/x-anim', + '.anx': 'application/annodex', + '.ape': 'audio/x-ape', + '.arj': 'application/x-arj', + '.arw': 'image/x-sony-arw', + '.as': 'application/x-applix-spreadsheet', + '.asc': 'text/plain', + '.asf': 'video/x-ms-asf', + '.asp': 'application/x-asp', + '.ass': 'text/x-ssa', + '.asx': 'audio/x-ms-asx', + '.atom': 'application/atom+xml', + '.au': 'audio/basic', + '.avi': 'video/x-msvideo', + '.aw': 'application/x-applix-word', + '.awb': 'audio/amr-wb', + '.awk': 'application/x-awk', + '.axa': 'audio/annodex', + '.axv': 'video/annodex', + '.bak': 'application/x-trash', + '.bcpio': 'application/x-bcpio', + '.bdf': 'application/x-font-bdf', + '.bib': 'text/x-bibtex', + '.bin': 'application/octet-stream', + '.blend': 'application/x-blender', + '.blender': 'application/x-blender', + '.bmp': 'image/bmp', + '.bz': 
'application/x-bzip', + '.bz2': 'application/x-bzip', + '.c': 'text/x-csrc', + '.c++': 'text/x-c++src', + '.cab': 'application/vnd.ms-cab-compressed', + '.cb7': 'application/x-cb7', + '.cbr': 'application/x-cbr', + '.cbt': 'application/x-cbt', + '.cbz': 'application/x-cbz', + '.cc': 'text/x-c++src', + '.cdf': 'application/x-netcdf', + '.cdr': 'application/vnd.corel-draw', + '.cer': 'application/x-x509-ca-cert', + '.cert': 'application/x-x509-ca-cert', + '.cgm': 'image/cgm', + '.chm': 'application/x-chm', + '.chrt': 'application/x-kchart', + '.class': 'application/x-java', + '.cls': 'text/x-tex', + '.cmake': 'text/x-cmake', + '.cpio': 'application/x-cpio', + '.cpio.gz': 'application/x-cpio-compressed', + '.cpp': 'text/x-c++src', + '.cr2': 'image/x-canon-cr2', + '.crt': 'application/x-x509-ca-cert', + '.crw': 'image/x-canon-crw', + '.cs': 'text/x-csharp', + '.csh': 'application/x-csh', + '.css': 'text/css', + '.cssl': 'text/css', + '.csv': 'text/csv', + '.cue': 'application/x-cue', + '.cur': 'image/x-win-bitmap', + '.cxx': 'text/x-c++src', + '.d': 'text/x-dsrc', + '.dar': 'application/x-dar', + '.dbf': 'application/x-dbf', + '.dc': 'application/x-dc-rom', + '.dcl': 'text/x-dcl', + '.dcm': 'application/dicom', + '.dcr': 'image/x-kodak-dcr', + '.dds': 'image/x-dds', + '.deb': 'application/x-deb', + '.der': 'application/x-x509-ca-cert', + '.desktop': 'application/x-desktop', + '.dia': 'application/x-dia-diagram', + '.diff': 'text/x-patch', + '.divx': 'video/x-msvideo', + '.djv': 'image/vnd.djvu', + '.djvu': 'image/vnd.djvu', + '.dng': 'image/x-adobe-dng', + '.doc': 'application/msword', + '.docbook': 'application/docbook+xml', + '.docm': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', + '.dot': 'text/vnd.graphviz', + '.dsl': 'text/x-dsl', + '.dtd': 'application/xml-dtd', + '.dtx': 'text/x-tex', + '.dv': 'video/dv', + '.dvi': 'application/x-dvi', + '.dvi.bz2': 
'application/x-bzdvi', + '.dvi.gz': 'application/x-gzdvi', + '.dwg': 'image/vnd.dwg', + '.dxf': 'image/vnd.dxf', + '.e': 'text/x-eiffel', + '.egon': 'application/x-egon', + '.eif': 'text/x-eiffel', + '.el': 'text/x-emacs-lisp', + '.emf': 'image/x-emf', + '.emp': 'application/vnd.emusic-emusic_package', + '.ent': 'application/xml-external-parsed-entity', + '.eps': 'image/x-eps', + '.eps.bz2': 'image/x-bzeps', + '.eps.gz': 'image/x-gzeps', + '.epsf': 'image/x-eps', + '.epsf.bz2': 'image/x-bzeps', + '.epsf.gz': 'image/x-gzeps', + '.epsi': 'image/x-eps', + '.epsi.bz2': 'image/x-bzeps', + '.epsi.gz': 'image/x-gzeps', + '.epub': 'application/epub+zip', + '.erl': 'text/x-erlang', + '.es': 'application/ecmascript', + '.etheme': 'application/x-e-theme', + '.etx': 'text/x-setext', + '.exe': 'application/x-ms-dos-executable', + '.exr': 'image/x-exr', + '.ez': 'application/andrew-inset', + '.f': 'text/x-fortran', + '.f90': 'text/x-fortran', + '.f95': 'text/x-fortran', + '.fb2': 'application/x-fictionbook+xml', + '.fig': 'image/x-xfig', + '.fits': 'image/fits', + '.fl': 'application/x-fluid', + '.flac': 'audio/x-flac', + '.flc': 'video/x-flic', + '.fli': 'video/x-flic', + '.flv': 'video/x-flv', + '.flw': 'application/x-kivio', + '.fo': 'text/x-xslfo', + '.for': 'text/x-fortran', + '.g3': 'image/fax-g3', + '.gb': 'application/x-gameboy-rom', + '.gba': 'application/x-gba-rom', + '.gcrd': 'text/directory', + '.ged': 'application/x-gedcom', + '.gedcom': 'application/x-gedcom', + '.gen': 'application/x-genesis-rom', + '.gf': 'application/x-tex-gf', + '.gg': 'application/x-sms-rom', + '.gif': 'image/gif', + '.glade': 'application/x-glade', + '.gmo': 'application/x-gettext-translation', + '.gnc': 'application/x-gnucash', + '.gnd': 'application/gnunet-directory', + '.gnucash': 'application/x-gnucash', + '.gnumeric': 'application/x-gnumeric', + '.gnuplot': 'application/x-gnuplot', + '.gp': 'application/x-gnuplot', + '.gpg': 'application/pgp-encrypted', + '.gplt': 
'application/x-gnuplot', + '.gra': 'application/x-graphite', + '.gsf': 'application/x-font-type1', + '.gsm': 'audio/x-gsm', + '.gtar': 'application/x-tar', + '.gv': 'text/vnd.graphviz', + '.gvp': 'text/x-google-video-pointer', + '.gz': 'application/x-gzip', + '.h': 'text/x-chdr', + '.h++': 'text/x-c++hdr', + '.hdf': 'application/x-hdf', + '.hh': 'text/x-c++hdr', + '.hp': 'text/x-c++hdr', + '.hpgl': 'application/vnd.hp-hpgl', + '.hpp': 'text/x-c++hdr', + '.hs': 'text/x-haskell', + '.htm': 'text/html', + '.html': 'text/html', + '.hwp': 'application/x-hwp', + '.hwt': 'application/x-hwt', + '.hxx': 'text/x-c++hdr', + '.ica': 'application/x-ica', + '.icb': 'image/x-tga', + '.icns': 'image/x-icns', + '.ico': 'image/vnd.microsoft.icon', + '.ics': 'text/calendar', + '.idl': 'text/x-idl', + '.ief': 'image/ief', + '.iff': 'image/x-iff', + '.ilbm': 'image/x-ilbm', + '.ime': 'text/x-imelody', + '.imy': 'text/x-imelody', + '.ins': 'text/x-tex', + '.iptables': 'text/x-iptables', + '.iso': 'application/x-cd-image', + '.iso9660': 'application/x-cd-image', + '.it': 'audio/x-it', + '.j2k': 'image/jp2', + '.jad': 'text/vnd.sun.j2me.app-descriptor', + '.jar': 'application/x-java-archive', + '.java': 'text/x-java', + '.jng': 'image/x-jng', + '.jnlp': 'application/x-java-jnlp-file', + '.jp2': 'image/jp2', + '.jpc': 'image/jp2', + '.jpe': 'image/jpeg', + '.jpeg': 'image/jpeg', + '.jpf': 'image/jp2', + '.jpg': 'image/jpeg', + '.jpr': 'application/x-jbuilder-project', + '.jpx': 'image/jp2', + '.js': 'application/javascript', + '.json': 'application/json', + '.jsonp': 'application/jsonp', + '.k25': 'image/x-kodak-k25', + '.kar': 'audio/midi', + '.karbon': 'application/x-karbon', + '.kdc': 'image/x-kodak-kdc', + '.kdelnk': 'application/x-desktop', + '.kexi': 'application/x-kexiproject-sqlite3', + '.kexic': 'application/x-kexi-connectiondata', + '.kexis': 'application/x-kexiproject-shortcut', + '.kfo': 'application/x-kformula', + '.kil': 'application/x-killustrator', + '.kino': 
'application/smil', + '.kml': 'application/vnd.google-earth.kml+xml', + '.kmz': 'application/vnd.google-earth.kmz', + '.kon': 'application/x-kontour', + '.kpm': 'application/x-kpovmodeler', + '.kpr': 'application/x-kpresenter', + '.kpt': 'application/x-kpresenter', + '.kra': 'application/x-krita', + '.ksp': 'application/x-kspread', + '.kud': 'application/x-kugar', + '.kwd': 'application/x-kword', + '.kwt': 'application/x-kword', + '.la': 'application/x-shared-library-la', + '.latex': 'text/x-tex', + '.ldif': 'text/x-ldif', + '.lha': 'application/x-lha', + '.lhs': 'text/x-literate-haskell', + '.lhz': 'application/x-lhz', + '.log': 'text/x-log', + '.ltx': 'text/x-tex', + '.lua': 'text/x-lua', + '.lwo': 'image/x-lwo', + '.lwob': 'image/x-lwo', + '.lws': 'image/x-lws', + '.ly': 'text/x-lilypond', + '.lyx': 'application/x-lyx', + '.lz': 'application/x-lzip', + '.lzh': 'application/x-lha', + '.lzma': 'application/x-lzma', + '.lzo': 'application/x-lzop', + '.m': 'text/x-matlab', + '.m15': 'audio/x-mod', + '.m2t': 'video/mpeg', + '.m3u': 'audio/x-mpegurl', + '.m3u8': 'audio/x-mpegurl', + '.m4': 'application/x-m4', + '.m4a': 'audio/mp4', + '.m4b': 'audio/x-m4b', + '.m4v': 'video/mp4', + '.mab': 'application/x-markaby', + '.man': 'application/x-troff-man', + '.mbox': 'application/mbox', + '.md': 'application/x-genesis-rom', + '.mdb': 'application/vnd.ms-access', + '.mdi': 'image/vnd.ms-modi', + '.me': 'text/x-troff-me', + '.med': 'audio/x-mod', + '.metalink': 'application/metalink+xml', + '.mgp': 'application/x-magicpoint', + '.mid': 'audio/midi', + '.midi': 'audio/midi', + '.mif': 'application/x-mif', + '.minipsf': 'audio/x-minipsf', + '.mka': 'audio/x-matroska', + '.mkv': 'video/x-matroska', + '.ml': 'text/x-ocaml', + '.mli': 'text/x-ocaml', + '.mm': 'text/x-troff-mm', + '.mmf': 'application/x-smaf', + '.mml': 'text/mathml', + '.mng': 'video/x-mng', + '.mo': 'application/x-gettext-translation', + '.mo3': 'audio/x-mo3', + '.moc': 'text/x-moc', + '.mod': 'audio/x-mod', + 
'.mof': 'text/x-mof', + '.moov': 'video/quicktime', + '.mov': 'video/quicktime', + '.movie': 'video/x-sgi-movie', + '.mp+': 'audio/x-musepack', + '.mp2': 'video/mpeg', + '.mp3': 'audio/mpeg', + '.mp4': 'video/mp4', + '.mpc': 'audio/x-musepack', + '.mpe': 'video/mpeg', + '.mpeg': 'video/mpeg', + '.mpg': 'video/mpeg', + '.mpga': 'audio/mpeg', + '.mpp': 'audio/x-musepack', + '.mrl': 'text/x-mrml', + '.mrml': 'text/x-mrml', + '.mrw': 'image/x-minolta-mrw', + '.ms': 'text/x-troff-ms', + '.msi': 'application/x-msi', + '.msod': 'image/x-msod', + '.msx': 'application/x-msx-rom', + '.mtm': 'audio/x-mod', + '.mup': 'text/x-mup', + '.mxf': 'application/mxf', + '.n64': 'application/x-n64-rom', + '.nb': 'application/mathematica', + '.nc': 'application/x-netcdf', + '.nds': 'application/x-nintendo-ds-rom', + '.nef': 'image/x-nikon-nef', + '.nes': 'application/x-nes-rom', + '.nfo': 'text/x-nfo', + '.not': 'text/x-mup', + '.nsc': 'application/x-netshow-channel', + '.nsv': 'video/x-nsv', + '.o': 'application/x-object', + '.obj': 'application/x-tgif', + '.ocl': 'text/x-ocl', + '.oda': 'application/oda', + '.odb': 'application/vnd.oasis.opendocument.database', + '.odc': 'application/vnd.oasis.opendocument.chart', + '.odf': 'application/vnd.oasis.opendocument.formula', + '.odg': 'application/vnd.oasis.opendocument.graphics', + '.odi': 'application/vnd.oasis.opendocument.image', + '.odm': 'application/vnd.oasis.opendocument.text-master', + '.odp': 'application/vnd.oasis.opendocument.presentation', + '.ods': 'application/vnd.oasis.opendocument.spreadsheet', + '.odt': 'application/vnd.oasis.opendocument.text', + '.oga': 'audio/ogg', + '.ogg': 'video/x-theora+ogg', + '.ogm': 'video/x-ogm+ogg', + '.ogv': 'video/ogg', + '.ogx': 'application/ogg', + '.old': 'application/x-trash', + '.oleo': 'application/x-oleo', + '.opml': 'text/x-opml+xml', + '.ora': 'image/openraster', + '.orf': 'image/x-olympus-orf', + '.otc': 'application/vnd.oasis.opendocument.chart-template', + '.otf': 
'application/x-font-otf', + '.otg': 'application/vnd.oasis.opendocument.graphics-template', + '.oth': 'application/vnd.oasis.opendocument.text-web', + '.otp': 'application/vnd.oasis.opendocument.presentation-template', + '.ots': 'application/vnd.oasis.opendocument.spreadsheet-template', + '.ott': 'application/vnd.oasis.opendocument.text-template', + '.owl': 'application/rdf+xml', + '.oxt': 'application/vnd.openofficeorg.extension', + '.p': 'text/x-pascal', + '.p10': 'application/pkcs10', + '.p12': 'application/x-pkcs12', + '.p7b': 'application/x-pkcs7-certificates', + '.p7s': 'application/pkcs7-signature', + '.pack': 'application/x-java-pack200', + '.pak': 'application/x-pak', + '.par2': 'application/x-par2', + '.pas': 'text/x-pascal', + '.patch': 'text/x-patch', + '.pbm': 'image/x-portable-bitmap', + '.pcd': 'image/x-photo-cd', + '.pcf': 'application/x-cisco-vpn-settings', + '.pcf.gz': 'application/x-font-pcf', + '.pcf.z': 'application/x-font-pcf', + '.pcl': 'application/vnd.hp-pcl', + '.pcx': 'image/x-pcx', + '.pdb': 'chemical/x-pdb', + '.pdc': 'application/x-aportisdoc', + '.pdf': 'application/pdf', + '.pdf.bz2': 'application/x-bzpdf', + '.pdf.gz': 'application/x-gzpdf', + '.pef': 'image/x-pentax-pef', + '.pem': 'application/x-x509-ca-cert', + '.perl': 'application/x-perl', + '.pfa': 'application/x-font-type1', + '.pfb': 'application/x-font-type1', + '.pfx': 'application/x-pkcs12', + '.pgm': 'image/x-portable-graymap', + '.pgn': 'application/x-chess-pgn', + '.pgp': 'application/pgp-encrypted', + '.php': 'application/x-php', + '.php3': 'application/x-php', + '.php4': 'application/x-php', + '.pict': 'image/x-pict', + '.pict1': 'image/x-pict', + '.pict2': 'image/x-pict', + '.pickle': 'application/python-pickle', + '.pk': 'application/x-tex-pk', + '.pkipath': 'application/pkix-pkipath', + '.pkr': 'application/pgp-keys', + '.pl': 'application/x-perl', + '.pla': 'audio/x-iriver-pla', + '.pln': 'application/x-planperfect', + '.pls': 'audio/x-scpls', + '.pm': 
'application/x-perl', + '.png': 'image/png', + '.pnm': 'image/x-portable-anymap', + '.pntg': 'image/x-macpaint', + '.po': 'text/x-gettext-translation', + '.por': 'application/x-spss-por', + '.pot': 'text/x-gettext-translation-template', + '.ppm': 'image/x-portable-pixmap', + '.pps': 'application/vnd.ms-powerpoint', + '.ppt': 'application/vnd.ms-powerpoint', + '.pptm': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + '.pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', + '.ppz': 'application/vnd.ms-powerpoint', + '.prc': 'application/x-palm-database', + '.ps': 'application/postscript', + '.ps.bz2': 'application/x-bzpostscript', + '.ps.gz': 'application/x-gzpostscript', + '.psd': 'image/vnd.adobe.photoshop', + '.psf': 'audio/x-psf', + '.psf.gz': 'application/x-gz-font-linux-psf', + '.psflib': 'audio/x-psflib', + '.psid': 'audio/prs.sid', + '.psw': 'application/x-pocket-word', + '.pw': 'application/x-pw', + '.py': 'text/x-python', + '.pyc': 'application/x-python-bytecode', + '.pyo': 'application/x-python-bytecode', + '.qif': 'image/x-quicktime', + '.qt': 'video/quicktime', + '.qtif': 'image/x-quicktime', + '.qtl': 'application/x-quicktime-media-link', + '.qtvr': 'video/quicktime', + '.ra': 'audio/vnd.rn-realaudio', + '.raf': 'image/x-fuji-raf', + '.ram': 'application/ram', + '.rar': 'application/x-rar', + '.ras': 'image/x-cmu-raster', + '.raw': 'image/x-panasonic-raw', + '.rax': 'audio/vnd.rn-realaudio', + '.rb': 'application/x-ruby', + '.rdf': 'application/rdf+xml', + '.rdfs': 'application/rdf+xml', + '.reg': 'text/x-ms-regedit', + '.rej': 'application/x-reject', + '.rgb': 'image/x-rgb', + '.rle': 'image/rle', + '.rm': 'application/vnd.rn-realmedia', + '.rmj': 'application/vnd.rn-realmedia', + '.rmm': 'application/vnd.rn-realmedia', + '.rms': 'application/vnd.rn-realmedia', + '.rmvb': 'application/vnd.rn-realmedia', + '.rmx': 'application/vnd.rn-realmedia', + '.roff': 'text/troff', + '.rp': 
'image/vnd.rn-realpix', + '.rpm': 'application/x-rpm', + '.rss': 'application/rss+xml', + '.rt': 'text/vnd.rn-realtext', + '.rtf': 'application/rtf', + '.rtx': 'text/richtext', + '.rv': 'video/vnd.rn-realvideo', + '.rvx': 'video/vnd.rn-realvideo', + '.s3m': 'audio/x-s3m', + '.sam': 'application/x-amipro', + '.sami': 'application/x-sami', + '.sav': 'application/x-spss-sav', + '.scm': 'text/x-scheme', + '.sda': 'application/vnd.stardivision.draw', + '.sdc': 'application/vnd.stardivision.calc', + '.sdd': 'application/vnd.stardivision.impress', + '.sdp': 'application/sdp', + '.sds': 'application/vnd.stardivision.chart', + '.sdw': 'application/vnd.stardivision.writer', + '.sgf': 'application/x-go-sgf', + '.sgi': 'image/x-sgi', + '.sgl': 'application/vnd.stardivision.writer', + '.sgm': 'text/sgml', + '.sgml': 'text/sgml', + '.sh': 'application/x-shellscript', + '.shar': 'application/x-shar', + '.shn': 'application/x-shorten', + '.siag': 'application/x-siag', + '.sid': 'audio/prs.sid', + '.sik': 'application/x-trash', + '.sis': 'application/vnd.symbian.install', + '.sisx': 'x-epoc/x-sisx-app', + '.sit': 'application/x-stuffit', + '.siv': 'application/sieve', + '.sk': 'image/x-skencil', + '.sk1': 'image/x-skencil', + '.skr': 'application/pgp-keys', + '.slk': 'text/spreadsheet', + '.smaf': 'application/x-smaf', + '.smc': 'application/x-snes-rom', + '.smd': 'application/vnd.stardivision.mail', + '.smf': 'application/vnd.stardivision.math', + '.smi': 'application/x-sami', + '.smil': 'application/smil', + '.sml': 'application/smil', + '.sms': 'application/x-sms-rom', + '.snd': 'audio/basic', + '.so': 'application/x-sharedlib', + '.spc': 'application/x-pkcs7-certificates', + '.spd': 'application/x-font-speedo', + '.spec': 'text/x-rpm-spec', + '.spl': 'application/x-shockwave-flash', + '.spx': 'audio/x-speex', + '.sql': 'text/x-sql', + '.sr2': 'image/x-sony-sr2', + '.src': 'application/x-wais-source', + '.srf': 'image/x-sony-srf', + '.srt': 'application/x-subrip', + '.ssa': 
'text/x-ssa', + '.stc': 'application/vnd.sun.xml.calc.template', + '.std': 'application/vnd.sun.xml.draw.template', + '.sti': 'application/vnd.sun.xml.impress.template', + '.stm': 'audio/x-stm', + '.stw': 'application/vnd.sun.xml.writer.template', + '.sty': 'text/x-tex', + '.sub': 'text/x-subviewer', + '.sun': 'image/x-sun-raster', + '.sv4cpio': 'application/x-sv4cpio', + '.sv4crc': 'application/x-sv4crc', + '.svg': 'image/svg+xml', + '.svgz': 'image/svg+xml-compressed', + '.swf': 'application/x-shockwave-flash', + '.sxc': 'application/vnd.sun.xml.calc', + '.sxd': 'application/vnd.sun.xml.draw', + '.sxg': 'application/vnd.sun.xml.writer.global', + '.sxi': 'application/vnd.sun.xml.impress', + '.sxm': 'application/vnd.sun.xml.math', + '.sxw': 'application/vnd.sun.xml.writer', + '.sylk': 'text/spreadsheet', + '.t': 'text/troff', + '.t2t': 'text/x-txt2tags', + '.tar': 'application/x-tar', + '.tar.bz': 'application/x-bzip-compressed-tar', + '.tar.bz2': 'application/x-bzip-compressed-tar', + '.tar.gz': 'application/x-compressed-tar', + '.tar.lzma': 'application/x-lzma-compressed-tar', + '.tar.lzo': 'application/x-tzo', + '.tar.xz': 'application/x-xz-compressed-tar', + '.tar.z': 'application/x-tarz', + '.tbz': 'application/x-bzip-compressed-tar', + '.tbz2': 'application/x-bzip-compressed-tar', + '.tcl': 'text/x-tcl', + '.tex': 'text/x-tex', + '.texi': 'text/x-texinfo', + '.texinfo': 'text/x-texinfo', + '.tga': 'image/x-tga', + '.tgz': 'application/x-compressed-tar', + '.theme': 'application/x-theme', + '.themepack': 'application/x-windows-themepack', + '.tif': 'image/tiff', + '.tiff': 'image/tiff', + '.tk': 'text/x-tcl', + '.tlz': 'application/x-lzma-compressed-tar', + '.tnef': 'application/vnd.ms-tnef', + '.tnf': 'application/vnd.ms-tnef', + '.toc': 'application/x-cdrdao-toc', + '.torrent': 'application/x-bittorrent', + '.tpic': 'image/x-tga', + '.tr': 'text/troff', + '.ts': 'application/x-linguist', + '.tsv': 'text/tab-separated-values', + '.tta': 'audio/x-tta', + 
'.ttc': 'application/x-font-ttf', + '.ttf': 'application/x-font-ttf', + '.ttx': 'application/x-font-ttx', + '.txt': 'text/plain', + '.txz': 'application/x-xz-compressed-tar', + '.tzo': 'application/x-tzo', + '.ufraw': 'application/x-ufraw', + '.ui': 'application/x-designer', + '.uil': 'text/x-uil', + '.ult': 'audio/x-mod', + '.uni': 'audio/x-mod', + '.uri': 'text/x-uri', + '.url': 'text/x-uri', + '.ustar': 'application/x-ustar', + '.vala': 'text/x-vala', + '.vapi': 'text/x-vala', + '.vcf': 'text/directory', + '.vcs': 'text/calendar', + '.vct': 'text/directory', + '.vda': 'image/x-tga', + '.vhd': 'text/x-vhdl', + '.vhdl': 'text/x-vhdl', + '.viv': 'video/vivo', + '.vivo': 'video/vivo', + '.vlc': 'audio/x-mpegurl', + '.vob': 'video/mpeg', + '.voc': 'audio/x-voc', + '.vor': 'application/vnd.stardivision.writer', + '.vst': 'image/x-tga', + '.wav': 'audio/x-wav', + '.wax': 'audio/x-ms-asx', + '.wb1': 'application/x-quattropro', + '.wb2': 'application/x-quattropro', + '.wb3': 'application/x-quattropro', + '.wbmp': 'image/vnd.wap.wbmp', + '.wcm': 'application/vnd.ms-works', + '.wdb': 'application/vnd.ms-works', + '.wk1': 'application/vnd.lotus-1-2-3', + '.wk3': 'application/vnd.lotus-1-2-3', + '.wk4': 'application/vnd.lotus-1-2-3', + '.wks': 'application/vnd.ms-works', + '.wma': 'audio/x-ms-wma', + '.wmf': 'image/x-wmf', + '.wml': 'text/vnd.wap.wml', + '.wmls': 'text/vnd.wap.wmlscript', + '.wmv': 'video/x-ms-wmv', + '.wmx': 'audio/x-ms-asx', + '.wp': 'application/vnd.wordperfect', + '.wp4': 'application/vnd.wordperfect', + '.wp5': 'application/vnd.wordperfect', + '.wp6': 'application/vnd.wordperfect', + '.wpd': 'application/vnd.wordperfect', + '.wpg': 'application/x-wpg', + '.wpl': 'application/vnd.ms-wpl', + '.wpp': 'application/vnd.wordperfect', + '.wps': 'application/vnd.ms-works', + '.wri': 'application/x-mswrite', + '.wrl': 'model/vrml', + '.wv': 'audio/x-wavpack', + '.wvc': 'audio/x-wavpack-correction', + '.wvp': 'audio/x-wavpack', + '.wvx': 'audio/x-ms-asx', + 
def contenttype(filename, default='text/plain'):
    """
    Return the Content-Type string matching the extension of *filename*.

    The last extension is looked up first, then a compound extension
    (e.g. '.tar.gz') overrides it when the table knows it.  Unknown
    extensions fall back to *default*; any text/* result gets an
    explicit UTF-8 charset appended.
    """
    last_dot = filename.rfind('.')
    if last_dot >= 0:
        default = CONTENT_TYPE.get(filename[last_dot:].lower(), default)
        prev_dot = filename.rfind('.', 0, last_dot)
        if prev_dot >= 0:
            default = CONTENT_TYPE.get(filename[prev_dot:].lower(), default)
    if default.startswith('text/'):
        default += '; charset=utf-8'
    return default
+ """ + + i = filename.rfind('.') + if i>=0: + default = CONTENT_TYPE.get(filename[i:].lower(),default) + j = filename.rfind('.', 0, i) + if j>=0: + default = CONTENT_TYPE.get(filename[j:].lower(),default) + if default.startswith('text/'): + default += '; charset=utf-8' + return default + + + ADDED gluon/contenttype.pyc Index: gluon/contenttype.pyc ================================================================== --- /dev/null +++ gluon/contenttype.pyc cannot compute difference between binary files ADDED gluon/contrib/AuthorizeNet.py Index: gluon/contrib/AuthorizeNet.py ================================================================== --- /dev/null +++ gluon/contrib/AuthorizeNet.py @@ -0,0 +1,261 @@ +""" +AIM class to credit card payment with authorize.net + +Fork of authnet code written by John Conde +http://www.johnconde.net/blog/integrate-the-authorizenet-aim-api-with-python-3-2/ +Unkown license, assuming public domain + +Modifed by Massimo Di Pierro + +- ported from Python 3.x run on Python 2.4+ +- fixed a couple of bugs +- merged with test so single file +- namedtuple from http://code.activestate.com/recipes/500261/ + +""" + +__all__ = ['AIM'] + +from operator import itemgetter +import urllib + +_known_tuple_types = {} + +class NamedTupleBase(tuple): + """Base class for named tuples with the __new__ operator set, named tuples + yielded by the namedtuple() function will subclass this and add + properties.""" + def __new__(cls, *args, **kws): + """Create a new instance of this fielded tuple""" + # May need to unpack named field values here + if kws: + values = list(args) + [None]*(len(cls._fields) - len(args)) + fields = dict((val, idx) for idx, val in enumerate(cls._fields)) + for kw, val in kws.iteritems(): + assert kw in kws, "%r not in field list" % kw + values[fields[kw]] = val + args = tuple(values) + return tuple.__new__(cls, args) + +def namedtuple(typename, fieldnames): + """ + >>> import namedtuples + >>> tpl = namedtuples.namedtuple(['a', 'b', 'c']) 
+ >>> tpl(1, 2, 3) + (1, 2, 3) + >>> tpl(1, 2, 3).b + 2 + >>> tpl(c=1, a=2, b=3) + (2, 3, 1) + >>> tpl(c=1, a=2, b=3).b + 3 + >>> tpl(c='pads with nones') + (None, None, 'pads with nones') + >>> tpl(b='pads with nones') + (None, 'pads with nones', None) + >>> + """ + # Split up a string, some people do this + if isinstance(fieldnames, basestring): + fieldnames = fieldnames.replace(',', ' ').split() + # Convert anything iterable that enumerates fields to a tuple now + fieldname_tuple = tuple(str(field) for field in fieldnames) + # See if we've cached this + if fieldname_tuple in _known_tuple_types: + return _known_tuple_types[fieldname_tuple] + # Make the type + new_tuple_type = type(typename, (NamedTupleBase,), {}) + # Set the hidden field + new_tuple_type._fields = fieldname_tuple + # Add the getters + for i, field in enumerate(fieldname_tuple): + setattr(new_tuple_type, field, property(itemgetter(i))) + # Cache + _known_tuple_types[fieldname_tuple] = new_tuple_type + # Done + return new_tuple_type + +class AIM: + + class AIMError(Exception): + def __init__(self, value): + self.parameter = value + def __str__(self): + return str(self.parameter) + + def __init__(self, login, transkey, testmode=False): + if str(login).strip() == '' or login == None: + raise AIM.AIMError('No login name provided') + if str(transkey).strip() == '' or transkey == None: + raise AIM.AIMError('No transaction key provided') + if testmode != True and testmode != False: + raise AIM.AIMError('Invalid value for testmode. Must be True or False. 
"{0}" given.'.format(testmode)) + + self.testmode = testmode + self.proxy = None; + self.delimiter = '|' + self.results = [] + self.error = True + self.success = False + self.declined = False + + self.parameters = {} + self.setParameter('x_delim_data', 'true') + self.setParameter('x_delim_char', self.delimiter) + self.setParameter('x_relay_response', 'FALSE') + self.setParameter('x_url', 'FALSE') + self.setParameter('x_version', '3.1') + self.setParameter('x_method', 'CC') + self.setParameter('x_type', 'AUTH_CAPTURE') + self.setParameter('x_login', login) + self.setParameter('x_tran_key', transkey) + + def process(self): + encoded_args = urllib.urlencode(self.parameters) + if self.testmode == True: + url = 'https://test.authorize.net/gateway/transact.dll' + else: + url = 'https://secure.authorize.net/gateway/transact.dll' + + if self.proxy == None: + self.results += str(urllib.urlopen(url, encoded_args).read()).split(self.delimiter) + else: + opener = urllib.FancyURLopener(self.proxy) + opened = opener.open(url, encoded_args) + try: + self.results += str(opened.read()).split(self.delimiter) + finally: + opened.close() + Results = namedtuple('Results', 'ResultResponse ResponseSubcode ResponseCode ResponseText AuthCode \ + AVSResponse TransactionID InvoiceNumber Description Amount PaymentMethod \ + TransactionType CustomerID CHFirstName CHLastName Company BillingAddress \ + BillingCity BillingState BillingZip BillingCountry Phone Fax Email ShippingFirstName \ + ShippingLastName ShippingCompany ShippingAddress ShippingCity ShippingState \ + ShippingZip ShippingCountry TaxAmount DutyAmount FreightAmount TaxExemptFlag \ + PONumber MD5Hash CVVResponse CAVVResponse') + self.response = Results(*tuple(r for r in self.results)[0:40]) + + if self.getResultResponseFull() == 'Approved': + self.error = False + self.success = True + self.declined = False + elif self.getResultResponseFull() == 'Declined': + self.error = False + self.success = False + self.declined = True + else: + 
raise AIM.AIMError(self.response.ResponseText) + + def setTransaction(self, creditcard, expiration, total, cvv=None, tax=None, invoice=None): + if str(creditcard).strip() == '' or creditcard == None: + raise AIM.AIMError('No credit card number passed to setTransaction(): {0}'.format(creditcard)) + if str(expiration).strip() == '' or expiration == None: + raise AIM.AIMError('No expiration number to setTransaction(): {0}'.format(expiration)) + if str(total).strip() == '' or total == None: + raise AIM.AIMError('No total amount passed to setTransaction(): {0}'.format(total)) + + self.setParameter('x_card_num', creditcard) + self.setParameter('x_exp_date', expiration) + self.setParameter('x_amount', total) + if cvv != None: + self.setParameter('x_card_code', cvv) + if tax != None: + self.setParameter('x_tax', tax) + if invoice != None: + self.setParameter('x_invoice_num', invoice) + + def setTransactionType(self, transtype=None): + types = ['AUTH_CAPTURE', 'AUTH_ONLY', 'PRIOR_AUTH_CAPTURE', 'CREDIT', 'CAPTURE_ONLY', 'VOID'] + if transtype.upper() not in types: + raise AIM.AIMError('Incorrect Transaction Type passed to setTransactionType(): {0}'.format(transtype)) + self.setParameter('x_type', transtype.upper()) + + def setProxy(self, proxy=None): + if str(proxy).strip() == '' or proxy == None: + raise AIM.AIMError('No proxy passed to setProxy()') + self.proxy = {'http': str(proxy).strip()} + + def setParameter(self, key=None, value=None): + if key != None and value != None and str(key).strip() != '' and str(value).strip() != '': + self.parameters[key] = str(value).strip() + else: + raise AIM.AIMError('Incorrect parameters passed to setParameter(): {0}:{1}'.format(key, value)) + + def isApproved(self): + return self.success + + def isDeclined(self): + return self.declined + + def isError(self): + return self.error + + def getResultResponseFull(self): + responses = ['', 'Approved', 'Declined', 'Error'] + return responses[int(self.results[0])] + +def 
def process(creditcard, expiration, total, cvv=None, tax=None, invoice=None,
            login='cnpdev4289', transkey='SR2P8g4jdEn7vFLQ', testmode=True):
    """One-shot AUTH_CAPTURE helper: returns True if the card was approved,
    False otherwise (including on any gateway-reported error).

    NOTE(review): the default login/transkey are Authorize.Net *sandbox*
    credentials; production callers must supply their own.
    """
    payment = AIM(login, transkey, testmode)
    expiration = expiration.replace('/', '')  # gateway wants MMYY, no slash
    payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice)
    try:
        payment.process()
        return payment.isApproved()
    except AIM.AIMError:
        return False


def test():
    """Run a sample transaction against the sandbox gateway and print the
    response (requires network access)."""
    import socket
    from time import time

    creditcard = '4427802641004797'   # Authorize.Net sandbox test card
    expiration = '122012'
    total = '1.00'
    cvv = '123'
    tax = '0.00'
    invoice = str(time())[4:10]  # get a random invoice number

    try:
        payment = AIM('cnpdev4289', 'SR2P8g4jdEn7vFLQ', True)
        payment.setTransaction(creditcard, expiration, total, cvv, tax, invoice)
        payment.setParameter('x_duplicate_window', 180)  # three minutes duplicate windows
        payment.setParameter('x_cust_id', '1324')        # customer ID
        payment.setParameter('x_first_name', 'John')
        payment.setParameter('x_last_name', 'Conde')
        payment.setParameter('x_company', 'Test Company')
        payment.setParameter('x_address', '1234 Main Street')
        payment.setParameter('x_city', 'Townsville')
        payment.setParameter('x_state', 'NJ')
        payment.setParameter('x_zip', '12345')
        payment.setParameter('x_country', 'US')
        payment.setParameter('x_phone', '800-555-1234')
        payment.setParameter('x_description', 'Test Transaction')
        payment.setParameter('x_customer_ip', socket.gethostbyname(socket.gethostname()))
        payment.setParameter('x_email', 'john@example.com')
        payment.setParameter('x_email_customer', False)
        payment.process()
        if payment.isApproved():
            # Single pre-formatted print() arguments keep this runnable
            # under both Python 2 and 3 (the original used Python-2-only
            # 'print x, y' statements and 'except X, e' syntax).
            print('Response Code: %s' % payment.response.ResponseCode)
            print('Response Text: %s' % payment.response.ResponseText)
            print('Response: %s' % payment.getResultResponseFull())
            print('Transaction ID: %s' % payment.response.TransactionID)
            print('CVV Result: %s' % payment.response.CVVResponse)
            print('Approval Code: %s' % payment.response.AuthCode)
            print('AVS Result: %s' % payment.response.AVSResponse)
        elif payment.isDeclined():
            print('Your credit card was declined by your bank')
        elif payment.isError():
            raise AIM.AIMError('An uncaught error occurred')
    except AIM.AIMError as e:
        print('Exception thrown: %s' % e)
        print('An error occured')
        print('approved %s' % payment.isApproved())
        print('declined %s' % payment.isDeclined())
        print('error %s' % payment.isError())


if __name__ == '__main__':
    test()
+- "ws://127.0.0.1:8888/realtime/" must be contain the IP of the comet_messaging server. +- Via group='mygroup' name you can support multiple groups of clients (think of many chat-rooms) + +Here is a complete sample web2py action: + + def index(): + form=LOAD('default','ajax_form',ajax=True) + script=SCRIPT(''' + jQuery(document).ready(function(){ + var callback=function(e){alert(e.data)}; + if(!web2py_comet('ws://127.0.0.1:8888/realtime/mygroup',callback)) + alert("html5 websocket not supported by your browser, try Google Chrome"); + }); + ''') + return dict(form=form, script=script) + + def ajax_form(): + form=SQLFORM.factory(Field('message')) + if form.accepts(request,session): + from gluon.contrib.comet_messaging import comet_send + comet_send('http://127.0.0.1:8888',form.vars.message,'mykey','mygroup') + return form + +Acknowledgements: +Tornado code inspired by http://thomas.pelletier.im/2010/08/websocket-tornado-redis/ + +""" + +import tornado.httpserver +import tornado.websocket +import tornado.ioloop +import tornado.web +import hmac +import sys +import optparse +import urllib +import time + +listeners = {} +names = {} +tokens = {} + +def comet_send(url,message,hmac_key=None,group='default'): + sig = hmac_key and hmac.new(hmac_key,message).hexdigest() or '' + params = urllib.urlencode({'message': message, 'signature': sig, 'group':group}) + f = urllib.urlopen(url, params) + data= f.read() + f.close() + return data + +class PostHandler(tornado.web.RequestHandler): + """ + only authorized parties can post messages + """ + def post(self): + if hmac_key and not 'signature' in self.request.arguments: return 'false' + if 'message' in self.request.arguments: + message = self.request.arguments['message'][0] + group = self.request.arguments.get('group',['default'])[0] + print '%s:MESSAGE to %s:%s' % (time.time(), group, message) + if hmac_key: + signature = self.request.arguments['signature'][0] + if not hmac.new(hmac_key,message).hexdigest()==signature: return 
'false' + for client in listeners.get(group,[]): client.write_message(message) + return 'true' + return 'false' + +class TokenHandler(tornado.web.RequestHandler): + """ + if running with -t post a token to allow a client to join using the token + the message here is the token (any uuid) + allows only authorized parties to joins, for example, a chat + """ + def post(self): + if hmac_key and not 'message' in self.request.arguments: return 'false' + if 'message' in self.request.arguments: + message = self.request.arguments['message'][0] + if hmac_key: + signature = self.request.arguments['signature'][0] + if not hmac.new(hmac_key,message).hexdigest()==signature: return 'false' + tokens[message] = None + return 'true' + return 'false' + +class DistributeHandler(tornado.websocket.WebSocketHandler): + def open(self,params): + group,token,name = params.split('/')+[None,None] + self.group = group or 'default' + self.token = token or 'none' + self.name = name or 'anonymous' + # only authorized parties can join + if DistributeHandler.tokens: + if not self.token in tokens or not token[self.token]==None: + self.close() + else: + tokens[self.token] = self + if not self.group in listeners: listeners[self.group]=[] + # notify clients that a member has joined the groups + for client in listeners.get(self.group,[]): client.write_message('+'+self.name) + listeners[self.group].append(self) + names[self] = self.name + print '%s:CONNECT to %s' % (time.time(), self.group) + def on_message(self, message): + pass + def on_close(self): + if self.group in listeners: listeners[self.group].remove(self) + del names[self] + # notify clients that a member has left the groups + for client in listeners.get(self.group,[]): client.write_message('-'+self.name) + print '%s:DISCONNECT from %s' % (time.time(), self.group) + +if __name__ == "__main__": + usage = __doc__ + version= "" + parser = optparse.OptionParser(usage, None, optparse.Option, version) + parser.add_option('-p', + '--port', + 
if __name__ == "__main__":
    # Command-line entry point: parse options, register the three URL
    # handlers and run the tornado event loop forever.
    usage = __doc__
    version = ""
    parser = optparse.OptionParser(usage, None, optparse.Option, version)
    parser.add_option('-p',
                      '--port',
                      default='8888',
                      dest='port',
                      help='socket')
    parser.add_option('-l',
                      '--listen',
                      default='0.0.0.0',
                      dest='address',
                      help='listener address')
    parser.add_option('-k',
                      '--hmac_key',
                      default='',
                      dest='hmac_key',
                      help='hmac_key')
    parser.add_option('-t',
                      '--tokens',
                      action='store_true',
                      default=False,
                      dest='tokens',
                      help='require tokens to join')  # fixed typo: 'tockens'
    (options, args) = parser.parse_args()
    hmac_key = options.hmac_key
    DistributeHandler.tokens = options.tokens
    urls = [
        (r'/', PostHandler),
        (r'/token', TokenHandler),
        (r'/realtime/(.*)', DistributeHandler)]
    application = tornado.web.Application(urls, auto_reload=True)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(int(options.port), address=options.address)
    tornado.ioloop.IOLoop.instance().start()
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS' +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.""" +__author__ = "Mark Pilgrim " +__contributors__ = ["Jason Diamond ", + "John Beimler ", + "Fazal Majid ", + "Aaron Swartz ", + "Kevin Marks ", + "Sam Ruby ", + "Ade Oshineye ", + "Martin Pool ", + "Kurt McKee "] + +# HTTP "User-Agent" header to send to servers when downloading feeds. +# If you are embedding feedparser in a larger application, you should +# change this to your application name and URL. +USER_AGENT = "UniversalFeedParser/%s +http://feedparser.org/" % __version__ + +# HTTP "Accept" header to send to servers when downloading feeds. If you don't +# want to send an Accept header, set this to None. +ACCEPT_HEADER = "application/atom+xml,application/rdf+xml,application/rss+xml,application/x-netcdf,application/xml;q=0.9,text/xml;q=0.2,*/*;q=0.1" + +# List of preferred XML parsers, by SAX driver name. These will be tried first, +# but if they're not installed, Python will keep searching through its own list +# of pre-installed parsers until it finds one that supports everything we need. +PREFERRED_XML_PARSERS = ["drv_libxml2"] + +# If you want feedparser to automatically run HTML markup through HTML Tidy, set +# this to 1. Requires mxTidy +# or utidylib . 
+TIDY_MARKUP = 0 + +# List of Python interfaces for HTML Tidy, in order of preference. Only useful +# if TIDY_MARKUP = 1 +PREFERRED_TIDY_INTERFACES = ["uTidy", "mxTidy"] + +# If you want feedparser to automatically resolve all relative URIs, set this +# to 1. +RESOLVE_RELATIVE_URIS = 1 + +# If you want feedparser to automatically sanitize all potentially unsafe +# HTML content, set this to 1. +SANITIZE_HTML = 1 + +# ---------- Python 3 modules (make it work if possible) ---------- +try: + import rfc822 +except ImportError: + from email import _parseaddr as rfc822 + +try: + # Python 3.1 introduces bytes.maketrans and simultaneously + # deprecates string.maketrans; use bytes.maketrans if possible + _maketrans = bytes.maketrans +except (NameError, AttributeError): + import string + _maketrans = string.maketrans + +# base64 support for Atom feeds that contain embedded binary data +try: + import base64, binascii +except ImportError: + base64 = binascii = None +else: + # Python 3.1 deprecates decodestring in favor of decodebytes + _base64decode = getattr(base64, 'decodebytes', base64.decodestring) + +def _s2bytes(s): + # Convert a UTF-8 str to bytes if the interpreter is Python 3 + try: + return bytes(s, 'utf8') + except (NameError, TypeError): + # In Python 2.5 and below, bytes doesn't exist (NameError) + # In Python 2.6 and above, bytes and str are the same (TypeError) + return s + +def _l2bytes(l): + # Convert a list of ints to bytes if the interpreter is Python 3 + try: + if bytes is not str: + # In Python 2.6 and above, this call won't raise an exception + # but it will return bytes([65]) as '[65]' instead of 'A' + return bytes(l) + raise NameError + except NameError: + return ''.join(map(chr, l)) + +# If you want feedparser to allow all URL schemes, set this to () +# List culled from Python's urlparse documentation at: +# http://docs.python.org/library/urlparse.html +# as well as from "URI scheme" at Wikipedia: +# 
https://secure.wikimedia.org/wikipedia/en/wiki/URI_scheme +# Many more will likely need to be added! +ACCEPTABLE_URI_SCHEMES = ( + 'file', 'ftp', 'gopher', 'h323', 'hdl', 'http', 'https', 'imap', 'mailto', + 'mms', 'news', 'nntp', 'prospero', 'rsync', 'rtsp', 'rtspu', 'sftp', + 'shttp', 'sip', 'sips', 'snews', 'svn', 'svn+ssh', 'telnet', 'wais', + # Additional common-but-unofficial schemes + 'aim', 'callto', 'cvs', 'facetime', 'feed', 'git', 'gtalk', 'irc', 'ircs', + 'irc6', 'itms', 'mms', 'msnim', 'skype', 'ssh', 'smb', 'svn', 'ymsg', +) +#ACCEPTABLE_URI_SCHEMES = () + +# ---------- required modules (should come with any Python distribution) ---------- +import cgi +import copy +import datetime +import re +import struct +import sys +import time +import types +import urllib +import urllib2 +import urlparse + +from htmlentitydefs import name2codepoint, codepoint2name, entitydefs + +try: + from io import BytesIO as _StringIO +except ImportError: + try: + from cStringIO import StringIO as _StringIO + except ImportError: + from StringIO import StringIO as _StringIO + +# ---------- optional modules (feedparser will work without these, but with reduced functionality) ---------- + +# gzip is included with most Python distributions, but may not be available if you compiled your own +try: + import gzip +except ImportError: + gzip = None +try: + import zlib +except ImportError: + zlib = None + +# If a real XML parser is available, feedparser will attempt to use it. feedparser has +# been tested with the built-in SAX parser, PyXML, and libxml2. On platforms where the +# Python distribution does not come with an XML parser (such as Mac OS X 10.2 and some +# versions of FreeBSD), feedparser will quietly fall back on regex-based parsing. 
try:
    import xml.sax
    from xml.sax.saxutils import escape as _xmlescape
except ImportError:
    _XML_AVAILABLE = 0

    def _xmlescape(data, entities={}):
        """Minimal stand-in for xml.sax.saxutils.escape.

        Escapes '&', '<' and '>' and then applies any extra
        {char: entity} replacements from *entities*.

        BUG FIX: the original fallback replaced each character with
        itself (a no-op) and iterated the entities dict directly, so the
        2-tuple unpacking below would have raised on any non-empty dict;
        both are corrected to match saxutils.escape.
        """
        # '&' must be escaped first so the entities just produced are
        # not themselves re-escaped.
        data = data.replace('&', '&amp;')
        data = data.replace('>', '&gt;')
        data = data.replace('<', '&lt;')
        for char, entity in entities.items():
            data = data.replace(char, entity)
        return data
else:
    try:
        xml.sax.make_parser(PREFERRED_XML_PARSERS)  # test for valid parsers
    except xml.sax.SAXReaderNotAvailable:
        _XML_AVAILABLE = 0
    else:
        _XML_AVAILABLE = 1
+ charref = re.compile('&#(\d+|[xX][0-9a-fA-F]+);') + tagfind = re.compile('[a-zA-Z][-_.:a-zA-Z0-9]*') + + # Unfortunately, these must be copied over to prevent NameError exceptions + attrfind = sgmllib.attrfind + entityref = sgmllib.entityref + incomplete = sgmllib.incomplete + interesting = sgmllib.interesting + shorttag = sgmllib.shorttag + shorttagopen = sgmllib.shorttagopen + starttagopen = sgmllib.starttagopen + + class _EndBracketRegEx: + def __init__(self): + # Overriding the built-in sgmllib.endbracket regex allows the + # parser to find angle brackets embedded in element attributes. + self.endbracket = re.compile('''([^'"<>]|"[^"]*"(?=>|/|\s|\w+=)|'[^']*'(?=>|/|\s|\w+=))*(?=[<>])|.*?(?=[<>])''') + def search(self, target, index=0): + match = self.endbracket.match(target, index) + if match is not None: + # Returning a new object in the calling thread's context + # resolves a thread-safety. + return EndBracketMatch(match) + return None + class EndBracketMatch: + def __init__(self, match): + self.match = match + def start(self, n): + return self.match.end(n) + endbracket = _EndBracketRegEx() + + +# cjkcodecs and iconv_codec provide support for more character encodings. +# Both are available from http://cjkpython.i18n.org/ +try: + import cjkcodecs.aliases +except ImportError: + pass +try: + import iconv_codec +except ImportError: + pass + +# chardet library auto-detects character encodings +# Download from http://chardet.feedparser.org/ +try: + import chardet +except ImportError: + chardet = None + +# BeautifulSoup parser used for parsing microformats from embedded HTML content +# http://www.crummy.com/software/BeautifulSoup/ +# feedparser is tested with BeautifulSoup 3.0.x, but it might work with the +# older 2.x series. If it doesn't, and you can figure out why, I'll accept a +# patch and modify the compatibility statement accordingly. 
+try: + import BeautifulSoup +except ImportError: + BeautifulSoup = None + +# ---------- don't touch these ---------- +class ThingsNobodyCaresAboutButMe(Exception): pass +class CharacterEncodingOverride(ThingsNobodyCaresAboutButMe): pass +class CharacterEncodingUnknown(ThingsNobodyCaresAboutButMe): pass +class NonXMLContentType(ThingsNobodyCaresAboutButMe): pass +class UndeclaredNamespace(Exception): pass + +SUPPORTED_VERSIONS = {'': u'unknown', + 'rss090': u'RSS 0.90', + 'rss091n': u'RSS 0.91 (Netscape)', + 'rss091u': u'RSS 0.91 (Userland)', + 'rss092': u'RSS 0.92', + 'rss093': u'RSS 0.93', + 'rss094': u'RSS 0.94', + 'rss20': u'RSS 2.0', + 'rss10': u'RSS 1.0', + 'rss': u'RSS (unknown version)', + 'atom01': u'Atom 0.1', + 'atom02': u'Atom 0.2', + 'atom03': u'Atom 0.3', + 'atom10': u'Atom 1.0', + 'atom': u'Atom (unknown version)', + 'cdf': u'CDF', + } + +class FeedParserDict(dict): + keymap = {'channel': 'feed', + 'items': 'entries', + 'guid': 'id', + 'date': 'updated', + 'date_parsed': 'updated_parsed', + 'description': ['summary', 'subtitle'], + 'url': ['href'], + 'modified': 'updated', + 'modified_parsed': 'updated_parsed', + 'issued': 'published', + 'issued_parsed': 'published_parsed', + 'copyright': 'rights', + 'copyright_detail': 'rights_detail', + 'tagline': 'subtitle', + 'tagline_detail': 'subtitle_detail'} + def __getitem__(self, key): + if key == 'category': + try: + return dict.__getitem__(self, 'tags')[0]['term'] + except IndexError: + raise KeyError, "object doesn't have key 'category'" + elif key == 'enclosures': + norel = lambda link: FeedParserDict([(name,value) for (name,value) in link.items() if name!='rel']) + return [norel(link) for link in dict.__getitem__(self, 'links') if link['rel']==u'enclosure'] + elif key == 'license': + for link in dict.__getitem__(self, 'links'): + if link['rel']==u'license' and link.has_key('href'): + return link['href'] + elif key == 'categories': + return [(tag['scheme'], tag['term']) for tag in dict.__getitem__(self, 
'tags')] + else: + realkey = self.keymap.get(key, key) + if isinstance(realkey, list): + for k in realkey: + if dict.__contains__(self, k): + return dict.__getitem__(self, k) + elif dict.__contains__(self, realkey): + return dict.__getitem__(self, realkey) + return dict.__getitem__(self, key) + + def __contains__(self, key): + try: + self.__getitem__(key) + except KeyError: + return False + else: + return True + + has_key = __contains__ + + def get(self, key, default=None): + try: + return self.__getitem__(key) + except KeyError: + return default + + def __setitem__(self, key, value): + key = self.keymap.get(key, key) + if isinstance(key, list): + key = key[0] + return dict.__setitem__(self, key, value) + + def setdefault(self, key, value): + if key not in self: + self[key] = value + return value + return self[key] + + def __getattr__(self, key): + # __getattribute__() is called first; this will be called + # only if an attribute was not already found + try: + return self.__getitem__(key) + except KeyError: + raise AttributeError, "object has no attribute '%s'" % key + + +_ebcdic_to_ascii_map = None +def _ebcdic_to_ascii(s): + global _ebcdic_to_ascii_map + if not _ebcdic_to_ascii_map: + emap = ( + 0,1,2,3,156,9,134,127,151,141,142,11,12,13,14,15, + 16,17,18,19,157,133,8,135,24,25,146,143,28,29,30,31, + 128,129,130,131,132,10,23,27,136,137,138,139,140,5,6,7, + 144,145,22,147,148,149,150,4,152,153,154,155,20,21,158,26, + 32,160,161,162,163,164,165,166,167,168,91,46,60,40,43,33, + 38,169,170,171,172,173,174,175,176,177,93,36,42,41,59,94, + 45,47,178,179,180,181,182,183,184,185,124,44,37,95,62,63, + 186,187,188,189,190,191,192,193,194,96,58,35,64,39,61,34, + 195,97,98,99,100,101,102,103,104,105,196,197,198,199,200,201, + 202,106,107,108,109,110,111,112,113,114,203,204,205,206,207,208, + 209,126,115,116,117,118,119,120,121,122,210,211,212,213,214,215, + 216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231, + 
123,65,66,67,68,69,70,71,72,73,232,233,234,235,236,237, + 125,74,75,76,77,78,79,80,81,82,238,239,240,241,242,243, + 92,159,83,84,85,86,87,88,89,90,244,245,246,247,248,249, + 48,49,50,51,52,53,54,55,56,57,250,251,252,253,254,255 + ) + _ebcdic_to_ascii_map = _maketrans( \ + _l2bytes(range(256)), _l2bytes(emap)) + return s.translate(_ebcdic_to_ascii_map) + +_cp1252 = { + unichr(128): unichr(8364), # euro sign + unichr(130): unichr(8218), # single low-9 quotation mark + unichr(131): unichr( 402), # latin small letter f with hook + unichr(132): unichr(8222), # double low-9 quotation mark + unichr(133): unichr(8230), # horizontal ellipsis + unichr(134): unichr(8224), # dagger + unichr(135): unichr(8225), # double dagger + unichr(136): unichr( 710), # modifier letter circumflex accent + unichr(137): unichr(8240), # per mille sign + unichr(138): unichr( 352), # latin capital letter s with caron + unichr(139): unichr(8249), # single left-pointing angle quotation mark + unichr(140): unichr( 338), # latin capital ligature oe + unichr(142): unichr( 381), # latin capital letter z with caron + unichr(145): unichr(8216), # left single quotation mark + unichr(146): unichr(8217), # right single quotation mark + unichr(147): unichr(8220), # left double quotation mark + unichr(148): unichr(8221), # right double quotation mark + unichr(149): unichr(8226), # bullet + unichr(150): unichr(8211), # en dash + unichr(151): unichr(8212), # em dash + unichr(152): unichr( 732), # small tilde + unichr(153): unichr(8482), # trade mark sign + unichr(154): unichr( 353), # latin small letter s with caron + unichr(155): unichr(8250), # single right-pointing angle quotation mark + unichr(156): unichr( 339), # latin small ligature oe + unichr(158): unichr( 382), # latin small letter z with caron + unichr(159): unichr( 376)} # latin capital letter y with diaeresis + +_urifixer = re.compile('^([A-Za-z][A-Za-z0-9+-.]*://)(/*)(.*?)') +def _urljoin(base, uri): + uri = _urifixer.sub(r'\1\3', uri) + #try: + 
uri = urlparse.urljoin(base, uri) + if not isinstance(uri, unicode): + return uri.decode('utf-8', 'ignore') + return uri + #except: + # uri = urlparse.urlunparse([urllib.quote(part) for part in urlparse.urlparse(uri)]) + # return urlparse.urljoin(base, uri) + +class _FeedParserMixin: + namespaces = {'': '', + 'http://backend.userland.com/rss': '', + 'http://blogs.law.harvard.edu/tech/rss': '', + 'http://purl.org/rss/1.0/': '', + 'http://my.netscape.com/rdf/simple/0.9/': '', + 'http://example.com/newformat#': '', + 'http://example.com/necho': '', + 'http://purl.org/echo/': '', + 'uri/of/echo/namespace#': '', + 'http://purl.org/pie/': '', + 'http://purl.org/atom/ns#': '', + 'http://www.w3.org/2005/Atom': '', + 'http://purl.org/rss/1.0/modules/rss091#': '', + + 'http://webns.net/mvcb/': 'admin', + 'http://purl.org/rss/1.0/modules/aggregation/': 'ag', + 'http://purl.org/rss/1.0/modules/annotate/': 'annotate', + 'http://media.tangent.org/rss/1.0/': 'audio', + 'http://backend.userland.com/blogChannelModule': 'blogChannel', + 'http://web.resource.org/cc/': 'cc', + 'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons', + 'http://purl.org/rss/1.0/modules/company': 'co', + 'http://purl.org/rss/1.0/modules/content/': 'content', + 'http://my.theinfo.org/changed/1.0/rss/': 'cp', + 'http://purl.org/dc/elements/1.1/': 'dc', + 'http://purl.org/dc/terms/': 'dcterms', + 'http://purl.org/rss/1.0/modules/email/': 'email', + 'http://purl.org/rss/1.0/modules/event/': 'ev', + 'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner', + 'http://freshmeat.net/rss/fm/': 'fm', + 'http://xmlns.com/foaf/0.1/': 'foaf', + 'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo', + 'http://postneo.com/icbm/': 'icbm', + 'http://purl.org/rss/1.0/modules/image/': 'image', + 'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes', + 'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes', + 'http://purl.org/rss/1.0/modules/link/': 'l', + 'http://search.yahoo.com/mrss': 'media', + #Version 
1.1.2 of the Media RSS spec added the trailing slash on the namespace + 'http://search.yahoo.com/mrss/': 'media', + 'http://madskills.com/public/xml/rss/module/pingback/': 'pingback', + 'http://prismstandard.org/namespaces/1.2/basic/': 'prism', + 'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf', + 'http://www.w3.org/2000/01/rdf-schema#': 'rdfs', + 'http://purl.org/rss/1.0/modules/reference/': 'ref', + 'http://purl.org/rss/1.0/modules/richequiv/': 'reqv', + 'http://purl.org/rss/1.0/modules/search/': 'search', + 'http://purl.org/rss/1.0/modules/slash/': 'slash', + 'http://schemas.xmlsoap.org/soap/envelope/': 'soap', + 'http://purl.org/rss/1.0/modules/servicestatus/': 'ss', + 'http://hacks.benhammersley.com/rss/streaming/': 'str', + 'http://purl.org/rss/1.0/modules/subscription/': 'sub', + 'http://purl.org/rss/1.0/modules/syndication/': 'sy', + 'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf', + 'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo', + 'http://purl.org/rss/1.0/modules/threading/': 'thr', + 'http://purl.org/rss/1.0/modules/textinput/': 'ti', + 'http://madskills.com/public/xml/rss/module/trackback/':'trackback', + 'http://wellformedweb.org/commentAPI/': 'wfw', + 'http://purl.org/rss/1.0/modules/wiki/': 'wiki', + 'http://www.w3.org/1999/xhtml': 'xhtml', + 'http://www.w3.org/1999/xlink': 'xlink', + 'http://www.w3.org/XML/1998/namespace': 'xml' +} + _matchnamespaces = {} + + can_be_relative_uri = ['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'] + can_contain_relative_uris = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] + can_contain_dangerous_markup = ['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'] + html_types = [u'text/html', u'application/xhtml+xml'] + + def __init__(self, baseuri=None, baselang=None, encoding=u'utf-8'): + if not self._matchnamespaces: + for k, v in 
self.namespaces.items(): + self._matchnamespaces[k.lower()] = v + self.feeddata = FeedParserDict() # feed-level data + self.encoding = encoding # character encoding + self.entries = [] # list of entry-level data + self.version = u'' # feed type/version, see SUPPORTED_VERSIONS + self.namespacesInUse = {} # dictionary of namespaces defined by the feed + + # the following are used internally to track state; + # this is really out of control and should be refactored + self.infeed = 0 + self.inentry = 0 + self.incontent = 0 + self.intextinput = 0 + self.inimage = 0 + self.inauthor = 0 + self.incontributor = 0 + self.inpublisher = 0 + self.insource = 0 + self.sourcedata = FeedParserDict() + self.contentparams = FeedParserDict() + self._summaryKey = None + self.namespacemap = {} + self.elementstack = [] + self.basestack = [] + self.langstack = [] + self.baseuri = baseuri or u'' + self.lang = baselang or None + self.svgOK = 0 + self.hasTitle = 0 + if baselang: + self.feeddata['language'] = baselang.replace('_','-') + + def _normalize_attributes(self, kv): + k = kv[0].lower() + v = k in ('rel', 'type') and kv[1].lower() or kv[1] + # the sgml parser doesn't handle entities in attributes, nor + # does it pass the attribute values through as unicode, while + # strict xml parsers do -- account for this difference + if isinstance(self, _LooseFeedParser): + v = v.replace('&', '&') + if not isinstance(v, unicode): + v = v.decode('utf-8') + return (k, v) + + def unknown_starttag(self, tag, attrs): + # normalize attrs + attrs = map(self._normalize_attributes, attrs) + + # track xml:base and xml:lang + attrsD = dict(attrs) + baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri + if not isinstance(baseuri, unicode): + baseuri = baseuri.decode(self.encoding, 'ignore') + # ensure that self.baseuri is always an absolute URI that + # uses a whitelisted URI scheme (e.g. 
not `javscript:`) + if self.baseuri: + self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri + else: + self.baseuri = _urljoin(self.baseuri, baseuri) + lang = attrsD.get('xml:lang', attrsD.get('lang')) + if lang == '': + # xml:lang could be explicitly set to '', we need to capture that + lang = None + elif lang is None: + # if no xml:lang is specified, use parent lang + lang = self.lang + if lang: + if tag in ('feed', 'rss', 'rdf:RDF'): + self.feeddata['language'] = lang.replace('_','-') + self.lang = lang + self.basestack.append(self.baseuri) + self.langstack.append(lang) + + # track namespaces + for prefix, uri in attrs: + if prefix.startswith('xmlns:'): + self.trackNamespace(prefix[6:], uri) + elif prefix == 'xmlns': + self.trackNamespace(None, uri) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'): + if tag in ['xhtml:div', 'div']: + return # typepad does this 10/2007 + # element declared itself as escaped markup, but it isn't really + self.contentparams['type'] = u'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml': + if tag.find(':') <> -1: + prefix, tag = tag.split(':', 1) + namespace = self.namespacesInUse.get(prefix, '') + if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML': + attrs.append(('xmlns',namespace)) + if tag=='svg' and namespace=='http://www.w3.org/2000/svg': + attrs.append(('xmlns',namespace)) + if tag == 'svg': + self.svgOK += 1 + return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0) + + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + + # special hack for better tracking of empty textinput/image elements in illformed feeds + if (not prefix) and tag not in ('title', 'link', 
'description', 'name'): + self.intextinput = 0 + if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'): + self.inimage = 0 + + # call special handler (if defined) or default handler + methodname = '_start_' + prefix + suffix + try: + method = getattr(self, methodname) + return method(attrsD) + except AttributeError: + # Since there's no handler or something has gone wrong we explicitly add the element and its attributes + unknown_tag = prefix + suffix + if len(attrsD) == 0: + # No attributes so merge it into the encosing dictionary + return self.push(unknown_tag, 1) + else: + # Has attributes so create it in its own dictionary + context = self._getContext() + context[unknown_tag] = attrsD + + def unknown_endtag(self, tag): + # match namespaces + if tag.find(':') <> -1: + prefix, suffix = tag.split(':', 1) + else: + prefix, suffix = '', tag + prefix = self.namespacemap.get(prefix, prefix) + if prefix: + prefix = prefix + '_' + if suffix == 'svg' and self.svgOK: + self.svgOK -= 1 + + # call special handler (if defined) or default handler + methodname = '_end_' + prefix + suffix + try: + if self.svgOK: + raise AttributeError() + method = getattr(self, methodname) + method() + except AttributeError: + self.pop(prefix + suffix) + + # track inline content + if self.incontent and self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'): + # element declared itself as escaped markup, but it isn't really + if tag in ['xhtml:div', 'div']: + return # typepad does this 10/2007 + self.contentparams['type'] = u'application/xhtml+xml' + if self.incontent and self.contentparams.get('type') == u'application/xhtml+xml': + tag = tag.split(':')[-1] + self.handle_data('' % tag, escape=0) + + # track xml:base and xml:lang going out of scope + if self.basestack: + self.basestack.pop() + if self.basestack and self.basestack[-1]: + self.baseuri = self.basestack[-1] + if self.langstack: + 
self.langstack.pop() + if self.langstack: # and (self.langstack[-1] is not None): + self.lang = self.langstack[-1] + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + if not self.elementstack: + return + ref = ref.lower() + if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'): + text = '&#%s;' % ref + else: + if ref[0] == 'x': + c = int(ref[1:], 16) + else: + c = int(ref) + text = unichr(c).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + if not self.elementstack: + return + if ref in ('lt', 'gt', 'quot', 'amp', 'apos'): + text = '&%s;' % ref + elif ref in self.entities.keys(): + text = self.entities[ref] + if text.startswith('&#') and text.endswith(';'): + return self.handle_entityref(text) + else: + try: + name2codepoint[ref] + except KeyError: + text = '&%s;' % ref + else: + text = unichr(name2codepoint[ref]).encode('utf-8') + self.elementstack[-1][2].append(text) + + def handle_data(self, text, escape=1): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + if not self.elementstack: + return + if escape and self.contentparams.get('type') == u'application/xhtml+xml': + text = _xmlescape(text) + self.elementstack[-1][2].append(text) + + def handle_comment(self, text): + # called for each comment, e.g. + pass + + def handle_pi(self, text): + # called for each processing instruction, e.g. 
+ pass + + def handle_decl(self, text): + pass + + def parse_declaration(self, i): + # override internal declaration handler to handle CDATA blocks + if self.rawdata[i:i+9] == '', i) + if k == -1: + # CDATA block began but didn't finish + k = len(self.rawdata) + return k + self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0) + return k+3 + else: + k = self.rawdata.find('>', i) + if k >= 0: + return k+1 + else: + # We have an incomplete CDATA block. + return k + + def mapContentType(self, contentType): + contentType = contentType.lower() + if contentType == 'text' or contentType == 'plain': + contentType = u'text/plain' + elif contentType == 'html': + contentType = u'text/html' + elif contentType == 'xhtml': + contentType = u'application/xhtml+xml' + return contentType + + def trackNamespace(self, prefix, uri): + loweruri = uri.lower() + if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/') and not self.version: + self.version = u'rss090' + if loweruri == 'http://purl.org/rss/1.0/' and not self.version: + self.version = u'rss10' + if loweruri == 'http://www.w3.org/2005/atom' and not self.version: + self.version = u'atom10' + if loweruri.find(u'backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + uri = u'http://backend.userland.com/rss' + loweruri = uri + if self._matchnamespaces.has_key(loweruri): + self.namespacemap[prefix] = self._matchnamespaces[loweruri] + self.namespacesInUse[self._matchnamespaces[loweruri]] = uri + else: + self.namespacesInUse[prefix or ''] = uri + + def resolveURI(self, uri): + return _urljoin(self.baseuri or u'', uri) + + def decodeEntities(self, element, data): + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'"'})) for t in attrs]) + + def push(self, element, expectingText): + self.elementstack.append([element, expectingText, []]) + + def pop(self, element, stripWhitespace=1): + if not self.elementstack: + return + if 
self.elementstack[-1][0] != element: + return + + element, expectingText, pieces = self.elementstack.pop() + + if self.version == u'atom10' and self.contentparams.get('type', u'text') == u'application/xhtml+xml': + # remove enclosing child element, but only if it is a
    and + # only if all the remaining content is nested underneath it. + # This means that the divs would be retained in the following: + #
    foo
    bar
    + while pieces and len(pieces)>1 and not pieces[-1].strip(): + del pieces[-1] + while pieces and len(pieces)>1 and not pieces[0].strip(): + del pieces[0] + if pieces and (pieces[0] == '
    ' or pieces[0].startswith('
    ': + depth = 0 + for piece in pieces[:-1]: + if piece.startswith(''): + depth += 1 + else: + pieces = pieces[1:-1] + + # Ensure each piece is a str for Python 3 + for (i, v) in enumerate(pieces): + if not isinstance(v, unicode): + pieces[i] = v.decode('utf-8') + + output = u''.join(pieces) + if stripWhitespace: + output = output.strip() + if not expectingText: + return output + + # decode base64 content + if base64 and self.contentparams.get('base64', 0): + try: + output = _base64decode(output) + except binascii.Error: + pass + except binascii.Incomplete: + pass + except TypeError: + # In Python 3, base64 takes and outputs bytes, not str + # This may not be the most correct way to accomplish this + output = _base64decode(output.encode('utf-8')).decode('utf-8') + + # resolve relative URIs + if (element in self.can_be_relative_uri) and output: + output = self.resolveURI(output) + + # decode entities within embedded markup + if not self.contentparams.get('base64', 0): + output = self.decodeEntities(element, output) + + # some feed formats require consumers to guess + # whether the content is html or plain text + if not self.version.startswith(u'atom') and self.contentparams.get('type') == u'text/plain': + if self.lookslikehtml(output): + self.contentparams['type'] = u'text/html' + + # remove temporary cruft from contentparams + try: + del self.contentparams['mode'] + except KeyError: + pass + try: + del self.contentparams['base64'] + except KeyError: + pass + + is_htmlish = self.mapContentType(self.contentparams.get('type', u'text/html')) in self.html_types + # resolve relative URIs within embedded markup + if is_htmlish and RESOLVE_RELATIVE_URIS: + if element in self.can_contain_relative_uris: + output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', u'text/html')) + + # parse microformats + # (must do this before sanitizing because some microformats + # rely on elements that we sanitize) + if is_htmlish and element in 
['content', 'description', 'summary']: + mfresults = _parseMicroformats(output, self.baseuri, self.encoding) + if mfresults: + for tag in mfresults.get('tags', []): + self._addTag(tag['term'], tag['scheme'], tag['label']) + for enclosure in mfresults.get('enclosures', []): + self._start_enclosure(enclosure) + for xfn in mfresults.get('xfn', []): + self._addXFN(xfn['relationships'], xfn['href'], xfn['name']) + vcard = mfresults.get('vcard') + if vcard: + self._getContext()['vcard'] = vcard + + # sanitize embedded markup + if is_htmlish and SANITIZE_HTML: + if element in self.can_contain_dangerous_markup: + output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', u'text/html')) + + if self.encoding and not isinstance(output, unicode): + output = output.decode(self.encoding, 'ignore') + + # address common error where people take data that is already + # utf-8, presume that it is iso-8859-1, and re-encode it. + if self.encoding in (u'utf-8', u'utf-8_INVALID_PYTHON_3') and isinstance(output, unicode): + try: + output = output.encode('iso-8859-1').decode('utf-8') + except (UnicodeEncodeError, UnicodeDecodeError): + pass + + # map win-1252 extensions to the proper code points + if isinstance(output, unicode): + output = u''.join([c in _cp1252.keys() and _cp1252[c] or c for c in output]) + + # categories/tags/keywords/whatever are handled in _end_category + if element == 'category': + return output + + if element == 'title' and self.hasTitle: + return output + + # store output in appropriate place(s) + if self.inentry and not self.insource: + if element == 'content': + self.entries[-1].setdefault(element, []) + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element].append(contentparams) + elif element == 'link': + if not self.inimage: + # query variables in urls in link elements are improperly + # converted from `?a=1&b=2` to `?a=1&b;=2` as if they're + # unhandled character references. 
fix this special case. + output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) + self.entries[-1][element] = output + if output: + self.entries[-1]['links'][-1]['href'] = output + else: + if element == 'description': + element = 'summary' + self.entries[-1][element] = output + if self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + self.entries[-1][element + '_detail'] = contentparams + elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage): + context = self._getContext() + if element == 'description': + element = 'subtitle' + context[element] = output + if element == 'link': + # fix query variables; see above for the explanation + output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output) + context[element] = output + context['links'][-1]['href'] = output + elif self.incontent: + contentparams = copy.deepcopy(self.contentparams) + contentparams['value'] = output + context[element + '_detail'] = contentparams + return output + + def pushContent(self, tag, attrsD, defaultContentType, expectingText): + self.incontent += 1 + if self.lang: + self.lang=self.lang.replace('_','-') + self.contentparams = FeedParserDict({ + 'type': self.mapContentType(attrsD.get('type', defaultContentType)), + 'language': self.lang, + 'base': self.baseuri}) + self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams) + self.push(tag, expectingText) + + def popContent(self, tag): + value = self.pop(tag) + self.incontent -= 1 + self.contentparams.clear() + return value + + # a number of elements in a number of RSS variants are nominally plain + # text, but this is routinely ignored. This is an attempt to detect + # the most common cases. As false positives often result in silent + # data loss, this function errs on the conservative side. 
+ @staticmethod + def lookslikehtml(s): + # must have a close tag or a entity reference to qualify + if not (re.search(r'',s) or re.search("&#?\w+;",s)): + return + + # all tags must be in a restricted subset of valid HTML tags + if filter(lambda t: t.lower() not in _HTMLSanitizer.acceptable_elements, + re.findall(r' -1: + prefix = name[:colonpos] + suffix = name[colonpos+1:] + prefix = self.namespacemap.get(prefix, prefix) + name = prefix + ':' + suffix + return name + + def _getAttribute(self, attrsD, name): + return attrsD.get(self._mapToStandardPrefix(name)) + + def _isBase64(self, attrsD, contentparams): + if attrsD.get('mode', '') == 'base64': + return 1 + if self.contentparams['type'].startswith(u'text/'): + return 0 + if self.contentparams['type'].endswith(u'+xml'): + return 0 + if self.contentparams['type'].endswith(u'/xml'): + return 0 + return 1 + + def _itsAnHrefDamnIt(self, attrsD): + href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None))) + if href: + try: + del attrsD['url'] + except KeyError: + pass + try: + del attrsD['uri'] + except KeyError: + pass + attrsD['href'] = href + return attrsD + + def _save(self, key, value, overwrite=False): + context = self._getContext() + if overwrite: + context[key] = value + else: + context.setdefault(key, value) + + def _start_rss(self, attrsD): + versionmap = {'0.91': u'rss091u', + '0.92': u'rss092', + '0.93': u'rss093', + '0.94': u'rss094'} + #If we're here then this is an RSS feed. + #If we don't have a version or have a version that starts with something + #other than RSS then there's been a mistake. Correct it. 
+ if not self.version or not self.version.startswith(u'rss'): + attr_version = attrsD.get('version', '') + version = versionmap.get(attr_version) + if version: + self.version = version + elif attr_version.startswith('2.'): + self.version = u'rss20' + else: + self.version = u'rss' + + def _start_channel(self, attrsD): + self.infeed = 1 + self._cdf_common(attrsD) + + def _cdf_common(self, attrsD): + if attrsD.has_key('lastmod'): + self._start_modified({}) + self.elementstack[-1][-1] = attrsD['lastmod'] + self._end_modified() + if attrsD.has_key('href'): + self._start_link({}) + self.elementstack[-1][-1] = attrsD['href'] + self._end_link() + + def _start_feed(self, attrsD): + self.infeed = 1 + versionmap = {'0.1': u'atom01', + '0.2': u'atom02', + '0.3': u'atom03'} + if not self.version: + attr_version = attrsD.get('version') + version = versionmap.get(attr_version) + if version: + self.version = version + else: + self.version = u'atom' + + def _end_channel(self): + self.infeed = 0 + _end_feed = _end_channel + + def _start_image(self, attrsD): + context = self._getContext() + if not self.inentry: + context.setdefault('image', FeedParserDict()) + self.inimage = 1 + self.hasTitle = 0 + self.push('image', 0) + + def _end_image(self): + self.pop('image') + self.inimage = 0 + + def _start_textinput(self, attrsD): + context = self._getContext() + context.setdefault('textinput', FeedParserDict()) + self.intextinput = 1 + self.hasTitle = 0 + self.push('textinput', 0) + _start_textInput = _start_textinput + + def _end_textinput(self): + self.pop('textinput') + self.intextinput = 0 + _end_textInput = _end_textinput + + def _start_author(self, attrsD): + self.inauthor = 1 + self.push('author', 1) + # Append a new FeedParserDict when expecting an author + context = self._getContext() + context.setdefault('authors', []) + context['authors'].append(FeedParserDict()) + _start_managingeditor = _start_author + _start_dc_author = _start_author + _start_dc_creator = _start_author + 
_start_itunes_author = _start_author + + def _end_author(self): + self.pop('author') + self.inauthor = 0 + self._sync_author_detail() + _end_managingeditor = _end_author + _end_dc_author = _end_author + _end_dc_creator = _end_author + _end_itunes_author = _end_author + + def _start_itunes_owner(self, attrsD): + self.inpublisher = 1 + self.push('publisher', 0) + + def _end_itunes_owner(self): + self.pop('publisher') + self.inpublisher = 0 + self._sync_author_detail('publisher') + + def _start_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('contributor', 0) + + def _end_contributor(self): + self.pop('contributor') + self.incontributor = 0 + + def _start_dc_contributor(self, attrsD): + self.incontributor = 1 + context = self._getContext() + context.setdefault('contributors', []) + context['contributors'].append(FeedParserDict()) + self.push('name', 0) + + def _end_dc_contributor(self): + self._end_name() + self.incontributor = 0 + + def _start_name(self, attrsD): + self.push('name', 0) + _start_itunes_name = _start_name + + def _end_name(self): + value = self.pop('name') + if self.inpublisher: + self._save_author('name', value, 'publisher') + elif self.inauthor: + self._save_author('name', value) + elif self.incontributor: + self._save_contributor('name', value) + elif self.intextinput: + context = self._getContext() + context['name'] = value + _end_itunes_name = _end_name + + def _start_width(self, attrsD): + self.push('width', 0) + + def _end_width(self): + value = self.pop('width') + try: + value = int(value) + except ValueError: + value = 0 + if self.inimage: + context = self._getContext() + context['width'] = value + + def _start_height(self, attrsD): + self.push('height', 0) + + def _end_height(self): + value = self.pop('height') + try: + value = int(value) + except ValueError: + value = 0 + if self.inimage: + context = 
self._getContext() + context['height'] = value + + def _start_url(self, attrsD): + self.push('href', 1) + _start_homepage = _start_url + _start_uri = _start_url + + def _end_url(self): + value = self.pop('href') + if self.inauthor: + self._save_author('href', value) + elif self.incontributor: + self._save_contributor('href', value) + _end_homepage = _end_url + _end_uri = _end_url + + def _start_email(self, attrsD): + self.push('email', 0) + _start_itunes_email = _start_email + + def _end_email(self): + value = self.pop('email') + if self.inpublisher: + self._save_author('email', value, 'publisher') + elif self.inauthor: + self._save_author('email', value) + elif self.incontributor: + self._save_contributor('email', value) + _end_itunes_email = _end_email + + def _getContext(self): + if self.insource: + context = self.sourcedata + elif self.inimage and self.feeddata.has_key('image'): + context = self.feeddata['image'] + elif self.intextinput: + context = self.feeddata['textinput'] + elif self.inentry: + context = self.entries[-1] + else: + context = self.feeddata + return context + + def _save_author(self, key, value, prefix='author'): + context = self._getContext() + context.setdefault(prefix + '_detail', FeedParserDict()) + context[prefix + '_detail'][key] = value + self._sync_author_detail() + context.setdefault('authors', [FeedParserDict()]) + context['authors'][-1][key] = value + + def _save_contributor(self, key, value): + context = self._getContext() + context.setdefault('contributors', [FeedParserDict()]) + context['contributors'][-1][key] = value + + def _sync_author_detail(self, key='author'): + context = self._getContext() + detail = context.get('%s_detail' % key) + if detail: + name = detail.get('name') + email = detail.get('email') + if name and email: + context[key] = u'%s (%s)' % (name, email) + elif name: + context[key] = name + elif email: + context[key] = email + else: + author, email = context.get(key), None + if not author: + return + emailmatch 
= re.search(ur'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author) + if emailmatch: + email = emailmatch.group(0) + # probably a better way to do the following, but it passes all the tests + author = author.replace(email, u'') + author = author.replace(u'()', u'') + author = author.replace(u'<>', u'') + author = author.replace(u'<>', u'') + author = author.strip() + if author and (author[0] == u'('): + author = author[1:] + if author and (author[-1] == u')'): + author = author[:-1] + author = author.strip() + if author or email: + context.setdefault('%s_detail' % key, FeedParserDict()) + if author: + context['%s_detail' % key]['name'] = author + if email: + context['%s_detail' % key]['email'] = email + + def _start_subtitle(self, attrsD): + self.pushContent('subtitle', attrsD, u'text/plain', 1) + _start_tagline = _start_subtitle + _start_itunes_subtitle = _start_subtitle + + def _end_subtitle(self): + self.popContent('subtitle') + _end_tagline = _end_subtitle + _end_itunes_subtitle = _end_subtitle + + def _start_rights(self, attrsD): + self.pushContent('rights', attrsD, u'text/plain', 1) + _start_dc_rights = _start_rights + _start_copyright = _start_rights + + def _end_rights(self): + self.popContent('rights') + _end_dc_rights = _end_rights + _end_copyright = _end_rights + + def _start_item(self, attrsD): + self.entries.append(FeedParserDict()) + self.push('item', 0) + self.inentry = 1 + self.guidislink = 0 + self.hasTitle = 0 + id = self._getAttribute(attrsD, 'rdf:about') + if id: + context = self._getContext() + context['id'] = id + self._cdf_common(attrsD) + _start_entry = _start_item + + def _end_item(self): + self.pop('item') + self.inentry = 0 + _end_entry = _end_item + + def _start_dc_language(self, attrsD): + self.push('language', 1) + _start_language = _start_dc_language + + def _end_dc_language(self): + self.lang = self.pop('language') + _end_language = 
_end_dc_language + + def _start_dc_publisher(self, attrsD): + self.push('publisher', 1) + _start_webmaster = _start_dc_publisher + + def _end_dc_publisher(self): + self.pop('publisher') + self._sync_author_detail('publisher') + _end_webmaster = _end_dc_publisher + + def _start_published(self, attrsD): + self.push('published', 1) + _start_dcterms_issued = _start_published + _start_issued = _start_published + + def _end_published(self): + value = self.pop('published') + self._save('published_parsed', _parse_date(value), overwrite=True) + _end_dcterms_issued = _end_published + _end_issued = _end_published + + def _start_updated(self, attrsD): + self.push('updated', 1) + _start_modified = _start_updated + _start_dcterms_modified = _start_updated + _start_pubdate = _start_updated + _start_dc_date = _start_updated + _start_lastbuilddate = _start_updated + + def _end_updated(self): + value = self.pop('updated') + parsed_value = _parse_date(value) + self._save('updated_parsed', parsed_value, overwrite=True) + _end_modified = _end_updated + _end_dcterms_modified = _end_updated + _end_pubdate = _end_updated + _end_dc_date = _end_updated + _end_lastbuilddate = _end_updated + + def _start_created(self, attrsD): + self.push('created', 1) + _start_dcterms_created = _start_created + + def _end_created(self): + value = self.pop('created') + self._save('created_parsed', _parse_date(value), overwrite=True) + _end_dcterms_created = _end_created + + def _start_expirationdate(self, attrsD): + self.push('expired', 1) + + def _end_expirationdate(self): + self._save('expired_parsed', _parse_date(self.pop('expired')), overwrite=True) + + def _start_cc_license(self, attrsD): + context = self._getContext() + value = self._getAttribute(attrsD, 'rdf:resource') + attrsD = FeedParserDict() + attrsD['rel'] = u'license' + if value: + attrsD['href']=value + context.setdefault('links', []).append(attrsD) + + def _start_creativecommons_license(self, attrsD): + self.push('license', 1) + 
_start_creativeCommons_license = _start_creativecommons_license + + def _end_creativecommons_license(self): + value = self.pop('license') + context = self._getContext() + attrsD = FeedParserDict() + attrsD['rel'] = u'license' + if value: + attrsD['href'] = value + context.setdefault('links', []).append(attrsD) + del context['license'] + _end_creativeCommons_license = _end_creativecommons_license + + def _addXFN(self, relationships, href, name): + context = self._getContext() + xfn = context.setdefault('xfn', []) + value = FeedParserDict({'relationships': relationships, 'href': href, 'name': name}) + if value not in xfn: + xfn.append(value) + + def _addTag(self, term, scheme, label): + context = self._getContext() + tags = context.setdefault('tags', []) + if (not term) and (not scheme) and (not label): + return + value = FeedParserDict({'term': term, 'scheme': scheme, 'label': label}) + if value not in tags: + tags.append(value) + + def _start_category(self, attrsD): + term = attrsD.get('term') + scheme = attrsD.get('scheme', attrsD.get('domain')) + label = attrsD.get('label') + self._addTag(term, scheme, label) + self.push('category', 1) + _start_dc_subject = _start_category + _start_keywords = _start_category + + def _start_media_category(self, attrsD): + attrsD.setdefault('scheme', u'http://search.yahoo.com/mrss/category_schema') + self._start_category(attrsD) + + def _end_itunes_keywords(self): + for term in self.pop('itunes_keywords').split(): + self._addTag(term, u'http://www.itunes.com/', None) + + def _start_itunes_category(self, attrsD): + self._addTag(attrsD.get('text'), u'http://www.itunes.com/', None) + self.push('category', 1) + + def _end_category(self): + value = self.pop('category') + if not value: + return + context = self._getContext() + tags = context['tags'] + if value and len(tags) and not tags[-1]['term']: + tags[-1]['term'] = value + else: + self._addTag(value, None, None) + _end_dc_subject = _end_category + _end_keywords = _end_category + 
_end_itunes_category = _end_category + _end_media_category = _end_category + + def _start_cloud(self, attrsD): + self._getContext()['cloud'] = FeedParserDict(attrsD) + + def _start_link(self, attrsD): + attrsD.setdefault('rel', u'alternate') + if attrsD['rel'] == u'self': + attrsD.setdefault('type', u'application/atom+xml') + else: + attrsD.setdefault('type', u'text/html') + context = self._getContext() + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + expectingText = self.infeed or self.inentry or self.insource + context.setdefault('links', []) + if not (self.inentry and self.inimage): + context['links'].append(FeedParserDict(attrsD)) + if attrsD.has_key('href'): + expectingText = 0 + if (attrsD.get('rel') == u'alternate') and (self.mapContentType(attrsD.get('type')) in self.html_types): + context['link'] = attrsD['href'] + else: + self.push('link', expectingText) + + def _end_link(self): + value = self.pop('link') + context = self._getContext() + + def _start_guid(self, attrsD): + self.guidislink = (attrsD.get('ispermalink', 'true') == 'true') + self.push('id', 1) + + def _end_guid(self): + value = self.pop('id') + self._save('guidislink', self.guidislink and not self._getContext().has_key('link')) + if self.guidislink: + # guid acts as link, but only if 'ispermalink' is not present or is 'true', + # and only if the item doesn't already have a link element + self._save('link', value) + + def _start_title(self, attrsD): + if self.svgOK: + return self.unknown_starttag('title', attrsD.items()) + self.pushContent('title', attrsD, u'text/plain', self.infeed or self.inentry or self.insource) + _start_dc_title = _start_title + _start_media_title = _start_title + + def _end_title(self): + if self.svgOK: + return + value = self.popContent('title') + if not value: + return + context = self._getContext() + self.hasTitle = 1 + _end_dc_title = _end_title + + def _end_media_title(self): + hasTitle = 
self.hasTitle + self._end_title() + self.hasTitle = hasTitle + + def _start_description(self, attrsD): + context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self.pushContent('description', attrsD, u'text/html', self.infeed or self.inentry or self.insource) + _start_dc_description = _start_description + + def _start_abstract(self, attrsD): + self.pushContent('description', attrsD, u'text/plain', self.infeed or self.inentry or self.insource) + + def _end_description(self): + if self._summaryKey == 'content': + self._end_content() + else: + value = self.popContent('description') + self._summaryKey = None + _end_abstract = _end_description + _end_dc_description = _end_description + + def _start_info(self, attrsD): + self.pushContent('info', attrsD, u'text/plain', 1) + _start_feedburner_browserfriendly = _start_info + + def _end_info(self): + self.popContent('info') + _end_feedburner_browserfriendly = _end_info + + def _start_generator(self, attrsD): + if attrsD: + attrsD = self._itsAnHrefDamnIt(attrsD) + if attrsD.has_key('href'): + attrsD['href'] = self.resolveURI(attrsD['href']) + self._getContext()['generator_detail'] = FeedParserDict(attrsD) + self.push('generator', 1) + + def _end_generator(self): + value = self.pop('generator') + context = self._getContext() + if context.has_key('generator_detail'): + context['generator_detail']['name'] = value + + def _start_admin_generatoragent(self, attrsD): + self.push('generator', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('generator') + self._getContext()['generator_detail'] = FeedParserDict({'href': value}) + + def _start_admin_errorreportsto(self, attrsD): + self.push('errorreportsto', 1) + value = self._getAttribute(attrsD, 'rdf:resource') + if value: + self.elementstack[-1][2].append(value) + self.pop('errorreportsto') + + def _start_summary(self, attrsD): + 
context = self._getContext() + if context.has_key('summary'): + self._summaryKey = 'content' + self._start_content(attrsD) + else: + self._summaryKey = 'summary' + self.pushContent(self._summaryKey, attrsD, u'text/plain', 1) + _start_itunes_summary = _start_summary + + def _end_summary(self): + if self._summaryKey == 'content': + self._end_content() + else: + self.popContent(self._summaryKey or 'summary') + self._summaryKey = None + _end_itunes_summary = _end_summary + + def _start_enclosure(self, attrsD): + attrsD = self._itsAnHrefDamnIt(attrsD) + context = self._getContext() + attrsD['rel'] = u'enclosure' + context.setdefault('links', []).append(FeedParserDict(attrsD)) + + def _start_source(self, attrsD): + if 'url' in attrsD: + # This means that we're processing a source element from an RSS 2.0 feed + self.sourcedata['href'] = attrsD[u'url'] + self.push('source', 1) + self.insource = 1 + self.hasTitle = 0 + + def _end_source(self): + self.insource = 0 + value = self.pop('source') + if value: + self.sourcedata['title'] = value + self._getContext()['source'] = copy.deepcopy(self.sourcedata) + self.sourcedata.clear() + + def _start_content(self, attrsD): + self.pushContent('content', attrsD, u'text/plain', 1) + src = attrsD.get('src') + if src: + self.contentparams['src'] = src + self.push('content', 1) + + def _start_body(self, attrsD): + self.pushContent('content', attrsD, u'application/xhtml+xml', 1) + _start_xhtml_body = _start_body + + def _start_content_encoded(self, attrsD): + self.pushContent('content', attrsD, u'text/html', 1) + _start_fullitem = _start_content_encoded + + def _end_content(self): + copyToSummary = self.mapContentType(self.contentparams.get('type')) in ([u'text/plain'] + self.html_types) + value = self.popContent('content') + if copyToSummary: + self._save('summary', value) + + _end_body = _end_content + _end_xhtml_body = _end_content + _end_content_encoded = _end_content + _end_fullitem = _end_content + + def _start_itunes_image(self, 
attrsD): + self.push('itunes_image', 0) + if attrsD.get('href'): + self._getContext()['image'] = FeedParserDict({'href': attrsD.get('href')}) + _start_itunes_link = _start_itunes_image + + def _end_itunes_block(self): + value = self.pop('itunes_block', 0) + self._getContext()['itunes_block'] = (value == 'yes') and 1 or 0 + + def _end_itunes_explicit(self): + value = self.pop('itunes_explicit', 0) + # Convert 'yes' -> True, 'clean' to False, and any other value to None + # False and None both evaluate as False, so the difference can be ignored + # by applications that only need to know if the content is explicit. + self._getContext()['itunes_explicit'] = (None, False, True)[(value == 'yes' and 2) or value == 'clean' or 0] + + def _start_media_content(self, attrsD): + context = self._getContext() + context.setdefault('media_content', []) + context['media_content'].append(attrsD) + + def _start_media_thumbnail(self, attrsD): + context = self._getContext() + context.setdefault('media_thumbnail', []) + self.push('url', 1) # new + context['media_thumbnail'].append(attrsD) + + def _end_media_thumbnail(self): + url = self.pop('url') + context = self._getContext() + if url != None and len(url.strip()) != 0: + if not context['media_thumbnail'][-1].has_key('url'): + context['media_thumbnail'][-1]['url'] = url + + def _start_media_player(self, attrsD): + self.push('media_player', 0) + self._getContext()['media_player'] = FeedParserDict(attrsD) + + def _end_media_player(self): + value = self.pop('media_player') + context = self._getContext() + context['media_player']['content'] = value + + def _start_newlocation(self, attrsD): + self.push('newlocation', 1) + + def _end_newlocation(self): + url = self.pop('newlocation') + context = self._getContext() + # don't set newlocation if the context isn't right + if context is not self.feeddata: + return + context['newlocation'] = _makeSafeAbsoluteURI(self.baseuri, url.strip()) + +if _XML_AVAILABLE: + class 
_StrictFeedParser(_FeedParserMixin, xml.sax.handler.ContentHandler): + def __init__(self, baseuri, baselang, encoding): + xml.sax.handler.ContentHandler.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + self.bozo = 0 + self.exc = None + self.decls = {} + + def startPrefixMapping(self, prefix, uri): + if not uri: + return + # Jython uses '' instead of None; standardize on None + prefix = prefix or None + self.trackNamespace(prefix, uri) + if prefix and uri == 'http://www.w3.org/1999/xlink': + self.decls['xmlns:' + prefix] = uri + + def startElementNS(self, name, qname, attrs): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if lowernamespace.find(u'backend.userland.com/rss') <> -1: + # match any backend.userland.com namespace + namespace = u'http://backend.userland.com/rss' + lowernamespace = namespace + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = None + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if givenprefix and (prefix == None or (prefix == '' and lowernamespace == '')) and not self.namespacesInUse.has_key(givenprefix): + raise UndeclaredNamespace, "'%s' is not associated with a namespace" % givenprefix + localname = str(localname).lower() + + # qname implementation is horribly broken in Python 2.1 (it + # doesn't report any), and slightly broken in Python 2.2 (it + # doesn't report the xml: namespace). So we match up namespaces + # with a known list first, and then possibly override them with + # the qnames the SAX parser gives us (if indeed it gives us any + # at all). Thanks to MatejC for helping me test this and + # tirelessly telling me that it didn't work yet. 
+ attrsD, self.decls = self.decls, {} + if localname=='math' and namespace=='http://www.w3.org/1998/Math/MathML': + attrsD['xmlns']=namespace + if localname=='svg' and namespace=='http://www.w3.org/2000/svg': + attrsD['xmlns']=namespace + + if prefix: + localname = prefix.lower() + ':' + localname + elif namespace and not qname: #Expat + for name,value in self.namespacesInUse.items(): + if name and value == namespace: + localname = name + ':' + localname + break + + for (namespace, attrlocalname), attrvalue in attrs.items(): + lowernamespace = (namespace or '').lower() + prefix = self._matchnamespaces.get(lowernamespace, '') + if prefix: + attrlocalname = prefix + ':' + attrlocalname + attrsD[str(attrlocalname).lower()] = attrvalue + for qname in attrs.getQNames(): + attrsD[str(qname).lower()] = attrs.getValueByQName(qname) + self.unknown_starttag(localname, attrsD.items()) + + def characters(self, text): + self.handle_data(text) + + def endElementNS(self, name, qname): + namespace, localname = name + lowernamespace = str(namespace or '').lower() + if qname and qname.find(':') > 0: + givenprefix = qname.split(':')[0] + else: + givenprefix = '' + prefix = self._matchnamespaces.get(lowernamespace, givenprefix) + if prefix: + localname = prefix + ':' + localname + elif namespace and not qname: #Expat + for name,value in self.namespacesInUse.items(): + if name and value == namespace: + localname = name + ':' + localname + break + localname = str(localname).lower() + self.unknown_endtag(localname) + + def error(self, exc): + self.bozo = 1 + self.exc = exc + + def fatalError(self, exc): + self.error(exc) + raise exc + +class _BaseHTMLProcessor(sgmllib.SGMLParser): + special = re.compile('''[<>'"]''') + bare_ampersand = re.compile("&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)") + elements_no_end_tag = [ + 'area', 'base', 'basefont', 'br', 'col', 'command', 'embed', 'frame', + 'hr', 'img', 'input', 'isindex', 'keygen', 'link', 'meta', 'param', + 'source', 'track', 'wbr' + ] + + def 
__init__(self, encoding, _type): + self.encoding = encoding + self._type = _type + sgmllib.SGMLParser.__init__(self) + + def reset(self): + self.pieces = [] + sgmllib.SGMLParser.reset(self) + + def _shorttag_replace(self, match): + tag = match.group(1) + if tag in self.elements_no_end_tag: + return '<' + tag + ' />' + else: + return '<' + tag + '>' + + # By declaring these methods and overriding their compiled code + # with the code from sgmllib, the original code will execute in + # feedparser's scope instead of sgmllib's. This means that the + # `tagfind` and `charref` regular expressions will be found as + # they're declared above, not as they're declared in sgmllib. + def goahead(self, i): + pass + goahead.func_code = sgmllib.SGMLParser.goahead.func_code + + def __parse_starttag(self, i): + pass + __parse_starttag.func_code = sgmllib.SGMLParser.parse_starttag.func_code + + def parse_starttag(self,i): + j = self.__parse_starttag(i) + if self._type == 'application/xhtml+xml': + if j>2 and self.rawdata[j-2:j]=='/>': + self.unknown_endtag(self.lasttag) + return j + + def feed(self, data): + data = re.compile(r'', self._shorttag_replace, data) # bug [ 1399464 ] Bad regexp for _shorttag_replace + data = re.sub(r'<([^<>\s]+?)\s*/>', self._shorttag_replace, data) + data = data.replace(''', "'") + data = data.replace('"', '"') + try: + bytes + if bytes is str: + raise NameError + self.encoding = self.encoding + u'_INVALID_PYTHON_3' + except NameError: + if self.encoding and isinstance(data, unicode): + data = data.encode(self.encoding) + sgmllib.SGMLParser.feed(self, data) + sgmllib.SGMLParser.close(self) + + def normalize_attrs(self, attrs): + if not attrs: + return attrs + # utility method to be called by descendants + attrs = dict([(k.lower(), v) for k, v in attrs]).items() + attrs = [(k, k in ('rel', 'type') and v.lower() or v) for k, v in attrs] + attrs.sort() + return attrs + + def unknown_starttag(self, tag, attrs): + # called for each start tag + # attrs is a 
list of (attr, value) tuples + # e.g. for
    , tag='pre', attrs=[('class', 'screen')]
    +        uattrs = []
    +        strattrs=''
    +        if attrs:
    +            for key, value in attrs:
    +                value=value.replace('>','>').replace('<','<').replace('"','"')
    +                value = self.bare_ampersand.sub("&", value)
    +                # thanks to Kevin Marks for this breathtaking hack to deal with (valid) high-bit attribute values in UTF-8 feeds
    +                if not isinstance(value, unicode):
    +                    value = value.decode(self.encoding, 'ignore')
    +                try:
    +                    # Currently, in Python 3 the key is already a str, and cannot be decoded again
    +                    uattrs.append((unicode(key, self.encoding), value))
    +                except TypeError:
    +                    uattrs.append((key, value))
    +            strattrs = u''.join([u' %s="%s"' % (key, value) for key, value in uattrs])
    +            if self.encoding:
    +                try:
    +                    strattrs = strattrs.encode(self.encoding)
    +                except (UnicodeEncodeError, LookupError):
    +                    pass
    +        if tag in self.elements_no_end_tag:
    +            self.pieces.append('<%(tag)s%(strattrs)s />' % locals())
    +        else:
    +            self.pieces.append('<%(tag)s%(strattrs)s>' % locals())
    +
    +    def unknown_endtag(self, tag):
    +        # called for each end tag, e.g. for 
    , tag will be 'pre' + # Reconstruct the original end tag. + if tag not in self.elements_no_end_tag: + self.pieces.append("" % locals()) + + def handle_charref(self, ref): + # called for each character reference, e.g. for ' ', ref will be '160' + # Reconstruct the original character reference. + if ref.startswith('x'): + value = unichr(int(ref[1:],16)) + else: + value = unichr(int(ref)) + + if value in _cp1252.keys(): + self.pieces.append('&#%s;' % hex(ord(_cp1252[value]))[1:]) + else: + self.pieces.append('&#%(ref)s;' % locals()) + + def handle_entityref(self, ref): + # called for each entity reference, e.g. for '©', ref will be 'copy' + # Reconstruct the original entity reference. + if name2codepoint.has_key(ref): + self.pieces.append('&%(ref)s;' % locals()) + else: + self.pieces.append('&%(ref)s' % locals()) + + def handle_data(self, text): + # called for each block of plain text, i.e. outside of any tag and + # not containing any character or entity references + # Store the original text verbatim. + self.pieces.append(text) + + def handle_comment(self, text): + # called for each HTML comment, e.g. + # Reconstruct the original comment. + self.pieces.append('' % locals()) + + def handle_pi(self, text): + # called for each processing instruction, e.g. + # Reconstruct original processing instruction. + self.pieces.append('' % locals()) + + def handle_decl(self, text): + # called for the DOCTYPE, if present, e.g. 
+ # + # Reconstruct original DOCTYPE + self.pieces.append('' % locals()) + + _new_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9:]*\s*').match + def _scan_name(self, i, declstartpos): + rawdata = self.rawdata + n = len(rawdata) + if i == n: + return None, -1 + m = self._new_declname_match(rawdata, i) + if m: + s = m.group() + name = s.strip() + if (i + len(s)) == n: + return None, -1 # end of buffer + return name.lower(), m.end() + else: + self.handle_data(rawdata) +# self.updatepos(declstartpos, i) + return None, -1 + + def convert_charref(self, name): + return '&#%s;' % name + + def convert_entityref(self, name): + return '&%s;' % name + + def output(self): + '''Return processed HTML as a single string''' + return ''.join([str(p) for p in self.pieces]) + + def parse_declaration(self, i): + try: + return sgmllib.SGMLParser.parse_declaration(self, i) + except sgmllib.SGMLParseError: + # escape the doctype declaration and continue parsing + self.handle_data('<') + return i+1 + +class _LooseFeedParser(_FeedParserMixin, _BaseHTMLProcessor): + def __init__(self, baseuri, baselang, encoding, entities): + sgmllib.SGMLParser.__init__(self) + _FeedParserMixin.__init__(self, baseuri, baselang, encoding) + _BaseHTMLProcessor.__init__(self, encoding, 'application/xhtml+xml') + self.entities=entities + + def decodeEntities(self, element, data): + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = data.replace('"', '"') + data = data.replace(''', ''') + data = data.replace(''', ''') + if self.contentparams.has_key('type') and not self.contentparams.get('type', u'xml').endswith(u'xml'): + data = data.replace('<', '<') + data = data.replace('>', '>') + data = data.replace('&', '&') + data = data.replace('"', '"') + data = 
data.replace(''', "'") + return data + + def strattrs(self, attrs): + return ''.join([' %s="%s"' % (n,v.replace('"','"')) for n,v in attrs]) + +class _MicroformatsParser: + STRING = 1 + DATE = 2 + URI = 3 + NODE = 4 + EMAIL = 5 + + known_xfn_relationships = ['contact', 'acquaintance', 'friend', 'met', 'co-worker', 'coworker', 'colleague', 'co-resident', 'coresident', 'neighbor', 'child', 'parent', 'sibling', 'brother', 'sister', 'spouse', 'wife', 'husband', 'kin', 'relative', 'muse', 'crush', 'date', 'sweetheart', 'me'] + known_binary_extensions = ['zip','rar','exe','gz','tar','tgz','tbz2','bz2','z','7z','dmg','img','sit','sitx','hqx','deb','rpm','bz2','jar','rar','iso','bin','msi','mp2','mp3','ogg','ogm','mp4','m4v','m4a','avi','wma','wmv'] + + def __init__(self, data, baseuri, encoding): + self.document = BeautifulSoup.BeautifulSoup(data) + self.baseuri = baseuri + self.encoding = encoding + if isinstance(data, unicode): + data = data.encode(encoding) + self.tags = [] + self.enclosures = [] + self.xfn = [] + self.vcard = None + + def vcardEscape(self, s): + if isinstance(s, basestring): + s = s.replace(',', '\\,').replace(';', '\\;').replace('\n', '\\n') + return s + + def vcardFold(self, s): + s = re.sub(';+$', '', s) + sFolded = '' + iMax = 75 + sPrefix = '' + while len(s) > iMax: + sFolded += sPrefix + s[:iMax] + '\n' + s = s[iMax:] + sPrefix = ' ' + iMax = 74 + sFolded += sPrefix + s + return sFolded + + def normalize(self, s): + return re.sub(r'\s+', ' ', s).strip() + + def unique(self, aList): + results = [] + for element in aList: + if element not in results: + results.append(element) + return results + + def toISO8601(self, dt): + return time.strftime('%Y-%m-%dT%H:%M:%SZ', dt) + + def getPropertyValue(self, elmRoot, sProperty, iPropertyType=4, bAllowMultiple=0, bAutoEscape=0): + all = lambda x: 1 + sProperty = sProperty.lower() + bFound = 0 + bNormalize = 1 + propertyMatch = {'class': re.compile(r'\b%s\b' % sProperty)} + if bAllowMultiple and 
(iPropertyType != self.NODE): + snapResults = [] + containers = elmRoot(['ul', 'ol'], propertyMatch) + for container in containers: + snapResults.extend(container('li')) + bFound = (len(snapResults) != 0) + if not bFound: + snapResults = elmRoot(all, propertyMatch) + bFound = (len(snapResults) != 0) + if (not bFound) and (sProperty == 'value'): + snapResults = elmRoot('pre') + bFound = (len(snapResults) != 0) + bNormalize = not bFound + if not bFound: + snapResults = [elmRoot] + bFound = (len(snapResults) != 0) + arFilter = [] + if sProperty == 'vcard': + snapFilter = elmRoot(all, propertyMatch) + for node in snapFilter: + if node.findParent(all, propertyMatch): + arFilter.append(node) + arResults = [] + for node in snapResults: + if node not in arFilter: + arResults.append(node) + bFound = (len(arResults) != 0) + if not bFound: + if bAllowMultiple: + return [] + elif iPropertyType == self.STRING: + return '' + elif iPropertyType == self.DATE: + return None + elif iPropertyType == self.URI: + return '' + elif iPropertyType == self.NODE: + return None + else: + return None + arValues = [] + for elmResult in arResults: + sValue = None + if iPropertyType == self.NODE: + if bAllowMultiple: + arValues.append(elmResult) + continue + else: + return elmResult + sNodeName = elmResult.name.lower() + if (iPropertyType == self.EMAIL) and (sNodeName == 'a'): + sValue = (elmResult.get('href') or '').split('mailto:').pop().split('?')[0] + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if (not sValue) and (sNodeName == 'abbr'): + sValue = elmResult.get('title') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if (not sValue) and (iPropertyType == self.URI): + if sNodeName == 'a': + sValue = elmResult.get('href') + elif sNodeName == 'img': + sValue = elmResult.get('src') + elif sNodeName == 'object': + sValue = elmResult.get('data') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if 
(not sValue) and (sNodeName == 'img'): + sValue = elmResult.get('alt') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if not sValue: + sValue = elmResult.renderContents() + sValue = re.sub(r'<\S[^>]*>', '', sValue) + sValue = sValue.replace('\r\n', '\n') + sValue = sValue.replace('\r', '\n') + if sValue: + sValue = bNormalize and self.normalize(sValue) or sValue.strip() + if not sValue: + continue + if iPropertyType == self.DATE: + sValue = _parse_date_iso8601(sValue) + if bAllowMultiple: + arValues.append(bAutoEscape and self.vcardEscape(sValue) or sValue) + else: + return bAutoEscape and self.vcardEscape(sValue) or sValue + return arValues + + def findVCards(self, elmRoot, bAgentParsing=0): + sVCards = '' + + if not bAgentParsing: + arCards = self.getPropertyValue(elmRoot, 'vcard', bAllowMultiple=1) + else: + arCards = [elmRoot] + + for elmCard in arCards: + arLines = [] + + def processSingleString(sProperty): + sValue = self.getPropertyValue(elmCard, sProperty, self.STRING, bAutoEscape=1).decode(self.encoding) + if sValue: + arLines.append(self.vcardFold(sProperty.upper() + ':' + sValue)) + return sValue or u'' + + def processSingleURI(sProperty): + sValue = self.getPropertyValue(elmCard, sProperty, self.URI) + if sValue: + sContentType = '' + sEncoding = '' + sValueKey = '' + if sValue.startswith('data:'): + sEncoding = ';ENCODING=b' + sContentType = sValue.split(';')[0].split('/').pop() + sValue = sValue.split(',', 1).pop() + else: + elmValue = self.getPropertyValue(elmCard, sProperty) + if elmValue: + if sProperty != 'url': + sValueKey = ';VALUE=uri' + sContentType = elmValue.get('type', '').strip().split('/').pop().strip() + sContentType = sContentType.upper() + if sContentType == 'OCTET-STREAM': + sContentType = '' + if sContentType: + sContentType = ';TYPE=' + sContentType.upper() + arLines.append(self.vcardFold(sProperty.upper() + sEncoding + sContentType + sValueKey + ':' + sValue)) + + def processTypeValue(sProperty, 
arDefaultType, arForceType=None): + arResults = self.getPropertyValue(elmCard, sProperty, bAllowMultiple=1) + for elmResult in arResults: + arType = self.getPropertyValue(elmResult, 'type', self.STRING, 1, 1) + if arForceType: + arType = self.unique(arForceType + arType) + if not arType: + arType = arDefaultType + sValue = self.getPropertyValue(elmResult, 'value', self.EMAIL, 0) + if sValue: + arLines.append(self.vcardFold(sProperty.upper() + ';TYPE=' + ','.join(arType) + ':' + sValue)) + + # AGENT + # must do this before all other properties because it is destructive + # (removes nested class="vcard" nodes so they don't interfere with + # this vcard's other properties) + arAgent = self.getPropertyValue(elmCard, 'agent', bAllowMultiple=1) + for elmAgent in arAgent: + if re.compile(r'\bvcard\b').search(elmAgent.get('class')): + sAgentValue = self.findVCards(elmAgent, 1) + '\n' + sAgentValue = sAgentValue.replace('\n', '\\n') + sAgentValue = sAgentValue.replace(';', '\\;') + if sAgentValue: + arLines.append(self.vcardFold('AGENT:' + sAgentValue)) + # Completely remove the agent element from the parse tree + elmAgent.extract() + else: + sAgentValue = self.getPropertyValue(elmAgent, 'value', self.URI, bAutoEscape=1); + if sAgentValue: + arLines.append(self.vcardFold('AGENT;VALUE=uri:' + sAgentValue)) + + # FN (full name) + sFN = processSingleString('fn') + + # N (name) + elmName = self.getPropertyValue(elmCard, 'n') + if elmName: + sFamilyName = self.getPropertyValue(elmName, 'family-name', self.STRING, bAutoEscape=1) + sGivenName = self.getPropertyValue(elmName, 'given-name', self.STRING, bAutoEscape=1) + arAdditionalNames = self.getPropertyValue(elmName, 'additional-name', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'additional-names', self.STRING, 1, 1) + arHonorificPrefixes = self.getPropertyValue(elmName, 'honorific-prefix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-prefixes', self.STRING, 1, 1) + arHonorificSuffixes = 
self.getPropertyValue(elmName, 'honorific-suffix', self.STRING, 1, 1) + self.getPropertyValue(elmName, 'honorific-suffixes', self.STRING, 1, 1) + arLines.append(self.vcardFold('N:' + sFamilyName + ';' + + sGivenName + ';' + + ','.join(arAdditionalNames) + ';' + + ','.join(arHonorificPrefixes) + ';' + + ','.join(arHonorificSuffixes))) + elif sFN: + # implied "N" optimization + # http://microformats.org/wiki/hcard#Implied_.22N.22_Optimization + arNames = self.normalize(sFN).split() + if len(arNames) == 2: + bFamilyNameFirst = (arNames[0].endswith(',') or + len(arNames[1]) == 1 or + ((len(arNames[1]) == 2) and (arNames[1].endswith('.')))) + if bFamilyNameFirst: + arLines.append(self.vcardFold('N:' + arNames[0] + ';' + arNames[1])) + else: + arLines.append(self.vcardFold('N:' + arNames[1] + ';' + arNames[0])) + + # SORT-STRING + sSortString = self.getPropertyValue(elmCard, 'sort-string', self.STRING, bAutoEscape=1) + if sSortString: + arLines.append(self.vcardFold('SORT-STRING:' + sSortString)) + + # NICKNAME + arNickname = self.getPropertyValue(elmCard, 'nickname', self.STRING, 1, 1) + if arNickname: + arLines.append(self.vcardFold('NICKNAME:' + ','.join(arNickname))) + + # PHOTO + processSingleURI('photo') + + # BDAY + dtBday = self.getPropertyValue(elmCard, 'bday', self.DATE) + if dtBday: + arLines.append(self.vcardFold('BDAY:' + self.toISO8601(dtBday))) + + # ADR (address) + arAdr = self.getPropertyValue(elmCard, 'adr', bAllowMultiple=1) + for elmAdr in arAdr: + arType = self.getPropertyValue(elmAdr, 'type', self.STRING, 1, 1) + if not arType: + arType = ['intl','postal','parcel','work'] # default adr types, see RFC 2426 section 3.2.1 + sPostOfficeBox = self.getPropertyValue(elmAdr, 'post-office-box', self.STRING, 0, 1) + sExtendedAddress = self.getPropertyValue(elmAdr, 'extended-address', self.STRING, 0, 1) + sStreetAddress = self.getPropertyValue(elmAdr, 'street-address', self.STRING, 0, 1) + sLocality = self.getPropertyValue(elmAdr, 'locality', self.STRING, 0, 
1) + sRegion = self.getPropertyValue(elmAdr, 'region', self.STRING, 0, 1) + sPostalCode = self.getPropertyValue(elmAdr, 'postal-code', self.STRING, 0, 1) + sCountryName = self.getPropertyValue(elmAdr, 'country-name', self.STRING, 0, 1) + arLines.append(self.vcardFold('ADR;TYPE=' + ','.join(arType) + ':' + + sPostOfficeBox + ';' + + sExtendedAddress + ';' + + sStreetAddress + ';' + + sLocality + ';' + + sRegion + ';' + + sPostalCode + ';' + + sCountryName)) + + # LABEL + processTypeValue('label', ['intl','postal','parcel','work']) + + # TEL (phone number) + processTypeValue('tel', ['voice']) + + # EMAIL + processTypeValue('email', ['internet'], ['internet']) + + # MAILER + processSingleString('mailer') + + # TZ (timezone) + processSingleString('tz') + + # GEO (geographical information) + elmGeo = self.getPropertyValue(elmCard, 'geo') + if elmGeo: + sLatitude = self.getPropertyValue(elmGeo, 'latitude', self.STRING, 0, 1) + sLongitude = self.getPropertyValue(elmGeo, 'longitude', self.STRING, 0, 1) + arLines.append(self.vcardFold('GEO:' + sLatitude + ';' + sLongitude)) + + # TITLE + processSingleString('title') + + # ROLE + processSingleString('role') + + # LOGO + processSingleURI('logo') + + # ORG (organization) + elmOrg = self.getPropertyValue(elmCard, 'org') + if elmOrg: + sOrganizationName = self.getPropertyValue(elmOrg, 'organization-name', self.STRING, 0, 1) + if not sOrganizationName: + # implied "organization-name" optimization + # http://microformats.org/wiki/hcard#Implied_.22organization-name.22_Optimization + sOrganizationName = self.getPropertyValue(elmCard, 'org', self.STRING, 0, 1) + if sOrganizationName: + arLines.append(self.vcardFold('ORG:' + sOrganizationName)) + else: + arOrganizationUnit = self.getPropertyValue(elmOrg, 'organization-unit', self.STRING, 1, 1) + arLines.append(self.vcardFold('ORG:' + sOrganizationName + ';' + ';'.join(arOrganizationUnit))) + + # CATEGORY + arCategory = self.getPropertyValue(elmCard, 'category', self.STRING, 1, 1) + 
self.getPropertyValue(elmCard, 'categories', self.STRING, 1, 1) + if arCategory: + arLines.append(self.vcardFold('CATEGORIES:' + ','.join(arCategory))) + + # NOTE + processSingleString('note') + + # REV + processSingleString('rev') + + # SOUND + processSingleURI('sound') + + # UID + processSingleString('uid') + + # URL + processSingleURI('url') + + # CLASS + processSingleString('class') + + # KEY + processSingleURI('key') + + if arLines: + arLines = [u'BEGIN:vCard',u'VERSION:3.0'] + arLines + [u'END:vCard'] + # XXX - this is super ugly; properly fix this with issue 148 + for i, s in enumerate(arLines): + if not isinstance(s, unicode): + arLines[i] = s.decode('utf-8', 'ignore') + sVCards += u'\n'.join(arLines) + u'\n' + + return sVCards.strip() + + def isProbablyDownloadable(self, elm): + attrsD = elm.attrMap + if not attrsD.has_key('href'): + return 0 + linktype = attrsD.get('type', '').strip() + if linktype.startswith('audio/') or \ + linktype.startswith('video/') or \ + (linktype.startswith('application/') and not linktype.endswith('xml')): + return 1 + path = urlparse.urlparse(attrsD['href'])[2] + if path.find('.') == -1: + return 0 + fileext = path.split('.').pop().lower() + return fileext in self.known_binary_extensions + + def findTags(self): + all = lambda x: 1 + for elm in self.document(all, {'rel': re.compile(r'\btag\b')}): + href = elm.get('href') + if not href: + continue + urlscheme, domain, path, params, query, fragment = \ + urlparse.urlparse(_urljoin(self.baseuri, href)) + segments = path.split('/') + tag = segments.pop() + if not tag: + if segments: + tag = segments.pop() + else: + # there are no tags + continue + tagscheme = urlparse.urlunparse((urlscheme, domain, '/'.join(segments), '', '', '')) + if not tagscheme.endswith('/'): + tagscheme += '/' + self.tags.append(FeedParserDict({"term": tag, "scheme": tagscheme, "label": elm.string or ''})) + + def findEnclosures(self): + all = lambda x: 1 + enclosure_match = re.compile(r'\benclosure\b') + for 
elm in self.document(all, {'href': re.compile(r'.+')}): + if not enclosure_match.search(elm.get('rel', u'')) and not self.isProbablyDownloadable(elm): + continue + if elm.attrMap not in self.enclosures: + self.enclosures.append(elm.attrMap) + if elm.string and not elm.get('title'): + self.enclosures[-1]['title'] = elm.string + + def findXFN(self): + all = lambda x: 1 + for elm in self.document(all, {'rel': re.compile('.+'), 'href': re.compile('.+')}): + rels = elm.get('rel', u'').split() + xfn_rels = [] + for rel in rels: + if rel in self.known_xfn_relationships: + xfn_rels.append(rel) + if xfn_rels: + self.xfn.append({"relationships": xfn_rels, "href": elm.get('href', ''), "name": elm.string}) + +def _parseMicroformats(htmlSource, baseURI, encoding): + if not BeautifulSoup: + return + try: + p = _MicroformatsParser(htmlSource, baseURI, encoding) + except UnicodeEncodeError: + # sgmllib throws this exception when performing lookups of tags + # with non-ASCII characters in them. + return + p.vcard = p.findVCards(p.document) + p.findTags() + p.findEnclosures() + p.findXFN() + return {"tags": p.tags, "enclosures": p.enclosures, "xfn": p.xfn, "vcard": p.vcard} + +class _RelativeURIResolver(_BaseHTMLProcessor): + relative_uris = [('a', 'href'), + ('applet', 'codebase'), + ('area', 'href'), + ('blockquote', 'cite'), + ('body', 'background'), + ('del', 'cite'), + ('form', 'action'), + ('frame', 'longdesc'), + ('frame', 'src'), + ('iframe', 'longdesc'), + ('iframe', 'src'), + ('head', 'profile'), + ('img', 'longdesc'), + ('img', 'src'), + ('img', 'usemap'), + ('input', 'src'), + ('input', 'usemap'), + ('ins', 'cite'), + ('link', 'href'), + ('object', 'classid'), + ('object', 'codebase'), + ('object', 'data'), + ('object', 'usemap'), + ('q', 'cite'), + ('script', 'src')] + + def __init__(self, baseuri, encoding, _type): + _BaseHTMLProcessor.__init__(self, encoding, _type) + self.baseuri = baseuri + + def resolveURI(self, uri): + return 
_makeSafeAbsoluteURI(_urljoin(self.baseuri, uri.strip())) + + def unknown_starttag(self, tag, attrs): + attrs = self.normalize_attrs(attrs) + attrs = [(key, ((tag, key) in self.relative_uris) and self.resolveURI(value) or value) for key, value in attrs] + _BaseHTMLProcessor.unknown_starttag(self, tag, attrs) + +def _resolveRelativeURIs(htmlSource, baseURI, encoding, _type): + if not _SGML_AVAILABLE: + return htmlSource + + p = _RelativeURIResolver(baseURI, encoding, _type) + p.feed(htmlSource) + return p.output() + +def _makeSafeAbsoluteURI(base, rel=None): + # bail if ACCEPTABLE_URI_SCHEMES is empty + if not ACCEPTABLE_URI_SCHEMES: + return _urljoin(base, rel or u'') + if not base: + return rel or u'' + if not rel: + scheme = urlparse.urlparse(base)[0] + if not scheme or scheme in ACCEPTABLE_URI_SCHEMES: + return base + return u'' + uri = _urljoin(base, rel) + if uri.strip().split(':', 1)[0] not in ACCEPTABLE_URI_SCHEMES: + return u'' + return uri + +class _HTMLSanitizer(_BaseHTMLProcessor): + acceptable_elements = ['a', 'abbr', 'acronym', 'address', 'area', + 'article', 'aside', 'audio', 'b', 'big', 'blockquote', 'br', 'button', + 'canvas', 'caption', 'center', 'cite', 'code', 'col', 'colgroup', + 'command', 'datagrid', 'datalist', 'dd', 'del', 'details', 'dfn', + 'dialog', 'dir', 'div', 'dl', 'dt', 'em', 'event-source', 'fieldset', + 'figcaption', 'figure', 'footer', 'font', 'form', 'header', 'h1', + 'h2', 'h3', 'h4', 'h5', 'h6', 'hr', 'i', 'img', 'input', 'ins', + 'keygen', 'kbd', 'label', 'legend', 'li', 'm', 'map', 'menu', 'meter', + 'multicol', 'nav', 'nextid', 'ol', 'output', 'optgroup', 'option', + 'p', 'pre', 'progress', 'q', 's', 'samp', 'section', 'select', + 'small', 'sound', 'source', 'spacer', 'span', 'strike', 'strong', + 'sub', 'sup', 'table', 'tbody', 'td', 'textarea', 'time', 'tfoot', + 'th', 'thead', 'tr', 'tt', 'u', 'ul', 'var', 'video', 'noscript'] + + acceptable_attributes = ['abbr', 'accept', 'accept-charset', 'accesskey', + 'action', 
'align', 'alt', 'autocomplete', 'autofocus', 'axis', + 'background', 'balance', 'bgcolor', 'bgproperties', 'border', + 'bordercolor', 'bordercolordark', 'bordercolorlight', 'bottompadding', + 'cellpadding', 'cellspacing', 'ch', 'challenge', 'char', 'charoff', + 'choff', 'charset', 'checked', 'cite', 'class', 'clear', 'color', 'cols', + 'colspan', 'compact', 'contenteditable', 'controls', 'coords', 'data', + 'datafld', 'datapagesize', 'datasrc', 'datetime', 'default', 'delay', + 'dir', 'disabled', 'draggable', 'dynsrc', 'enctype', 'end', 'face', 'for', + 'form', 'frame', 'galleryimg', 'gutter', 'headers', 'height', 'hidefocus', + 'hidden', 'high', 'href', 'hreflang', 'hspace', 'icon', 'id', 'inputmode', + 'ismap', 'keytype', 'label', 'leftspacing', 'lang', 'list', 'longdesc', + 'loop', 'loopcount', 'loopend', 'loopstart', 'low', 'lowsrc', 'max', + 'maxlength', 'media', 'method', 'min', 'multiple', 'name', 'nohref', + 'noshade', 'nowrap', 'open', 'optimum', 'pattern', 'ping', 'point-size', + 'prompt', 'pqg', 'radiogroup', 'readonly', 'rel', 'repeat-max', + 'repeat-min', 'replace', 'required', 'rev', 'rightspacing', 'rows', + 'rowspan', 'rules', 'scope', 'selected', 'shape', 'size', 'span', 'src', + 'start', 'step', 'summary', 'suppress', 'tabindex', 'target', 'template', + 'title', 'toppadding', 'type', 'unselectable', 'usemap', 'urn', 'valign', + 'value', 'variable', 'volume', 'vspace', 'vrml', 'width', 'wrap', + 'xml:lang'] + + unacceptable_elements_with_end_tag = ['script', 'applet', 'style'] + + acceptable_css_properties = ['azimuth', 'background-color', + 'border-bottom-color', 'border-collapse', 'border-color', + 'border-left-color', 'border-right-color', 'border-top-color', 'clear', + 'color', 'cursor', 'direction', 'display', 'elevation', 'float', 'font', + 'font-family', 'font-size', 'font-style', 'font-variant', 'font-weight', + 'height', 'letter-spacing', 'line-height', 'overflow', 'pause', + 'pause-after', 'pause-before', 'pitch', 'pitch-range', 
'richness', + 'speak', 'speak-header', 'speak-numeral', 'speak-punctuation', + 'speech-rate', 'stress', 'text-align', 'text-decoration', 'text-indent', + 'unicode-bidi', 'vertical-align', 'voice-family', 'volume', + 'white-space', 'width'] + + # survey of common keywords found in feeds + acceptable_css_keywords = ['auto', 'aqua', 'black', 'block', 'blue', + 'bold', 'both', 'bottom', 'brown', 'center', 'collapse', 'dashed', + 'dotted', 'fuchsia', 'gray', 'green', '!important', 'italic', 'left', + 'lime', 'maroon', 'medium', 'none', 'navy', 'normal', 'nowrap', 'olive', + 'pointer', 'purple', 'red', 'right', 'solid', 'silver', 'teal', 'top', + 'transparent', 'underline', 'white', 'yellow'] + + valid_css_values = re.compile('^(#[0-9a-f]+|rgb\(\d+%?,\d*%?,?\d*%?\)?|' + + '\d{0,2}\.?\d{0,2}(cm|em|ex|in|mm|pc|pt|px|%|,|\))?)$') + + mathml_elements = ['annotation', 'annotation-xml', 'maction', 'math', + 'merror', 'mfenced', 'mfrac', 'mi', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded', + 'mphantom', 'mprescripts', 'mroot', 'mrow', 'mspace', 'msqrt', 'mstyle', + 'msub', 'msubsup', 'msup', 'mtable', 'mtd', 'mtext', 'mtr', 'munder', + 'munderover', 'none', 'semantics'] + + mathml_attributes = ['actiontype', 'align', 'columnalign', 'columnalign', + 'columnalign', 'close', 'columnlines', 'columnspacing', 'columnspan', 'depth', + 'display', 'displaystyle', 'encoding', 'equalcolumns', 'equalrows', + 'fence', 'fontstyle', 'fontweight', 'frame', 'height', 'linethickness', + 'lspace', 'mathbackground', 'mathcolor', 'mathvariant', 'mathvariant', + 'maxsize', 'minsize', 'open', 'other', 'rowalign', 'rowalign', 'rowalign', + 'rowlines', 'rowspacing', 'rowspan', 'rspace', 'scriptlevel', 'selection', + 'separator', 'separators', 'stretchy', 'width', 'width', 'xlink:href', + 'xlink:show', 'xlink:type', 'xmlns', 'xmlns:xlink'] + + # svgtiny - foreignObject + linearGradient + radialGradient + stop + svg_elements = ['a', 'animate', 'animateColor', 'animateMotion', + 'animateTransform', 
'circle', 'defs', 'desc', 'ellipse', 'foreignObject', + 'font-face', 'font-face-name', 'font-face-src', 'g', 'glyph', 'hkern', + 'linearGradient', 'line', 'marker', 'metadata', 'missing-glyph', 'mpath', + 'path', 'polygon', 'polyline', 'radialGradient', 'rect', 'set', 'stop', + 'svg', 'switch', 'text', 'title', 'tspan', 'use'] + + # svgtiny + class + opacity + offset + xmlns + xmlns:xlink + svg_attributes = ['accent-height', 'accumulate', 'additive', 'alphabetic', + 'arabic-form', 'ascent', 'attributeName', 'attributeType', + 'baseProfile', 'bbox', 'begin', 'by', 'calcMode', 'cap-height', + 'class', 'color', 'color-rendering', 'content', 'cx', 'cy', 'd', 'dx', + 'dy', 'descent', 'display', 'dur', 'end', 'fill', 'fill-opacity', + 'fill-rule', 'font-family', 'font-size', 'font-stretch', 'font-style', + 'font-variant', 'font-weight', 'from', 'fx', 'fy', 'g1', 'g2', + 'glyph-name', 'gradientUnits', 'hanging', 'height', 'horiz-adv-x', + 'horiz-origin-x', 'id', 'ideographic', 'k', 'keyPoints', 'keySplines', + 'keyTimes', 'lang', 'mathematical', 'marker-end', 'marker-mid', + 'marker-start', 'markerHeight', 'markerUnits', 'markerWidth', 'max', + 'min', 'name', 'offset', 'opacity', 'orient', 'origin', + 'overline-position', 'overline-thickness', 'panose-1', 'path', + 'pathLength', 'points', 'preserveAspectRatio', 'r', 'refX', 'refY', + 'repeatCount', 'repeatDur', 'requiredExtensions', 'requiredFeatures', + 'restart', 'rotate', 'rx', 'ry', 'slope', 'stemh', 'stemv', + 'stop-color', 'stop-opacity', 'strikethrough-position', + 'strikethrough-thickness', 'stroke', 'stroke-dasharray', + 'stroke-dashoffset', 'stroke-linecap', 'stroke-linejoin', + 'stroke-miterlimit', 'stroke-opacity', 'stroke-width', 'systemLanguage', + 'target', 'text-anchor', 'to', 'transform', 'type', 'u1', 'u2', + 'underline-position', 'underline-thickness', 'unicode', 'unicode-range', + 'units-per-em', 'values', 'version', 'viewBox', 'visibility', 'width', + 'widths', 'x', 'x-height', 'x1', 'x2', 
'xlink:actuate', 'xlink:arcrole', + 'xlink:href', 'xlink:role', 'xlink:show', 'xlink:title', 'xlink:type', + 'xml:base', 'xml:lang', 'xml:space', 'xmlns', 'xmlns:xlink', 'y', 'y1', + 'y2', 'zoomAndPan'] + + svg_attr_map = None + svg_elem_map = None + + acceptable_svg_properties = [ 'fill', 'fill-opacity', 'fill-rule', + 'stroke', 'stroke-width', 'stroke-linecap', 'stroke-linejoin', + 'stroke-opacity'] + + def reset(self): + _BaseHTMLProcessor.reset(self) + self.unacceptablestack = 0 + self.mathmlOK = 0 + self.svgOK = 0 + + def unknown_starttag(self, tag, attrs): + acceptable_attributes = self.acceptable_attributes + keymap = {} + if not tag in self.acceptable_elements or self.svgOK: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack += 1 + + # add implicit namespaces to html5 inline svg/mathml + if self._type.endswith('html'): + if not dict(attrs).get('xmlns'): + if tag=='svg': + attrs.append( ('xmlns','http://www.w3.org/2000/svg') ) + if tag=='math': + attrs.append( ('xmlns','http://www.w3.org/1998/Math/MathML') ) + + # not otherwise acceptable, perhaps it is MathML or SVG? + if tag=='math' and ('xmlns','http://www.w3.org/1998/Math/MathML') in attrs: + self.mathmlOK += 1 + if tag=='svg' and ('xmlns','http://www.w3.org/2000/svg') in attrs: + self.svgOK += 1 + + # chose acceptable attributes based on tag class, else bail + if self.mathmlOK and tag in self.mathml_elements: + acceptable_attributes = self.mathml_attributes + elif self.svgOK and tag in self.svg_elements: + # for most vocabularies, lowercasing is a good idea. 
Many + # svg elements, however, are camel case + if not self.svg_attr_map: + lower=[attr.lower() for attr in self.svg_attributes] + mix=[a for a in self.svg_attributes if a not in lower] + self.svg_attributes = lower + self.svg_attr_map = dict([(a.lower(),a) for a in mix]) + + lower=[attr.lower() for attr in self.svg_elements] + mix=[a for a in self.svg_elements if a not in lower] + self.svg_elements = lower + self.svg_elem_map = dict([(a.lower(),a) for a in mix]) + acceptable_attributes = self.svg_attributes + tag = self.svg_elem_map.get(tag,tag) + keymap = self.svg_attr_map + elif not tag in self.acceptable_elements: + return + + # declare xlink namespace, if needed + if self.mathmlOK or self.svgOK: + if filter(lambda (n,v): n.startswith('xlink:'),attrs): + if not ('xmlns:xlink','http://www.w3.org/1999/xlink') in attrs: + attrs.append(('xmlns:xlink','http://www.w3.org/1999/xlink')) + + clean_attrs = [] + for key, value in self.normalize_attrs(attrs): + if key in acceptable_attributes: + key=keymap.get(key,key) + # make sure the uri uses an acceptable uri scheme + if key == u'href': + value = _makeSafeAbsoluteURI(value) + clean_attrs.append((key,value)) + elif key=='style': + clean_value = self.sanitize_style(value) + if clean_value: + clean_attrs.append((key,clean_value)) + _BaseHTMLProcessor.unknown_starttag(self, tag, clean_attrs) + + def unknown_endtag(self, tag): + if not tag in self.acceptable_elements: + if tag in self.unacceptable_elements_with_end_tag: + self.unacceptablestack -= 1 + if self.mathmlOK and tag in self.mathml_elements: + if tag == 'math' and self.mathmlOK: + self.mathmlOK -= 1 + elif self.svgOK and tag in self.svg_elements: + tag = self.svg_elem_map.get(tag,tag) + if tag == 'svg' and self.svgOK: + self.svgOK -= 1 + else: + return + _BaseHTMLProcessor.unknown_endtag(self, tag) + + def handle_pi(self, text): + pass + + def handle_decl(self, text): + pass + + def handle_data(self, text): + if not self.unacceptablestack: + 
_BaseHTMLProcessor.handle_data(self, text) + + def sanitize_style(self, style): + # disallow urls + style=re.compile('url\s*\(\s*[^\s)]+?\s*\)\s*').sub(' ',style) + + # gauntlet + if not re.match("""^([:,;#%.\sa-zA-Z0-9!]|\w-\w|'[\s\w]+'|"[\s\w]+"|\([\d,\s]+\))*$""", style): + return '' + # This replaced a regexp that used re.match and was prone to pathological back-tracking. + if re.sub("\s*[-\w]+\s*:\s*[^:;]*;?", '', style).strip(): + return '' + + clean = [] + for prop,value in re.findall("([-\w]+)\s*:\s*([^:;]*)",style): + if not value: + continue + if prop.lower() in self.acceptable_css_properties: + clean.append(prop + ': ' + value + ';') + elif prop.split('-')[0].lower() in ['background','border','margin','padding']: + for keyword in value.split(): + if not keyword in self.acceptable_css_keywords and \ + not self.valid_css_values.match(keyword): + break + else: + clean.append(prop + ': ' + value + ';') + elif self.svgOK and prop.lower() in self.acceptable_svg_properties: + clean.append(prop + ': ' + value + ';') + + return ' '.join(clean) + + def parse_comment(self, i, report=1): + ret = _BaseHTMLProcessor.parse_comment(self, i, report) + if ret >= 0: + return ret + # if ret == -1, this may be a malicious attempt to circumvent + # sanitization, or a page-destroying unclosed comment + match = re.compile(r'--[^>]*>').search(self.rawdata, i+4) + if match: + return match.end() + # unclosed comment; deliberately fail to handle_data() + return len(self.rawdata) + + +def _sanitizeHTML(htmlSource, encoding, _type): + if not _SGML_AVAILABLE: + return htmlSource + p = _HTMLSanitizer(encoding, _type) + htmlSource = htmlSource.replace(''): + data = data.split('>', 1)[1] + if data.count(' stream + + This function lets you define parsers that take any input source + (URL, pathname to local or network file, or actual data as a string) + and deal with it in a uniform manner. 
Returned object is guaranteed + to have all the basic stdio read methods (read, readline, readlines). + Just .close() the object when you're done with it. + + If the etag argument is supplied, it will be used as the value of an + If-None-Match request header. + + If the modified argument is supplied, it can be a tuple of 9 integers + (as returned by gmtime() in the standard Python time module) or a date + string in any format supported by feedparser. Regardless, it MUST + be in GMT (Greenwich Mean Time). It will be reformatted into an + RFC 1123-compliant date and used as the value of an If-Modified-Since + request header. + + If the agent argument is supplied, it will be used as the value of a + User-Agent request header. + + If the referrer argument is supplied, it will be used as the value of a + Referer[sic] request header. + + If handlers is supplied, it is a list of handlers used to build a + urllib2 opener. + + if request_headers is supplied it is a dictionary of HTTP request headers + that will override the values generated by FeedParser. 
+ """ + + if hasattr(url_file_stream_or_string, 'read'): + return url_file_stream_or_string + + if url_file_stream_or_string == '-': + return sys.stdin + + if isinstance(url_file_stream_or_string, basestring) \ + and urlparse.urlparse(url_file_stream_or_string)[0] in ('http', 'https', 'ftp', 'file', 'feed'): + # Deal with the feed URI scheme + if url_file_stream_or_string.startswith('feed:http'): + url_file_stream_or_string = url_file_stream_or_string[5:] + elif url_file_stream_or_string.startswith('feed:'): + url_file_stream_or_string = 'http:' + url_file_stream_or_string[5:] + if not agent: + agent = USER_AGENT + # test for inline user:password for basic auth + auth = None + if base64: + urltype, rest = urllib.splittype(url_file_stream_or_string) + realhost, rest = urllib.splithost(rest) + if realhost: + user_passwd, realhost = urllib.splituser(realhost) + if user_passwd: + url_file_stream_or_string = '%s://%s%s' % (urltype, realhost, rest) + auth = base64.standard_b64encode(user_passwd).strip() + + # iri support + if isinstance(url_file_stream_or_string, unicode): + url_file_stream_or_string = _convert_to_idn(url_file_stream_or_string) + + # try to open with urllib2 (to use optional headers) + request = _build_urllib2_request(url_file_stream_or_string, agent, etag, modified, referrer, auth, request_headers) + opener = apply(urllib2.build_opener, tuple(handlers + [_FeedURLHandler()])) + opener.addheaders = [] # RMK - must clear so we only send our custom User-Agent + try: + return opener.open(request) + finally: + opener.close() # JohnD + + # try to open with native open function (if url_file_stream_or_string is a filename) + try: + return open(url_file_stream_or_string, 'rb') + except IOError: + pass + + # treat url_file_stream_or_string as string + if isinstance(url_file_stream_or_string, unicode): + return _StringIO(url_file_stream_or_string.encode('utf-8')) + return _StringIO(url_file_stream_or_string) + +def _convert_to_idn(url): + """Convert a URL to IDN 
notation""" + # this function should only be called with a unicode string + # strategy: if the host cannot be encoded in ascii, then + # it'll be necessary to encode it in idn form + parts = list(urlparse.urlsplit(url)) + try: + parts[1].encode('ascii') + except UnicodeEncodeError: + # the url needs to be converted to idn notation + host = parts[1].rsplit(':', 1) + newhost = [] + port = u'' + if len(host) == 2: + port = host.pop() + for h in host[0].split('.'): + newhost.append(h.encode('idna').decode('utf-8')) + parts[1] = '.'.join(newhost) + if port: + parts[1] += ':' + port + return urlparse.urlunsplit(parts) + else: + return url + +def _build_urllib2_request(url, agent, etag, modified, referrer, auth, request_headers): + request = urllib2.Request(url) + request.add_header('User-Agent', agent) + if etag: + request.add_header('If-None-Match', etag) + if isinstance(modified, basestring): + modified = _parse_date(modified) + elif isinstance(modified, datetime.datetime): + modified = modified.utctimetuple() + if modified: + # format into an RFC 1123-compliant timestamp. We can't use + # time.strftime() since the %a and %b directives can be affected + # by the current locale, but RFC 2616 states that dates must be + # in English. 
+ short_weekdays = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + request.add_header('If-Modified-Since', '%s, %02d %s %04d %02d:%02d:%02d GMT' % (short_weekdays[modified[6]], modified[2], months[modified[1] - 1], modified[0], modified[3], modified[4], modified[5])) + if referrer: + request.add_header('Referer', referrer) + if gzip and zlib: + request.add_header('Accept-encoding', 'gzip, deflate') + elif gzip: + request.add_header('Accept-encoding', 'gzip') + elif zlib: + request.add_header('Accept-encoding', 'deflate') + else: + request.add_header('Accept-encoding', '') + if auth: + request.add_header('Authorization', 'Basic %s' % auth) + if ACCEPT_HEADER: + request.add_header('Accept', ACCEPT_HEADER) + # use this for whatever -- cookies, special headers, etc + # [('Cookie','Something'),('x-special-header','Another Value')] + for header_name, header_value in request_headers.items(): + request.add_header(header_name, header_value) + request.add_header('A-IM', 'feed') # RFC 3229 support + return request + +_date_handlers = [] +def registerDateHandler(func): + '''Register a date handler function (takes string, returns 9-tuple date in GMT)''' + _date_handlers.insert(0, func) + +# ISO-8601 date parsing routines written by Fazal Majid. +# The ISO 8601 standard is very convoluted and irregular - a full ISO 8601 +# parser is beyond the scope of feedparser and would be a worthwhile addition +# to the Python library. +# A single regular expression cannot parse ISO 8601 date formats into groups +# as the standard is highly irregular (for instance is 030104 2003-01-04 or +# 0301-04-01), so we use templates instead. +# Please note the order in templates is significant because we need a +# greedy match. 
+_iso8601_tmpl = ['YYYY-?MM-?DD', 'YYYY-0MM?-?DD', 'YYYY-MM', 'YYYY-?OOO', + 'YY-?MM-?DD', 'YY-?OOO', 'YYYY', + '-YY-?MM', '-OOO', '-YY', + '--MM-?DD', '--MM', + '---DD', + 'CC', ''] +_iso8601_re = [ + tmpl.replace( + 'YYYY', r'(?P\d{4})').replace( + 'YY', r'(?P\d\d)').replace( + 'MM', r'(?P[01]\d)').replace( + 'DD', r'(?P[0123]\d)').replace( + 'OOO', r'(?P[0123]\d\d)').replace( + 'CC', r'(?P\d\d$)') + + r'(T?(?P\d{2}):(?P\d{2})' + + r'(:(?P\d{2}))?' + + r'(\.(?P\d+))?' + + r'(?P[+-](?P\d{2})(:(?P\d{2}))?|Z)?)?' + for tmpl in _iso8601_tmpl] +try: + del tmpl +except NameError: + pass +_iso8601_matches = [re.compile(regex).match for regex in _iso8601_re] +try: + del regex +except NameError: + pass +def _parse_date_iso8601(dateString): + '''Parse a variety of ISO-8601-compatible formats like 20040105''' + m = None + for _iso8601_match in _iso8601_matches: + m = _iso8601_match(dateString) + if m: + break + if not m: + return + if m.span() == (0, 0): + return + params = m.groupdict() + ordinal = params.get('ordinal', 0) + if ordinal: + ordinal = int(ordinal) + else: + ordinal = 0 + year = params.get('year', '--') + if not year or year == '--': + year = time.gmtime()[0] + elif len(year) == 2: + # ISO 8601 assumes current century, i.e. 93 -> 2093, NOT 1993 + year = 100 * int(time.gmtime()[0] / 100) + int(year) + else: + year = int(year) + month = params.get('month', '-') + if not month or month == '-': + # ordinals are NOT normalized by mktime, we simulate them + # by setting month=1, day=ordinal + if ordinal: + month = 1 + else: + month = time.gmtime()[1] + month = int(month) + day = params.get('day', 0) + if not day: + # see above + if ordinal: + day = ordinal + elif params.get('century', 0) or \ + params.get('year', 0) or params.get('month', 0): + day = 1 + else: + day = time.gmtime()[2] + else: + day = int(day) + # special case of the century - is the first year of the 21st century + # 2000 or 2001 ? The debate goes on... 
+ if 'century' in params.keys(): + year = (int(params['century']) - 1) * 100 + 1 + # in ISO 8601 most fields are optional + for field in ['hour', 'minute', 'second', 'tzhour', 'tzmin']: + if not params.get(field, None): + params[field] = 0 + hour = int(params.get('hour', 0)) + minute = int(params.get('minute', 0)) + second = int(float(params.get('second', 0))) + # weekday is normalized by mktime(), we can ignore it + weekday = 0 + daylight_savings_flag = -1 + tm = [year, month, day, hour, minute, second, weekday, + ordinal, daylight_savings_flag] + # ISO 8601 time zone adjustments + tz = params.get('tz') + if tz and tz != 'Z': + if tz[0] == '-': + tm[3] += int(params.get('tzhour', 0)) + tm[4] += int(params.get('tzmin', 0)) + elif tz[0] == '+': + tm[3] -= int(params.get('tzhour', 0)) + tm[4] -= int(params.get('tzmin', 0)) + else: + return None + # Python's time.mktime() is a wrapper around the ANSI C mktime(3c) + # which is guaranteed to normalize d/m/y/h/m/s. + # Many implementations have bugs, but we'll pretend they don't. + return time.localtime(time.mktime(tuple(tm))) +registerDateHandler(_parse_date_iso8601) + +# 8-bit date handling routines written by ytrewq1. 
# Korean words used in OnBlog/Nate timestamps; the hex comments give the
# original euc-kr byte sequences.
_korean_year  = u'\ub144' # b3e2 in euc-kr
_korean_month = u'\uc6d4' # bff9 in euc-kr
_korean_day   = u'\uc77c' # c0cf in euc-kr
_korean_am    = u'\uc624\uc804' # bfc0 c0fc in euc-kr
_korean_pm    = u'\uc624\ud6c4' # bfc0 c8c4 in euc-kr

# e.g. "2004년 05월 28일 01:31:45" (year/month/day markers in Korean)
_korean_onblog_date_re = \
    re.compile('(\d{4})%s\s+(\d{2})%s\s+(\d{2})%s\s+(\d{2}):(\d{2}):(\d{2})' % \
               (_korean_year, _korean_month, _korean_day))
# e.g. "2004-05-25 오전 11:30:00" (AM/PM marker in Korean)
_korean_nate_date_re = \
    re.compile(u'(\d{4})-(\d{2})-(\d{2})\s+(%s|%s)\s+(\d{,2}):(\d{,2}):(\d{,2})' % \
               (_korean_am, _korean_pm))
def _parse_date_onblog(dateString):
    '''Parse a string according to the OnBlog 8-bit date format'''
    m = _korean_onblog_date_re.match(dateString)
    if not m:
        return
    # Rebuild as W3DTF with a fixed +09:00 (KST) offset and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_onblog)

def _parse_date_nate(dateString):
    '''Parse a string according to the Nate 8-bit date format'''
    m = _korean_nate_date_re.match(dateString)
    if not m:
        return
    hour = int(m.group(5))
    ampm = m.group(4)
    # Convert 12-hour clock to 24-hour when the PM marker is present.
    if (ampm == _korean_pm):
        hour += 12
    hour = str(hour)
    if len(hour) == 1:
        hour = '0' + hour
    # Rebuild as W3DTF with a fixed +09:00 (KST) offset and delegate.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': hour, 'minute': m.group(6), 'second': m.group(7),\
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_nate)

# e.g. "2004-07-08 23:56:58" or "2004-07-08 23:56:58.0"
_mssql_date_re = \
    re.compile('(\d{4})-(\d{2})-(\d{2})\s+(\d{2}):(\d{2}):(\d{2})(\.\d+)?')
def _parse_date_mssql(dateString):
    '''Parse a string according to the MS SQL date format'''
    m = _mssql_date_re.match(dateString)
    if not m:
        return
    # NOTE(review): the +09:00 offset looks copied from the Korean
    # handlers above; MS SQL timestamps carry no zone — confirm upstream.
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s:%(second)s%(zonediff)s' % \
                {'year': m.group(1), 'month': m.group(2), 'day': m.group(3),\
                 'hour': m.group(4), 'minute': m.group(5), 'second': m.group(6),\
                 'zonediff': '+09:00'}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_mssql)

# Unicode strings for Greek date strings; maps Greek month abbreviations
# to their English RFC 822 equivalents (hex comments are iso-8859-7 bytes).
_greek_months = \
  { \
   u'\u0399\u03b1\u03bd': u'Jan',       # c9e1ed in iso-8859-7
   u'\u03a6\u03b5\u03b2': u'Feb',       # d6e5e2 in iso-8859-7
   u'\u039c\u03ac\u03ce': u'Mar',       # ccdcfe in iso-8859-7
   u'\u039c\u03b1\u03ce': u'Mar',       # cce1fe in iso-8859-7
   u'\u0391\u03c0\u03c1': u'Apr',       # c1f0f1 in iso-8859-7
   u'\u039c\u03ac\u03b9': u'May',       # ccdce9 in iso-8859-7
   u'\u039c\u03b1\u03ca': u'May',       # cce1fa in iso-8859-7
   u'\u039c\u03b1\u03b9': u'May',       # cce1e9 in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bd': u'Jun', # c9effded in iso-8859-7
   u'\u0399\u03bf\u03bd': u'Jun',       # c9efed in iso-8859-7
   u'\u0399\u03bf\u03cd\u03bb': u'Jul', # c9effdeb in iso-8859-7
   u'\u0399\u03bf\u03bb': u'Jul',       # c9f9eb in iso-8859-7
   u'\u0391\u03cd\u03b3': u'Aug',       # c1fde3 in iso-8859-7
   u'\u0391\u03c5\u03b3': u'Aug',       # c1f5e3 in iso-8859-7
   u'\u03a3\u03b5\u03c0': u'Sep',       # d3e5f0 in iso-8859-7
   u'\u039f\u03ba\u03c4': u'Oct',       # cfeaf4 in iso-8859-7
   u'\u039d\u03bf\u03ad': u'Nov',       # cdefdd in iso-8859-7
   u'\u039d\u03bf\u03b5': u'Nov',       # cdefe5 in iso-8859-7
   u'\u0394\u03b5\u03ba': u'Dec',       # c4e5ea in iso-8859-7
  }

# Greek weekday abbreviations mapped to English RFC 822 equivalents.
_greek_wdays = \
  { \
   u'\u039a\u03c5\u03c1': u'Sun', # caf5f1 in iso-8859-7
   u'\u0394\u03b5\u03c5': u'Mon', # c4e5f5 in iso-8859-7
   u'\u03a4\u03c1\u03b9': u'Tue', # d4f1e9 in iso-8859-7
   u'\u03a4\u03b5\u03c4': u'Wed', # d4e5f4 in iso-8859-7
   u'\u03a0\u03b5\u03bc': u'Thu', # d0e5ec in iso-8859-7
   u'\u03a0\u03b1\u03c1': u'Fri', # d0e1f1 in iso-8859-7
   u'\u03a3\u03b1\u03b2': u'Sat', # d3e1e2 in iso-8859-7
  }

# "<wday>, DD <month> YYYY HH:MM:SS <zone>" with Greek wday/month names
_greek_date_format_re = \
    re.compile(u'([^,]+),\s+(\d{2})\s+([^\s]+)\s+(\d{4})\s+(\d{2}):(\d{2}):(\d{2})\s+([^\s]+)')

def _parse_date_greek(dateString):
    '''Parse a string according to a Greek 8-bit date format.'''
    m = _greek_date_format_re.match(dateString)
    if not m:
        return
    # May raise KeyError for unknown names; _parse_date() catches it.
    wday = _greek_wdays[m.group(1)]
    month = _greek_months[m.group(3)]
    # Translate into RFC 822 form and delegate.
    rfc822date = '%(wday)s, %(day)s %(month)s %(year)s %(hour)s:%(minute)s:%(second)s %(zonediff)s' % \
                 {'wday': wday, 'day': m.group(2), 'month': month, 'year': m.group(4),\
                  'hour': m.group(5), 'minute': m.group(6), 'second': m.group(7),\
                  'zonediff': m.group(8)}
    return _parse_date_rfc822(rfc822date)
registerDateHandler(_parse_date_greek)

# Unicode strings for Hungarian date strings; full month names mapped
# to two-digit month numbers.
_hungarian_months = \
  { \
    u'janu\u00e1r':   u'01',  # e1 in iso-8859-2
    u'febru\u00e1ri': u'02',  # e1 in iso-8859-2
    u'm\u00e1rcius':  u'03',  # e1 in iso-8859-2
    u'\u00e1prilis':  u'04',  # e1 in iso-8859-2
    u'm\u00e1ujus':   u'05',  # e1 in iso-8859-2
    u'j\u00fanius':   u'06',  # fa in iso-8859-2
    u'j\u00falius':   u'07',  # fa in iso-8859-2
    u'augusztus':     u'08',
    u'szeptember':    u'09',
    u'okt\u00f3ber':  u'10',  # f3 in iso-8859-2
    u'november':      u'11',
    u'december':      u'12',
  }

# "YYYY-<month name>-D?T?H?H:MM±HH:MM" with a Hungarian month name
_hungarian_date_format_re = \
  re.compile(u'(\d{4})-([^-]+)-(\d{,2})T(\d{,2}):(\d{2})((\+|-)(\d{,2}:\d{2}))')

def _parse_date_hungarian(dateString):
    '''Parse a string according to a Hungarian 8-bit date format.'''
    m = _hungarian_date_format_re.match(dateString)
    if not m or m.group(2) not in _hungarian_months:
        return None
    month = _hungarian_months[m.group(2)]
    # Zero-pad single-digit day and hour before delegating to W3DTF.
    day = m.group(3)
    if len(day) == 1:
        day = '0' + day
    hour = m.group(4)
    if len(hour) == 1:
        hour = '0' + hour
    w3dtfdate = '%(year)s-%(month)s-%(day)sT%(hour)s:%(minute)s%(zonediff)s' % \
                {'year': m.group(1), 'month': month, 'day': day,\
                 'hour': hour, 'minute': m.group(5),\
                 'zonediff': m.group(6)}
    return _parse_date_w3dtf(w3dtfdate)
registerDateHandler(_parse_date_hungarian)

# W3DTF-style date parsing adapted from PyXML xml.utils.iso8601, written by
# Drake and licensed under the Python license.
# Removed all range checking for month, day, hour, minute, and second,
# since mktime will normalize these later
def _parse_date_w3dtf(dateString):
    '''Parse a W3DTF (ISO 8601 profile) date string into a UTC 9-tuple.

    Returns None when the string does not match the W3DTF grammar, or
    when the year is implausible (< 1000 after 2-digit expansion).
    '''
    def __extract_date(m):
        # Returns (year, month, day); (0, 0, 0) signals a bad year.
        year = int(m.group('year'))
        if year < 100:
            year = 100 * int(time.gmtime()[0] / 100) + int(year)
        if year < 1000:
            return 0, 0, 0
        julian = m.group('julian')
        if julian:
            # Ordinal date: iterate until mktime agrees on the day-of-year.
            julian = int(julian)
            month = julian / 30 + 1
            day = julian % 30 + 1
            jday = None
            while jday != julian:
                t = time.mktime((year, month, day, 0, 0, 0, 0, 0, 0))
                jday = time.gmtime(t)[-2]
                diff = abs(jday - julian)
                if jday > julian:
                    if diff < day:
                        day = day - diff
                    else:
                        month = month - 1
                        day = 31
                elif jday < julian:
                    if day + diff < 28:
                        day = day + diff
                    else:
                        month = month + 1
            return year, month, day
        month = m.group('month')
        day = 1
        if month is None:
            month = 1
        else:
            month = int(month)
            day = m.group('day')
            if day:
                day = int(day)
            else:
                day = 1
        return year, month, day

    def __extract_time(m):
        # Returns (hours, minutes, seconds), defaulting to midnight.
        if not m:
            return 0, 0, 0
        hours = m.group('hours')
        if not hours:
            return 0, 0, 0
        hours = int(hours)
        minutes = int(m.group('minutes'))
        seconds = m.group('seconds')
        if seconds:
            seconds = int(seconds)
        else:
            seconds = 0
        return hours, minutes, seconds

    def __extract_tzd(m):
        '''Return the Time Zone Designator as an offset in seconds from UTC.'''
        if not m:
            return 0
        tzd = m.group('tzd')
        if not tzd:
            return 0
        if tzd == 'Z':
            return 0
        hours = int(m.group('tzdhours'))
        minutes = m.group('tzdminutes')
        if minutes:
            minutes = int(minutes)
        else:
            minutes = 0
        offset = (hours*60 + minutes) * 60
        if tzd[0] == '+':
            return -offset
        return offset

    # NOTE: the named groups below were missing in the damaged source
    # ('(?P\d\d\d\d)' is a regex syntax error); names restored to match
    # the m.group('year'), (?P=dsep), (?P=tsep) etc. references above.
    __date_re = ('(?P<year>\d\d\d\d)'
                 '(?:(?P<dsep>-|)'
                 '(?:(?P<month>\d\d)(?:(?P=dsep)(?P<day>\d\d))?'
                 '|(?P<julian>\d\d\d)))?')
    __tzd_re = '(?P<tzd>[-+](?P<tzdhours>\d\d)(?::?(?P<tzdminutes>\d\d))|Z)'
    __tzd_rx = re.compile(__tzd_re)
    __time_re = ('(?P<hours>\d\d)(?P<tsep>:|)(?P<minutes>\d\d)'
                 '(?:(?P=tsep)(?P<seconds>\d\d)(?:[.,]\d+)?)?'
                 + __tzd_re)
    __datetime_re = '%s(?:T%s)?' % (__date_re, __time_re)
    __datetime_rx = re.compile(__datetime_re)
    m = __datetime_rx.match(dateString)
    # The whole string must be consumed, not just a prefix.
    if (m is None) or (m.group() != dateString):
        return
    gmt = __extract_date(m) + __extract_time(m) + (0, 0, 0)
    if gmt[0] == 0:
        return
    return time.gmtime(time.mktime(gmt) + __extract_tzd(m) - time.timezone)
registerDateHandler(_parse_date_w3dtf)

def _parse_date_rfc822(dateString):
    '''Parse an RFC822, RFC1123, RFC2822, or asctime-style date'''
    data = dateString.split()
    if not data:
        return None
    # Drop a leading day-of-week token ("Fri," / "Fri.").
    if data[0][-1] in (',', '.') or data[0].lower() in rfc822._daynames:
        del data[0]
    if len(data) == 4:
        s = data[3]
        # Split a "HH:MM:SS+ZZZZ" token into time and zone parts.
        i = s.find('+')
        if i > 0:
            data[3:] = [s[:i], s[i+1:]]
        else:
            data.append('')
        dateString = " ".join(data)
    # Account for the Etc/GMT timezone by stripping 'Etc/'
    elif len(data) == 5 and data[4].lower().startswith('etc/'):
        data[4] = data[4][4:]
        dateString = " ".join(data)
    if len(data) < 5:
        # Date-only input: assume midnight GMT.
        dateString += ' 00:00:00 GMT'
    tm = rfc822.parsedate_tz(dateString)
    if tm:
        # Jython doesn't adjust for 2-digit years like CPython does,
        # so account for it by shifting the year so that it's in the
        # range 1970-2069 (1970 being the year of the Unix epoch).
        if tm[0] < 100:
            tm = (tm[0] + (1900, 2000)[tm[0] < 70],) + tm[1:]
        return time.gmtime(rfc822.mktime_tz(tm))
# rfc822.py defines several time zones, but we define some extra ones.
# 'ET' is equivalent to 'EST', etc.
+_additional_timezones = {'AT': -400, 'ET': -500, 'CT': -600, 'MT': -700, 'PT': -800} +rfc822._timezones.update(_additional_timezones) +registerDateHandler(_parse_date_rfc822) + +def _parse_date_perforce(aDateString): + """parse a date in yyyy/mm/dd hh:mm:ss TTT format""" + # Fri, 2006/09/15 08:19:53 EDT + _my_date_pattern = re.compile( \ + r'(\w{,3}), (\d{,4})/(\d{,2})/(\d{2}) (\d{,2}):(\d{2}):(\d{2}) (\w{,3})') + + m = _my_date_pattern.search(aDateString) + if m is None: + return None + dow, year, month, day, hour, minute, second, tz = m.groups() + months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] + dateString = "%s, %s %s %s %s:%s:%s %s" % (dow, day, months[int(month) - 1], year, hour, minute, second, tz) + tm = rfc822.parsedate_tz(dateString) + if tm: + return time.gmtime(rfc822.mktime_tz(tm)) +registerDateHandler(_parse_date_perforce) + +def _parse_date(dateString): + '''Parses a variety of date formats into a 9-tuple in GMT''' + if not dateString: + return None + for handler in _date_handlers: + try: + date9tuple = handler(dateString) + except (KeyError, OverflowError, ValueError): + continue + if not date9tuple: + continue + if len(date9tuple) != 9: + continue + return date9tuple + return None + +def _getCharacterEncoding(http_headers, xml_data): + '''Get the character encoding of the XML document + + http_headers is a dictionary + xml_data is a raw string (not Unicode) + + This is so much trickier than it sounds, it's not even funny. + According to RFC 3023 ('XML Media Types'), if the HTTP Content-Type + is application/xml, application/*+xml, + application/xml-external-parsed-entity, or application/xml-dtd, + the encoding given in the charset parameter of the HTTP Content-Type + takes precedence over the encoding given in the XML prefix within the + document, and defaults to 'utf-8' if neither are specified. 
But, if + the HTTP Content-Type is text/xml, text/*+xml, or + text/xml-external-parsed-entity, the encoding given in the XML prefix + within the document is ALWAYS IGNORED and only the encoding given in + the charset parameter of the HTTP Content-Type header should be + respected, and it defaults to 'us-ascii' if not specified. + + Furthermore, discussion on the atom-syntax mailing list with the + author of RFC 3023 leads me to the conclusion that any document + served with a Content-Type of text/* and no charset parameter + must be treated as us-ascii. (We now do this.) And also that it + must always be flagged as non-well-formed. (We now do this too.) + + If Content-Type is unspecified (input was local file or non-HTTP source) + or unrecognized (server just got it totally wrong), then go by the + encoding given in the XML prefix of the document and default to + 'iso-8859-1' as per the HTTP specification (RFC 2616). + + Then, assuming we didn't find a character encoding in the HTTP headers + (and the HTTP Content-type allowed us to look in the body), we need + to sniff the first few bytes of the XML data and try to determine + whether the encoding is ASCII-compatible. Section F of the XML + specification shows the way here: + http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + + If the sniffed encoding is not ASCII-compatible, we need to make it + ASCII compatible so that we can sniff further into the XML declaration + to find the encoding attribute, which will tell us the true encoding. + + Of course, none of this guarantees that we will be able to parse the + feed in the declared character encoding (assuming it was declared + correctly, which many are not). CJKCodecs and iconv_codec help a lot; + you should definitely install them if you can. 
+ http://cjkpython.i18n.org/ + ''' + + def _parseHTTPContentType(content_type): + '''takes HTTP Content-Type header and returns (content type, charset) + + If no charset is specified, returns (content type, '') + If no content type is specified, returns ('', '') + Both return parameters are guaranteed to be lowercase strings + ''' + content_type = content_type or '' + content_type, params = cgi.parse_header(content_type) + charset = params.get('charset', '').replace("'", "") + if not isinstance(charset, unicode): + charset = charset.decode('utf-8', 'ignore') + return content_type, charset + + sniffed_xml_encoding = u'' + xml_encoding = u'' + true_encoding = u'' + http_content_type, http_encoding = _parseHTTPContentType(http_headers.get('content-type', http_headers.get('Content-type'))) + # Must sniff for non-ASCII-compatible character encodings before + # searching for XML declaration. This heuristic is defined in + # section F of the XML specification: + # http://www.w3.org/TR/REC-xml/#sec-guessing-no-ext-info + try: + if xml_data[:4] == _l2bytes([0x4c, 0x6f, 0xa7, 0x94]): + # EBCDIC + xml_data = _ebcdic_to_ascii(xml_data) + elif xml_data[:4] == _l2bytes([0x00, 0x3c, 0x00, 0x3f]): + # UTF-16BE + sniffed_xml_encoding = u'utf-16be' + xml_data = unicode(xml_data, 'utf-16be').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xfe, 0xff])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): + # UTF-16BE with BOM + sniffed_xml_encoding = u'utf-16be' + xml_data = unicode(xml_data[2:], 'utf-16be').encode('utf-8') + elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x3f, 0x00]): + # UTF-16LE + sniffed_xml_encoding = u'utf-16le' + xml_data = unicode(xml_data, 'utf-16le').encode('utf-8') + elif (len(xml_data) >= 4) and (xml_data[:2] == _l2bytes([0xff, 0xfe])) and (xml_data[2:4] != _l2bytes([0x00, 0x00])): + # UTF-16LE with BOM + sniffed_xml_encoding = u'utf-16le' + xml_data = unicode(xml_data[2:], 'utf-16le').encode('utf-8') + elif xml_data[:4] == 
_l2bytes([0x00, 0x00, 0x00, 0x3c]): + # UTF-32BE + sniffed_xml_encoding = u'utf-32be' + xml_data = unicode(xml_data, 'utf-32be').encode('utf-8') + elif xml_data[:4] == _l2bytes([0x3c, 0x00, 0x00, 0x00]): + # UTF-32LE + sniffed_xml_encoding = u'utf-32le' + xml_data = unicode(xml_data, 'utf-32le').encode('utf-8') + elif xml_data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): + # UTF-32BE with BOM + sniffed_xml_encoding = u'utf-32be' + xml_data = unicode(xml_data[4:], 'utf-32be').encode('utf-8') + elif xml_data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): + # UTF-32LE with BOM + sniffed_xml_encoding = u'utf-32le' + xml_data = unicode(xml_data[4:], 'utf-32le').encode('utf-8') + elif xml_data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): + # UTF-8 with BOM + sniffed_xml_encoding = u'utf-8' + xml_data = unicode(xml_data[3:], 'utf-8').encode('utf-8') + else: + # ASCII-compatible + pass + xml_encoding_match = re.compile(_s2bytes('^<\?.*encoding=[\'"](.*?)[\'"].*\?>')).match(xml_data) + except UnicodeDecodeError: + xml_encoding_match = None + if xml_encoding_match: + xml_encoding = xml_encoding_match.groups()[0].decode('utf-8').lower() + if sniffed_xml_encoding and (xml_encoding in (u'iso-10646-ucs-2', u'ucs-2', u'csunicode', u'iso-10646-ucs-4', u'ucs-4', u'csucs4', u'utf-16', u'utf-32', u'utf_16', u'utf_32', u'utf16', u'u16')): + xml_encoding = sniffed_xml_encoding + acceptable_content_type = 0 + application_content_types = (u'application/xml', u'application/xml-dtd', u'application/xml-external-parsed-entity') + text_content_types = (u'text/xml', u'text/xml-external-parsed-entity') + if (http_content_type in application_content_types) or \ + (http_content_type.startswith(u'application/') and http_content_type.endswith(u'+xml')): + acceptable_content_type = 1 + true_encoding = http_encoding or xml_encoding or u'utf-8' + elif (http_content_type in text_content_types) or \ + (http_content_type.startswith(u'text/')) and http_content_type.endswith(u'+xml'): + acceptable_content_type = 1 + 
true_encoding = http_encoding or u'us-ascii' + elif http_content_type.startswith(u'text/'): + true_encoding = http_encoding or u'us-ascii' + elif http_headers and (not (http_headers.has_key('content-type') or http_headers.has_key('Content-type'))): + true_encoding = xml_encoding or u'iso-8859-1' + else: + true_encoding = xml_encoding or u'utf-8' + # some feeds claim to be gb2312 but are actually gb18030. + # apparently MSIE and Firefox both do the following switch: + if true_encoding.lower() == u'gb2312': + true_encoding = u'gb18030' + return true_encoding, http_encoding, xml_encoding, sniffed_xml_encoding, acceptable_content_type + +def _toUTF8(data, encoding): + '''Changes an XML data stream on the fly to specify a new encoding + + data is a raw sequence of bytes (not Unicode) that is presumed to be in %encoding already + encoding is a string recognized by encodings.aliases + ''' + # strip Byte Order Mark (if present) + if (len(data) >= 4) and (data[:2] == _l2bytes([0xfe, 0xff])) and (data[2:4] != _l2bytes([0x00, 0x00])): + encoding = 'utf-16be' + data = data[2:] + elif (len(data) >= 4) and (data[:2] == _l2bytes([0xff, 0xfe])) and (data[2:4] != _l2bytes([0x00, 0x00])): + encoding = 'utf-16le' + data = data[2:] + elif data[:3] == _l2bytes([0xef, 0xbb, 0xbf]): + encoding = 'utf-8' + data = data[3:] + elif data[:4] == _l2bytes([0x00, 0x00, 0xfe, 0xff]): + encoding = 'utf-32be' + data = data[4:] + elif data[:4] == _l2bytes([0xff, 0xfe, 0x00, 0x00]): + encoding = 'utf-32le' + data = data[4:] + newdata = unicode(data, encoding) + declmatch = re.compile('^<\?xml[^>]*?>') + newdecl = '''''' + if declmatch.search(newdata): + newdata = declmatch.sub(newdecl, newdata) + else: + newdata = newdecl + u'\n' + newdata + return newdata.encode('utf-8') + +def _stripDoctype(data): + '''Strips DOCTYPE from XML document, returns (rss_version, stripped_data) + + rss_version may be 'rss091n' or None + stripped_data is the same XML document, minus the DOCTYPE + ''' + start = 
re.search(_s2bytes('<\w'), data) + start = start and start.start() or -1 + head,data = data[:start+1], data[start+1:] + + entity_pattern = re.compile(_s2bytes(r'^\s*]*?)>'), re.MULTILINE) + entity_results=entity_pattern.findall(head) + head = entity_pattern.sub(_s2bytes(''), head) + doctype_pattern = re.compile(_s2bytes(r'^\s*]*?)>'), re.MULTILINE) + doctype_results = doctype_pattern.findall(head) + doctype = doctype_results and doctype_results[0] or _s2bytes('') + if doctype.lower().count(_s2bytes('netscape')): + version = u'rss091n' + else: + version = None + + # only allow in 'safe' inline entity definitions + replacement=_s2bytes('') + if len(doctype_results)==1 and entity_results: + safe_pattern=re.compile(_s2bytes('\s+(\w+)\s+"(&#\w+;|[^&"]*)"')) + safe_entities=filter(lambda e: safe_pattern.match(e),entity_results) + if safe_entities: + replacement=_s2bytes('\n \n]>') + data = doctype_pattern.sub(replacement, head) + data + + return version, data, dict(replacement and [(k.decode('utf-8'), v.decode('utf-8')) for k, v in safe_pattern.findall(replacement)]) + +def parse(url_file_stream_or_string, etag=None, modified=None, agent=None, referrer=None, handlers=None, request_headers=None, response_headers=None): + '''Parse a feed from a URL, file, stream, or string. + + request_headers, if given, is a dict from http header name to value to add + to the request; this overrides internally generated values. 
+ ''' + + if handlers is None: + handlers = [] + if request_headers is None: + request_headers = {} + if response_headers is None: + response_headers = {} + + result = FeedParserDict() + result['feed'] = FeedParserDict() + result['entries'] = [] + result['bozo'] = 0 + if not isinstance(handlers, list): + handlers = [handlers] + try: + f = _open_resource(url_file_stream_or_string, etag, modified, agent, referrer, handlers, request_headers) + data = f.read() + except Exception, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = None + f = None + + if hasattr(f, 'headers'): + result['headers'] = dict(f.headers) + # overwrite existing headers using response_headers + if 'headers' in result: + result['headers'].update(response_headers) + elif response_headers: + result['headers'] = copy.deepcopy(response_headers) + + # if feed is gzip-compressed, decompress it + if f and data and 'headers' in result: + if gzip and 'gzip' in (result['headers'].get('content-encoding'), result['headers'].get('Content-Encoding')): + try: + data = gzip.GzipFile(fileobj=_StringIO(data)).read() + except (IOError, struct.error), e: + # IOError can occur if the gzip header is bad + # struct.error can occur if the data is damaged + # Some feeds claim to be gzipped but they're not, so + # we get garbage. Ideally, we should re-request the + # feed without the 'Accept-encoding: gzip' header, + # but we don't. 
+ result['bozo'] = 1 + result['bozo_exception'] = e + data = None + elif zlib and 'deflate' in (result['headers'].get('content-encoding'), result['headers'].get('Content-Encoding')): + try: + data = zlib.decompress(data) + except zlib.error, e: + result['bozo'] = 1 + result['bozo_exception'] = e + data = None + + # save HTTP headers + if 'headers' in result: + if 'etag' in result['headers'] or 'ETag' in result['headers']: + etag = result['headers'].get('etag', result['headers'].get('ETag', u'')) + if not isinstance(etag, unicode): + etag = etag.decode('utf-8', 'ignore') + if etag: + result['etag'] = etag + if 'last-modified' in result['headers'] or 'Last-Modified' in result['headers']: + modified = result['headers'].get('last-modified', result['headers'].get('Last-Modified')) + if modified: + result['modified'] = _parse_date(modified) + if hasattr(f, 'url'): + if not isinstance(f.url, unicode): + result['href'] = f.url.decode('utf-8', 'ignore') + else: + result['href'] = f.url + result['status'] = 200 + if hasattr(f, 'status'): + result['status'] = f.status + if hasattr(f, 'close'): + f.close() + + if data is None: + return result + + # there are four encodings to keep track of: + # - http_encoding is the encoding declared in the Content-Type HTTP header + # - xml_encoding is the encoding declared in the >> autoretry_datastore_timeouts() + + Should only be called once, subsequent calls have no effect. + + >>> autoretry_datastore_timeouts() # no effect + + Default (5) attempts: .1, .2, .4, .8, 1.6 seconds + + Parameters can each be specified as floats. + + :param attempts: maximum number of times to retry. + :param interval: base seconds to sleep between retries. + :param exponent: rate of exponential back-off. 
+ """ + + import time, logging + from google.appengine.api import apiproxy_stub_map + from google.appengine.runtime import apiproxy_errors + from google.appengine.datastore import datastore_pb + + attempts = float(attempts) + interval = float(interval) + exponent = float(exponent) + wrapped = apiproxy_stub_map.MakeSyncCall + errors = {datastore_pb.Error.TIMEOUT:'Timeout', + datastore_pb.Error.CONCURRENT_TRANSACTION:'TransactionFailedError'} + + def wrapper(*args, **kwargs): + count = 0.0 + while True: + try: + return wrapped(*args, **kwargs) + except apiproxy_errors.ApplicationError, err: + errno = err.application_error + if errno not in errors: raise + sleep = (exponent ** count) * interval + count += 1.0 + if count > attempts: raise + msg = "Datastore %s: retry #%d in %s seconds.\n%s" + vals = '' + if count == 1.0: + vals = '\n'.join([str(a) for a in args]) + logging.warning(msg % (errors[errno], count, sleep, vals)) + time.sleep(sleep) + + setattr(wrapper, '_autoretry_datastore_timeouts', False) + if getattr(wrapped, '_autoretry_datastore_timeouts', True): + apiproxy_stub_map.MakeSyncCall = wrapper + + ADDED gluon/contrib/gateways/__init__.py Index: gluon/contrib/gateways/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/gateways/__init__.py @@ -0,0 +1,2 @@ + + ADDED gluon/contrib/gateways/fcgi.py Index: gluon/contrib/gateways/fcgi.py ================================================================== --- /dev/null +++ gluon/contrib/gateways/fcgi.py @@ -0,0 +1,1332 @@ +# Copyright (c) 2002, 2003, 2005, 2006 Allan Saddi +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# 1. Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# 2. 
Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND +# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE +# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS +# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF +# SUCH DAMAGE. +# +# $Id$ + +""" +fcgi - a FastCGI/WSGI gateway. + +For more information about FastCGI, see . + +For more information about the Web Server Gateway Interface, see +. + +Example usage: + + #!/usr/bin/env python + from myapplication import app # Assume app is your WSGI application object + from fcgi import WSGIServer + WSGIServer(app).run() + +See the documentation for WSGIServer/Server for more information. + +On most platforms, fcgi will fallback to regular CGI behavior if run in a +non-FastCGI context. If you want to force CGI behavior, set the environment +variable FCGI_FORCE_CGI to "Y" or "y". 
+""" + +__author__ = 'Allan Saddi ' +__version__ = '$Revision$' + +import sys +import os +import signal +import struct +import cStringIO as StringIO +import select +import socket +import errno +import traceback + +try: + import thread + import threading + thread_available = True +except ImportError: + import dummy_thread as thread + import dummy_threading as threading + thread_available = False + +# Apparently 2.3 doesn't define SHUT_WR? Assume it is 1 in this case. +if not hasattr(socket, 'SHUT_WR'): + socket.SHUT_WR = 1 + +__all__ = ['WSGIServer'] + +# Constants from the spec. +FCGI_LISTENSOCK_FILENO = 0 + +FCGI_HEADER_LEN = 8 + +FCGI_VERSION_1 = 1 + +FCGI_BEGIN_REQUEST = 1 +FCGI_ABORT_REQUEST = 2 +FCGI_END_REQUEST = 3 +FCGI_PARAMS = 4 +FCGI_STDIN = 5 +FCGI_STDOUT = 6 +FCGI_STDERR = 7 +FCGI_DATA = 8 +FCGI_GET_VALUES = 9 +FCGI_GET_VALUES_RESULT = 10 +FCGI_UNKNOWN_TYPE = 11 +FCGI_MAXTYPE = FCGI_UNKNOWN_TYPE + +FCGI_NULL_REQUEST_ID = 0 + +FCGI_KEEP_CONN = 1 + +FCGI_RESPONDER = 1 +FCGI_AUTHORIZER = 2 +FCGI_FILTER = 3 + +FCGI_REQUEST_COMPLETE = 0 +FCGI_CANT_MPX_CONN = 1 +FCGI_OVERLOADED = 2 +FCGI_UNKNOWN_ROLE = 3 + +FCGI_MAX_CONNS = 'FCGI_MAX_CONNS' +FCGI_MAX_REQS = 'FCGI_MAX_REQS' +FCGI_MPXS_CONNS = 'FCGI_MPXS_CONNS' + +FCGI_Header = '!BBHHBx' +FCGI_BeginRequestBody = '!HB5x' +FCGI_EndRequestBody = '!LB3x' +FCGI_UnknownTypeBody = '!B7x' + +FCGI_EndRequestBody_LEN = struct.calcsize(FCGI_EndRequestBody) +FCGI_UnknownTypeBody_LEN = struct.calcsize(FCGI_UnknownTypeBody) + +if __debug__: + import time + + # Set non-zero to write debug output to a file. + DEBUG = 0 + DEBUGLOG = '/tmp/fcgi.log' + + def _debug(level, msg): + if DEBUG < level: + return + + try: + f = open(DEBUGLOG, 'a') + f.write('%sfcgi: %s\n' % (time.ctime()[4:-4], msg)) + f.close() + except: + pass + +class InputStream(object): + """ + File-like object representing FastCGI input streams (FCGI_STDIN and + FCGI_DATA). Supports the minimum methods required by WSGI spec. 
+ """ + def __init__(self, conn): + self._conn = conn + + # See Server. + self._shrinkThreshold = conn.server.inputStreamShrinkThreshold + + self._buf = '' + self._bufList = [] + self._pos = 0 # Current read position. + self._avail = 0 # Number of bytes currently available. + + self._eof = False # True when server has sent EOF notification. + + def _shrinkBuffer(self): + """Gets rid of already read data (since we can't rewind).""" + if self._pos >= self._shrinkThreshold: + self._buf = self._buf[self._pos:] + self._avail -= self._pos + self._pos = 0 + + assert self._avail >= 0 + + def _waitForData(self): + """Waits for more data to become available.""" + self._conn.process_input() + + def read(self, n=-1): + if self._pos == self._avail and self._eof: + return '' + while True: + if n < 0 or (self._avail - self._pos) < n: + # Not enough data available. + if self._eof: + # And there's no more coming. + newPos = self._avail + break + else: + # Wait for more data. + self._waitForData() + continue + else: + newPos = self._pos + n + break + # Merge buffer list, if necessary. + if self._bufList: + self._buf += ''.join(self._bufList) + self._bufList = [] + r = self._buf[self._pos:newPos] + self._pos = newPos + self._shrinkBuffer() + return r + + def readline(self, length=None): + if self._pos == self._avail and self._eof: + return '' + while True: + # Unfortunately, we need to merge the buffer list early. + if self._bufList: + self._buf += ''.join(self._bufList) + self._bufList = [] + # Find newline. + i = self._buf.find('\n', self._pos) + if i < 0: + # Not found? + if self._eof: + # No more data coming. + newPos = self._avail + break + else: + # Wait for more to come. 
+ self._waitForData() + continue + else: + newPos = i + 1 + break + if length is not None: + if self._pos + length < newPos: + newPos = self._pos + length + r = self._buf[self._pos:newPos] + self._pos = newPos + self._shrinkBuffer() + return r + + def readlines(self, sizehint=0): + total = 0 + lines = [] + line = self.readline() + while line: + lines.append(line) + total += len(line) + if 0 < sizehint <= total: + break + line = self.readline() + return lines + + def __iter__(self): + return self + + def next(self): + r = self.readline() + if not r: + raise StopIteration + return r + + def add_data(self, data): + if not data: + self._eof = True + else: + self._bufList.append(data) + self._avail += len(data) + +class MultiplexedInputStream(InputStream): + """ + A version of InputStream meant to be used with MultiplexedConnections. + Assumes the MultiplexedConnection (the producer) and the Request + (the consumer) are running in different threads. + """ + def __init__(self, conn): + super(MultiplexedInputStream, self).__init__(conn) + + # Arbitrates access to this InputStream (it's used simultaneously + # by a Request and its owning Connection object). + lock = threading.RLock() + + # Notifies Request thread that there is new data available. + self._lock = threading.Condition(lock) + + def _waitForData(self): + # Wait for notification from add_data(). + self._lock.wait() + + def read(self, n=-1): + self._lock.acquire() + try: + return super(MultiplexedInputStream, self).read(n) + finally: + self._lock.release() + + def readline(self, length=None): + self._lock.acquire() + try: + return super(MultiplexedInputStream, self).readline(length) + finally: + self._lock.release() + + def add_data(self, data): + self._lock.acquire() + try: + super(MultiplexedInputStream, self).add_data(data) + self._lock.notify() + finally: + self._lock.release() + +class OutputStream(object): + """ + FastCGI output stream (FCGI_STDOUT/FCGI_STDERR). 
By default, calls to + write() or writelines() immediately result in Records being sent back + to the server. Buffering should be done in a higher level! + """ + def __init__(self, conn, req, type, buffered=False): + self._conn = conn + self._req = req + self._type = type + self._buffered = buffered + self._bufList = [] # Used if buffered is True + self.dataWritten = False + self.closed = False + + def _write(self, data): + length = len(data) + while length: + toWrite = min(length, self._req.server.maxwrite - FCGI_HEADER_LEN) + + rec = Record(self._type, self._req.requestId) + rec.contentLength = toWrite + rec.contentData = data[:toWrite] + self._conn.writeRecord(rec) + + data = data[toWrite:] + length -= toWrite + + def write(self, data): + assert not self.closed + + if not data: + return + + self.dataWritten = True + + if self._buffered: + self._bufList.append(data) + else: + self._write(data) + + def writelines(self, lines): + assert not self.closed + + for line in lines: + self.write(line) + + def flush(self): + # Only need to flush if this OutputStream is actually buffered. + if self._buffered: + data = ''.join(self._bufList) + self._bufList = [] + self._write(data) + + # Though available, the following should NOT be called by WSGI apps. + def close(self): + """Sends end-of-stream notification, if necessary.""" + if not self.closed and self.dataWritten: + self.flush() + rec = Record(self._type, self._req.requestId) + self._conn.writeRecord(rec) + self.closed = True + +class TeeOutputStream(object): + """ + Simple wrapper around two or more output file-like objects that copies + written data to all streams. 
+ """ + def __init__(self, streamList): + self._streamList = streamList + + def write(self, data): + for f in self._streamList: + f.write(data) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def flush(self): + for f in self._streamList: + f.flush() + +class StdoutWrapper(object): + """ + Wrapper for sys.stdout so we know if data has actually been written. + """ + def __init__(self, stdout): + self._file = stdout + self.dataWritten = False + + def write(self, data): + if data: + self.dataWritten = True + self._file.write(data) + + def writelines(self, lines): + for line in lines: + self.write(line) + + def __getattr__(self, name): + return getattr(self._file, name) + +def decode_pair(s, pos=0): + """ + Decodes a name/value pair. + + The number of bytes decoded as well as the name/value pair + are returned. + """ + nameLength = ord(s[pos]) + if nameLength & 128: + nameLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff + pos += 4 + else: + pos += 1 + + valueLength = ord(s[pos]) + if valueLength & 128: + valueLength = struct.unpack('!L', s[pos:pos+4])[0] & 0x7fffffff + pos += 4 + else: + pos += 1 + + name = s[pos:pos+nameLength] + pos += nameLength + value = s[pos:pos+valueLength] + pos += valueLength + + return (pos, (name, value)) + +def encode_pair(name, value): + """ + Encodes a name/value pair. + + The encoded string is returned. + """ + nameLength = len(name) + if nameLength < 128: + s = chr(nameLength) + else: + s = struct.pack('!L', nameLength | 0x80000000L) + + valueLength = len(value) + if valueLength < 128: + s += chr(valueLength) + else: + s += struct.pack('!L', valueLength | 0x80000000L) + + return s + name + value + +class Record(object): + """ + A FastCGI Record. + + Used for encoding/decoding records. 
+ """ + def __init__(self, type=FCGI_UNKNOWN_TYPE, requestId=FCGI_NULL_REQUEST_ID): + self.version = FCGI_VERSION_1 + self.type = type + self.requestId = requestId + self.contentLength = 0 + self.paddingLength = 0 + self.contentData = '' + + def _recvall(sock, length): + """ + Attempts to receive length bytes from a socket, blocking if necessary. + (Socket may be blocking or non-blocking.) + """ + dataList = [] + recvLen = 0 + while length: + try: + data = sock.recv(length) + except socket.error, e: + if e[0] == errno.EAGAIN: + select.select([sock], [], []) + continue + else: + raise + if not data: # EOF + break + dataList.append(data) + dataLen = len(data) + recvLen += dataLen + length -= dataLen + return ''.join(dataList), recvLen + _recvall = staticmethod(_recvall) + + def read(self, sock): + """Read and decode a Record from a socket.""" + try: + header, length = self._recvall(sock, FCGI_HEADER_LEN) + except: + raise EOFError + + if length < FCGI_HEADER_LEN: + raise EOFError + + self.version, self.type, self.requestId, self.contentLength, \ + self.paddingLength = struct.unpack(FCGI_Header, header) + + if __debug__: _debug(9, 'read: fd = %d, type = %d, requestId = %d, ' + 'contentLength = %d' % + (sock.fileno(), self.type, self.requestId, + self.contentLength)) + + if self.contentLength: + try: + self.contentData, length = self._recvall(sock, + self.contentLength) + except: + raise EOFError + + if length < self.contentLength: + raise EOFError + + if self.paddingLength: + try: + self._recvall(sock, self.paddingLength) + except: + raise EOFError + + def _sendall(sock, data): + """ + Writes data to a socket and does not return until all the data is sent. 
+ """ + length = len(data) + while length: + try: + sent = sock.send(data) + except socket.error, e: + if e[0] == errno.EAGAIN: + select.select([], [sock], []) + continue + else: + raise + data = data[sent:] + length -= sent + _sendall = staticmethod(_sendall) + + def write(self, sock): + """Encode and write a Record to a socket.""" + self.paddingLength = -self.contentLength & 7 + + if __debug__: _debug(9, 'write: fd = %d, type = %d, requestId = %d, ' + 'contentLength = %d' % + (sock.fileno(), self.type, self.requestId, + self.contentLength)) + + header = struct.pack(FCGI_Header, self.version, self.type, + self.requestId, self.contentLength, + self.paddingLength) + self._sendall(sock, header) + if self.contentLength: + self._sendall(sock, self.contentData) + if self.paddingLength: + self._sendall(sock, '\x00'*self.paddingLength) + +class Request(object): + """ + Represents a single FastCGI request. + + These objects are passed to your handler and is the main interface + between your handler and the fcgi module. The methods should not + be called by your handler. However, server, params, stdin, stdout, + stderr, and data are free for your handler's use. 
+ """ + def __init__(self, conn, inputStreamClass): + self._conn = conn + + self.server = conn.server + self.params = {} + self.stdin = inputStreamClass(conn) + self.stdout = OutputStream(conn, self, FCGI_STDOUT) + self.stderr = OutputStream(conn, self, FCGI_STDERR, buffered=True) + self.data = inputStreamClass(conn) + + def run(self): + """Runs the handler, flushes the streams, and ends the request.""" + try: + protocolStatus, appStatus = self.server.handler(self) + except: + traceback.print_exc(file=self.stderr) + self.stderr.flush() + if not self.stdout.dataWritten: + self.server.error(self) + + protocolStatus, appStatus = FCGI_REQUEST_COMPLETE, 0 + + if __debug__: _debug(1, 'protocolStatus = %d, appStatus = %d' % + (protocolStatus, appStatus)) + + self._flush() + self._end(appStatus, protocolStatus) + + def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): + self._conn.end_request(self, appStatus, protocolStatus) + + def _flush(self): + self.stdout.close() + self.stderr.close() + +class CGIRequest(Request): + """A normal CGI request disguised as a FastCGI request.""" + def __init__(self, server): + # These are normally filled in by Connection. + self.requestId = 1 + self.role = FCGI_RESPONDER + self.flags = 0 + self.aborted = False + + self.server = server + self.params = dict(os.environ) + self.stdin = sys.stdin + self.stdout = StdoutWrapper(sys.stdout) # Oh, the humanity! + self.stderr = sys.stderr + self.data = StringIO.StringIO() + + def _end(self, appStatus=0L, protocolStatus=FCGI_REQUEST_COMPLETE): + sys.exit(appStatus) + + def _flush(self): + # Not buffered, do nothing. + pass + +class Connection(object): + """ + A Connection with the web server. + + Each Connection is associated with a single socket (which is + connected to the web server) and is responsible for handling all + the FastCGI message processing for that socket. 
+ """ + _multiplexed = False + _inputStreamClass = InputStream + + def __init__(self, sock, addr, server): + self._sock = sock + self._addr = addr + self.server = server + + # Active Requests for this Connection, mapped by request ID. + self._requests = {} + + def _cleanupSocket(self): + """Close the Connection's socket.""" + try: + self._sock.shutdown(socket.SHUT_WR) + except: + return + try: + while True: + r, w, e = select.select([self._sock], [], []) + if not r or not self._sock.recv(1024): + break + except: + pass + self._sock.close() + + def run(self): + """Begin processing data from the socket.""" + self._keepGoing = True + while self._keepGoing: + try: + self.process_input() + except EOFError: + break + except (select.error, socket.error), e: + if e[0] == errno.EBADF: # Socket was closed by Request. + break + raise + + self._cleanupSocket() + + def process_input(self): + """Attempt to read a single Record from the socket and process it.""" + # Currently, any children Request threads notify this Connection + # that it is no longer needed by closing the Connection's socket. + # We need to put a timeout on select, otherwise we might get + # stuck in it indefinitely... (I don't like this solution.) + while self._keepGoing: + try: + r, w, e = select.select([self._sock], [], [], 1.0) + except ValueError: + # Sigh. ValueError gets thrown sometimes when passing select + # a closed socket. + raise EOFError + if r: break + if not self._keepGoing: + return + rec = Record() + rec.read(self._sock) + + if rec.type == FCGI_GET_VALUES: + self._do_get_values(rec) + elif rec.type == FCGI_BEGIN_REQUEST: + self._do_begin_request(rec) + elif rec.type == FCGI_ABORT_REQUEST: + self._do_abort_request(rec) + elif rec.type == FCGI_PARAMS: + self._do_params(rec) + elif rec.type == FCGI_STDIN: + self._do_stdin(rec) + elif rec.type == FCGI_DATA: + self._do_data(rec) + elif rec.requestId == FCGI_NULL_REQUEST_ID: + self._do_unknown_type(rec) + else: + # Need to complain about this. 
+ pass + + def writeRecord(self, rec): + """ + Write a Record to the socket. + """ + rec.write(self._sock) + + def end_request(self, req, appStatus=0L, + protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): + """ + End a Request. + + Called by Request objects. An FCGI_END_REQUEST Record is + sent to the web server. If the web server no longer requires + the connection, the socket is closed, thereby ending this + Connection (run() returns). + """ + rec = Record(FCGI_END_REQUEST, req.requestId) + rec.contentData = struct.pack(FCGI_EndRequestBody, appStatus, + protocolStatus) + rec.contentLength = FCGI_EndRequestBody_LEN + self.writeRecord(rec) + + if remove: + del self._requests[req.requestId] + + if __debug__: _debug(2, 'end_request: flags = %d' % req.flags) + + if not (req.flags & FCGI_KEEP_CONN) and not self._requests: + self._cleanupSocket() + self._keepGoing = False + + def _do_get_values(self, inrec): + """Handle an FCGI_GET_VALUES request from the web server.""" + outrec = Record(FCGI_GET_VALUES_RESULT) + + pos = 0 + while pos < inrec.contentLength: + pos, (name, value) = decode_pair(inrec.contentData, pos) + cap = self.server.capability.get(name) + if cap is not None: + outrec.contentData += encode_pair(name, str(cap)) + + outrec.contentLength = len(outrec.contentData) + self.writeRecord(outrec) + + def _do_begin_request(self, inrec): + """Handle an FCGI_BEGIN_REQUEST from the web server.""" + role, flags = struct.unpack(FCGI_BeginRequestBody, inrec.contentData) + + req = self.server.request_class(self, self._inputStreamClass) + req.requestId, req.role, req.flags = inrec.requestId, role, flags + req.aborted = False + + if not self._multiplexed and self._requests: + # Can't multiplex requests. + self.end_request(req, 0L, FCGI_CANT_MPX_CONN, remove=False) + else: + self._requests[inrec.requestId] = req + + def _do_abort_request(self, inrec): + """ + Handle an FCGI_ABORT_REQUEST from the web server. + + We just mark a flag in the associated Request. 
+ """ + req = self._requests.get(inrec.requestId) + if req is not None: + req.aborted = True + + def _start_request(self, req): + """Run the request.""" + # Not multiplexed, so run it inline. + req.run() + + def _do_params(self, inrec): + """ + Handle an FCGI_PARAMS Record. + + If the last FCGI_PARAMS Record is received, start the request. + """ + req = self._requests.get(inrec.requestId) + if req is not None: + if inrec.contentLength: + pos = 0 + while pos < inrec.contentLength: + pos, (name, value) = decode_pair(inrec.contentData, pos) + req.params[name] = value + else: + self._start_request(req) + + def _do_stdin(self, inrec): + """Handle the FCGI_STDIN stream.""" + req = self._requests.get(inrec.requestId) + if req is not None: + req.stdin.add_data(inrec.contentData) + + def _do_data(self, inrec): + """Handle the FCGI_DATA stream.""" + req = self._requests.get(inrec.requestId) + if req is not None: + req.data.add_data(inrec.contentData) + + def _do_unknown_type(self, inrec): + """Handle an unknown request type. Respond accordingly.""" + outrec = Record(FCGI_UNKNOWN_TYPE) + outrec.contentData = struct.pack(FCGI_UnknownTypeBody, inrec.type) + outrec.contentLength = FCGI_UnknownTypeBody_LEN + self.writeRecord(rec) + +class MultiplexedConnection(Connection): + """ + A version of Connection capable of handling multiple requests + simultaneously. + """ + _multiplexed = True + _inputStreamClass = MultiplexedInputStream + + def __init__(self, sock, addr, server): + super(MultiplexedConnection, self).__init__(sock, addr, server) + + # Used to arbitrate access to self._requests. + lock = threading.RLock() + + # Notification is posted everytime a request completes, allowing us + # to quit cleanly. + self._lock = threading.Condition(lock) + + def _cleanupSocket(self): + # Wait for any outstanding requests before closing the socket. 
+ self._lock.acquire() + while self._requests: + self._lock.wait() + self._lock.release() + + super(MultiplexedConnection, self)._cleanupSocket() + + def writeRecord(self, rec): + # Must use locking to prevent intermingling of Records from different + # threads. + self._lock.acquire() + try: + # Probably faster than calling super. ;) + rec.write(self._sock) + finally: + self._lock.release() + + def end_request(self, req, appStatus=0L, + protocolStatus=FCGI_REQUEST_COMPLETE, remove=True): + self._lock.acquire() + try: + super(MultiplexedConnection, self).end_request(req, appStatus, + protocolStatus, + remove) + self._lock.notify() + finally: + self._lock.release() + + def _do_begin_request(self, inrec): + self._lock.acquire() + try: + super(MultiplexedConnection, self)._do_begin_request(inrec) + finally: + self._lock.release() + + def _do_abort_request(self, inrec): + self._lock.acquire() + try: + super(MultiplexedConnection, self)._do_abort_request(inrec) + finally: + self._lock.release() + + def _start_request(self, req): + thread.start_new_thread(req.run, ()) + + def _do_params(self, inrec): + self._lock.acquire() + try: + super(MultiplexedConnection, self)._do_params(inrec) + finally: + self._lock.release() + + def _do_stdin(self, inrec): + self._lock.acquire() + try: + super(MultiplexedConnection, self)._do_stdin(inrec) + finally: + self._lock.release() + + def _do_data(self, inrec): + self._lock.acquire() + try: + super(MultiplexedConnection, self)._do_data(inrec) + finally: + self._lock.release() + +class Server(object): + """ + The FastCGI server. + + Waits for connections from the web server, processing each + request. + + If run in a normal CGI context, it will instead instantiate a + CGIRequest and run the handler through there. + """ + request_class = Request + cgirequest_class = CGIRequest + + # Limits the size of the InputStream's string buffer to this size + the + # server's maximum Record size. 
Since the InputStream is not seekable, + # we throw away already-read data once this certain amount has been read. + inputStreamShrinkThreshold = 102400 - 8192 + + def __init__(self, handler=None, maxwrite=8192, bindAddress=None, + umask=None, multiplexed=False): + """ + handler, if present, must reference a function or method that + takes one argument: a Request object. If handler is not + specified at creation time, Server *must* be subclassed. + (The handler method below is abstract.) + + maxwrite is the maximum number of bytes (per Record) to write + to the server. I've noticed mod_fastcgi has a relatively small + receive buffer (8K or so). + + bindAddress, if present, must either be a string or a 2-tuple. If + present, run() will open its own listening socket. You would use + this if you wanted to run your application as an 'external' FastCGI + app. (i.e. the webserver would no longer be responsible for starting + your app) If a string, it will be interpreted as a filename and a UNIX + socket will be opened. If a tuple, the first element, a string, + is the interface name/IP to bind to, and the second element (an int) + is the port number. + + Set multiplexed to True if you want to handle multiple requests + per connection. Some FastCGI backends (namely mod_fastcgi) don't + multiplex requests at all, so by default this is off (which saves + on thread creation/locking overhead). If threads aren't available, + this keyword is ignored; it's not possible to multiplex requests + at all. + """ + if handler is not None: + self.handler = handler + self.maxwrite = maxwrite + if thread_available: + try: + import resource + # Attempt to glean the maximum number of connections + # from the OS. + maxConns = resource.getrlimit(resource.RLIMIT_NOFILE)[0] + except ImportError: + maxConns = 100 # Just some made up number. + maxReqs = maxConns + if multiplexed: + self._connectionClass = MultiplexedConnection + maxReqs *= 5 # Another made up number. 
+ else: + self._connectionClass = Connection + self.capability = { + FCGI_MAX_CONNS: maxConns, + FCGI_MAX_REQS: maxReqs, + FCGI_MPXS_CONNS: multiplexed and 1 or 0 + } + else: + self._connectionClass = Connection + self.capability = { + # If threads aren't available, these are pretty much correct. + FCGI_MAX_CONNS: 1, + FCGI_MAX_REQS: 1, + FCGI_MPXS_CONNS: 0 + } + self._bindAddress = bindAddress + self._umask = umask + + def _setupSocket(self): + if self._bindAddress is None: # Run as a normal FastCGI? + isFCGI = True + + sock = socket.fromfd(FCGI_LISTENSOCK_FILENO, socket.AF_INET, + socket.SOCK_STREAM) + try: + sock.getpeername() + except socket.error, e: + if e[0] == errno.ENOTSOCK: + # Not a socket, assume CGI context. + isFCGI = False + elif e[0] != errno.ENOTCONN: + raise + + # FastCGI/CGI discrimination is broken on Mac OS X. + # Set the environment variable FCGI_FORCE_CGI to "Y" or "y" + # if you want to run your app as a simple CGI. (You can do + # this with Apache's mod_env [not loaded by default in OS X + # client, ha ha] and the SetEnv directive.) 
+ if not isFCGI or \ + os.environ.get('FCGI_FORCE_CGI', 'N').upper().startswith('Y'): + req = self.cgirequest_class(self) + req.run() + sys.exit(0) + else: + # Run as a server + oldUmask = None + if type(self._bindAddress) is str: + # Unix socket + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + try: + os.unlink(self._bindAddress) + except OSError: + pass + if self._umask is not None: + oldUmask = os.umask(self._umask) + else: + # INET socket + assert type(self._bindAddress) is tuple + assert len(self._bindAddress) == 2 + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + + sock.bind(self._bindAddress) + sock.listen(socket.SOMAXCONN) + + if oldUmask is not None: + os.umask(oldUmask) + + return sock + + def _cleanupSocket(self, sock): + """Closes the main socket.""" + sock.close() + + def _installSignalHandlers(self): + self._oldSIGs = [(x,signal.getsignal(x)) for x in + (signal.SIGHUP, signal.SIGINT, signal.SIGTERM)] + signal.signal(signal.SIGHUP, self._hupHandler) + signal.signal(signal.SIGINT, self._intHandler) + signal.signal(signal.SIGTERM, self._intHandler) + + def _restoreSignalHandlers(self): + for signum,handler in self._oldSIGs: + signal.signal(signum, handler) + + def _hupHandler(self, signum, frame): + self._hupReceived = True + self._keepGoing = False + + def _intHandler(self, signum, frame): + self._keepGoing = False + + def run(self, timeout=1.0): + """ + The main loop. Exits on SIGHUP, SIGINT, SIGTERM. Returns True if + SIGHUP was received, False otherwise. + """ + web_server_addrs = os.environ.get('FCGI_WEB_SERVER_ADDRS') + if web_server_addrs is not None: + web_server_addrs = map(lambda x: x.strip(), + web_server_addrs.split(',')) + + sock = self._setupSocket() + + self._keepGoing = True + self._hupReceived = False + + # Install signal handlers. 
+ self._installSignalHandlers() + + while self._keepGoing: + try: + r, w, e = select.select([sock], [], [], timeout) + except select.error, e: + if e[0] == errno.EINTR: + continue + raise + + if r: + try: + clientSock, addr = sock.accept() + except socket.error, e: + if e[0] in (errno.EINTR, errno.EAGAIN): + continue + raise + + if web_server_addrs and \ + (len(addr) != 2 or addr[0] not in web_server_addrs): + clientSock.close() + continue + + # Instantiate a new Connection and begin processing FastCGI + # messages (either in a new thread or this thread). + conn = self._connectionClass(clientSock, addr, self) + thread.start_new_thread(conn.run, ()) + + self._mainloopPeriodic() + + # Restore signal handlers. + self._restoreSignalHandlers() + + self._cleanupSocket(sock) + + return self._hupReceived + + def _mainloopPeriodic(self): + """ + Called with just about each iteration of the main loop. Meant to + be overridden. + """ + pass + + def _exit(self, reload=False): + """ + Protected convenience method for subclasses to force an exit. Not + really thread-safe, which is why it isn't public. + """ + if self._keepGoing: + self._keepGoing = False + self._hupReceived = reload + + def handler(self, req): + """ + Default handler, which just raises an exception. Unless a handler + is passed at initialization time, this must be implemented by + a subclass. + """ + raise NotImplementedError, self.__class__.__name__ + '.handler' + + def error(self, req): + """ + Called by Request if an exception occurs within the handler. May and + should be overridden. + """ + import cgitb + req.stdout.write('Content-Type: text/html\r\n\r\n' + + cgitb.html(sys.exc_info())) + +class WSGIServer(Server): + """ + FastCGI server that supports the Web Server Gateway Interface. See + . + """ + def __init__(self, application, environ=None, + multithreaded=True, **kw): + """ + environ, if present, must be a dictionary-like object. Its + contents will be copied into application's environ. 
Useful + for passing application-specific variables. + + Set multithreaded to False if your application is not MT-safe. + """ + if kw.has_key('handler'): + del kw['handler'] # Doesn't make sense to let this through + super(WSGIServer, self).__init__(**kw) + + if environ is None: + environ = {} + + self.application = application + self.environ = environ + self.multithreaded = multithreaded + + # Used to force single-threadedness + self._app_lock = thread.allocate_lock() + + def handler(self, req): + """Special handler for WSGI.""" + if req.role != FCGI_RESPONDER: + return FCGI_UNKNOWN_ROLE, 0 + + # Mostly taken from example CGI gateway. + environ = req.params + environ.update(self.environ) + + environ['wsgi.version'] = (1,0) + environ['wsgi.input'] = req.stdin + if self._bindAddress is None: + stderr = req.stderr + else: + stderr = TeeOutputStream((sys.stderr, req.stderr)) + environ['wsgi.errors'] = stderr + environ['wsgi.multithread'] = not isinstance(req, CGIRequest) and \ + thread_available and self.multithreaded + # Rationale for the following: If started by the web server + # (self._bindAddress is None) in either FastCGI or CGI mode, the + # possibility of being spawned multiple times simultaneously is quite + # real. And, if started as an external server, multiple copies may be + # spawned for load-balancing/redundancy. (Though I don't think + # mod_fastcgi supports this?) 
+ environ['wsgi.multiprocess'] = True + environ['wsgi.run_once'] = isinstance(req, CGIRequest) + + if environ.get('HTTPS', 'off') in ('on', '1'): + environ['wsgi.url_scheme'] = 'https' + else: + environ['wsgi.url_scheme'] = 'http' + + self._sanitizeEnv(environ) + + headers_set = [] + headers_sent = [] + result = None + + def write(data): + assert type(data) is str, 'write() argument must be string' + assert headers_set, 'write() before start_response()' + + if not headers_sent: + status, responseHeaders = headers_sent[:] = headers_set + found = False + for header,value in responseHeaders: + if header.lower() == 'content-length': + found = True + break + if not found and result is not None: + try: + if len(result) == 1: + responseHeaders.append(('Content-Length', + str(len(data)))) + except: + pass + s = 'Status: %s\r\n' % status + for header in responseHeaders: + s += '%s: %s\r\n' % header + s += '\r\n' + req.stdout.write(s) + + req.stdout.write(data) + req.stdout.flush() + + def start_response(status, response_headers, exc_info=None): + if exc_info: + try: + if headers_sent: + # Re-raise if too late + raise exc_info[0], exc_info[1], exc_info[2] + finally: + exc_info = None # avoid dangling circular ref + else: + assert not headers_set, 'Headers already set!' 
+ + assert type(status) is str, 'Status must be a string' + assert len(status) >= 4, 'Status must be at least 4 characters' + assert int(status[:3]), 'Status must begin with 3-digit code' + assert status[3] == ' ', 'Status must have a space after code' + assert type(response_headers) is list, 'Headers must be a list' + if __debug__: + for name,val in response_headers: + assert type(name) is str, 'Header names must be strings' + assert type(val) is str, 'Header values must be strings' + + headers_set[:] = [status, response_headers] + return write + + if not self.multithreaded: + self._app_lock.acquire() + try: + try: + result = self.application(environ, start_response) + try: + for data in result: + if data: + write(data) + if not headers_sent: + write('') # in case body was empty + finally: + if hasattr(result, 'close'): + result.close() + except socket.error, e: + if e[0] != errno.EPIPE: + raise # Don't let EPIPE propagate beyond server + finally: + if not self.multithreaded: + self._app_lock.release() + + return FCGI_REQUEST_COMPLETE, 0 + + def _sanitizeEnv(self, environ): + """Ensure certain values are present, if required by WSGI.""" + if not environ.has_key('SCRIPT_NAME'): + environ['SCRIPT_NAME'] = '' + if not environ.has_key('PATH_INFO'): + environ['PATH_INFO'] = '' + + # If any of these are missing, it probably signifies a broken + # server... + for name,default in [('REQUEST_METHOD', 'GET'), + ('SERVER_NAME', 'localhost'), + ('SERVER_PORT', '80'), + ('SERVER_PROTOCOL', 'HTTP/1.0')]: + if not environ.has_key(name): + environ['wsgi.errors'].write('%s: missing FastCGI param %s ' + 'required by WSGI!\n' % + (self.__class__.__name__, name)) + environ[name] = default + +if __name__ == '__main__': + def test_app(environ, start_response): + """Probably not the most efficient example.""" + import cgi + start_response('200 OK', [('Content-Type', 'text/html')]) + yield 'Hello World!\n' \ + '\n' \ + '

    Hello World!

    \n' \ + '

    ' + names = environ.keys() + names.sort() + for name in names: + yield '\n' % ( + name, cgi.escape(`environ[name]`)) + + form = cgi.FieldStorage(fp=environ['wsgi.input'], environ=environ, + keep_blank_values=1) + if form.list: + yield '' + + for field in form.list: + yield '\n' % ( + field.name, field.value) + + yield '
    %s%s
    Form data
    %s%s
    \n' \ + '\n' + + WSGIServer(test_app).run() + ADDED gluon/contrib/generics.py Index: gluon/contrib/generics.py ================================================================== --- /dev/null +++ gluon/contrib/generics.py @@ -0,0 +1,65 @@ +# fix response + +import re +import os +import cPickle +import gluon.serializers +from gluon import current, HTTP +from gluon.html import markmin_serializer, TAG, HTML, BODY, H1, UL, XML +from gluon.contenttype import contenttype +from gluon.contrib.pyfpdf import FPDF, HTMLMixin +from gluon.sanitizer import sanitize +from gluon.contrib.markmin.markmin2latex import markmin2latex +from gluon.contrib.markmin.markmin2pdf import markmin2pdf + +def wrapper(f): + def g(data): + try: + output = f(data) + except (TypeError, ValueError): + raise HTTP(405, '%s serialization error' % extension.upper()) + except ImportError: + raise HTTP(405, '%s not available' % extension.upper()) + except: + raise HTTP(405, '%s error' % extension.upper()) + return XML(output) + return g + +def latex_from_html(html): + markmin=TAG(html).element('body').flatten(markmin_serializer) + return XML(markmin2latex(markmin)) + +def pdflatex_from_html(html): + if os.system('which pdflatex > /dev/null')==0: + markmin=TAG(html).element('body').flatten(markmin_serializer) + out,warnings,errors=markmin2pdf(markmin) + if errors: + current.response.headers['Content-Type']='text/html' + raise HTTP(405,HTML(BODY(H1('errors'), + UL(*errors), + H1('warnings'), + UL(*warnings))).xml()) + else: + return XML(out) + +def pyfpdf_from_html(html): + request = current.request + def image_map(path): + if path.startswith('/%s/static/' % request.application): + return os.path.join(request.folder,path.split('/',2)[2]) + return 'http%s://%s%s' % (request.is_https and 's' or '',request.env.http_host, path) + class MyFPDF(FPDF, HTMLMixin): pass + pdf=MyFPDF() + pdf.add_page() + html = sanitize(html, escape=False) #### should have better list of allowed tags + pdf.write_html(html,image_map=image_map) 
+ return XML(pdf.output(dest='S')) + +def pdf_from_html(html): + # try use latex and pdflatex + if os.system('which pdflatex > /dev/null')==0: + return pdflatex_from_html(html) + else: + return pyfpdf_from_html(html) + + ADDED gluon/contrib/gql.py Index: gluon/contrib/gql.py ================================================================== --- /dev/null +++ gluon/contrib/gql.py @@ -0,0 +1,7 @@ +# this file exists for backward compatibility + +__all__ = ['DAL','Field','drivers','gae'] + +from gluon.dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, drivers, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType, gae + + ADDED gluon/contrib/login_methods/__init__.py Index: gluon/contrib/login_methods/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/__init__.py @@ -0,0 +1,2 @@ + + ADDED gluon/contrib/login_methods/basic_auth.py Index: gluon/contrib/login_methods/basic_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/basic_auth.py @@ -0,0 +1,25 @@ +import urllib +import urllib2 +import base64 + + +def basic_auth(server="http://127.0.0.1"): + """ + to use basic login with a different server + from gluon.contrib.login_methods.basic_auth import basic_auth + auth.settings.login_methods.append(basic_auth('http://server')) + """ + + def basic_login_aux(username, + password, + server=server): + key = base64.b64encode(username+':'+password) + headers = {'Authorization': 'Basic ' + key} + request = urllib2.Request(server, None, headers) + try: + urllib2.urlopen(request) + return True + except (urllib2.URLError, urllib2.HTTPError): + return False + return basic_login_aux + ADDED gluon/contrib/login_methods/cas_auth.py Index: gluon/contrib/login_methods/cas_auth.py ================================================================== --- /dev/null +++ 
gluon/contrib/login_methods/cas_auth.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of web2py Web Framework (Copyrighted, 2007-2009). +Developed by Massimo Di Pierro . +License: GPL v2 + +Tinkered by Szabolcs Gyuris < szimszo n @ o regpreshaz dot eu> +""" + +from gluon import current, redirect + +class CasAuth( object ): + """ + Login will be done via Web2py's CAS application, instead of web2py's + login form. + + Include in your model (eg db.py):: + + from gluon.contrib.login_methods.cas_auth import CasAuth + auth.define_tables(username=True) + auth.settings.login_form=CasAuth( + urlbase = "https://[your CAS provider]/app/default/user/cas", + actions=['login','validate','logout']) + + where urlbase is the actual CAS server url without the login,logout... + Enjoy. + + ###UPDATE### + if you want to connect to a CAS version 2 JASIG Server use this: + auth.settings.login_form=CasAuth( + urlbase = "https://[Your CAS server]/cas", + actions = ['login','serviceValidate','logout'], + casversion = 2, + casusername = "cas:user") + + where casusername is the xml node returned by CAS server which contains + user's username. 
+ + """ + def __init__(self, g=None, ### g for backward compatibility ### + urlbase = "https://web2py.com/cas/cas", + actions=['login','check','logout'], + maps=dict(username=lambda v:v.get('username',v['user']), + email=lambda v:v.get('email',None), + user_id=lambda v:v['user']), + casversion = 1, + casusername = 'cas:user' + ): + self.urlbase=urlbase + self.cas_login_url="%s/%s"%(self.urlbase,actions[0]) + self.cas_check_url="%s/%s"%(self.urlbase,actions[1]) + self.cas_logout_url="%s/%s"%(self.urlbase,actions[2]) + self.maps=maps + self.casversion = casversion + self.casusername = casusername + http_host=current.request.env.http_x_forwarded_host + if not http_host: http_host=current.request.env.http_host + if current.request.env.wsgi_url_scheme in [ 'https', 'HTTPS' ]: + scheme = 'https' + else: + scheme = 'http' + self.cas_my_url='%s://%s%s'%( scheme, http_host, current.request.env.path_info ) + + def login_url( self, next = "/" ): + current.session.token=self._CAS_login() + return next + def logout_url( self, next = "/" ): + current.session.token=None + current.session.auth=None + self._CAS_logout() + return next + def get_user( self ): + user=current.session.token + if user: + d = {'source':'web2py cas'} + for key in self.maps: + d[key]=self.maps[key](user) + return d + return None + def _CAS_login( self ): + """ + exposed as CAS.login(request) + returns a token on success, None on failed authentication + """ + import urllib + self.ticket=current.request.vars.ticket + if not current.request.vars.ticket: + redirect( "%s?service=%s"% (self.cas_login_url, + self.cas_my_url)) + else: + url="%s?service=%s&ticket=%s" % (self.cas_check_url, + self.cas_my_url, + self.ticket ) + data=urllib.urlopen( url ).read() + if data.startswith('yes') or data.startswith('no'): + data = data.split('\n') + if data[0]=='yes': + a,b,c = data[1].split( ':' )+[None,None] + return dict(user=a,email=b,username=c) + return None + import xml.dom.minidom as dom + import xml.parsers.expat as 
expat + try: + dxml=dom.parseString(data) + envelop = dxml.getElementsByTagName("cas:authenticationSuccess") + if len(envelop)>0: + res = dict() + for x in envelop[0].childNodes: + if x.nodeName.startswith('cas:') and len(x.childNodes): + key = x.nodeName[4:].encode('utf8') + value = x.childNodes[0].nodeValue.encode('utf8') + if not key in res: + res[key]=value + else: + if not isinstance(res[key],list): + res[key]=[res[key]] + res[key].append(value) + return res + except expat.ExpatError: pass + return None # fallback + + + def _CAS_logout( self ): + """ + exposed CAS.logout() + redirects to the CAS logout page + """ + import urllib + redirect("%s?service=%s" % (self.cas_logout_url,self.cas_my_url)) + ADDED gluon/contrib/login_methods/email_auth.py Index: gluon/contrib/login_methods/email_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/email_auth.py @@ -0,0 +1,37 @@ +import smtplib + + +def email_auth(server="smtp.gmail.com:587", + domain="@gmail.com"): + """ + to use email_login: + from gluon.contrib.login_methods.email_auth import email_auth + auth.settings.login_methods.append(email_auth("smtp.gmail.com:587", + "@gmail.com")) + """ + + def email_auth_aux(email, + password, + server=server, + domain=domain): + if domain: + if not isinstance(domain,(list,tuple)): + domain=[str(domain)] + if not [d for d in domain if email[-len(d):]==d]: + return False + (host, port) = server.split(':') + try: + server = None + server = smtplib.SMTP(host, port) + server.ehlo() + server.starttls() + server.ehlo() + server.login(email, password) + server.quit() + return True + except: + if server: + server.quit() + return False + return email_auth_aux + ADDED gluon/contrib/login_methods/extended_login_form.py Index: gluon/contrib/login_methods/extended_login_form.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/extended_login_form.py @@ -0,0 
+1,103 @@ +#!/usr/bin/env python +# coding: utf8 + +""" +ExtendedLoginForm is used to extend normal login form in web2py with one more login method. +So user can choose the built-in login or extended login methods. +""" + +from gluon.html import DIV + +class ExtendedLoginForm(object): + """ + Put extended_login_form under web2py/gluon/contrib/login_methods folder. + Then inside your model where defines the auth: + + auth = Auth(globals(),db) # authentication/authorization + ... + auth.define_tables() # You might like to put the code after auth.define_tables + ... # if the alt_login_form deals with tables of auth. + + alt_login_form = RPXAccount(request, + api_key="...", + domain="...", + url = "http://localhost:8000/%s/default/user/login" % request.application) + extended_login_form = ExtendedLoginForm(auth, alt_login_form, signals=['token']) + + auth.settings.login_form = extended_login_form + + Note: + Since rpx_account doesn't create the password for the user, you + might need to provide a way for user to create password to do + normal login. + + """ + + def __init__(self, + auth, + alt_login_form, + signals=[], + login_arg = 'login' + ): + self.auth = auth + self.alt_login_form = alt_login_form + self.signals = signals + self.login_arg = login_arg + + def get_user(self): + """ + Delegate the get_user to alt_login_form.get_user. + """ + if hasattr(self.alt_login_form, 'get_user'): + return self.alt_login_form.get_user() + return None # let gluon.tools.Auth.get_or_create_user do the rest + + def login_url(self, next): + """ + Optional implement for alt_login_form. + + In normal case, this should be replaced by get_user, and never get called. + """ + if hasattr(self.alt_login_form, 'login_url'): + return self.alt_login_form.login_url(next) + return self.auth.settings.login_url + + def logout_url(self, next): + """ + Optional implement for alt_login_form. + + Called if bool(alt_login_form.get_user) is True. 
+ + If alt_login_form implemented logout_url function, it will return that function call. + """ + if hasattr(self.alt_login_form, 'logout_url'): + return self.alt_login_form.logout_url(next) + return next + + def login_form(self): + """ + Combine the auth() form with alt_login_form. + + If signals are set and a parameter in request matches any signals, + it will return the call of alt_login_form.login_form instead. + So alt_login_form can handle some particular situations, for example, + multiple steps of OpenID login inside alt_login_form.login_form. + + Otherwise it will render the normal login form combined with + alt_login_form.login_form. + """ + request = self.auth.environment.request + args = request.args + + if (self.signals and + any([True for signal in self.signals if request.vars.has_key(signal)]) + ): + return self.alt_login_form.login_form() + + self.auth.settings.login_form = self.auth + form = DIV(self.auth()) + self.auth.settings.login_form = self + + form.components.append(self.alt_login_form.login_form()) + return form + ADDED gluon/contrib/login_methods/gae_google_account.py Index: gluon/contrib/login_methods/gae_google_account.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/gae_google_account.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of web2py Web Framework (Copyrighted, 2007-2009). +Developed by Massimo Di Pierro . +License: GPL v2 + +Thanks to Hans Donner for GaeGoogleAccount. +""" + +from google.appengine.api import users + +class GaeGoogleAccount(object): + """ + Login will be done via Google's Appengine login object, instead of web2py's + login form. 
+ + Include in your model (eg db.py):: + + from gluon.contrib.login_methods.gae_google_account import \ + GaeGoogleAccount + auth.settings.login_form=GaeGoogleAccount() + + """ + + def login_url(self, next="/"): + return users.create_login_url(next) + + def logout_url(self, next="/"): + return users.create_logout_url(next) + + def get_user(self): + user = users.get_current_user() + if user: + return dict(nickname=user.nickname(), email=user.email(), + user_id=user.user_id(), source="google account") + ADDED gluon/contrib/login_methods/ldap_auth.py Index: gluon/contrib/login_methods/ldap_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/ldap_auth.py @@ -0,0 +1,171 @@ +import sys +import logging +try: + import ldap + ldap.set_option(ldap.OPT_REFERRALS, 0) +except Exception, e: + logging.error('missing ldap, try "easy_install python-ldap"') + raise e + + +def ldap_auth(server='ldap', port=None, + base_dn='ou=users,dc=domain,dc=com', + mode='uid', secure=False, cert_path=None, bind_dn=None, bind_pw=None, filterstr='objectClass=*'): + """ + to use ldap login with MS Active Directory:: + + from gluon.contrib.login_methods.ldap_auth import ldap_auth + auth.settings.login_methods.append(ldap_auth( + mode='ad', server='my.domain.controller', + base_dn='ou=Users,dc=domain,dc=com')) + + to use ldap login with Notes Domino:: + + auth.settings.login_methods.append(ldap_auth( + mode='domino',server='my.domino.server')) + + to use ldap login with OpenLDAP:: + + auth.settings.login_methods.append(ldap_auth( + server='my.ldap.server', base_dn='ou=Users,dc=domain,dc=com')) + + to use ldap login with OpenLDAP and subtree search and (optionally) multiple DNs: + + auth.settings.login_methods.append(ldap_auth( + mode='uid_r', server='my.ldap.server', + base_dn=['ou=Users,dc=domain,dc=com','ou=Staff,dc=domain,dc=com'])) + + or (if using CN):: + + auth.settings.login_methods.append(ldap_auth( + mode='cn', 
server='my.ldap.server', + base_dn='ou=Users,dc=domain,dc=com')) + + If using secure ldaps:// pass secure=True and cert_path="..." + + If you need to bind to the directory with an admin account in order to search it then specify bind_dn & bind_pw to use for this. + - currently only implemented for Active Directory + + If you need to restrict the set of allowed users (e.g. to members of a department) then specify + a rfc4515 search filter string. + - currently only implemented for mode in ['ad', 'company', 'uid_r'] + """ + + def ldap_auth_aux(username, + password, + ldap_server=server, + ldap_port=port, + ldap_basedn=base_dn, + ldap_mode=mode, + ldap_binddn=bind_dn, + ldap_bindpw=bind_pw, + secure=secure, + cert_path=cert_path, + filterstr=filterstr): + try: + if secure: + if not ldap_port: + ldap_port = 636 + con = ldap.initialize( + "ldaps://" + ldap_server + ":" + str(ldap_port)) + if cert_path: + con.set_option(ldap.OPT_X_TLS_CACERTDIR, cert_path) + else: + if not ldap_port: + ldap_port = 389 + con = ldap.initialize( + "ldap://" + ldap_server + ":" + str(ldap_port)) + + if ldap_mode == 'ad': + # Microsoft Active Directory + if '@' not in username: + domain = [] + for x in ldap_basedn.split(','): + if "DC=" in x.upper(): + domain.append(x.split('=')[-1]) + username = "%s@%s" % (username, '.'.join(domain)) + username_bare = username.split("@")[0] + con.set_option(ldap.OPT_PROTOCOL_VERSION, 3) + if ldap_binddn: + # need to search directory with an admin account 1st + con.simple_bind_s(ldap_binddn, ldap_bindpw) + else: + # credentials should be in the form of username@domain.tld + con.simple_bind_s(username, password) + # this will throw an index error if the account is not found + # in the ldap_basedn + result = con.search_ext_s( + ldap_basedn, ldap.SCOPE_SUBTREE, + "(&(sAMAccountName=%s)(%s))" % (username_bare, filterstr), ["sAMAccountName"])[0][1] + if ldap_binddn: + # We know the user exists & is in the correct OU + # so now we just check the password + 
con.simple_bind_s(username, password) + + if ldap_mode == 'domino': + # Notes Domino + if "@" in username: + username = username.split("@")[0] + con.simple_bind_s(username, password) + + if ldap_mode == 'cn': + # OpenLDAP (CN) + dn = "cn=" + username + "," + ldap_basedn + con.simple_bind_s(dn, password) + + if ldap_mode == 'uid': + # OpenLDAP (UID) + dn = "uid=" + username + "," + ldap_basedn + con.simple_bind_s(dn, password) + + if ldap_mode == 'company': + # no DNs or password needed to search directory + dn = "" + pw = "" + # bind anonymously + con.simple_bind_s(dn, pw) + # search by e-mail address + filter = '(&(mail=' + username + ')(' + filterstr + '))' + # find the uid + attrs = ['uid'] + # perform the actual search + company_search_result=con.search_s(ldap_basedn, + ldap.SCOPE_SUBTREE, + filter, attrs) + dn = company_search_result[0][0] + # perform the real authentication test + con.simple_bind_s(dn, password) + + if ldap_mode == 'uid_r': + # OpenLDAP (UID) with subtree search and multiple DNs + if type(ldap_basedn) == type([]): + basedns = ldap_basedn + else: + basedns = [ldap_basedn] + filter = '(&(uid=%s)(%s))' % (username, filterstr) + for basedn in basedns: + try: + result = con.search_s(basedn, ldap.SCOPE_SUBTREE, filter) + if result: + user_dn = result[0][0] + # Check the password + con.simple_bind_s(user_dn, password) + con.unbind() + return True + except ldap.LDAPError, detail: + (exc_type, exc_value) = sys.exc_info()[:2] + sys.stderr.write("ldap_auth: searching %s for %s resulted in %s: %s\n" % + (basedn, filter, exc_type, exc_value)) + return False + + con.unbind() + return True + except ldap.LDAPError, e: + return False + except IndexError, ex: # for AD membership test + return False + + if filterstr[0] == '(' and filterstr[-1] == ')': # rfc4515 syntax + filterstr = filterstr[1:-1] # parens added again where used + return ldap_auth_aux + ADDED gluon/contrib/login_methods/linkedin_account.py Index: gluon/contrib/login_methods/linkedin_account.py 
================================================================== --- /dev/null +++ gluon/contrib/login_methods/linkedin_account.py @@ -0,0 +1,51 @@ + +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of web2py Web Framework (Copyrighted, 2007-2009). +Developed by Massimo Di Pierro . +License: GPL v2 + +Thanks to Hans Donner for GaeGoogleAccount. +""" + +from gluon.http import HTTP +try: + import linkedin +except ImportError: + raise HTTP(400,"linkedin module not found") + +class LinkedInAccount(object): + """ + Login will be done via Google's Appengine login object, instead of web2py's + login form. + + Include in your model (eg db.py):: + + from gluon.contrib.login_methods.linkedin_account import LinkedInAccount + auth.settings.login_form=LinkedInAccount(request,KEY,SECRET,RETURN_URL) + + """ + + def __init__(self,request,key,secret,return_url): + self.request = request + self.api = linkedin.LinkedIn(key,secret,return_url) + self.token = result = self.api.requestToken() + + def login_url(self, next="/"): + return self.api.getAuthorizeURL(self.token) + + def logout_url(self, next="/"): + return '' + + def get_user(self): + result = self.request.vars.verifier and self.api.accessToken(verifier = self.request.vars.verifier ) + if result: + profile = self.api.GetProfile() + profile = self.api.GetProfile(profile).public_url = "http://www.linkedin.com/in/ozgurv" + return dict(first_name = profile.first_name, + last_name = profile.last_name, + username = profile.id) + + ADDED gluon/contrib/login_methods/loginza.py Index: gluon/contrib/login_methods/loginza.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/loginza.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" + Loginza.ru authentication for web2py + Developed by Vladimir Dronnikov (Copyright © 2011) + Email +""" + +import urllib +from gluon.html import * +from gluon.tools import fetch +from 
gluon.storage import Storage +import gluon.contrib.simplejson as json + +class Loginza(object): + + """ + from gluon.contrib.login_methods.loginza import Loginza + auth.settings.login_form = Loginza(request, + url = "http://localhost:8000/%s/default/user/login" % request.application) + """ + + def __init__(self, + request, + url = "", + embed = True, + auth_url = "http://loginza.ru/api/authinfo", + language = "en", + prompt = "loginza", + on_login_failure = None, + ): + + self.request = request + self.token_url = url + self.embed = embed + self.auth_url = auth_url + self.language = language + self.prompt = prompt + self.profile = None + self.on_login_failure = on_login_failure + self.mappings = Storage() + + # TODO: profile.photo is the URL to the picture + # Howto download and store it locally? + # FIXME: what if email is unique=True + + self.mappings["http://twitter.com/"] = lambda profile:\ + dict(registration_id = profile.get("identity",""), + username = profile.get("nickname",""), + email = profile.get("email",""), + last_name = profile.get("name","").get("full_name",""), + #avatar = profile.get("photo",""), + ) + self.mappings["https://www.google.com/accounts/o8/ud"] = lambda profile:\ + dict(registration_id = profile.get("identity",""), + username = profile.get("name","").get("full_name",""), + email = profile.get("email",""), + first_name = profile.get("name","").get("first_name",""), + last_name = profile.get("name","").get("last_name",""), + #avatar = profile.get("photo",""), + ) + self.mappings["http://vkontakte.ru/"] = lambda profile:\ + dict(registration_id=profile.get("identity",""), + username = profile.get("name","").get("full_name",""), + email = profile.get("email",""), + first_name = profile.get("name","").get("first_name",""), + last_name = profile.get("name","").get("last_name",""), + #avatar = profile.get("photo",""), + ) + self.mappings.default = lambda profile:\ + dict(registration_id = profile.get("identity",""), + username = 
profile.get("name","").get("full_name"), + email = profile.get("email",""), + first_name = profile.get("name","").get("first_name",""), + last_name = profile.get("name","").get("last_name",""), + #avatar = profile.get("photo",""), + ) + + def get_user(self): + request = self.request + if request.vars.token: + user = Storage() + data = urllib.urlencode(dict(token = request.vars.token)) + auth_info_json = fetch(self.auth_url+'?'+data) + #print auth_info_json + auth_info = json.loads(auth_info_json) + if auth_info["identity"] != None: + self.profile = auth_info + provider = self.profile["provider"] + user = self.mappings.get(provider, self.mappings.default)(self.profile) + #user["password"] = ??? + #user["avatar"] = ??? + return user + elif self.on_login_failure: + redirect(self.on_login_failure) + return None + + def login_form(self): + request = self.request + args = request.args + LOGINZA_URL = "https://loginza.ru/api/widget?lang=%s&token_url=%s&overlay=loginza" + if self.embed: + form = IFRAME(_src=LOGINZA_URL % (self.language, self.token_url), + _scrolling="no", + _frameborder="no", + _style="width:359px;height:300px;") + else: + form = DIV(A(self.prompt, _href=LOGINZA_URL % (self.language, self.token_url), _class="loginza"), + SCRIPT(_src="https://s3-eu-west-1.amazonaws.com/s1.loginza.ru/js/widget.js", _type="text/javascript")) + return form + ADDED gluon/contrib/login_methods/oauth10a_account.py Index: gluon/contrib/login_methods/oauth10a_account.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/oauth10a_account.py @@ -0,0 +1,190 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Written by Michele Comitini +License: GPL v3 + +Adds support for OAuth1.0a authentication to web2py. 
+ +Dependencies: + - python-oauth2 (http://github.com/simplegeo/python-oauth2) + +""" + +import oauth2 as oauth +import cgi + +from urllib2 import urlopen +import urllib2 +from urllib import urlencode + +class OAuthAccount(object): + """ + Login will be done via OAuth Framework, instead of web2py's + login form. + + Include in your model (eg db.py):: + # define the auth_table before call to auth.define_tables() + auth_table = db.define_table( + auth.settings.table_user_name, + Field('first_name', length=128, default=""), + Field('last_name', length=128, default=""), + Field('username', length=128, default="", unique=True), + Field('password', 'password', length=256, + readable=False, label='Password'), + Field('registration_key', length=128, default= "", + writable=False, readable=False)) + + auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) + . + . + . + auth.define_tables() + . + . + . + + CLIENT_ID=\"\" + CLIENT_SECRET=\"\" + AUTH_URL="..." + TOKEN_URL="..." + ACCESS_TOKEN_URL="..." + from gluon.contrib.login_methods.oauth10a_account import OAuthAccount + auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET, AUTH_URL, TOKEN_URL, ACCESS_TOKEN_URL) + + """ + + def __redirect_uri(self, next=None): + """Build the uri used by the authenticating server to redirect + the client back to the page originating the auth request. + Appends the _next action to the generated url so the flows continues. + """ + r = self.request + http_host=r.env.http_x_forwarded_for + if not http_host: http_host=r.env.http_host + + url_scheme = r.env.wsgi_url_scheme + if next: + path_info = next + else: + path_info = r.env.path_info + uri = '%s://%s%s' %(url_scheme, http_host, path_info) + if r.get_vars and not next: + uri += '?' + urlencode(r.get_vars) + return uri + + + def accessToken(self): + """Return the access token generated by the authenticating server. + + If token is already in the session that one will be used. 
+ Otherwise the token is fetched from the auth server. + + """ + + if self.session.access_token: + # return the token (TODO: does it expire?) + + return self.session.access_token + if self.session.request_token: + # Exchange the request token with an authorization token. + token = self.session.request_token + self.session.request_token = None + + # Build an authorized client + # OAuth1.0a put the verifier! + token.set_verifier(self.request.vars.oauth_verifier) + client = oauth.Client(self.consumer, token) + + + resp, content = client.request(self.access_token_url, "POST") + if str(resp['status']) != '200': + self.session.request_token = None + self.globals['redirect'](self.globals['URL'](f='user',args='logout')) + + + self.session.access_token = oauth.Token.from_string(content) + + return self.session.access_token + + self.session.access_token = None + return None + + def __init__(self, g, client_id, client_secret, auth_url, token_url, access_token_url): + self.globals = g + self.client_id = client_id + self.client_secret = client_secret + self.code = None + self.request = g['request'] + self.session = g['session'] + self.auth_url = auth_url + self.token_url = token_url + self.access_token_url = access_token_url + + # consumer init + self.consumer = oauth.Consumer(self.client_id, self.client_secret) + + + def login_url(self, next="/"): + self.__oauth_login(next) + return next + + def logout_url(self, next="/"): + self.session.request_token = None + self.session.access_token = None + return next + + def get_user(self): + '''Get user data. + + Since OAuth does not specify what a user + is, this function must be implemented for the specific + provider. + ''' + raise NotImplementedError, "Must override get_user()" + + def __oauth_login(self, next): + '''This method redirects the user to the authenticating form + on authentication server if the authentication code + and the authentication token are not available to the + application yet. 
+ + Once the authentication code has been received this method is + called to set the access token into the session by calling + accessToken() + ''' + + if not self.accessToken(): + # setup the client + client = oauth.Client(self.consumer, None) + # Get a request token. + # oauth_callback *is REQUIRED* for OAuth1.0a + # putting it in the body seems to work. + callback_url = self.__redirect_uri(next) + data = urlencode(dict(oauth_callback=callback_url)) + resp, content = client.request(self.token_url, "POST", body=data) + if resp['status'] != '200': + self.session.request_token = None + self.globals['redirect'](self.globals['URL'](f='user',args='logout')) + + # Store the request token in session. + request_token = self.session.request_token = oauth.Token.from_string(content) + + # Redirect the user to the authentication URL and pass the callback url. + data = urlencode(dict(oauth_token=request_token.key, + oauth_callback=callback_url)) + auth_request_url = self.auth_url + '?' +data + + + HTTP = self.globals['HTTP'] + + + raise HTTP(307, + "You are not authenticated: you are being redirected to the authentication server", + Location=auth_request_url) + + return None + + + ADDED gluon/contrib/login_methods/oauth20_account.py Index: gluon/contrib/login_methods/oauth20_account.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/oauth20_account.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Written by Michele Comitini +License: GPL v3 + +Adds support for OAuth 2.0 authentication to web2py. + +OAuth 2.0 Draft: http://tools.ietf.org/html/draft-ietf-oauth-v2-10 +""" + +import time +import cgi + +from urllib2 import urlopen +import urllib2 +from urllib import urlencode + +class OAuthAccount(object): + """ + Login will be done via OAuth Framework, instead of web2py's + login form. 
+ + Include in your model (eg db.py):: + # define the auth_table before call to auth.define_tables() + auth_table = db.define_table( + auth.settings.table_user_name, + Field('first_name', length=128, default=""), + Field('last_name', length=128, default=""), + Field('username', length=128, default="", unique=True), + Field('password', 'password', length=256, + readable=False, label='Password'), + Field('registration_key', length=128, default= "", + writable=False, readable=False)) + + auth_table.username.requires = IS_NOT_IN_DB(db, auth_table.username) + . + . + . + auth.define_tables() + . + . + . + + CLIENT_ID=\"\" + CLIENT_SECRET=\"\" + AUTH_URL="http://..." + TOKEN_URL="http://..." + from gluon.contrib.login_methods.oauth20_account import OAuthAccount + auth.settings.login_form=OAuthAccount(globals(),CLIENT_ID,CLIENT_SECRET,AUTH_URL, TOKEN_URL, **args ) + Any optional arg will be passed as is to remote server for requests. + It can be used for the optional "scope" parameters for Facebook. + """ + def __redirect_uri(self, next=None): + """Build the uri used by the authenticating server to redirect + the client back to the page originating the auth request. + Appends the _next action to the generated url so the flows continues. + """ + + r = self.request + http_host=r.env.http_x_forwarded_for + if not http_host: http_host=r.env.http_host + + url_scheme = r.env.wsgi_url_scheme + if next: + path_info = next + else: + path_info = r.env.path_info + uri = '%s://%s%s' %(url_scheme, http_host, path_info) + if r.get_vars and not next: + uri += '?' + urlencode(r.get_vars) + return uri + + + def __build_url_opener(self, uri): + """Build the url opener for managing HTTP Basic Athentication""" + # Create an OpenerDirector with support for Basic HTTP Authentication... 
+ auth_handler = urllib2.HTTPBasicAuthHandler() + auth_handler.add_password(None, + uri, + self.client_id, + self.client_secret) + opener = urllib2.build_opener(auth_handler) + return opener + + + def accessToken(self): + """Return the access token generated by the authenticating server. + + If token is already in the session that one will be used. + Otherwise the token is fetched from the auth server. + + """ + if self.session.token and self.session.token.has_key('expires'): + expires = self.session.token['expires'] + # reuse token until expiration + if expires == 0 or expires > time.time(): + return self.session.token['access_token'] + if self.session.code: + data = dict(client_id=self.client_id, + client_secret=self.client_secret, + redirect_uri=self.session.redirect_uri, + response_type='token', code=self.session.code) + + + if self.args: + data.update(self.args) + open_url = None + opener = self.__build_url_opener(self.token_url) + try: + open_url = opener.open(self.token_url, urlencode(data)) + except urllib2.HTTPError, e: + raise Exception(e.read()) + finally: + del self.session.code # throw it away + + if open_url: + try: + tokendata = cgi.parse_qs(open_url.read()) + self.session.token = dict([(k,v[-1]) for k,v in tokendata.items()]) + # set expiration absolute time try to avoid broken + # implementations where "expires_in" becomes "expires" + if self.session.token.has_key('expires_in'): + exps = 'expires_in' + else: + exps = 'expires' + self.session.token['expires'] = int(self.session.token[exps]) + \ + time.time() + finally: + opener.close() + return self.session.token['access_token'] + + self.session.token = None + return None + + def __init__(self, g, client_id, client_secret, auth_url, token_url, **args): + self.globals = g + self.client_id = client_id + self.client_secret = client_secret + self.request = g['request'] + self.session = g['session'] + self.auth_url = auth_url + self.token_url = token_url + self.args = args + + def login_url(self, 
next="/"): + self.__oauth_login(next) + return next + + def logout_url(self, next="/"): + del self.session.token + return next + + def get_user(self): + '''Returns the user using the Graph API. + ''' + raise NotImplementedError, "Must override get_user()" + if not self.accessToken(): + return None + + if not self.graph: + self.graph = GraphAPI((self.accessToken())) + + user = None + try: + user = self.graph.get_object("me") + except GraphAPIError: + self.session.token = None + self.graph = None + + if user: + return dict(first_name = user['first_name'], + last_name = user['last_name'], + username = user['id']) + + + + def __oauth_login(self, next): + '''This method redirects the user to the authenticating form + on authentication server if the authentication code + and the authentication token are not available to the + application yet. + + Once the authentication code has been received this method is + called to set the access token into the session by calling + accessToken() + ''' + if not self.accessToken(): + if not self.request.vars.code: + self.session.redirect_uri=self.__redirect_uri(next) + data = dict(redirect_uri=self.session.redirect_uri, + response_type='code', + client_id=self.client_id) + if self.args: + data.update(self.args) + auth_request_url = self.auth_url + "?" +urlencode(data) + HTTP = self.globals['HTTP'] + raise HTTP(307, + "You are not authenticated: you are being redirected to the authentication server", + Location=auth_request_url) + else: + self.session.code = self.request.vars.code + self.accessToken() + return self.session.code + return None + ADDED gluon/contrib/login_methods/openid_auth.py Index: gluon/contrib/login_methods/openid_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/openid_auth.py @@ -0,0 +1,632 @@ +#!/usr/bin/env python +# coding: utf8 + +""" + OpenID authentication for web2py + + Allowed using OpenID login together with web2py built-in login. 
+ + By default, to support OpenID login, put this in your db.py + + >>> from gluon.contrib.login_methods.openid_auth import OpenIDAuth + >>> auth.settings.login_form = OpenIDAuth(auth) + + To show OpenID list in user profile, you can add the following code + before the end of function user() of your_app/controllers/default.py + + + if (request.args and request.args(0) == "profile"): + + form = DIV(form, openid_login_form.list_user_openids()) + return dict(form=form, login_form=login_form, register_form=register_form, self_registration=self_registration) + + More detail in the description of the class OpenIDAuth. + + Requirements: + python-openid version 2.2.5 or later + + Reference: + * w2p openID + http://w2popenid.appspot.com/init/default/wiki/w2popenid + * RPX and web2py auth module + http://www.web2pyslices.com/main/slices/take_slice/28 + * built-in file: gluon/contrib/login_methods/rpx_account.py + * built-in file: gluon/tools.py (Auth class) +""" +import time +from datetime import datetime, timedelta + +from gluon import * +from gluon.storage import Storage, Messages + +try: + import openid.consumer.consumer + from openid.association import Association + from openid.store.interface import OpenIDStore + from openid.extensions.sreg import SRegRequest, SRegResponse + from openid.store import nonce + from openid.consumer.discover import DiscoveryFailure +except ImportError, err: + raise ImportError("OpenIDAuth requires python-openid package") + +DEFAULT = lambda: None + +class OpenIDAuth(object): + """ + OpenIDAuth + + It supports the logout_url, implementing the get_user and login_form + for cas usage of gluon.tools.Auth. + + It also uses the ExtendedLoginForm to allow the OpenIDAuth login_methods + combined with the standard logon/register procedure. + + It uses OpenID Consumer when render the form and begins the OpenID + authentication. + + Example: (put these code after auth.define_tables() in your models.) 
+ + auth = Auth(globals(), db) # authentication/authorization + ... + auth.define_tables() # creates all needed tables + ... + + #include in your model after auth has been defined + from gluon.contrib.login_methods.openid_auth import OpenIDAuth + openid_login_form = OpenIDAuth(request, auth, db) + + from gluon.contrib.login_methods.extended_login_form import ExtendedLoginForm + extended_login_form = ExtendedLoginForm(request, auth, openid_login_form, + signals=['oid','janrain_nonce']) + + auth.settings.login_form = extended_login_form + """ + + def __init__(self, auth): + self.auth = auth + self.db = auth.db + + request = current.request + self.nextvar = '_next' + self.realm = 'http://%s' % request.env.http_host + self.login_url = URL(r=request, f='user', args=['login']) + self.return_to_url = self.realm + self.login_url + + self.table_alt_logins_name = "alt_logins" + if not auth.settings.table_user: + raise + self.table_user = self.auth.settings.table_user + self.openid_expiration = 15 #minutes + + self.messages = self._define_messages() + + if not self.table_alt_logins_name in self.db.tables: + self._define_alt_login_table() + + def _define_messages(self): + messages = Messages(current.T) + messages.label_alt_login_username = 'Sign-in with OpenID: ' + messages.label_add_alt_login_username = 'Add a new OpenID: ' + messages.submit_button = 'Sign in' + messages.submit_button_add = 'Add' + messages.a_delete = 'Delete' + messages.comment_openid_signin = 'What is OpenID?' + messages.comment_openid_help_title = 'Start using your OpenID' + messages.comment_openid_help_url = 'http://openid.net/get-an-openid/start-using-your-openid/' + messages.openid_fail_discover = 'Failed to discover OpenID service. Check your OpenID or "More about OpenID"?' + messages.flash_openid_expired = 'OpenID expired. Please login or authenticate OpenID again. Sorry for the inconvenient.' 
+ messages.flash_openid_associated = 'OpenID associated' + messages.flash_associate_openid = 'Please login or register an account for this OpenID.' + messages.p_openid_not_registered = "This Open ID haven't be registered. " \ + + "Please login to associate with it or register an account for it." + messages.flash_openid_authenticated = 'OpenID authenticated successfully.' + messages.flash_openid_fail_authentication = 'OpenID authentication failed. (Error message: %s)' + messages.flash_openid_canceled = 'OpenID authentication canceled by user.' + messages.flash_openid_need_setup = 'OpenID authentication needs to be setup by the user with the provider first.' + messages.h_openid_login = 'OpenID Login' + messages.h_openid_list = 'OpenID List' + return messages + + def _define_alt_login_table(self): + """ + Define the OpenID login table. + Note: type is what I used for our project. We're going to support 'fackbook' and + 'plurk' alternate login methods. Otherwise it's always 'openid' and you + may not need it. This should be easy to changed. + (Just remove the field of "type" and remove the + "and db.alt_logins.type == type_" in _find_matched_openid function) + """ + db = self.db + table = db.define_table( + self.table_alt_logins_name, + Field('username', length=512, default=''), + Field('type', length=128, default='openid', readable=False), + Field('user', self.table_user, readable=False), + ) + table.username.requires = IS_NOT_IN_DB(db, table.username) + self.table_alt_logins = table + + def logout_url(self, next): + """ + Delete the w2popenid record in session as logout + """ + if current.session.w2popenid: + del(current.session.w2popenid) + return next + + def login_form(self): + """ + Start to process the OpenID response if 'janrain_nonce' in request parameters + and not processed yet. Else return the OpenID form for login. 
+ """ + request = current.request + if request.vars.has_key('janrain_nonce') and not self._processed(): + self._process_response() + return self.auth() + return self._form() + + def get_user(self): + """ + It supports the logout_url, implementing the get_user and login_form + for cas usage of gluon.tools.Auth. + """ + request = current.request + args = request.args + + if args[0] == 'logout': + return True # Let logout_url got called + + if current.session.w2popenid: + w2popenid = current.session.w2popenid + db = self.db + if (w2popenid.ok is True and w2popenid.oid): # OpenID authenticated + if self._w2popenid_expired(w2popenid): + del(current.session.w2popenid) + flash = self.messages.flash_openid_expired + current.session.warning = flash + redirect(self.auth.settings.login_url) + oid = self._remove_protocol(w2popenid.oid) + alt_login = self._find_matched_openid(db, oid) + + nextvar = self.nextvar + # This OpenID not in the database. If user logged in then add it + # into database, else ask user to login or register. 
+ if not alt_login: + if self.auth.is_logged_in(): + # TODO: ask first maybe + self._associate_user_openid(self.auth.user, oid) + if current.session.w2popenid: + del(current.session.w2popenid) + current.session.flash = self.messages.flash_openid_associated + if request.vars.has_key(nextvar): + redirect(request.vars[nextvar]) + redirect(self.auth.settings.login_next) + + if not request.vars.has_key(nextvar): + # no next var, add it and do login again + # so if user login or register can go back here to associate the OpenID + redirect(URL(r=request, + args=['login'], + vars={nextvar:self.login_url})) + self.login_form = self._form_with_notification() + current.session.flash = self.messages.flash_associate_openid + return None # need to login or register to associate this openid + + # Get existed OpenID user + user = db(self.table_user.id==alt_login.user).select().first() + if user: + if current.session.w2popenid: + del(current.session.w2popenid) + if 'username' in self.table_user.fields(): + username = 'username' + elif 'email' in self.table_user.fields(): + username = 'email' + return {username: user[username]} if user else None # login success (almost) + + return None # just start to login + + def _find_matched_openid(self, db, oid, type_='openid'): + """ + Get the matched OpenID for given + """ + query = ((db.alt_logins.username == oid) & (db.alt_logins.type == type_)) + alt_login = db(query).select().first() # Get the OpenID record + return alt_login + + def _associate_user_openid(self, user, oid): + """ + Associate the user logged in with given OpenID + """ + # print "[DB] %s authenticated" % oid + self.db.alt_logins.insert(username=oid, user=user.id) + + def _form_with_notification(self): + """ + Render the form for normal login with a notice of OpenID authenticated + """ + form = DIV() + # TODO: check when will happen + if self.auth.settings.login_form in (self.auth, self): + self.auth.settings.login_form = self.auth + form = DIV(self.auth()) + + register_note 
= DIV(P(self.messages.p_openid_not_registered)) + form.components.append(register_note) + return lambda: form + + def _remove_protocol(self, oid): + """ + Remove https:// or http:// from oid url + """ + protocol = 'https://' + if oid.startswith(protocol): + oid = oid[len(protocol):] + return oid + protocol = 'http://' + if oid.startswith(protocol): + oid = oid[len(protocol):] + return oid + return oid + + def _init_consumerhelper(self): + """ + Initialize the ConsumerHelper + """ + if not hasattr(self, "consumerhelper"): + self.consumerhelper = ConsumerHelper(current.session, + self.db) + return self.consumerhelper + + + def _form(self, style=None): + form = DIV(H3(self.messages.h_openid_login), self._login_form(style)) + return form + + def _login_form(self, + openid_field_label=None, + submit_button=None, + _next=None, + style=None): + """ + Render the form for OpenID login + """ + def warning_openid_fail(session): + session.warning = messages.openid_fail_discover + + style = style or """ +background-attachment: scroll; +background-repeat: no-repeat; +background-image: url("http://wiki.openid.net/f/openid-16x16.gif"); +background-position: 0% 50%; +background-color: transparent; +padding-left: 18px; +width: 400px; +""" + style = style.replace("\n","") + + request = current.request + session = current.session + messages = self.messages + hidden_next_input = "" + if _next == 'profile': + profile_url = URL(r=request, f='user', args=['profile']) + hidden_next_input = INPUT(_type="hidden", _name="_next", _value=profile_url) + form = FORM(openid_field_label or self.messages.label_alt_login_username, + INPUT(_type="input", _name="oid", + requires=IS_NOT_EMPTY(error_message=messages.openid_fail_discover), + _style=style), + hidden_next_input, + INPUT(_type="submit", _value=submit_button or messages.submit_button), + " ", + A(messages.comment_openid_signin, + _href=messages.comment_openid_help_url, + _title=messages.comment_openid_help_title, + _class='openid-identifier', 
+ _target="_blank"), + _action=self.login_url + ) + if form.accepts(request.vars, session): + oid = request.vars.oid + consumerhelper = self._init_consumerhelper() + url = self.login_url + return_to_url = self.return_to_url + if not oid: + warning_openid_fail(session) + redirect(url) + try: + if request.vars.has_key('_next'): + return_to_url = self.return_to_url + '?_next=' + request.vars._next + url = consumerhelper.begin(oid, self.realm, return_to_url) + except DiscoveryFailure: + warning_openid_fail(session) + redirect(url) + return form + + def _processed(self): + """ + Check if w2popenid authentication is processed. + Return True if processed else False. + """ + processed = (hasattr(current.session, 'w2popenid') and + current.session.w2popenid.ok is True) + return processed + + def _set_w2popenid_expiration(self, w2popenid): + """ + Set expiration for OpenID authentication. + """ + w2popenid.expiration = datetime.now() + timedelta(minutes=self.openid_expiration) + + def _w2popenid_expired(self, w2popenid): + """ + Check if w2popenid authentication is expired. + Return True if expired else False. + """ + return (not w2popenid.expiration) or (datetime.now() > w2popenid.expiration) + + def _process_response(self): + """ + Process the OpenID by ConsumerHelper. 
+ """ + request = current.request + request_vars = request.vars + consumerhelper = self._init_consumerhelper() + process_status = consumerhelper.process_response(request_vars, self.return_to_url) + if process_status == "success": + w2popenid = current.session.w2popenid + user_data = self.consumerhelper.sreg() + current.session.w2popenid.ok = True + self._set_w2popenid_expiration(w2popenid) + w2popenid.user_data = user_data + current.session.flash = self.messages.flash_openid_authenticated + elif process_status == "failure": + flash = self.messages.flash_openid_fail_authentication % consumerhelper.error_message + current.session.warning = flash + elif process_status == "cancel": + current.session.warning = self.messages.flash_openid_canceled + elif process_status == "setup_needed": + current.session.warning = self.messages.flash_openid_need_setup + + def list_user_openids(self): + messages = self.messages + request = current.request + if request.vars.has_key('delete_openid'): + self.remove_openid(request.vars.delete_openid) + + query = self.db.alt_logins.user == self.auth.user.id + alt_logins = self.db(query).select() + l = [] + for alt_login in alt_logins: + username = alt_login.username + delete_href = URL(r=request, f='user', + args=['profile'], + vars={'delete_openid': username}) + delete_link = A(messages.a_delete, _href=delete_href) + l.append(LI(username, " ", delete_link)) + + profile_url = URL(r=request, f='user', args=['profile']) + #return_to_url = self.return_to_url + '?' 
+ self.nextvar + '=' + profile_url + openid_list = DIV(H3(messages.h_openid_list), UL(l), + self._login_form( + _next='profile', + submit_button=messages.submit_button_add, + openid_field_label=messages.label_add_alt_login_username) + ) + return openid_list + + + def remove_openid(self, openid): + query = self.db.alt_logins.username == openid + self.db(query).delete() + +class ConsumerHelper(object): + """ + ConsumerHelper knows the python-openid and + """ + + def __init__(self, session, db): + self.session = session + store = self._init_store(db) + self.consumer = openid.consumer.consumer.Consumer(session, store) + + def _init_store(self, db): + """ + Initialize Web2pyStore + """ + if not hasattr(self, "store"): + store = Web2pyStore(db) + session = self.session + if not session.has_key('w2popenid'): + session.w2popenid = Storage() + self.store = store + return self.store + + def begin(self, oid, realm, return_to_url): + """ + Begin the OpenID authentication + """ + w2popenid = self.session.w2popenid + w2popenid.oid = oid + auth_req = self.consumer.begin(oid) + auth_req.addExtension(SRegRequest(required=['email','nickname'])) + url = auth_req.redirectURL(return_to=return_to_url, realm=realm) + return url + + def process_response(self, request_vars, return_to_url): + """ + Complete the process and + """ + resp = self.consumer.complete(request_vars, return_to_url) + if resp: + if resp.status == openid.consumer.consumer.SUCCESS: + self.resp = resp + if hasattr(resp, "identity_url"): + self.session.w2popenid.oid = resp.identity_url + return "success" + if resp.status == openid.consumer.consumer.FAILURE: + self.error_message = resp.message + return "failure" + if resp.status == openid.consumer.consumer.CANCEL: + return "cancel" + if resp.status == openid.consumer.consumer.SETUP_NEEDED: + return "setup_needed" + return "no resp" + + def sreg(self): + """ + Try to get OpenID Simple Registation + http://openid.net/specs/openid-simple-registration-extension-1_0.html + """ 
+ if self.resp: + resp = self.resp + sreg_resp = SRegResponse.fromSuccessResponse(resp) + return sreg_resp.data if sreg_resp else None + else: + return None + + +class Web2pyStore(OpenIDStore): + """ + Web2pyStore + + This class implements the OpenIDStore interface. OpenID stores take care + of persisting nonces and associations. The Janrain Python OpenID library + comes with implementations for file and memory storage. Web2pyStore uses + the web2py db abstration layer. See the source code docs of OpenIDStore + for a comprehensive description of this interface. + """ + + def __init__(self, database): + self.database = database + self.table_oid_associations_name = 'oid_associations' + self.table_oid_nonces_name = 'oid_nonces' + self._initDB() + + def _initDB(self): + + if self.table_oid_associations_name not in self.database: + self.database.define_table(self.table_oid_associations_name, + Field('server_url', 'string', length=2047, required=True), + Field('handle', 'string', length=255, required=True), + Field('secret', 'blob', required=True), + Field('issued', 'integer', required=True), + Field('lifetime', 'integer', required=True), + Field('assoc_type', 'string', length=64, required=True) + ) + if self.table_oid_nonces_name not in self.database: + self.database.define_table(self.table_oid_nonces_name, + Field('server_url', 'string', length=2047, required=True), + Field('timestamp', 'integer', required=True), + Field('salt', 'string', length=40, required=True) + ) + + def storeAssociation(self, server_url, association): + """ + Store associations. If there already is one with the same + server_url and handle in the table replace it. 
+ """ + + db = self.database + query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == association.handle) + db(query).delete() + db.oid_associations.insert(server_url = server_url, + handle = association.handle, + secret = association.secret, + issued = association.issued, + lifetime = association.lifetime, + assoc_type = association.assoc_type), 'insert '*10 + + def getAssociation(self, server_url, handle=None): + """ + Return the association for server_url and handle. If handle is + not None return the latests associations for that server_url. + Return None if no association can be found. + """ + + db = self.database + query = (db.oid_associations.server_url == server_url) + if handle: + query &= (db.oid_associations.handle == handle) + rows = db(query).select(orderby=db.oid_associations.issued) + keep_assoc, _ = self._removeExpiredAssocations(rows) + if len(keep_assoc) == 0: + return None + else: + assoc = keep_assoc.pop() # pop the last one as it should be the latest one + return Association(assoc['handle'], + assoc['secret'], + assoc['issued'], + assoc['lifetime'], + assoc['assoc_type']) + + def removeAssociation(self, server_url, handle): + db = self.database + query = (db.oid_associations.server_url == server_url) & (db.oid_associations.handle == handle) + return db(query).delete() != None + + def useNonce(self, server_url, timestamp, salt): + """ + This method returns Falase if a nonce has been used before or its + timestamp is not current. + """ + + db = self.database + if abs(timestamp - time.time()) > nonce.SKEW: + return False + query = (db.oid_nonces.server_url == server_url) & (db.oid_nonces.timestamp == timestamp) & (db.oid_nonces.salt == salt) + if db(query).count() > 0: + return False + else: + db.oid_nonces.insert(server_url = server_url, + timestamp = timestamp, + salt = salt) + return True + + def _removeExpiredAssocations(self, rows): + """ + This helper function is not part of the interface. 
Given a list of + association rows it checks which associations have expired and + deletes them from the db. It returns a tuple of the form + ([valid_assoc], no_of_expired_assoc_deleted). + """ + + db = self.database + keep_assoc = [] + remove_assoc = [] + t1970 = time.time() + for r in rows: + if r['issued'] + r['lifetime'] < t1970: + remove_assoc.append(r) + else: + keep_assoc.append(r) + for r in remove_assoc: + del db.oid_associations[r['id']] + return (keep_assoc, len(remove_assoc)) # return tuple (list of valid associations, number of deleted associations) + + def cleanupNonces(self): + """ + Remove expired nonce entries from DB and return the number + of entries deleted. + """ + + db = self.database + query = (db.oid_nonces.timestamp < time.time() - nonce.SKEW) + return db(query).delete() + + def cleanupAssociations(self): + """ + Remove expired associations from db and return the number + of entries deleted. + """ + + db = self.database + query = (db.oid_associations.id > 0) + return self._removeExpiredAssocations(db(query).select())[1] #return number of assoc removed + + def cleanup(self): + """ + This method should be run periodically to free the db from + expired nonce and association entries. 
+ """ + + return self.cleanupNonces(), self.cleanupAssociations() + + ADDED gluon/contrib/login_methods/pam_auth.py Index: gluon/contrib/login_methods/pam_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/pam_auth.py @@ -0,0 +1,14 @@ +from gluon.contrib.pam import authenticate + +def pam_auth(): + """ + to use pam_login: + from gluon.contrib.login_methods.pam_auth import pam_auth + auth.settings.login_methods.append(pam_auth()) + """ + + def pam_auth_aux(username, password): + return authenticate(username, password) + + return pam_auth_aux + ADDED gluon/contrib/login_methods/rpx_account.py Index: gluon/contrib/login_methods/rpx_account.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/rpx_account.py @@ -0,0 +1,127 @@ +#!/usr/bin/env python +# coding: utf8 + +""" + RPX Authentication for web2py + Developed by Nathan Freeze (Copyright © 2009) + Email + Modified by Massimo Di Pierro + + This file contains code to allow using RPXNow.com (now Jainrain.com) + services with web2py +""" + +import os +import re +import urllib +from gluon import * +from gluon.tools import fetch +from gluon.storage import Storage +import gluon.contrib.simplejson as json + +class RPXAccount(object): + + """ + from gluon.contrib.login_methods.rpx_account import RPXAccount + auth.settings.actions_disabled=['register','change_password','request_reset_password'] + auth.settings.login_form = RPXAccount(request, + api_key="...", + domain="...", + url = "http://localhost:8000/%s/default/user/login" % request.application) + """ + + def __init__(self, + request, + api_key = "", + domain = "", + url = "", + embed = True, + auth_url = "https://rpxnow.com/api/v2/auth_info", + language= "en", + prompt='rpx', + on_login_failure = None, + ): + + self.request=request + self.api_key=api_key + self.embed = embed + self.auth_url = auth_url + self.domain = domain + 
self.token_url = url + self.language = language + self.profile = None + self.prompt = prompt + self.on_login_failure = on_login_failure + self.mappings = Storage() + + dn = {'givenName':'','familyName':''} + self.mappings.Facebook = lambda profile, dn=dn:\ + dict(registration_id = profile.get("identifier",""), + username = profile.get("preferredUsername",""), + email = profile.get("email",""), + first_name = profile.get("name",dn).get("givenName",""), + last_name = profile.get("name",dn).get("familyName","")) + self.mappings.Google = lambda profile, dn=dn:\ + dict(registration_id=profile.get("identifier",""), + username=profile.get("preferredUsername",""), + email=profile.get("email",""), + first_name=profile.get("name",dn).get("givenName",""), + last_name=profile.get("name",dn).get("familyName","")) + self.mappings.default = lambda profile:\ + dict(registration_id=profile.get("identifier",""), + username=profile.get("preferredUsername",""), + email=profile.get("email",""), + first_name=profile.get("preferredUsername",""), + last_name='') + + def get_user(self): + request = self.request + if request.vars.token: + user = Storage() + data = urllib.urlencode(dict(apiKey = self.api_key, token=request.vars.token)) + auth_info_json = fetch(self.auth_url+'?'+data) + auth_info = json.loads(auth_info_json) + + if auth_info['stat'] == 'ok': + self.profile = auth_info['profile'] + provider = re.sub('[^\w\-]','',self.profile['providerName']) + user = self.mappings.get(provider,self.mappings.default)(self.profile) + return user + elif self.on_login_failure: + redirect(self.on_login_failure) + return None + + def login_form(self): + request = self.request + args = request.args + if self.embed: + JANRAIN_URL = \ + "https://%s.rpxnow.com/openid/embed?token_url=%s&language_preference=%s" + rpxform = IFRAME(_src=JANRAIN_URL % (self.domain,self.token_url,self.language), + _scrolling="no", + _frameborder="no", + _style="width:400px;height:240px;") + else: + JANRAIN_URL = \ + 
"https://%s.rpxnow.com/openid/v2/signin?token_url=%s" + rpxform = DIV(SCRIPT(_src="https://rpxnow.com/openid/v2/widget", + _type="text/javascript"), + SCRIPT("RPXNOW.overlay = true;", + "RPXNOW.language_preference = '%s';" % self.language, + "RPXNOW.realm = '%s';" % self.domain, + "RPXNOW.token_url = '%s';" % self.token_url, + "RPXNOW.show();", + _type="text/javascript")) + return rpxform + +def use_janrain(auth,filename='private/janrain.key',**kwargs): + path = os.path.join(current.request.folder,filename) + if os.path.exists(path): + request = current.request + domain,key = open(path,'r').read().strip().split(':') + host = current.request.env.http_host + url = "http://%s/%s/default/user/login" % (host,request.application) + auth.settings.actions_disabled = \ + ['register','change_password','request_reset_password'] + auth.settings.login_form = RPXAccount( + request, api_key=key,domain=domain, url = url,**kwargs) ADDED gluon/contrib/login_methods/x509_auth.py Index: gluon/contrib/login_methods/x509_auth.py ================================================================== --- /dev/null +++ gluon/contrib/login_methods/x509_auth.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Written by Michele Comitini +License: GPL v3 + +Adds support for x509 authentication. + +""" + +from gluon.globals import current +from gluon.storage import Storage +from gluon.http import HTTP,redirect + +#requires M2Crypto +from M2Crypto import X509 + + + +class X509Auth(object): + """ + Login using x509 cert from client. 
+ + """ + + + + def __init__(self): + self.request = current.request + self.ssl_client_raw_cert = self.request.env.ssl_client_raw_cert + + # rebuild the certificate passed by the env + # this is double work, but it is the only way + # since we cannot access the web server ssl engine directly + + if self.ssl_client_raw_cert: + + x509=X509.load_cert_string(self.ssl_client_raw_cert, X509.FORMAT_PEM) + # extract it from the cert + self.serial = self.request.env.ssl_client_serial or ('%x' % x509.get_serial_number()).upper() + + + subject = x509.get_subject() + + # Reordering the subject map to a usable Storage map + # this allows us a cleaner syntax: + # cn = self.subject.cn + self.subject = Storage(filter(None, + map(lambda x: + (x,map(lambda y: + y.get_data().as_text(), + subject.get_entries_by_nid(subject.nid[x]))), + subject.nid.keys()))) + + + + def login_form(self, **args): + raise HTTP(403,'Login not allowed. No valid x509 crentials') + + + + def login_url(self, next="/"): + raise HTTP(403,'Login not allowed. No valid x509 crentials') + + + + + def logout_url(self, next="/"): + return next + + def get_user(self): + '''Returns the user info contained in the certificate. + ''' + + # We did not get the client cert? + if not self.ssl_client_raw_cert: + return None + + # Try to reconstruct some useful info for web2py auth machinery + + p = profile = dict() + + username = p['username'] = self.subject.CN or self.subject.commonName + p['first_name'] = self.subject.givenName or username + p['last_name'] = self.subject.surname + p['email'] = self.subject.Email or self.subject.emailAddress + + # IMPORTANT WE USE THE CERT SERIAL AS UNIQUE KEY FOR THE USER + p['registration_id'] = self.serial + + # If the auth table has a field certificate it will be used to + # save a PEM encoded copy of the user certificate. 
+ + p['certificate'] = self.ssl_client_raw_cert + + return profile + + ADDED gluon/contrib/markdown/LICENSE Index: gluon/contrib/markdown/LICENSE ================================================================== --- /dev/null +++ gluon/contrib/markdown/LICENSE @@ -0,0 +1,1 @@ +markdown2.py is released under MIT license. ADDED gluon/contrib/markdown/__init__.py Index: gluon/contrib/markdown/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/markdown/__init__.py @@ -0,0 +1,17 @@ +from markdown2 import * +from gluon.html import XML + +def WIKI(text, encoding="utf8", safe_mode='escape', html4tags=False, **attributes): + if not text: + test = '' + if attributes.has_key('extras'): + extras = attributes['extras'] + del attributes['extras'] + else: + extras=None + text = text.decode(encoding,'replace') + + return XML(markdown(text,extras=extras, + safe_mode=safe_mode, html4tags=html4tags)\ + .encode(encoding,'xmlcharrefreplace'),**attributes) + ADDED gluon/contrib/markdown/markdown2.py Index: gluon/contrib/markdown/markdown2.py ================================================================== --- /dev/null +++ gluon/contrib/markdown/markdown2.py @@ -0,0 +1,1890 @@ +#!/usr/bin/env python +# Copyright (c) 2007-2008 ActiveState Corp. +# License: MIT (http://www.opensource.org/licenses/mit-license.php) + +r"""A fast and complete Python implementation of Markdown. + +[from http://daringfireball.net/projects/markdown/] +> Markdown is a text-to-HTML filter; it translates an easy-to-read / +> easy-to-write structured text format into HTML. Markdown's text +> format is most similar to that of plain text email, and supports +> features such as headers, *emphasis*, code blocks, blockquotes, and +> links. +> +> Markdown's syntax is designed not as a generic markup language, but +> specifically to serve as a front-end to (X)HTML. 
You can use span-level +> HTML tags anywhere in a Markdown document, and you can use block level +> HTML tags (like
    and as well). + +Module usage: + + >>> import markdown2 + >>> markdown2.markdown("*boo!*") # or use `html = markdown_path(PATH)` + u'

    boo!

    \n' + + >>> markdowner = Markdown() + >>> markdowner.convert("*boo!*") + u'

    boo!

    \n' + >>> markdowner.convert("**boom!**") + u'

    boom!

    \n' + +This implementation of Markdown implements the full "core" syntax plus a +number of extras (e.g., code syntax coloring, footnotes) as described on +. +""" + +cmdln_desc = """A fast and complete Python implementation of Markdown, a +text-to-HTML conversion tool for web writers. +""" + +# Dev Notes: +# - There is already a Python markdown processor +# (http://www.freewisdom.org/projects/python-markdown/). +# - Python's regex syntax doesn't have '\z', so I'm using '\Z'. I'm +# not yet sure if there implications with this. Compare 'pydoc sre' +# and 'perldoc perlre'. + +__version_info__ = (1, 0, 1, 16) # first three nums match Markdown.pl +__version__ = '1.0.1.16' +__author__ = "Trent Mick" + +import os +import sys +from pprint import pprint +import re +import logging +try: + from hashlib import md5 +except ImportError: + from md5 import md5 +import optparse +from random import random, randint +import codecs +from urllib import quote + + + +#---- Python version compat + +if sys.version_info[:2] < (2,4): + from sets import Set as set + def reversed(sequence): + for i in sequence[::-1]: + yield i + def _unicode_decode(s, encoding, errors='xmlcharrefreplace'): + return unicode(s, encoding, errors) +else: + def _unicode_decode(s, encoding, errors='strict'): + return s.decode(encoding, errors) + + +#---- globals + +DEBUG = False +log = logging.getLogger("markdown") + +DEFAULT_TAB_WIDTH = 4 + + +try: + import uuid +except ImportError: + SECRET_SALT = str(randint(0, 1000000)) +else: + SECRET_SALT = str(uuid.uuid4()) +def _hash_ascii(s): + #return md5(s).hexdigest() # Markdown.pl effectively does this. 
+ return 'md5-' + md5(SECRET_SALT + s).hexdigest() +def _hash_text(s): + return 'md5-' + md5(SECRET_SALT + s.encode("utf-8")).hexdigest() + +# Table of hash values for escaped characters: +g_escape_table = dict([(ch, _hash_ascii(ch)) + for ch in '\\`*_{}[]()>#+-.!']) + + + +#---- exceptions + +class MarkdownError(Exception): + pass + + + +#---- public api + +def markdown_path(path, encoding="utf-8", + html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + fp = codecs.open(path, 'r', encoding) + try: + text = fp.read() + finally: + fp.close() + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +def markdown(text, html4tags=False, tab_width=DEFAULT_TAB_WIDTH, + safe_mode=None, extras=None, link_patterns=None, + use_file_vars=False): + return Markdown(html4tags=html4tags, tab_width=tab_width, + safe_mode=safe_mode, extras=extras, + link_patterns=link_patterns, + use_file_vars=use_file_vars).convert(text) + +class Markdown(object): + # The dict of "extras" to enable in processing -- a mapping of + # extra name to argument for the extra. Most extras do not have an + # argument, in which case the value is None. + # + # This can be set via (a) subclassing and (b) the constructor + # "extras" argument. 
+ extras = None + + urls = None + titles = None + html_blocks = None + html_spans = None + html_removed_text = "[HTML_REMOVED]" # for compat with markdown.py + + # Used to track when we're inside an ordered or unordered list + # (see _ProcessListItems() for details): + list_level = 0 + + _ws_only_line_re = re.compile(r"^[ \t]+$", re.M) + + def __init__(self, html4tags=False, tab_width=4, safe_mode=None, + extras=None, link_patterns=None, use_file_vars=False): + if html4tags: + self.empty_element_suffix = ">" + else: + self.empty_element_suffix = " />" + self.tab_width = tab_width + + # For compatibility with earlier markdown2.py and with + # markdown.py's safe_mode being a boolean, + # safe_mode == True -> "replace" + if safe_mode is True: + self.safe_mode = "replace" + else: + self.safe_mode = safe_mode + + if self.extras is None: + self.extras = {} + elif not isinstance(self.extras, dict): + self.extras = dict([(e, None) for e in self.extras]) + if extras: + if not isinstance(extras, dict): + extras = dict([(e, None) for e in extras]) + self.extras.update(extras) + assert isinstance(self.extras, dict) + self._instance_extras = self.extras.copy() + self.link_patterns = link_patterns + self.use_file_vars = use_file_vars + self._outdent_re = re.compile(r'^(\t|[ ]{1,%d})' % tab_width, re.M) + + def reset(self): + self.urls = {} + self.titles = {} + self.html_blocks = {} + self.html_spans = {} + self.list_level = 0 + self.extras = self._instance_extras.copy() + if "footnotes" in self.extras: + self.footnotes = {} + self.footnote_ids = [] + + def convert(self, text): + """Convert the given text.""" + # Main function. The order in which other subs are called here is + # essential. Link and image substitutions need to happen before + # _EscapeSpecialChars(), so that any *'s or _'s in the + # and tags get encoded. + + # Clear the global hashes. 
If we don't clear these, you get conflicts + # from other articles when generating a page which contains more than + # one article (e.g. an index page that shows the N most recent + # articles): + self.reset() + + if not isinstance(text, unicode): + #TODO: perhaps shouldn't presume UTF-8 for string input? + text = unicode(text, 'utf-8') + + if self.use_file_vars: + # Look for emacs-style file variable hints. + emacs_vars = self._get_emacs_vars(text) + if "markdown-extras" in emacs_vars: + splitter = re.compile("[ ,]+") + for e in splitter.split(emacs_vars["markdown-extras"]): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + self.extras[ename] = earg + + # Standardize line endings: + text = re.sub("\r\n|\r", "\n", text) + + # Make sure $text ends with a couple of newlines: + text += "\n\n" + + # Convert all tabs to spaces. + text = self._detab(text) + + # Strip any lines consisting only of spaces and tabs. + # This makes subsequent regexen easier to write, because we can + # match consecutive blank lines with /\n+/ instead of something + # contorted like /[ \t]*\n+/ . + text = self._ws_only_line_re.sub("", text) + + if self.safe_mode: + text = self._hash_html_spans(text) + + # Turn block-level HTML blocks into hash entries + text = self._hash_html_blocks(text, raw=True) + + # Strip link definitions, store in hashes. 
+ if "footnotes" in self.extras: + # Must do footnotes first because an unlucky footnote defn + # looks like a link defn: + # [^4]: this "looks like a link defn" + text = self._strip_footnote_definitions(text) + text = self._strip_link_definitions(text) + + text = self._run_block_gamut(text) + + if "footnotes" in self.extras: + text = self._add_footnotes(text) + + text = self._unescape_special_chars(text) + + if self.safe_mode: + text = self._unhash_html_spans(text) + + text += "\n" + return text + + _emacs_oneliner_vars_pat = re.compile(r"-\*-\s*([^\r\n]*?)\s*-\*-", re.UNICODE) + # This regular expression is intended to match blocks like this: + # PREFIX Local Variables: SUFFIX + # PREFIX mode: Tcl SUFFIX + # PREFIX End: SUFFIX + # Some notes: + # - "[ \t]" is used instead of "\s" to specifically exclude newlines + # - "(\r\n|\n|\r)" is used instead of "$" because the sre engine does + # not like anything other than Unix-style line terminators. + _emacs_local_vars_pat = re.compile(r"""^ + (?P(?:[^\r\n|\n|\r])*?) + [\ \t]*Local\ Variables:[\ \t]* + (?P.*?)(?:\r\n|\n|\r) + (?P.*?\1End:) + """, re.IGNORECASE | re.MULTILINE | re.DOTALL | re.VERBOSE) + + def _get_emacs_vars(self, text): + """Return a dictionary of emacs-style local variables. + + Parsing is done loosely according to this spec (and according to + some in-practice deviations from this): + http://www.gnu.org/software/emacs/manual/html_node/emacs/Specifying-File-Variables.html#Specifying-File-Variables + """ + emacs_vars = {} + SIZE = pow(2, 13) # 8kB + + # Search near the start for a '-*-'-style one-liner of variables. 
+ head = text[:SIZE] + if "-*-" in head: + match = self._emacs_oneliner_vars_pat.search(head) + if match: + emacs_vars_str = match.group(1) + assert '\n' not in emacs_vars_str + emacs_var_strs = [s.strip() for s in emacs_vars_str.split(';') + if s.strip()] + if len(emacs_var_strs) == 1 and ':' not in emacs_var_strs[0]: + # While not in the spec, this form is allowed by emacs: + # -*- Tcl -*- + # where the implied "variable" is "mode". This form + # is only allowed if there are no other variables. + emacs_vars["mode"] = emacs_var_strs[0].strip() + else: + for emacs_var_str in emacs_var_strs: + try: + variable, value = emacs_var_str.strip().split(':', 1) + except ValueError: + log.debug("emacs variables error: malformed -*- " + "line: %r", emacs_var_str) + continue + # Lowercase the variable name because Emacs allows "Mode" + # or "mode" or "MoDe", etc. + emacs_vars[variable.lower()] = value.strip() + + tail = text[-SIZE:] + if "Local Variables" in tail: + match = self._emacs_local_vars_pat.search(tail) + if match: + prefix = match.group("prefix") + suffix = match.group("suffix") + lines = match.group("content").splitlines(0) + #print "prefix=%r, suffix=%r, content=%r, lines: %s"\ + # % (prefix, suffix, match.group("content"), lines) + + # Validate the Local Variables block: proper prefix and suffix + # usage. + for i, line in enumerate(lines): + if not line.startswith(prefix): + log.debug("emacs variables error: line '%s' " + "does not use proper prefix '%s'" + % (line, prefix)) + return {} + # Don't validate suffix on last line. Emacs doesn't care, + # neither should we. + if i != len(lines)-1 and not line.endswith(suffix): + log.debug("emacs variables error: line '%s' " + "does not use proper suffix '%s'" + % (line, suffix)) + return {} + + # Parse out one emacs var per line. 
+ continued_for = None + for line in lines[:-1]: # no var on the last line ("PREFIX End:") + if prefix: line = line[len(prefix):] # strip prefix + if suffix: line = line[:-len(suffix)] # strip suffix + line = line.strip() + if continued_for: + variable = continued_for + if line.endswith('\\'): + line = line[:-1].rstrip() + else: + continued_for = None + emacs_vars[variable] += ' ' + line + else: + try: + variable, value = line.split(':', 1) + except ValueError: + log.debug("local variables error: missing colon " + "in local variables entry: '%s'" % line) + continue + # Do NOT lowercase the variable name, because Emacs only + # allows "mode" (and not "Mode", "MoDe", etc.) in this block. + value = value.strip() + if value.endswith('\\'): + value = value[:-1].rstrip() + continued_for = variable + else: + continued_for = None + emacs_vars[variable] = value + + # Unquote values. + for var, val in emacs_vars.items(): + if len(val) > 1 and (val.startswith('"') and val.endswith('"') + or val.startswith('"') and val.endswith('"')): + emacs_vars[var] = val[1:-1] + + return emacs_vars + + # Cribbed from a post by Bart Lateur: + # + _detab_re = re.compile(r'(.*?)\t', re.M) + def _detab_sub(self, match): + g1 = match.group(1) + return g1 + (' ' * (self.tab_width - len(g1) % self.tab_width)) + def _detab(self, text): + r"""Remove (leading?) tabs from a file. + + >>> m = Markdown() + >>> m._detab("\tfoo") + ' foo' + >>> m._detab(" \tfoo") + ' foo' + >>> m._detab("\t foo") + ' foo' + >>> m._detab(" foo") + ' foo' + >>> m._detab(" foo\n\tbar\tblam") + ' foo\n bar blam' + """ + if '\t' not in text: + return text + return self._detab_re.subn(self._detab_sub, text)[0] + + _block_tags_a = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math|ins|del' + _strict_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? 
# any number of lines, minimally matching + # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_a, + re.X | re.M) + + _block_tags_b = 'p|div|h[1-6]|blockquote|pre|table|dl|ol|ul|script|noscript|form|fieldset|iframe|math' + _liberal_tag_block_re = re.compile(r""" + ( # save in \1 + ^ # start of line (with re.M) + <(%s) # start tag = \2 + \b # word break + (.*\n)*? # any number of lines, minimally matching + .* # the matching end tag + [ \t]* # trailing spaces/tabs + (?=\n+|\Z) # followed by a newline or end of document + ) + """ % _block_tags_b, + re.X | re.M) + + def _hash_html_block_sub(self, match, raw=False): + html = match.group(1) + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + return "\n\n" + key + "\n\n" + + def _hash_html_blocks(self, text, raw=False): + """Hashify HTML blocks + + We only want to do this for block-level HTML tags, such as headers, + lists, and tables. That's because we still want to wrap

    s around + "paragraphs" that are wrapped in non-block-level tags, such as anchors, + phrase emphasis, and spans. The list of tags we're looking for is + hard-coded. + + @param raw {boolean} indicates if these are raw HTML blocks in + the original source. It makes a difference in "safe" mode. + """ + if '<' not in text: + return text + + # Pass `raw` value into our calls to self._hash_html_block_sub. + hash_html_block_sub = _curry(self._hash_html_block_sub, raw=raw) + + # First, look for nested blocks, e.g.: + #

    + #
    + # tags for inner block must be indented. + #
    + #
    + # + # The outermost tags must start at the left margin for this to match, and + # the inner nested divs must be indented. + # We need to do this before the next, more liberal match, because the next + # match will start at the first `
    ` and stop at the first `
    `. + text = self._strict_tag_block_re.sub(hash_html_block_sub, text) + + # Now match more liberally, simply from `\n` to `\n` + text = self._liberal_tag_block_re.sub(hash_html_block_sub, text) + + # Special case just for
    . It was easier to make a special + # case than to make the other regex more complicated. + if "", start_idx) + 3 + except ValueError, ex: + break + + # Start position for next comment block search. + start = end_idx + + # Validate whitespace before comment. + if start_idx: + # - Up to `tab_width - 1` spaces before start_idx. + for i in range(self.tab_width - 1): + if text[start_idx - 1] != ' ': + break + start_idx -= 1 + if start_idx == 0: + break + # - Must be preceded by 2 newlines or hit the start of + # the document. + if start_idx == 0: + pass + elif start_idx == 1 and text[0] == '\n': + start_idx = 0 # to match minute detail of Markdown.pl regex + elif text[start_idx-2:start_idx] == '\n\n': + pass + else: + break + + # Validate whitespace after comment. + # - Any number of spaces and tabs. + while end_idx < len(text): + if text[end_idx] not in ' \t': + break + end_idx += 1 + # - Must be following by 2 newlines or hit end of text. + if text[end_idx:end_idx+2] not in ('', '\n', '\n\n'): + continue + + # Escape and hash (must match `_hash_html_block_sub`). + html = text[start_idx:end_idx] + if raw and self.safe_mode: + html = self._sanitize_html(html) + key = _hash_text(html) + self.html_blocks[key] = html + text = text[:start_idx] + "\n\n" + key + "\n\n" + text[end_idx:] + + if "xml" in self.extras: + # Treat XML processing instructions and namespaced one-liner + # tags as if they were block HTML tags. E.g., if standalone + # (i.e. are their own paragraph), the following do not get + # wrapped in a

    tag: + # + # + # + _xml_oneliner_re = _xml_oneliner_re_from_tab_width(self.tab_width) + text = _xml_oneliner_re.sub(hash_html_block_sub, text) + + return text + + def _strip_link_definitions(self, text): + # Strips link definitions from text, stores the URLs and titles in + # hash references. + less_than_tab = self.tab_width - 1 + + # Link defs are in the form: + # [id]: url "optional title" + _link_def_re = re.compile(r""" + ^[ ]{0,%d}\[(.+)\]: # id = \1 + [ \t]* + \n? # maybe *one* newline + [ \t]* + ? # url = \2 + [ \t]* + (?: + \n? # maybe one newline + [ \t]* + (?<=\s) # lookbehind for whitespace + ['"(] + ([^\n]*) # title = \3 + ['")] + [ \t]* + )? # title is optional + (?:\n+|\Z) + """ % less_than_tab, re.X | re.M | re.U) + return _link_def_re.sub(self._extract_link_def_sub, text) + + def _extract_link_def_sub(self, match): + id, url, title = match.groups() + key = id.lower() # Link IDs are case-insensitive + self.urls[key] = self._encode_amps_and_angles(url) + if title: + self.titles[key] = title.replace('"', '"') + return "" + + def _extract_footnote_def_sub(self, match): + id, text = match.groups() + text = _dedent(text, skip_first_line=not text.startswith('\n')).strip() + normed_id = re.sub(r'\W', '-', id) + # Ensure footnote text ends with a couple newlines (for some + # block gamut matches). + self.footnotes[normed_id] = text + "\n\n" + return "" + + def _strip_footnote_definitions(self, text): + """A footnote definition looks like this: + + [^note-id]: Text of the note. + + May include one or more indented paragraphs. + + Where, + - The 'note-id' can be pretty much anything, though typically it + is the number of the footnote. + - The first paragraph may start on the next line, like so: + + [^note-id]: + Text of the note. + """ + less_than_tab = self.tab_width - 1 + footnote_def_re = re.compile(r''' + ^[ ]{0,%d}\[\^(.+)\]: # id = \1 + [ \t]* + ( # footnote text = \2 + # First line need not start with the spaces. 
+ (?:\s*.*\n+) + (?: + (?:[ ]{%d} | \t) # Subsequent lines must be indented. + .*\n+ + )* + ) + # Lookahead for non-space at line-start, or end of doc. + (?:(?=^[ ]{0,%d}\S)|\Z) + ''' % (less_than_tab, self.tab_width, self.tab_width), + re.X | re.M) + return footnote_def_re.sub(self._extract_footnote_def_sub, text) + + + _hr_res = [ + re.compile(r"^[ ]{0,2}([ ]?\*[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\-[ ]?){3,}[ \t]*$", re.M), + re.compile(r"^[ ]{0,2}([ ]?\_[ ]?){3,}[ \t]*$", re.M), + ] + + def _run_block_gamut(self, text): + # These are all the transformations that form block-level + # tags like paragraphs, headers, and list items. + + text = self._do_headers(text) + + # Do Horizontal Rules: + hr = "\n tags around block-level tags. + text = self._hash_html_blocks(text) + + text = self._form_paragraphs(text) + + return text + + def _pyshell_block_sub(self, match): + lines = match.group(0).splitlines(0) + _dedentlines(lines) + indent = ' ' * self.tab_width + s = ('\n' # separate from possible cuddled paragraph + + indent + ('\n'+indent).join(lines) + + '\n\n') + return s + + def _prepare_pyshell_blocks(self, text): + """Ensure that Python interactive shell sessions are put in + code blocks -- even if not properly indented. + """ + if ">>>" not in text: + return text + + less_than_tab = self.tab_width - 1 + _pyshell_block_re = re.compile(r""" + ^([ ]{0,%d})>>>[ ].*\n # first line + ^(\1.*\S+.*\n)* # any number of subsequent lines + ^\n # ends with a blank line + """ % less_than_tab, re.M | re.X) + + return _pyshell_block_re.sub(self._pyshell_block_sub, text) + + def _run_span_gamut(self, text): + # These are all the transformations that occur *within* block-level + # tags like paragraphs, headers, and list items. + + text = self._do_code_spans(text) + + text = self._escape_special_chars(text) + + # Process anchor and image tags. 
+ text = self._do_links(text) + + # Make links out of things like `` + # Must come after _do_links(), because you can use < and > + # delimiters in inline links like [this](). + text = self._do_auto_links(text) + + if "link-patterns" in self.extras: + text = self._do_link_patterns(text) + + text = self._encode_amps_and_angles(text) + + text = self._do_italics_and_bold(text) + + # Do hard breaks: + text = re.sub(r" {2,}\n", " + | + # auto-link (e.g., ) + <\w+[^>]*> + | + # comment + | + <\?.*?\?> # processing instruction + ) + """, re.X) + + def _escape_special_chars(self, text): + # Python markdown note: the HTML tokenization here differs from + # that in Markdown.pl, hence the behaviour for subtle cases can + # differ (I believe the tokenizer here does a better job because + # it isn't susceptible to unmatched '<' and '>' in HTML tags). + # Note, however, that '>' is not allowed in an auto-link URL + # here. + escaped = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup: + # Within tags/HTML-comments/auto-links, encode * and _ + # so they don't conflict with their use in Markdown for + # italics and strong. We're replacing each such + # character with its corresponding MD5 checksum value; + # this is likely overkill, but it should prevent us from + # colliding with the escape values by accident. + escaped.append(token.replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + else: + escaped.append(self._encode_backslash_escapes(token)) + is_html_markup = not is_html_markup + return ''.join(escaped) + + def _hash_html_spans(self, text): + # Used for safe_mode. 
+ + def _is_auto_link(s): + if ':' in s and self._auto_link_re.match(s): + return True + elif '@' in s and self._auto_email_link_re.match(s): + return True + return False + + tokens = [] + is_html_markup = False + for token in self._sorta_html_tokenize_re.split(text): + if is_html_markup and not _is_auto_link(token): + sanitized = self._sanitize_html(token) + key = _hash_text(sanitized) + self.html_spans[key] = sanitized + tokens.append(key) + else: + tokens.append(token) + is_html_markup = not is_html_markup + return ''.join(tokens) + + def _unhash_html_spans(self, text): + for key, sanitized in self.html_spans.items(): + text = text.replace(key, sanitized) + return text + + def _sanitize_html(self, s): + if self.safe_mode == "replace": + return self.html_removed_text + elif self.safe_mode == "escape": + replacements = [ + ('&', '&'), + ('<', '<'), + ('>', '>'), + ] + for before, after in replacements: + s = s.replace(before, after) + return s + else: + raise MarkdownError("invalid value for 'safe_mode': %r (must be " + "'escape' or 'replace')" % self.safe_mode) + + _tail_of_inline_link_re = re.compile(r''' + # Match tail of: [text](/url/) or [text](/url/ "title") + \( # literal paren + [ \t]* + (?P # \1 + <.*?> + | + .*? + ) + [ \t]* + ( # \2 + (['"]) # quote char = \3 + (?P.*?) + \3 # matching quote + )? # title is optional + \) + ''', re.X | re.S) + _tail_of_reference_link_re = re.compile(r''' + # Match tail of: [text][id] + [ ]? # one optional space + (?:\n[ ]*)? # one optional newline followed by spaces + \[ + (?P<id>.*?) + \] + ''', re.X | re.S) + + def _do_links(self, text): + """Turn Markdown link shortcuts into XHTML <a> and <img> tags. + + This is a combination of Markdown.pl's _DoAnchors() and + _DoImages(). They are done together because that simplified the + approach. It was necessary to use a different approach than + Markdown.pl because of the lack of atomic matching support in + Python's regex engine used in $g_nested_brackets. 
+ """ + MAX_LINK_TEXT_SENTINEL = 3000 # markdown2 issue 24 + + # `anchor_allowed_pos` is used to support img links inside + # anchors, but not anchors inside anchors. An anchor's start + # pos must be `>= anchor_allowed_pos`. + anchor_allowed_pos = 0 + + curr_pos = 0 + while True: # Handle the next link. + # The next '[' is the start of: + # - an inline anchor: [text](url "title") + # - a reference anchor: [text][id] + # - an inline img: ![text](url "title") + # - a reference img: ![text][id] + # - a footnote ref: [^id] + # (Only if 'footnotes' extra enabled) + # - a footnote defn: [^id]: ... + # (Only if 'footnotes' extra enabled) These have already + # been stripped in _strip_footnote_definitions() so no + # need to watch for them. + # - a link definition: [id]: url "title" + # These have already been stripped in + # _strip_link_definitions() so no need to watch for them. + # - not markup: [...anything else... + try: + start_idx = text.index('[', curr_pos) + except ValueError: + break + text_length = len(text) + + # Find the matching closing ']'. + # Markdown.pl allows *matching* brackets in link text so we + # will here too. Markdown.pl *doesn't* currently allow + # matching brackets in img alt text -- we'll differ in that + # regard. + bracket_depth = 0 + for p in range(start_idx+1, min(start_idx+MAX_LINK_TEXT_SENTINEL, + text_length)): + ch = text[p] + if ch == ']': + bracket_depth -= 1 + if bracket_depth < 0: + break + elif ch == '[': + bracket_depth += 1 + else: + # Closing bracket not found within sentinel length. + # This isn't markup. + curr_pos = start_idx + 1 + continue + link_text = text[start_idx+1:p] + + # Possibly a footnote ref? 
+ if "footnotes" in self.extras and link_text.startswith("^"): + normed_id = re.sub(r'\W', '-', link_text[1:]) + if normed_id in self.footnotes: + self.footnote_ids.append(normed_id) + result = '<sup class="footnote-ref" id="fnref-%s">' \ + '<a href="#fn-%s">%s</a></sup>' \ + % (normed_id, normed_id, len(self.footnote_ids)) + text = text[:start_idx] + result + text[p+1:] + else: + # This id isn't defined, leave the markup alone. + curr_pos = p+1 + continue + + # Now determine what this is by the remainder. + p += 1 + if p == text_length: + return text + + # Inline anchor or img? + if text[p] == '(': # attempt at perf improvement + match = self._tail_of_inline_link_re.match(text, p) + if match: + # Handle an inline anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + + url, title = match.group("url"), match.group("title") + if url and url[0] == '<': + url = url[1:-1] # '<url>' -> 'url' + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + if title: + title_str = ' title="%s"' \ + % title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) \ + .replace('"', '"') + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url.replace('"', '"'), + link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + continue + + # Reference anchor or img? 
+ else: + match = self._tail_of_reference_link_re.match(text, p) + if match: + # Handle a reference-style anchor or img. + is_img = start_idx > 0 and text[start_idx-1] == "!" + if is_img: + start_idx -= 1 + link_id = match.group("id").lower() + if not link_id: + link_id = link_text.lower() # for links like [this][] + if link_id in self.urls: + url = self.urls[link_id] + # We've got to encode these to avoid conflicting + # with italics/bold. + url = url.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title = self.titles.get(link_id) + if title: + title = title.replace('*', g_escape_table['*']) \ + .replace('_', g_escape_table['_']) + title_str = ' title="%s"' % title + else: + title_str = '' + if is_img: + result = '<img src="%s" alt="%s"%s%s' \ + % (url.replace('"', '"'), + link_text.replace('"', '"'), + title_str, self.empty_element_suffix) + curr_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + elif start_idx >= anchor_allowed_pos: + result = '<a href="%s"%s>%s</a>' \ + % (url, title_str, link_text) + result_head = '<a href="%s"%s>' % (url, title_str) + result = '%s%s</a>' % (result_head, link_text) + # <img> allowed from curr_pos on, <a> from + # anchor_allowed_pos on. + curr_pos = start_idx + len(result_head) + anchor_allowed_pos = start_idx + len(result) + text = text[:start_idx] + result + text[match.end():] + else: + # Anchor not allowed here. + curr_pos = start_idx + 1 + else: + # This id isn't defined, leave the markup alone. + curr_pos = match.end() + continue + + # Otherwise, it isn't markup. 
+ curr_pos = start_idx + 1 + + return text + + + _setext_h_re = re.compile(r'^(.+)[ \t]*\n(=+|-+)[ \t]*\n+', re.M) + def _setext_h_sub(self, match): + n = {"=": 1, "-": 2}[match.group(2)[0]] + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(1)), n) + + _atx_h_re = re.compile(r''' + ^(\#{1,6}) # \1 = string of #'s + [ \t]* + (.+?) # \2 = Header text + [ \t]* + (?<!\\) # ensure not an escaped trailing '#' + \#* # optional closing #'s (not counted) + \n+ + ''', re.X | re.M) + def _atx_h_sub(self, match): + n = len(match.group(1)) + demote_headers = self.extras.get("demote-headers") + if demote_headers: + n = min(n + demote_headers, 6) + return "<h%d>%s</h%d>\n\n" \ + % (n, self._run_span_gamut(match.group(2)), n) + + def _do_headers(self, text): + # Setext-style headers: + # Header 1 + # ======== + # + # Header 2 + # -------- + text = self._setext_h_re.sub(self._setext_h_sub, text) + + # atx-style headers: + # # Header 1 + # ## Header 2 + # ## Header 2 with closing hashes ## + # ... + # ###### Header 6 + text = self._atx_h_re.sub(self._atx_h_sub, text) + + return text + + + _marker_ul_chars = '*+-' + _marker_any = r'(?:[%s]|\d+\.)' % _marker_ul_chars + _marker_ul = '(?:[%s])' % _marker_ul_chars + _marker_ol = r'(?:\d+\.)' + + def _list_sub(self, match): + lst = match.group(1) + lst_type = match.group(3) in self._marker_ul_chars and "ul" or "ol" + result = self._process_list_items(lst) + if self.list_level: + return "<%s>\n%s</%s>\n" % (lst_type, result, lst_type) + else: + return "<%s>\n%s</%s>\n\n" % (lst_type, result, lst_type) + + def _do_lists(self, text): + # Form HTML ordered (numbered) and unordered (bulleted) lists. 
+ + for marker_pat in (self._marker_ul, self._marker_ol): + # Re-usable pattern to match any entire ul or ol list: + less_than_tab = self.tab_width - 1 + whole_list = r''' + ( # \1 = whole list + ( # \2 + [ ]{0,%d} + (%s) # \3 = first list item marker + [ \t]+ + ) + (?:.+?) + ( # \4 + \Z + | + \n{2,} + (?=\S) + (?! # Negative lookahead for another list item marker + [ \t]* + %s[ \t]+ + ) + ) + ) + ''' % (less_than_tab, marker_pat, marker_pat) + + # We use a different prefix before nested lists than top-level lists. + # See extended comment in _process_list_items(). + # + # Note: There's a bit of duplication here. My original implementation + # created a scalar regex pattern as the conditional result of the test on + # $g_list_level, and then only ran the $text =~ s{...}{...}egmx + # substitution once, using the scalar as the pattern. This worked, + # everywhere except when running under MT on my hosting account at Pair + # Networks. There, this caused all rebuilds to be killed by the reaper (or + # perhaps they crashed, but that seems incredibly unlikely given that the + # same script on the same server ran fine *except* under MT. I've spent + # more time trying to figure out why this is happening than I'd like to + # admit. My only guess, backed up by the fact that this workaround works, + # is that Perl optimizes the substition when it can figure out that the + # pattern will never change, and when this optimization isn't on, we run + # afoul of the reaper. Thus, the slightly redundant code to that uses two + # static s/// patterns rather than one conditional pattern. + + if self.list_level: + sub_list_re = re.compile("^"+whole_list, re.X | re.M | re.S) + text = sub_list_re.sub(self._list_sub, text) + else: + list_re = re.compile(r"(?:(?<=\n\n)|\A\n?)"+whole_list, + re.X | re.M | re.S) + text = list_re.sub(self._list_sub, text) + + return text + + _list_item_re = re.compile(r''' + (\n)? 
# leading line = \1 + (^[ \t]*) # leading whitespace = \2 + (%s) [ \t]+ # list marker = \3 + ((?:.+?) # list item text = \4 + (\n{1,2})) # eols = \5 + (?= \n* (\Z | \2 (%s) [ \t]+)) + ''' % (_marker_any, _marker_any), + re.M | re.X | re.S) + + _last_li_endswith_two_eols = False + def _list_item_sub(self, match): + item = match.group(4) + leading_line = match.group(1) + leading_space = match.group(2) + if leading_line or "\n\n" in item or self._last_li_endswith_two_eols: + item = self._run_block_gamut(self._outdent(item)) + else: + # Recursion for sub-lists: + item = self._do_lists(self._outdent(item)) + if item.endswith('\n'): + item = item[:-1] + item = self._run_span_gamut(item) + self._last_li_endswith_two_eols = (len(match.group(5)) == 2) + return "<li>%s</li>\n" % item + + def _process_list_items(self, list_str): + # Process the contents of a single ordered or unordered list, + # splitting it into individual list items. + + # The $g_list_level global keeps track of when we're inside a list. + # Each time we enter a list, we increment it; when we leave a list, + # we decrement. If it's zero, we're not in a list anymore. + # + # We do this because when we're not inside a list, we want to treat + # something like this: + # + # I recommend upgrading to version + # 8. Oops, now this line is treated + # as a sub-list. + # + # As a single paragraph, despite the fact that the second line starts + # with a digit-period-space sequence. + # + # Whereas when we're inside a list (or sub-list), that line will be + # treated as the start of a sub-list. What a kludge, huh? This is + # an aspect of Markdown's syntax that's hard to parse perfectly + # without resorting to mind-reading. Perhaps the solution is to + # change the syntax rules such that sub-lists must start with a + # starting cardinal number; e.g. "1." or "a.". 
+ self.list_level += 1 + self._last_li_endswith_two_eols = False + list_str = list_str.rstrip('\n') + '\n' + list_str = self._list_item_re.sub(self._list_item_sub, list_str) + self.list_level -= 1 + return list_str + + def _get_pygments_lexer(self, lexer_name): + try: + from pygments import lexers, util + except ImportError: + return None + try: + return lexers.get_lexer_by_name(lexer_name) + except util.ClassNotFound: + return None + + def _color_with_pygments(self, codeblock, lexer, **formatter_opts): + import pygments + import pygments.formatters + + class HtmlCodeFormatter(pygments.formatters.HtmlFormatter): + def _wrap_code(self, inner): + """A function for use in a Pygments Formatter which + wraps in <code> tags. + """ + yield 0, "<code>" + for tup in inner: + yield tup + yield 0, "</code>" + + def wrap(self, source, outfile): + """Return the source with a code, pre, and div.""" + return self._wrap_div(self._wrap_pre(self._wrap_code(source))) + + formatter = HtmlCodeFormatter(cssclass="codehilite", **formatter_opts) + return pygments.highlight(codeblock, lexer, formatter) + + def _code_block_sub(self, match): + codeblock = match.group(1) + codeblock = self._outdent(codeblock) + codeblock = self._detab(codeblock) + codeblock = codeblock.lstrip('\n') # trim leading newlines + codeblock = codeblock.rstrip() # trim trailing whitespace + + if "code-color" in self.extras and codeblock.startswith(":::"): + lexer_name, rest = codeblock.split('\n', 1) + lexer_name = lexer_name[3:].strip() + lexer = self._get_pygments_lexer(lexer_name) + codeblock = rest.lstrip("\n") # Remove lexer declaration line. 
+ if lexer: + formatter_opts = self.extras['code-color'] or {} + colored = self._color_with_pygments(codeblock, lexer, + **formatter_opts) + return "\n\n%s\n\n" % colored + + codeblock = self._encode_code(codeblock) + return "\n\n<pre><code>%s\n</code></pre>\n\n" % codeblock + + def _do_code_blocks(self, text): + """Process Markdown `<pre><code>` blocks.""" + code_block_re = re.compile(r''' + (?:\n\n|\A) + ( # $1 = the code block -- one or more lines, starting with a space/tab + (?: + (?:[ ]{%d} | \t) # Lines must start with a tab or a tab-width of spaces + .*\n+ + )+ + ) + ((?=^[ ]{0,%d}\S)|\Z) # Lookahead for non-space at line-start, or end of doc + ''' % (self.tab_width, self.tab_width), + re.M | re.X) + + return code_block_re.sub(self._code_block_sub, text) + + + # Rules for a code span: + # - backslash escapes are not interpreted in a code span + # - to include one or or a run of more backticks the delimiters must + # be a longer run of backticks + # - cannot start or end a code span with a backtick; pad with a + # space and that space will be removed in the emitted HTML + # See `test/tm-cases/escapes.text` for a number of edge-case + # examples. + _code_span_re = re.compile(r''' + (?<!\\) + (`+) # \1 = Opening run of ` + (?!`) # See Note A test/tm-cases/escapes.text + (.+?) # \2 = The code block + (?<!`) + \1 # Matching closer + (?!`) + ''', re.X | re.S) + + def _code_span_sub(self, match): + c = match.group(2).strip(" \t") + c = self._encode_code(c) + return "<code>%s</code>" % c + + def _do_code_spans(self, text): + # * Backtick quotes are used for <code></code> spans. + # + # * You can use multiple backticks as the delimiters if you want to + # include literal backticks in the code span. So, this input: + # + # Just type ``foo `bar` baz`` at the prompt. + # + # Will translate to: + # + # <p>Just type <code>foo `bar` baz</code> at the prompt.</p> + # + # There's no arbitrary limit to the number of backticks you + # can use as delimters. 
If you need three consecutive backticks + # in your code, use four for delimiters, etc. + # + # * You can use spaces to get literal backticks at the edges: + # + # ... type `` `bar` `` ... + # + # Turns to: + # + # ... type <code>`bar`</code> ... + return self._code_span_re.sub(self._code_span_sub, text) + + def _encode_code(self, text): + """Encode/escape certain characters inside Markdown code runs. + The point is that in code, these characters are literals, + and lose their special Markdown meanings. + """ + replacements = [ + # Encode all ampersands; HTML entities are not + # entities within a Markdown code span. + ('&', '&'), + # Do the angle bracket song and dance: + ('<', '<'), + ('>', '>'), + # Now, escape characters that are magic in Markdown: + ('*', g_escape_table['*']), + ('_', g_escape_table['_']), + ('{', g_escape_table['{']), + ('}', g_escape_table['}']), + ('[', g_escape_table['[']), + (']', g_escape_table[']']), + ('\\', g_escape_table['\\']), + ] + for before, after in replacements: + text = text.replace(before, after) + return text + + _strong_re = re.compile(r"(?<!\w)(\*\*|__)(?=\S)(.+?[*_]*)(?<=\S)\1(?!\w)", re.S) + _em_re = re.compile(r"(?<!\w)(\*|_)(?=\S)(.+?)(?<=\S)\1(?!\w)", re.S) + _code_friendly_strong_re = re.compile(r"\*\*(?=\S)(.+?[*_]*)(?<=\S)\*\*", re.S) + _code_friendly_em_re = re.compile(r"\*(?=\S)(.+?)(?<=\S)\*", re.S) + def _do_italics_and_bold(self, text): + # <strong> must go first: + if "code-friendly" in self.extras: + text = self._code_friendly_strong_re.sub(r"<strong>\1</strong>", text) + text = self._code_friendly_em_re.sub(r"<em>\1</em>", text) + else: + text = self._strong_re.sub(r"<strong>\2</strong>", text) + text = self._em_re.sub(r"<em>\2</em>", text) + return text + + + _block_quote_re = re.compile(r''' + ( # Wrap whole match in \1 + ( + ^[ \t]*>[ \t]? 
# '>' at the start of a line + .+\n # rest of the first line + (.+\n)* # subsequent consecutive lines + \n* # blanks + )+ + ) + ''', re.M | re.X) + _bq_one_level_re = re.compile('^[ \t]*>[ \t]?', re.M); + + _html_pre_block_re = re.compile(r'(\s*<pre>.+?</pre>)', re.S) + def _dedent_two_spaces_sub(self, match): + return re.sub(r'(?m)^ ', '', match.group(1)) + + def _block_quote_sub(self, match): + bq = match.group(1) + bq = self._bq_one_level_re.sub('', bq) # trim one level of quoting + bq = self._ws_only_line_re.sub('', bq) # trim whitespace-only lines + bq = self._run_block_gamut(bq) # recurse + + bq = re.sub('(?m)^', ' ', bq) + # These leading spaces screw with <pre> content, so we need to fix that: + bq = self._html_pre_block_re.sub(self._dedent_two_spaces_sub, bq) + + return "<blockquote>\n%s\n</blockquote>\n\n" % bq + + def _do_block_quotes(self, text): + if '>' not in text: + return text + return self._block_quote_re.sub(self._block_quote_sub, text) + + def _form_paragraphs(self, text): + # Strip leading and trailing lines: + text = text.strip('\n') + + # Wrap <p> tags. + grafs = re.split(r"\n{2,}", text) + for i, graf in enumerate(grafs): + if graf in self.html_blocks: + # Unhashify HTML blocks + grafs[i] = self.html_blocks[graf] + else: + # Wrap <p> tags. 
+ graf = self._run_span_gamut(graf) + grafs[i] = "<p>" + graf.lstrip(" \t") + "</p>" + + return "\n\n".join(grafs) + + def _add_footnotes(self, text): + if self.footnotes: + footer = [ + '<div class="footnotes">', + '<hr' + self.empty_element_suffix, + '<ol>', + ] + for i, id in enumerate(self.footnote_ids): + if i != 0: + footer.append('') + footer.append('<li id="fn-%s">' % id) + footer.append(self._run_block_gamut(self.footnotes[id])) + backlink = ('<a href="#fnref-%s" ' + 'class="footnoteBackLink" ' + 'title="Jump back to footnote %d in the text.">' + '↩</a>' % (id, i+1)) + if footer[-1].endswith("</p>"): + footer[-1] = footer[-1][:-len("</p>")] \ + + ' ' + backlink + "</p>" + else: + footer.append("\n<p>%s</p>" % backlink) + footer.append('</li>') + footer.append('</ol>') + footer.append('</div>') + return text + '\n\n' + '\n'.join(footer) + else: + return text + + # Ampersand-encoding based entirely on Nat Irons's Amputator MT plugin: + # http://bumppo.net/projects/amputator/ + _ampersand_re = re.compile(r'&(?!#?[xX]?(?:[0-9a-fA-F]+|\w+);)') + _naked_lt_re = re.compile(r'<(?![a-z/?\$!])', re.I) + _naked_gt_re = re.compile(r'''(?<![a-z?!/'"-])>''', re.I) + + def _encode_amps_and_angles(self, text): + # Smart processing for ampersands and angle brackets that need + # to be encoded. + text = self._ampersand_re.sub('&', text) + + # Encode naked <'s + text = self._naked_lt_re.sub('<', text) + + # Encode naked >'s + # Note: Other markdown implementations (e.g. Markdown.pl, PHP + # Markdown) don't do this. + text = self._naked_gt_re.sub('>', text) + return text + + def _encode_backslash_escapes(self, text): + for ch, escape in g_escape_table.items(): + text = text.replace("\\"+ch, escape) + return text + + _auto_link_re = re.compile(r'<((https?|ftp):[^\'">\s]+)>', re.I) + def _auto_link_sub(self, match): + g1 = match.group(1) + return '<a href="%s">%s</a>' % (g1, g1) + + _auto_email_link_re = re.compile(r""" + < + (?:mailto:)? 
+ ( + [-.\w]+ + \@ + [-\w]+(\.[-\w]+)*\.[a-z]+ + ) + > + """, re.I | re.X | re.U) + def _auto_email_link_sub(self, match): + return self._encode_email_address( + self._unescape_special_chars(match.group(1))) + + def _do_auto_links(self, text): + text = self._auto_link_re.sub(self._auto_link_sub, text) + text = self._auto_email_link_re.sub(self._auto_email_link_sub, text) + return text + + def _encode_email_address(self, addr): + # Input: an email address, e.g. "foo@example.com" + # + # Output: the email address as a mailto link, with each character + # of the address encoded as either a decimal or hex entity, in + # the hopes of foiling most address harvesting spam bots. E.g.: + # + # <a href="mailto:foo@e + # xample.com">foo + # @example.com</a> + # + # Based on a filter by Matthew Wickline, posted to the BBEdit-Talk + # mailing list: <http://tinyurl.com/yu7ue> + chars = [_xml_encode_email_char_at_random(ch) + for ch in "mailto:" + addr] + # Strip the mailto: from the visible part. + addr = '<a href="%s">%s</a>' \ + % (''.join(chars), ''.join(chars[7:])) + return addr + + def _do_link_patterns(self, text): + """Caveat emptor: there isn't much guarding against link + patterns being formed inside other standard Markdown links, e.g. + inside a [link def][like this]. + + Dev Notes: *Could* consider prefixing regexes with a negative + lookbehind assertion to attempt to guard against this. 
+ """ + link_from_hash = {} + for regex, repl in self.link_patterns: + replacements = [] + for match in regex.finditer(text): + if hasattr(repl, "__call__"): + href = repl(match) + else: + href = match.expand(repl) + replacements.append((match.span(), href)) + for (start, end), href in reversed(replacements): + escaped_href = ( + href.replace('"', '"') # b/c of attr quote + # To avoid markdown <em> and <strong>: + .replace('*', g_escape_table['*']) + .replace('_', g_escape_table['_'])) + link = '<a href="%s">%s</a>' % (escaped_href, text[start:end]) + hash = _hash_text(link) + link_from_hash[hash] = link + text = text[:start] + hash + text[end:] + for hash, link in link_from_hash.items(): + text = text.replace(hash, link) + return text + + def _unescape_special_chars(self, text): + # Swap back in all the special characters we've hidden. + for ch, hash in g_escape_table.items(): + text = text.replace(hash, ch) + return text + + def _outdent(self, text): + # Remove one level of line-leading tabs or spaces + return self._outdent_re.sub('', text) + + +class MarkdownWithExtras(Markdown): + """A markdowner class that enables most extras: + + - footnotes + - code-color (only has effect if 'pygments' Python module on path) + + These are not included: + - pyshell (specific to Python-related documenting) + - code-friendly (because it *disables* part of the syntax) + - link-patterns (because you need to specify some actual + link-patterns anyway) + """ + extras = ["footnotes", "code-color"] + + +#---- internal support functions + +# From http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52549 +def _curry(*args, **kwargs): + function, args = args[0], args[1:] + def result(*rest, **kwrest): + combined = kwargs.copy() + combined.update(kwrest) + return function(*args + rest, **combined) + return result + +# Recipe: regex_from_encoded_pattern (1.0) +def _regex_from_encoded_pattern(s): + """'foo' -> re.compile(re.escape('foo')) + '/foo/' -> re.compile('foo') + '/foo/i' -> 
re.compile('foo', re.I) + """ + if s.startswith('/') and s.rfind('/') != 0: + # Parse it: /PATTERN/FLAGS + idx = s.rfind('/') + pattern, flags_str = s[1:idx], s[idx+1:] + flag_from_char = { + "i": re.IGNORECASE, + "l": re.LOCALE, + "s": re.DOTALL, + "m": re.MULTILINE, + "u": re.UNICODE, + } + flags = 0 + for char in flags_str: + try: + flags |= flag_from_char[char] + except KeyError: + raise ValueError("unsupported regex flag: '%s' in '%s' " + "(must be one of '%s')" + % (char, s, ''.join(flag_from_char.keys()))) + return re.compile(s[1:idx], flags) + else: # not an encoded regex + return re.compile(re.escape(s)) + +# Recipe: dedent (0.1.2) +def _dedentlines(lines, tabsize=8, skip_first_line=False): + """_dedentlines(lines, tabsize=8, skip_first_line=False) -> dedented lines + + "lines" is a list of lines to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. + + Same as dedent() except operates on a sequence of lines. Note: the + lines list is modified **in-place**. 
+ """ + DEBUG = False + if DEBUG: + print "dedent: dedent(..., tabsize=%d, skip_first_line=%r)"\ + % (tabsize, skip_first_line) + indents = [] + margin = None + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + indent = 0 + for ch in line: + if ch == ' ': + indent += 1 + elif ch == '\t': + indent += tabsize - (indent % tabsize) + elif ch in '\r\n': + continue # skip all-whitespace lines + else: + break + else: + continue # skip all-whitespace lines + if DEBUG: print "dedent: indent=%d: %r" % (indent, line) + if margin is None: + margin = indent + else: + margin = min(margin, indent) + if DEBUG: print "dedent: margin=%r" % margin + + if margin is not None and margin > 0: + for i, line in enumerate(lines): + if i == 0 and skip_first_line: continue + removed = 0 + for j, ch in enumerate(line): + if ch == ' ': + removed += 1 + elif ch == '\t': + removed += tabsize - (removed % tabsize) + elif ch in '\r\n': + if DEBUG: print "dedent: %r: EOL -> strip up to EOL" % line + lines[i] = lines[i][j:] + break + else: + raise ValueError("unexpected non-whitespace char %r in " + "line %r while removing %d-space margin" + % (ch, line, margin)) + if DEBUG: + print "dedent: %r: %r -> removed %d/%d"\ + % (line, ch, removed, margin) + if removed == margin: + lines[i] = lines[i][j+1:] + break + elif removed > margin: + lines[i] = ' '*(removed-margin) + lines[i][j+1:] + break + else: + if removed: + lines[i] = lines[i][removed:] + return lines + +def _dedent(text, tabsize=8, skip_first_line=False): + """_dedent(text, tabsize=8, skip_first_line=False) -> dedented text + + "text" is the text to dedent. + "tabsize" is the tab width to use for indent width calculations. + "skip_first_line" is a boolean indicating if the first line should + be skipped for calculating the indent width and for dedenting. + This is sometimes useful for docstrings and similar. 
+ + textwrap.dedent(s), but don't expand tabs to spaces + """ + lines = text.splitlines(1) + _dedentlines(lines, tabsize=tabsize, skip_first_line=skip_first_line) + return ''.join(lines) + + +class _memoized(object): + """Decorator that caches a function's return value each time it is called. + If called later with the same arguments, the cached value is returned, and + not re-evaluated. + + http://wiki.python.org/moin/PythonDecoratorLibrary + """ + def __init__(self, func): + self.func = func + self.cache = {} + def __call__(self, *args): + try: + return self.cache[args] + except KeyError: + self.cache[args] = value = self.func(*args) + return value + except TypeError: + # uncachable -- for instance, passing a list as an argument. + # Better to not cache than to blow up entirely. + return self.func(*args) + def __repr__(self): + """Return the function's docstring.""" + return self.func.__doc__ + + +def _xml_oneliner_re_from_tab_width(tab_width): + """Standalone XML processing instruction regex.""" + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? # the beginning of the doc + ) + ( # save in $1 + [ ]{0,%d} + (?: + <\?\w+\b\s+.*?\?> # XML processing instruction + | + <\w+:\w+\b\s+.*?/> # namespaced single tag + ) + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_xml_oneliner_re_from_tab_width = _memoized(_xml_oneliner_re_from_tab_width) + +def _hr_tag_re_from_tab_width(tab_width): + return re.compile(r""" + (?: + (?<=\n\n) # Starting after a blank line + | # or + \A\n? # the beginning of the doc + ) + ( # save in \1 + [ ]{0,%d} + <(hr) # start tag = \2 + \b # word break + ([^<>])*? 
# + /?> # the matching end tag + [ \t]* + (?=\n{2,}|\Z) # followed by a blank line or end of document + ) + """ % (tab_width - 1), re.X) +_hr_tag_re_from_tab_width = _memoized(_hr_tag_re_from_tab_width) + + +def _xml_encode_email_char_at_random(ch): + r = random() + # Roughly 10% raw, 45% hex, 45% dec. + # '@' *must* be encoded. I [John Gruber] insist. + # Issue 26: '_' must be encoded. + if r > 0.9 and ch not in "@_": + return ch + elif r < 0.45: + # The [1:] is to drop leading '0': 0x63 -> x63 + return '&#%s;' % hex(ord(ch))[1:] + else: + return '&#%s;' % ord(ch) + + + +#---- mainline + +class _NoReflowFormatter(optparse.IndentedHelpFormatter): + """An optparse formatter that does NOT reflow the description.""" + def format_description(self, description): + return description or "" + +def _test(): + import doctest + doctest.testmod() + +def main(argv=None): + if argv is None: + argv = sys.argv + if not logging.root.handlers: + logging.basicConfig() + + usage = "usage: %prog [PATHS...]" + version = "%prog "+__version__ + parser = optparse.OptionParser(prog="markdown2", usage=usage, + version=version, description=cmdln_desc, + formatter=_NoReflowFormatter()) + parser.add_option("-v", "--verbose", dest="log_level", + action="store_const", const=logging.DEBUG, + help="more verbose output") + parser.add_option("--encoding", + help="specify encoding of text content") + parser.add_option("--html4tags", action="store_true", default=False, + help="use HTML 4 style for empty element tags") + parser.add_option("-s", "--safe", metavar="MODE", dest="safe_mode", + help="sanitize literal HTML: 'escape' escapes " + "HTML meta chars, 'replace' replaces with an " + "[HTML_REMOVED] note") + parser.add_option("-x", "--extras", action="append", + help="Turn on specific extra features (not part of " + "the core Markdown spec). 
Supported values: " + "'code-friendly' disables _/__ for emphasis; " + "'code-color' adds code-block syntax coloring; " + "'link-patterns' adds auto-linking based on patterns; " + "'footnotes' adds the footnotes syntax;" + "'xml' passes one-liner processing instructions and namespaced XML tags;" + "'pyshell' to put unindented Python interactive shell sessions in a <code> block.") + parser.add_option("--use-file-vars", + help="Look for and use Emacs-style 'markdown-extras' " + "file var to turn on extras. See " + "<http://code.google.com/p/python-markdown2/wiki/Extras>.") + parser.add_option("--link-patterns-file", + help="path to a link pattern file") + parser.add_option("--self-test", action="store_true", + help="run internal self-tests (some doctests)") + parser.add_option("--compare", action="store_true", + help="run against Markdown.pl as well (for testing)") + parser.set_defaults(log_level=logging.INFO, compare=False, + encoding="utf-8", safe_mode=None, use_file_vars=False) + opts, paths = parser.parse_args() + log.setLevel(opts.log_level) + + if opts.self_test: + return _test() + + if opts.extras: + extras = {} + for s in opts.extras: + splitter = re.compile("[,;: ]+") + for e in splitter.split(s): + if '=' in e: + ename, earg = e.split('=', 1) + try: + earg = int(earg) + except ValueError: + pass + else: + ename, earg = e, None + extras[ename] = earg + else: + extras = None + + if opts.link_patterns_file: + link_patterns = [] + f = open(opts.link_patterns_file) + try: + for i, line in enumerate(f.readlines()): + if not line.strip(): continue + if line.lstrip().startswith("#"): continue + try: + pat, href = line.rstrip().rsplit(None, 1) + except ValueError: + raise MarkdownError("%s:%d: invalid link pattern line: %r" + % (opts.link_patterns_file, i+1, line)) + link_patterns.append( + (_regex_from_encoded_pattern(pat), href)) + finally: + f.close() + else: + link_patterns = None + + from os.path import join, dirname, abspath, exists + markdown_pl = 
join(dirname(dirname(abspath(__file__))), "test", + "Markdown.pl") + for path in paths: + if opts.compare: + print "==== Markdown.pl ====" + perl_cmd = 'perl %s "%s"' % (markdown_pl, path) + o = os.popen(perl_cmd) + perl_html = o.read() + o.close() + sys.stdout.write(perl_html) + print "==== markdown2.py ====" + html = markdown_path(path, encoding=opts.encoding, + html4tags=opts.html4tags, + safe_mode=opts.safe_mode, + extras=extras, link_patterns=link_patterns, + use_file_vars=opts.use_file_vars) + sys.stdout.write( + html.encode(sys.stdout.encoding or "utf-8", 'xmlcharrefreplace')) + if opts.compare: + test_dir = join(dirname(dirname(abspath(__file__))), "test") + if exists(join(test_dir, "test_markdown2.py")): + sys.path.insert(0, test_dir) + from test_markdown2 import norm_html_from_html + norm_html = norm_html_from_html(html) + norm_perl_html = norm_html_from_html(perl_html) + else: + norm_html = html + norm_perl_html = perl_html + print "==== match? %r ====" % (norm_perl_html == norm_html) + + +if __name__ == "__main__": + sys.exit( main(sys.argv) ) + + ADDED gluon/contrib/markmin/__init__.py Index: gluon/contrib/markmin/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/markmin/__init__.py @@ -0,0 +1,2 @@ + + ADDED gluon/contrib/markmin/markmin.html Index: gluon/contrib/markmin/markmin.html ================================================================== --- /dev/null +++ gluon/contrib/markmin/markmin.html @@ -0,0 +1,35 @@ +<html><body><h1>Markmin markup language</h1><h2>About</h2><p>This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. 
It is implemented in the <code class="">markmin2html</code> function in the <code class="">markmin2html.py</code>.</p><p>Example of usage:</p><pre><code class="">>>> m = "Hello **world** [[link http://web2py.com]]" +>>> from markmin2html import markmin2html +>>> print markmin2html(m) +>>> from markmin2latex import markmin2latex +>>> print markmin2latex(m) +>>> from markmin2pdf import markmin2pdf # requires pdflatex +>>> print markmin2pdf(m)</code></pre><h2>Why?</h2><p>We wanted a markup language with the following requirements:</p><ul><li>less than 100 lines of functional code</li><li>easy to read</li><li>secure</li><li>support table, ul, ol, code</li><li>support html5 video and audio elements (html serialization only)</li><li>can align images and resize them</li><li>can specify class for tables and code elements</li><li>can add anchors</li><li>does not use _ for markup (since it creates odd behavior)</li><li>automatically links urls</li><li>fast</li><li>easy to extend</li><li>supports latex and pdf including references</li><li>allows to describe the markup in the markup (this document is generated from markmin syntax)</li></ul><p>(results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster)</p><p>The <a href="http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827">web2py book</a> published by lulu, for example, was entirely generated with markmin2pdf from the online <a href="http://www.web2py.com/book">web2py wiki</a></p><h2>Download</h2><ul><li>http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py</li><li>http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py</li><li>http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py</li></ul><p>markmin2html.py and markmin2latex.py are single files and have no web2py dependence. 
Their license is BSD.</p><h2>Examples</h2><h3>Bold, italic, code and links</h3><table class=""><tr><td><b>SOURCE</b> </td><td><b>OUTPUT</b></td></tr><tr><td><code class=""># title</code> </td><td><b>title</b></td></tr><tr><td><code class="">## section</code> </td><td><b>section</b></td></tr><tr><td><code class="">### subsection</code> </td><td><b>subsection</b></td></tr><tr><td><code class="">**bold**</code> </td><td><b>bold</b></td></tr><tr><td><code class="">''italic''</code> </td><td><i>italic</i></td></tr><tr><td><code class="">``verbatim``</code> </td><td><code class="">verbatim</code></td></tr><tr><td><code class="">http://google.com</code> </td><td>http://google.com</td></tr><tr><td><code class="">[[click me #myanchor]]</code></td><td><a href="#myanchor">click me</a></td></tr></table> +<h3>More on links</h3><p>The format is always <code class="">[[title link]]</code>. Notice you can nest bold, italic and code inside the link title.</p><h3>Anchors <span id="myanchor"><span></h3><p>You can place an anchor anywhere in the text using the syntax <code class="">[[name]]</code> where <i>name</i> is the name of the anchor. +You can then link the anchor with <a href="#myanchor">link</a>, i.e. <code class="">[[link #myanchor]]</code>.</p><h3>Images</h3><p><img src="http://www.web2py.com/examples/static/web2py_logo.png" alt="some image" align="right" width="200px" /> +This paragraph has an image aligned to the right with a width of 200px. 
It is placed using the code</p><p><code class="">[[some image http://www.web2py.com/examples/static/web2py_logo.png right 200px]]</code>.</p><h3>Unordered Lists</h3><pre><code class="">- Dog +- Cat +- Mouse</code></pre><p>is rendered as</p><ul><li>Dog</li><li>Cat</li><li>Mouse</li></ul><p>Two new lines between items break the list in two lists.</p><h3>Ordered Lists</h3><pre><code class="">+ Dog ++ Cat ++ Mouse</code></pre><p>is rendered as</p><ol><li>Dog</li><li>Cat</li><li>Mouse</li></ol><h3>Tables</h3><p>Something like this +<pre><code class="">--------- +**A** | **B** | **C** +0 | 0 | X +0 | X | 0 +X | 0 | 0 +-----:abc</code></pre> +is a table and is rendered as +<table class="abc"><tr><td><b>A</b></td><td><b>B</b></td><td><b>C</b></td></tr><tr><td>0</td><td>0</td><td>X</td></tr><tr><td>0</td><td>X</td><td>0</td></tr><tr><td>X</td><td>0</td><td>0</td></tr></table>Four or more dashes delimit the table and | separates the columns. +The <code class="">:abc</code> at the end sets the class for the table and it is optional.</p><h3>Blockquote</h3><p>A table with a single cell is rendered as a blockquote:</p><blockquote class="">Hello world</blockquote> +<h3>Code, <code class=""><code></code>, escaping and extra stuff</h3><pre><code class="python">def test(): + return "this is Python code"</code></pre><p>Optionally a ` inside a <code class="">``...``</code> block can be inserted escaped with !`!. +The <code class="">:python</code> after the markup is also optional. If present, by default, it is used to set the class of the <code> block. +The behavior can be overridden by passing an argument <code class="">extra</code> to the <code class="">render</code> function. 
For example:</p><pre><code class="python">>>> markmin2html("``aaa``:custom", + extra=dict(custom=lambda text: 'x'+text+'x'))</code></pre><p>generates</p><code class="python">'xaaax'</code><p>(the <code class="">``...``:custom</code> block is rendered by the <code class="">custom=lambda</code> function passed to <code class="">render</code>).</p><h3>Html5 support</h3><p>Markmin also supports the <video> and <audio> html5 tags using the notation: +<pre><code class="">[[title link video]] +[[title link audio]]</code></pre></p><h3>Latex</h3><p>Formulas can be embedded into HTML with <code class="">$</code><code class="">$</code>formula<code class="">$</code><code class="">$</code>. +You can use Google charts to render the formula:</p><pre><code class="">>>> LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>' +>>> markmin2html(text,{'latex':lambda code: LATEX % code.replace('"','"')})</code></pre><h3>Citations and References</h3><p>Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". 
Items like</p><pre><code class="">- [[key]] value</code></pre><p>in the References will be translated into Latex</p><pre><code class="">\bibitem{key} value</code></pre><p>Here is an example of usage:</p><pre><code class="">As shown in Ref.``mdipierro``:cite + +## References +- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com</code></pre><h3>Caveats</h3><p><code class=""><ul/></code>, <code class=""><ol/></code>, <code class=""><code/></code>, <code class=""><table/></code>, <code class=""><blockquote/></code>, <code class=""><h1/></code>, ..., <code class=""><h6/></code> do not have <code class=""><p>...</p></code> around them.</p></body></html> ADDED gluon/contrib/markmin/markmin.pdf Index: gluon/contrib/markmin/markmin.pdf ================================================================== --- /dev/null +++ gluon/contrib/markmin/markmin.pdf cannot compute difference between binary files ADDED gluon/contrib/markmin/markmin2html.py Index: gluon/contrib/markmin/markmin2html.py ================================================================== --- /dev/null +++ gluon/contrib/markmin/markmin2html.py @@ -0,0 +1,459 @@ +#!/usr/bin/env python +# created by Massimo Di Pierro +# license MIT/BSD/GPL +import re +import cgi + +__all__ = ['render', 'markmin2html'] + +__doc__ = """ +# Markmin markup language + +## About + +This is a new markup language that we call markmin designed to produce high quality scientific papers and books and also put them online. We provide serializers for html, latex and pdf. It is implemented in the ``markmin2html`` function in the ``markmin2html.py``. + +Example of usage: + +`` +>>> m = "Hello **world** [[link http://web2py.com]]" +>>> from markmin2html import markmin2html +>>> print markmin2html(m) +>>> from markmin2latex import markmin2latex +>>> print markmin2latex(m) +>>> from markmin2pdf import markmin2pdf # requires pdflatex +>>> print markmin2pdf(m) +`` + +## Why? 
+ +We wanted a markup language with the following requirements: +- less than 100 lines of functional code +- easy to read +- secure +- support table, ul, ol, code +- support html5 video and audio elements (html serialization only) +- can align images and resize them +- can specify class for tables and code elements +- can add anchors +- does not use _ for markup (since it creates odd behavior) +- automatically links urls +- fast +- easy to extend +- supports latex and pdf including references +- allows to describe the markup in the markup (this document is generated from markmin syntax) + +(results depend on text but in average for text ~100K markmin is 30% faster than markdown, for text ~10K it is 10x faster) + +The [[web2py book http://www.lulu.com/product/paperback/web2py-%283rd-edition%29/12822827]] published by lulu, for example, was entirely generated with markmin2pdf from the online [[web2py wiki http://www.web2py.com/book]] + +## Download + +- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2html.py +- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2latex.py +- http://web2py.googlecode.com/hg/gluon/contrib/markmin/markmin2pdf.py + +markmin2html.py and markmin2latex.py are single files and have no web2py dependence. Their license is BSD. + +## Examples + +### Bold, italic, code and links + +-------------------------------------------------- +**SOURCE** | **OUTPUT** +``# title`` | **title** +``## section`` | **section** +``### subsection`` | **subsection** +``**bold**`` | **bold** +``''italic''`` | ''italic'' +``!`!`verbatim`!`!`` | ``verbatim`` +``http://google.com`` | http://google.com +``[[click me #myanchor]]`` | [[click me #myanchor]] +--------------------------------------------------- + +### More on links + +The format is always ``[[title link]]``. Notice you can nest bold, italic and code inside the link title. 
+ +### Anchors [[myanchor]] + +You can place an anchor anywhere in the text using the syntax ``[[name]]`` where ''name'' is the name of the anchor. +You can then link the anchor with [[link #myanchor]], i.e. ``[[link #myanchor]]``. + +### Images + +[[some image http://www.web2py.com/examples/static/web2py_logo.png right 200px]] +This paragraph has an image aligned to the right with a width of 200px. Its is placed using the code + +``[[some image http://www.web2py.com/examples/static/web2py_logo.png right 200px]]``. + +### Unordered Lists + +`` +- Dog +- Cat +- Mouse +`` + +is rendered as +- Dog +- Cat +- Mouse + +Two new lines between items break the list in two lists. + +### Ordered Lists + +`` ++ Dog ++ Cat ++ Mouse +`` + +is rendered as ++ Dog ++ Cat ++ Mouse + + +### Tables + +Something like this +`` +--------- +**A** | **B** | **C** +0 | 0 | X +0 | X | 0 +X | 0 | 0 +-----:abc +`` +is a table and is rendered as +--------- +**A** | **B** | **C** +0 | 0 | X +0 | X | 0 +X | 0 | 0 +-----:abc +Four or more dashes delimit the table and | separates the columns. +The ``:abc`` at the end sets the class for the table and it is optional. + +### Blockquote + +A table with a single cell is rendered as a blockquote: + +----- +Hello world +----- + +### Code, ``<code>``, escaping and extra stuff + +`` +def test(): + return "this is Python code" +``:python + +Optionally a ` inside a ``!`!`...`!`!`` block can be inserted escaped with !`!. +The ``:python`` after the markup is also optional. If present, by default, it is used to set the class of the <code> block. +The behavior can be overridden by passing an argument ``extra`` to the ``render`` function. For example: + +`` +>>> markmin2html("!`!!`!aaa!`!!`!:custom", + extra=dict(custom=lambda text: 'x'+text+'x')) +``:python + +generates + +``'xaaax'``:python + +(the ``!`!`...`!`!:custom`` block is rendered by the ``custom=lambda`` function passed to ``render``). 
+ + +### Html5 support + +Markmin also supports the <video> and <audio> html5 tags using the notation: +`` +[[title link video]] +[[title link audio]] +`` + +### Latex and other extensions + +Formulas can be embedded into HTML with ``$````$``formula``$````$``. +You can use Google charts to render the formula: + +`` +>>> LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="ce\ +nter"/>' +>>> markmin2html(text,{'latex':lambda code: LATEX % code.replace('"','\"')}) +`` + +### Code with syntax highlighting + +This requires a syntax highlighting tool, such as the web2py CODE helper. + +`` +>>> extra={'code_cpp':lambda text: CODE(text,language='cpp').xml(), + 'code_java':lambda text: CODE(text,language='java').xml(), + 'code_python':lambda text: CODE(text,language='python').xml(), + 'code_html':lambda text: CODE(text,language='html').xml()} +>>> markmin2html(text,extra=extra) +`` + +Code can now be marked up as in this example: + +`` +!`!` +<html><body>example</body></html> +!`!`:code_html +`` + +### Citations and References + +Citations are treated as internal links in html and proper citations in latex if there is a final section called "References". Items like + +`` +- [[key]] value +`` + +in the References will be translated into Latex + +`` +\\bibitem{key} value +`` + +Here is an example of usage: + +`` +As shown in Ref.!`!`mdipierro`!`!:cite + +## References +- [[mdipierro]] web2py Manual, 3rd Edition, lulu.com +`` + +### Caveats +``<ul/>``, ``<ol/>``, ``<code/>``, ``<table/>``, ``<blockquote/>``, ``<h1/>``, ..., ``<h6/>`` do not have ``<p>...</p>`` around them. 
+ +""" + +META = 'META' +LATEX = '<img src="http://chart.apis.google.com/chart?cht=tx&chl=%s" align="center"/>' +regex_newlines = re.compile('(\n\r)|(\r\n)') +regex_dd=re.compile('\$\$(?P<latex>.*?)\$\$') +regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)',re.S) +regex_maps = [ + (re.compile('[ \t\r]+\n'),'\n'), + (re.compile('[ \t\r]+\n'),'\n'), + (re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'),'<b>\g<t></b>'), + (re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"),'<i>\g<t></i>'), + (re.compile('^#{6} (?P<t>[^\n]+)',re.M),'\n\n<<h6>\g<t></h6>\n'), + (re.compile('^#{5} (?P<t>[^\n]+)',re.M),'\n\n<<h5>\g<t></h5>\n'), + (re.compile('^#{4} (?P<t>[^\n]+)',re.M),'\n\n<<h4>\g<t></h4>\n'), + (re.compile('^#{3} (?P<t>[^\n]+)',re.M),'\n\n<<h3>\g<t></h3>\n'), + (re.compile('^#{2} (?P<t>[^\n]+)',re.M),'\n\n<<h2>\g<t></h2>\n'), + (re.compile('^#{1} (?P<t>[^\n]+)',re.M),'\n\n<<h1>\g<t></h1>\n'), + (re.compile('^\- +(?P<t>.*)',re.M),'<<ul><li>\g<t></li></ul>'), + (re.compile('^\+ +(?P<t>.*)',re.M),'<<ol><li>\g<t></li></ol>'), + (re.compile('</ol>\n<<ol>'),''), + (re.compile('</ul>\n<<ul>'),''), + (re.compile('<<'),'\n\n<<'), + (re.compile('\n\s+\n'),'\n\n')] +regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n',re.M|re.S) +regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]') +regex_image_width = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]') +regex_image = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]') +regex_video = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +video\]\]') +regex_audio = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +audio\]\]') +regex_link = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+)\]\]') +regex_link_popup = re.compile('\[\[(?P<t>.*?) 
+(?P<k>\S+) popup\]\]') +regex_link_no_anchor = re.compile('\[\[ +(?P<k>\S+)\]\]') +regex_auto = re.compile('(?<!["\w\>])(?P<k>\w+://[\w\.\-\+\?&%\/]+)',re.M) + +def render(text,extra={},allowed={},sep='p'): + """ + Arguments: + - text is the text to be processed + - extra is a dict like extra=dict(custom=lambda value: value) that process custom code + as in " ``this is custom code``:custom " + - allowed is a dictionary of list of allowed classes like + allowed = dict(code=('python','cpp','java')) + - sep can be 'p' to separate text in <p>...</p> + or can be 'br' to separate text using <br /> + + + >>> render('this is\\n# a section\\nparagraph') + '<p>this is</p><h1>a section</h1><p>paragraph</p>' + >>> render('this is\\n## a subsection\\nparagraph') + '<p>this is</p><h2>a subsection</h2><p>paragraph</p>' + >>> render('this is\\n### a subsubsection\\nparagraph') + '<p>this is</p><h3>a subsubsection</h3><p>paragraph</p>' + >>> render('**hello world**') + '<p><b>hello world</b></p>' + >>> render('``hello world``') + '<code class="">hello world</code>' + >>> render('``hello world``:python') + '<code class="python">hello world</code>' + >>> render('``\\nhello\\nworld\\n``:python') + '<pre><code class="python">hello\\nworld</code></pre>' + >>> render("''hello world''") + '<p><i>hello world</i></p>' + >>> render('** hello** **world**') + '<p>** hello** <b>world</b></p>' + + >>> render('- this\\n- is\\n- a list\\n\\nand this\\n- is\\n- another') + '<ul><li>this</li><li>is</li><li>a list</li></ul><p>and this</p><ul><li>is</li><li>another</li></ul>' + + >>> render('+ this\\n+ is\\n+ a list\\n\\nand this\\n+ is\\n+ another') + '<ol><li>this</li><li>is</li><li>a list</li></ol><p>and this</p><ol><li>is</li><li>another</li></ol>' + + >>> render("----\\na | b\\nc | d\\n----\\n") + '<table class=""><tr><td>a</td><td>b</td></tr><tr><td>c</td><td>d</td></tr></table>' + + >>> render("----\\nhello world\\n----\\n") + '<blockquote class="">hello world</blockquote>' + + >>> 
render('[[this is a link http://example.com]]') + '<p><a href="http://example.com">this is a link</a></p>' + + >>> render('[[this is an image http://example.com left]]') + '<p><img src="http://example.com" alt="this is an image" align="left" /></p>' + >>> render('[[this is an image http://example.com left 200px]]') + '<p><img src="http://example.com" alt="this is an image" align="left" width="200px" /></p>' + + >>> render('[[this is an image http://example.com video]]') + '<p><video src="http://example.com" controls></video></p>' + >>> render('[[this is an image http://example.com audio]]') + '<p><audio src="http://example.com" controls></audio></p>' + + >>> render('[[this is a **link** http://example.com]]') + '<p><a href="http://example.com">this is a <b>link</b></a></p>' + + >>> render("``aaa``:custom",extra=dict(custom=lambda text: 'x'+text+'x')) + 'xaaax' + >>> render(r"$$\int_a^b sin(x)dx$$") + '<code class="latex">\\\\int_a^b sin(x)dx</code>' + """ + text = str(text or '') + ############################################################# + # replace all blocks marked with ``...``:class with META + # store them into segments they will be treated as code + ############################################################# + segments, i = [], 0 + text = regex_dd.sub('``\g<latex>``:latex ',text) + text = regex_newlines.sub('\n',text) + while True: + item = regex_code.search(text,i) + if not item: break + if item.group()==META: + segments.append((None,None)) + text = text[:item.start()]+META+text[item.end():] + else: + c = item.group('c') or '' + if 'code' in allowed and not c in allowed['code']: c = '' + code = item.group('t').replace('!`!','`') + segments.append((code,c)) + text = text[:item.start()]+META+text[item.end():] + i=item.start()+3 + + ############################################################# + # do h1,h2,h3,h4,h5,h6,b,i,ol,ul and normalize spaces + ############################################################# + text = '\n'.join(t.strip() for t in 
text.split('\n')) + text = cgi.escape(text) + for regex, sub in regex_maps: + text = regex.sub(sub,text) + + ############################################################# + # process tables and blockquotes + ############################################################# + while True: + item = regex_table.search(text) + if not item: break + c = item.group('c') or '' + if 'table' in allowed and not c in allowed['table']: c = '' + content = item.group('t') + if ' | ' in content: + rows = content.replace('\n','</td></tr><tr><td>').replace(' | ','</td><td>') + text = text[:item.start()] + '<<table class="%s"><tr><td>'%c + rows + '</td></tr></table>' + text[item.end():] + else: + text = text[:item.start()] + '<<blockquote class="%s">'%c + content + '</blockquote>' + text[item.end():] + + ############################################################# + # deal with images, videos, audios and links + ############################################################# + + text = regex_anchor.sub('<span id="\g<t>"><span>', text) + text = regex_image_width.sub('<img src="\g<k>" alt="\g<t>" align="\g<p>" width="\g<w>" />', text) + text = regex_image.sub('<img src="\g<k>" alt="\g<t>" align="\g<p>" />', text) + text = regex_video.sub('<video src="\g<k>" controls></video>', text) + text = regex_audio.sub('<audio src="\g<k>" controls></audio>', text) + text = regex_link_popup.sub('<a href="\g<k>" target="_blank">\g<t></a>', text) + text = regex_link_no_anchor.sub('<a href="\g<k>">\g<k></a>', text) + text = regex_link.sub('<a href="\g<k>">\g<t></a>', text) + text = regex_auto.sub('<a href="\g<k>">\g<k></a>', text) + + ############################################################# + # deal with paragraphs (trick <<ul, <<ol, <<table, <<h1, etc) + # the << indicates that there should NOT be a new paragraph + # META indicates a code block therefore no new paragraph + ############################################################# + items = [item.strip() for item in text.split('\n\n')] + if 
sep=='p': + text = ''.join( + (p[:2]!='<<' and p!=META and '<p>%s</p>'%p or '%s'%p) \ + for p in items if p.strip()) + elif sep=='br': + text = '<br />'.join(items) + + ############################################################# + # finally get rid of << + ############################################################# + text=text.replace('<<','<') + + ############################################################# + # process all code text + ############################################################# + parts = text.split(META) + text = parts[0] + for i,(code,b) in enumerate(segments): + if code==None: + html = META + else: + if b in extra: + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + html = extra[b](code) + elif b=='cite': + html = '['+','.join('<a href="#%s" class="%s">%s</a>' \ + % (d,b,d) \ + for d in cgi.escape(code).split(','))+']' + elif b=='latex': + html = LATEX % code.replace('"','\"').replace('\n',' ') + elif code[:1]=='\n' or code[-1:]=='\n': + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + html = '<pre><code class="%s">%s</code></pre>' % (b,cgi.escape(code)) + else: + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + html = '<code class="%s">%s</code>' % (b,cgi.escape(code)) + text = text+html+parts[i+1] + return text + + +def markmin2html(text,extra={},allowed={},sep='p'): + return render(text,extra,allowed,sep) + +if __name__ == '__main__': + import sys + import doctest + if sys.argv[1:2]==['-h']: + print '<html><body>'+markmin2html(__doc__)+'</body></html>' + elif len(sys.argv)>1: + fargv = open(sys.argv[1],'r') + try: + print '<html><body>'+markmin2html(fargv.read())+'</body></html>' + finally: + fargv.close() + else: + doctest.testmod() + ADDED gluon/contrib/markmin/markmin2latex.py Index: gluon/contrib/markmin/markmin2latex.py ================================================================== --- /dev/null +++ gluon/contrib/markmin/markmin2latex.py @@ -0,0 +1,285 @@ 
+#!/usr/bin/env python +# created my Massimo Di Pierro +# license MIT/BSD/GPL +import re +import cgi +import sys +import doctest +from optparse import OptionParser + +__all__ = ['render','markmin2latex'] + +META = 'META' +regex_newlines = re.compile('(\n\r)|(\r\n)') +regex_dd=re.compile('\$\$(?P<latex>.*?)\$\$') +regex_code = re.compile('('+META+')|(``(?P<t>.*?)``(:(?P<c>\w+))?)',re.S) +regex_title = re.compile('^#{1} (?P<t>[^\n]+)',re.M) +regex_maps = [ + (re.compile('[ \t\r]+\n'),'\n'), + (re.compile('[ \t\r]+\n'),'\n'), + (re.compile('\*\*(?P<t>[^\s\*]+( +[^\s\*]+)*)\*\*'),'{\\\\bf \g<t>}'), + (re.compile("''(?P<t>[^\s']+( +[^\s']+)*)''"),'{\\it \g<t>}'), + (re.compile('^#{6} (?P<t>[^\n]+)',re.M),'\n\n{\\\\bf \g<t>}\n'), + (re.compile('^#{5} (?P<t>[^\n]+)',re.M),'\n\n{\\\\bf \g<t>}\n'), + (re.compile('^#{4} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsubsection{\g<t>}\n'), + (re.compile('^#{3} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsection{\g<t>}\n'), + (re.compile('^#{2} (?P<t>[^\n]+)',re.M),'\n\n\\\\goodbreak\\section{\g<t>}\n'), + (re.compile('^#{1} (?P<t>[^\n]+)',re.M),''), + (re.compile('^\- +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'), + (re.compile('^\+ +(?P<t>.*)',re.M),'\\\\begin{itemize}\n\\item \g<t>\n\\end{itemize}'), + (re.compile('\\\\end\{itemize\}\s+\\\\begin\{itemize\}'),'\n'), + (re.compile('\n\s+\n'),'\n\n')] +regex_table = re.compile('^\-{4,}\n(?P<t>.*?)\n\-{4,}(:(?P<c>\w+))?\n',re.M|re.S) + +regex_anchor = re.compile('\[\[(?P<t>\S+)\]\]') +regex_bibitem = re.compile('\-\s*\[\[(?P<t>\S+)\]\]') +regex_image_width = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center) +(?P<w>\d+px)\]\]') +regex_image = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +(?P<p>left|right|center)\]\]') +#regex_video = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +video\]\]') +#regex_audio = re.compile('\[\[(?P<t>.*?) +(?P<k>\S+) +audio\]\]') +regex_link = re.compile('\[\[(?P<t>.*?) 
+(?P<k>\S+)\]\]') +regex_auto = re.compile('(?<!["\w])(?P<k>\w+://[\w\.\-\?&%]+)',re.M) +regex_commas = re.compile('[ ]+(?P<t>[,;\.])') +regex_noindent = re.compile('\n\n(?P<t>[a-z])') +regex_quote_left = re.compile('"(?=\w)') +regex_quote_right = re.compile('(?=\w\.)"') + +def latex_escape(text,pound=True): + text=text.replace('\\','{\\textbackslash}') + for c in '^_&$%{}': text=text.replace(c,'\\'+c) + text=text.replace('\\{\\textbackslash\\}','{\\textbackslash}') + if pound: text=text.replace('#','\\#') + return text + +def render(text,extra={},allowed={},sep='p',image_mapper=lambda x:x): + ############################################################# + # replace all blocks marked with ``...``:class with META + # store them into segments they will be treated as code + ############################################################# + text = str(text or '') + segments, i = [], 0 + text = regex_dd.sub('``\g<latex>``:latex ',text) + text = regex_newlines.sub('\n',text) + while True: + item = regex_code.search(text,i) + if not item: break + if item.group()==META: + segments.append((None,None)) + text = text[:item.start()]+META+text[item.end():] + else: + c = item.group('c') or '' + if 'code' in allowed and not c in allowed['code']: c = '' + code = item.group('t').replace('!`!','`') + segments.append((code,c)) + text = text[:item.start()]+META+text[item.end():] + i=item.start()+3 + + + ############################################################# + # do h1,h2,h3,h4,h5,h6,b,i,ol,ul and normalize spaces + ############################################################# + + title = regex_title.search(text) + if not title: title='Title' + else: title=title.group('t') + + text = latex_escape(text,pound=False) + + texts = text.split('## References',1) + text = regex_anchor.sub('\\label{\g<t>}', texts[0]) + if len(texts)==2: + text += '\n\\begin{thebibliography}{999}\n' + text += regex_bibitem.sub('\n\\\\bibitem{\g<t>}', texts[1]) + text += '\n\\end{thebibliography}\n' + + text = 
'\n'.join(t.strip() for t in text.split('\n')) + for regex, sub in regex_maps: + text = regex.sub(sub,text) + text=text.replace('#','\\#') + text=text.replace('`',"'") + + ############################################################# + # process tables and blockquotes + ############################################################# + while True: + item = regex_table.search(text) + if not item: break + c = item.group('c') or '' + if 'table' in allowed and not c in allowed['table']: c = '' + content = item.group('t') + if ' | ' in content: + rows = content.replace('\n','\\\\\n').replace(' | ',' & ') + row0,row2 = rows.split('\\\\\n',1) + cols=row0.count(' & ')+1 + cal='{'+''.join('l' for j in range(cols))+'}' + tabular = '\\begin{center}\n{\\begin{tabular}'+cal+'\\hline\n' + row0+'\\\\ \\hline\n'+row2 + ' \\\\ \\hline\n\\end{tabular}}\n\\end{center}' + if row2.count('\n')>20: tabular='\\newpage\n'+tabular + text = text[:item.start()] + tabular + text[item.end():] + else: + text = text[:item.start()] + '\\begin{quote}' + content + '\\end{quote}' + text[item.end():] + + ############################################################# + # deal with images, videos, audios and links + ############################################################# + + def sub(x): + f=image_mapper(x.group('k')) + if not f: return None + return '\n\\begin{center}\\includegraphics[width=8cm]{%s}\\end{center}\n' % (f) + text = regex_image_width.sub(sub,text) + text = regex_image.sub(sub,text) + + text = regex_link.sub('{\\\\footnotesize\\href{\g<k>}{\g<t>}}', text) + text = regex_commas.sub('\g<t>',text) + text = regex_noindent.sub('\n\\\\noindent \g<t>',text) + + ### fix paths in images + regex=re.compile('\\\\_[\w_]*\.(eps|png|jpg|gif)') + while True: + match=regex.search(text) + if not match: break + text=text[:match.start()]+text[match.start()+1:] + text = regex_quote_left.sub('``',text) + text = regex_quote_right.sub("''",text) + + ############################################################# 
+ # process all code text + ############################################################# + parts = text.split(META) + text = parts[0] + authors = [] + for i,(code,b) in enumerate(segments): + if code==None: + html = META + else: + if b=='hidden': + html='' + elif b=='author': + author = latex_escape(code.strip()) + authors.append(author) + html='' + elif b=='inxx': + html='\inxx{%s}' % latex_escape(code) + elif b=='cite': + html='~\cite{%s}' % latex_escape(code.strip()) + elif b=='ref': + html='~\ref{%s}' % latex_escape(code.strip()) + elif b=='latex': + if '\n' in code: + html='\n\\begin{equation}\n%s\n\\end{equation}\n' % code.strip() + else: + html='$%s$' % code.strip() + elif b=='latex_eqnarray': + code=code.strip() + code='\\\\'.join(x.replace('=','&=&',1) for x in code.split('\\\\')) + html='\n\\begin{eqnarray}\n%s\n\\end{eqnarray}\n' % code + elif b.startswith('latex_'): + key=b[6:] + html='\\begin{%s}%s\\end{%s}' % (key,code,key) + elif b in extra: + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + html = extra[b](code) + elif code[:1]=='\n' or code[:-1]=='\n': + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + if code.startswith('<') or code.startswith('{{') or code.startswith('http'): + html = '\\begin{lstlisting}[keywords={}]\n%s\n\\end{lstlisting}' % code + else: + html = '\\begin{lstlisting}\n%s\n\\end{lstlisting}' % code + else: + if code[:1]=='\n': code=code[1:] + if code[-1:]=='\n': code=code[:-1] + html = '{\\ft %s}' % latex_escape(code) + try: + text = text+html+parts[i+1] + except: + text = text + '... WIKI PROCESSING ERROR ...' 
+ break + text = text.replace(' ~\\cite','~\\cite') + return text, title, authors + +WRAPPER = """ +\\documentclass[12pt]{article} +\\usepackage{hyperref} +\\usepackage{listings} +\\usepackage{upquote} +\\usepackage{color} +\\usepackage{graphicx} +\\usepackage{grffile} +\\usepackage[utf8x]{inputenc} +\\definecolor{lg}{rgb}{0.9,0.9,0.9} +\\definecolor{dg}{rgb}{0.3,0.3,0.3} +\\def\\ft{\\small\\tt} +\\lstset{ + basicstyle=\\footnotesize, + breaklines=true, basicstyle=\\ttfamily\\color{black}\\footnotesize, + keywordstyle=\\bf\\ttfamily, + commentstyle=\\it\\ttfamily, + stringstyle=\\color{dg}\\it\\ttfamily, + numbers=left, numberstyle=\\color{dg}\\tiny, stepnumber=1, numbersep=5pt, + backgroundcolor=\\color{lg}, tabsize=4, showspaces=false, + showstringspaces=false +} +\\title{%(title)s} +\\author{%(author)s} +\\begin{document} +\\maketitle +\\tableofcontents +\\newpage +%(body)s +\\end{document} +""" + +def markmin2latex(data, image_mapper=lambda x:x, extra={}, + wrapper=WRAPPER): + body, title, authors = render(data, extra=extra, image_mapper=image_mapper) + author = '\n\\and\n'.join(a.replace('\n','\\\\\n\\footnotesize ') for a in authors) + return wrapper % dict(title=title, author=author, body=body) + +if __name__ == '__main__': + parser = OptionParser() + parser.add_option("-i", "--info", dest="info", + help="markmin help") + parser.add_option("-t", "--test", dest="test", action="store_true", + default=False) + parser.add_option("-n", "--no_wrapper", dest="no_wrapper", + action="store_true",default=False) + parser.add_option("-1", "--one", dest="one",action="store_true", + default=False,help="switch section for chapter") + parser.add_option("-w", "--wrapper", dest="wrapper", default=False, + help="latex file containing header and footer") + + (options, args) = parser.parse_args() + if options.info: + import markmin2html + markmin2latex(markmin2html.__doc__) + elif options.test: + doctest.testmod() + else: + if options.wrapper: + fwrapper = 
open(options.wrapper,'rb') + try: + wrapper = fwrapper.read() + finally: + fwrapper.close() + elif options.no_wrapper: + wrapper = '%(body)s' + else: + wrapper = WRAPPER + for f in args: + fargs = open(f,'r') + content_data = [] + try: + content_data.append(fargs.read()) + finally: + fargs.close() + content = '\n'.join(content_data) + output= markmin2latex(content,wrapper=wrapper) + if options.one: + output=output.replace(r'\section*{',r'\chapter*{') + output=output.replace(r'\section{',r'\chapter{') + output=output.replace(r'subsection{',r'section{') + print output + + ADDED gluon/contrib/markmin/markmin2pdf.py Index: gluon/contrib/markmin/markmin2pdf.py ================================================================== --- /dev/null +++ gluon/contrib/markmin/markmin2pdf.py @@ -0,0 +1,130 @@ +""" +Created by Massimo Di Pierro +Licese BSD +""" + +import subprocess +import os +import os.path +import re +import sys +from tempfile import mkstemp, mkdtemp, NamedTemporaryFile +from markmin2latex import markmin2latex + +__all__ = ['markmin2pdf'] + +def removeall(path): + + ERROR_STR= """Error removing %(path)s, %(error)s """ + def rmgeneric(path, __func__): + try: + __func__(path) + except OSError, (errno, strerror): + print ERROR_STR % {'path' : path, 'error': strerror } + + files=[path] + + while files: + file=files[0] + if os.path.isfile(file): + f=os.remove + rmgeneric(file, os.remove) + del files[0] + elif os.path.isdir(file): + nested = os.listdir(file) + if not nested: + rmgeneric(file, os.rmdir) + del files[0] + else: + files = [os.path.join(file,x) for x in nested] + files + + +def latex2pdf(latex, pdflatex='pdflatex', passes=3): + """ + calls pdflatex in a tempfolder + + Arguments: + + - pdflatex: path to the pdflatex command. Default is just 'pdflatex'. + - passes: defines how often pdflates should be run in the texfile. 
+ """ + + pdflatex=pdflatex + passes=passes + warnings=[] + + # setup the envoriment + tmpdir = mkdtemp() + texfile = open(tmpdir+'/test.tex','wb') + texfile.write(latex) + texfile.seek(0) + texfile.close() + texfile = os.path.abspath(texfile.name) + + # start doing some work + for i in range(0, passes): + logfd,logname = mkstemp() + outfile=os.fdopen(logfd) + try: + ret = subprocess.call([pdflatex, + '-interaction=nonstopmode', + '-output-format', 'pdf', + '-output-directory', tmpdir, + texfile], + cwd=os.path.dirname(texfile), stdout=outfile, + stderr=subprocess.PIPE) + finally: + outfile.close() + re_errors=re.compile('^\!(.*)$',re.M) + re_warnings=re.compile('^LaTeX Warning\:(.*)$',re.M) + flog = open(logname) + try: + loglines = flog.read() + finally: + flog.close() + errors=re_errors.findall(loglines) + warnings=re_warnings.findall(loglines) + os.unlink(logname) + + pdffile=texfile.rsplit('.',1)[0]+'.pdf' + if os.path.isfile(pdffile): + fpdf = open(pdffile, 'rb') + try: + data = fpdf.read() + finally: + fpdf.close() + else: + data = None + removeall(tmpdir) + return data, warnings, errors + + +def markmin2pdf(text, image_mapper=lambda x: None, extra={}): + return latex2pdf(markmin2latex(text,image_mapper=image_mapper, extra=extra)) + + +if __name__ == '__main__': + import sys + import doctest + import markmin2html + if sys.argv[1:2]==['-h']: + data, warnings, errors = markmin2pdf(markmin2html.__doc__) + if errors: + print 'ERRORS:'+'\n'.join(errors) + print 'WARNGINS:'+'\n'.join(warnings) + else: + print data + elif len(sys.argv)>1: + fargv = open(sys.argv[1],'rb') + try: + data, warnings, errors = markmin2pdf(fargv.read()) + finally: + fargv.close() + if errors: + print 'ERRORS:'+'\n'.join(errors) + print 'WARNGINS:'+'\n'.join(warnings) + else: + print data + else: + doctest.testmod() + ADDED gluon/contrib/memdb.py Index: gluon/contrib/memdb.py ================================================================== --- /dev/null +++ gluon/contrib/memdb.py @@ -0,0 
+1,908 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of web2py Web Framework (Copyrighted, 2007-2009). +Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu> and +Robin B <robi123@gmail.com>. +License: GPL v2 +""" + +__all__ = ['MEMDB', 'Field'] + +import re +import sys +import os +import types +import datetime +import thread +import cStringIO +import csv +import copy +import gluon.validators as validators +from gluon.storage import Storage +import random + +SQL_DIALECTS = {'memcache': { + 'boolean': bool, + 'string': unicode, + 'text': unicode, + 'password': unicode, + 'blob': unicode, + 'upload': unicode, + 'integer': long, + 'double': float, + 'date': datetime.date, + 'time': datetime.time, + 'datetime': datetime.datetime, + 'id': int, + 'reference': int, + 'lower': None, + 'upper': None, + 'is null': 'IS NULL', + 'is not null': 'IS NOT NULL', + 'extract': None, + 'left join': None, + }} + + +def cleanup(text): + if re.compile('[^0-9a-zA-Z_]').findall(text): + raise SyntaxError, \ + 'Can\'t cleanup \'%s\': only [0-9a-zA-Z_] allowed in table and field names' % text + return text + + +def assert_filter_fields(*fields): + for field in fields: + if isinstance(field, (Field, Expression)) and field.type\ + in ['text', 'blob']: + raise SyntaxError, 'AppEngine does not index by: %s'\ + % field.type + + +def dateobj_to_datetime(object): + + # convert dates,times to datetimes for AppEngine + + if isinstance(object, datetime.date): + object = datetime.datetime(object.year, object.month, + object.day) + if isinstance(object, datetime.time): + object = datetime.datetime( + 1970, + 1, + 1, + object.hour, + object.minute, + object.second, + object.microsecond, + ) + return object + + +def sqlhtml_validators(field_type, length): + v = { + 'boolean': [], + 'string': validators.IS_LENGTH(length), + 'text': [], + 'password': validators.IS_LENGTH(length), + 'blob': [], + 'upload': [], + 'double': validators.IS_FLOAT_IN_RANGE(-1e100, 1e100), + 
'integer': validators.IS_INT_IN_RANGE(-1e100, 1e100), + 'date': validators.IS_DATE(), + 'time': validators.IS_TIME(), + 'datetime': validators.IS_DATETIME(), + 'reference': validators.IS_INT_IN_RANGE(0, 1e100), + } + try: + return v[field_type[:9]] + except KeyError: + return [] + + +class DALStorage(dict): + + """ + a dictionary that let you do d['a'] as well as d.a + """ + + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key in self: + raise SyntaxError, 'Object \'%s\'exists and cannot be redefined' % key + self[key] = value + + def __repr__(self): + return '<DALStorage ' + dict.__repr__(self) + '>' + + +class SQLCallableList(list): + + def __call__(self): + return copy.copy(self) + + +class MEMDB(DALStorage): + + """ + an instance of this class represents a database connection + + Example:: + + db=MEMDB(Client()) + db.define_table('tablename',Field('fieldname1'), + Field('fieldname2')) + """ + + def __init__(self, client): + self._dbname = 'memdb' + self['_lastsql'] = '' + self.tables = SQLCallableList() + self._translator = SQL_DIALECTS['memcache'] + self.client = client + + def define_table( + self, + tablename, + *fields, + **args + ): + tablename = cleanup(tablename) + if tablename in dir(self) or tablename[0] == '_': + raise SyntaxError, 'invalid table name: %s' % tablename + if not tablename in self.tables: + self.tables.append(tablename) + else: + raise SyntaxError, 'table already defined: %s' % tablename + t = self[tablename] = Table(self, tablename, *fields) + t._create() + return t + + def __call__(self, where=''): + return Set(self, where) + + +class SQLALL(object): + + def __init__(self, table): + self.table = table + + +class Table(DALStorage): + + """ + an instance of this class represents a database table + + Example:: + + db=MEMDB(Client()) + db.define_table('users',Field('name')) + db.users.insert(name='me') + """ + + def __init__( + self, + db, + tablename, + *fields + ): + self._db = db + 
self._tablename = tablename + self.fields = SQLCallableList() + self._referenced_by = [] + fields = list(fields) + fields.insert(0, Field('id', 'id')) + for field in fields: + self.fields.append(field.name) + self[field.name] = field + field._tablename = self._tablename + field._table = self + field._db = self._db + self.ALL = SQLALL(self) + + def _create(self): + fields = [] + myfields = {} + for k in self.fields: + field = self[k] + attr = {} + if not field.type[:9] in ['id', 'reference']: + if field.notnull: + attr = dict(required=True) + if field.type[:2] == 'id': + continue + if field.type[:9] == 'reference': + referenced = field.type[10:].strip() + if not referenced: + raise SyntaxError, \ + 'Table %s: reference \'%s\' to nothing!' % (self._tablename, k) + if not referenced in self._db: + raise SyntaxError, \ + 'Table: table %s does not exist' % referenced + referee = self._db[referenced] + ftype = \ + self._db._translator[field.type[:9]]( + self._db[referenced]._tableobj) + if self._tablename in referee.fields: # ## THIS IS OK + raise SyntaxError, \ + 'Field: table \'%s\' has same name as a field ' \ + 'in referenced table \'%s\'' % (self._tablename, referenced) + self._db[referenced]._referenced_by.append((self._tablename, + field.name)) + elif not field.type in self._db._translator\ + or not self._db._translator[field.type]: + raise SyntaxError, 'Field: unkown field type %s' % field.type + self._tableobj = self._db.client + return None + + def create(self): + + # nothing to do, here for backward compatility + + pass + + def drop(self): + + # nothing to do, here for backward compatibility + + self._db(self.id > 0).delete() + + def insert(self, **fields): + id = self._create_id() + if self.update(id, **fields): + return long(id) + else: + return None + + def get(self, id): + val = self._tableobj.get(self._id_to_key(id)) + if val: + return Storage(val) + else: + return None + + def update(self, id, **fields): + for field in fields: + if not field in fields 
and self[field].default\ + != None: + fields[field] = self[field].default + if field in fields: + fields[field] = obj_represent(fields[field], + self[field].type, self._db) + return self._tableobj.set(self._id_to_key(id), fields) + + def delete(self, id): + return self._tableobj.delete(self._id_to_key(id)) + + def _shard_key(self, shard): + return self._id_to_key('s/%s' % shard) + + def _id_to_key(self, id): + return '__memdb__/t/%s/k/%s' % (self._tablename, str(id)) + + def _create_id(self): + shard = random.randint(10, 99) + shard_id = self._shard_key(shard) + id = self._tableobj.incr(shard_id) + if not id: + if self._tableobj.set(shard_id, '0'): + id = 0 + else: + raise Exception, 'cannot set memcache' + return long(str(shard) + str(id)) + + def __str__(self): + return self._tablename + + +class Expression(object): + + def __init__( + self, + name, + type='string', + db=None, + ): + (self.name, self.type, self._db) = (name, type, db) + + def __str__(self): + return self.name + + def __or__(self, other): # for use in sortby + assert_filter_fields(self, other) + return Expression(self.name + '|' + other.name, None, None) + + def __invert__(self): + assert_filter_fields(self) + return Expression('-' + self.name, self.type, None) + + # for use in Query + + def __eq__(self, value): + return Query(self, '=', value) + + def __ne__(self, value): + return Query(self, '!=', value) + + def __lt__(self, value): + return Query(self, '<', value) + + def __le__(self, value): + return Query(self, '<=', value) + + def __gt__(self, value): + return Query(self, '>', value) + + def __ge__(self, value): + return Query(self, '>=', value) + + # def like(self,value): return Query(self,' LIKE ',value) + # def belongs(self,value): return Query(self,' IN ',value) + # for use in both Query and sortby + + def __add__(self, other): + return Expression('%s+%s' % (self, other), 'float', None) + + def __sub__(self, other): + return Expression('%s-%s' % (self, other), 'float', None) + + def 
__mul__(self, other): + return Expression('%s*%s' % (self, other), 'float', None) + + def __div__(self, other): + return Expression('%s/%s' % (self, other), 'float', None) + + +class Field(Expression): + + """ + an instance of this class represents a database field + + example:: + + a = Field(name, 'string', length=32, required=False, + default=None, requires=IS_NOT_EMPTY(), notnull=False, + unique=False, uploadfield=True) + + to be used as argument of GQLDB.define_table + + allowed field types: + string, boolean, integer, double, text, blob, + date, time, datetime, upload, password + + strings must have a length or 512 by default. + fields should have a default or they will be required in SQLFORMs + the requires argument are used to validate the field input in SQLFORMs + + """ + + def __init__( + self, + fieldname, + type='string', + length=None, + default=None, + required=False, + requires=sqlhtml_validators, + ondelete='CASCADE', + notnull=False, + unique=False, + uploadfield=True, + ): + + self.name = cleanup(fieldname) + if fieldname in dir(Table) or fieldname[0] == '_': + raise SyntaxError, 'Field: invalid field name: %s' % fieldname + if isinstance(type, Table): + type = 'reference ' + type._tablename + if not length: + length = 512 + self.type = type # 'string', 'integer' + self.length = length # the length of the string + self.default = default # default value for field + self.required = required # is this field required + self.ondelete = ondelete.upper() # this is for reference fields only + self.notnull = notnull + self.unique = unique + self.uploadfield = uploadfield + if requires == sqlhtml_validators: + requires = sqlhtml_validators(type, length) + elif requires is None: + requires = [] + self.requires = requires # list of validators + + def formatter(self, value): + if value is None or not self.requires: + return value + if not isinstance(self.requires, (list, tuple)): + requires = [self.requires] + else: + requires = copy.copy(self.requires) + 
requires.reverse() + for item in requires: + if hasattr(item, 'formatter'): + value = item.formatter(value) + return value + + def __str__(self): + return '%s.%s' % (self._tablename, self.name) + + +MEMDB.Field = Field # ## required by gluon/globals.py session.connect + + +def obj_represent(object, fieldtype, db): + if object != None: + if fieldtype == 'date' and not isinstance(object, + datetime.date): + (y, m, d) = [int(x) for x in str(object).strip().split('-')] + object = datetime.date(y, m, d) + elif fieldtype == 'time' and not isinstance(object, datetime.time): + time_items = [int(x) for x in str(object).strip().split(':')[:3]] + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + object = datetime.time(h, mi, s) + elif fieldtype == 'datetime' and not isinstance(object, + datetime.datetime): + (y, m, d) = [int(x) for x in + str(object)[:10].strip().split('-')] + time_items = [int(x) for x in + str(object)[11:].strip().split(':')[:3]] + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + object = datetime.datetime( + y, + m, + d, + h, + mi, + s, + ) + elif fieldtype == 'integer' and not isinstance(object, long): + object = long(object) + + return object + + +class QueryException: + + def __init__(self, **a): + self.__dict__ = a + + +class Query(object): + + """ + A query object necessary to define a set. 
+ It can be stored or can be passed to GQLDB.__call__() to obtain a Set + + Example: + query=db.users.name=='Max' + set=db(query) + records=set.select() + """ + + def __init__( + self, + left, + op=None, + right=None, + ): + if isinstance(right, (Field, Expression)): + raise SyntaxError, \ + 'Query: right side of filter must be a value or entity' + if isinstance(left, Field) and left.name == 'id': + if op == '=': + self.get_one = \ + QueryException(tablename=left._tablename, + id=long(right)) + return + else: + raise SyntaxError, 'only equality by id is supported' + raise SyntaxError, 'not supported' + + def __str__(self): + return str(self.left) + + +class Set(object): + + """ + As Set represents a set of records in the database, + the records are identified by the where=Query(...) object. + normally the Set is generated by GQLDB.__call__(Query(...)) + + given a set, for example + set=db(db.users.name=='Max') + you can: + set.update(db.users.name='Massimo') + set.delete() # all elements in the set + set.select(orderby=db.users.id,groupby=db.users.name,limitby=(0,10)) + and take subsets: + subset=set(db.users.id<5) + """ + + def __init__(self, db, where=None): + self._db = db + self._tables = [] + self.filters = [] + if hasattr(where, 'get_all'): + self.where = where + self._tables.insert(0, where.get_all) + elif hasattr(where, 'get_one') and isinstance(where.get_one, + QueryException): + self.where = where.get_one + else: + + # find out which tables are involved + + if isinstance(where, Query): + self.filters = where.left + self.where = where + self._tables = [field._tablename for (field, op, val) in + self.filters] + + def __call__(self, where): + if isinstance(self.where, QueryException) or isinstance(where, + QueryException): + raise SyntaxError, \ + 'neither self.where nor where can be a QueryException instance' + if self.where: + return Set(self._db, self.where & where) + else: + return Set(self._db, where) + + def _get_table_or_raise(self): + tablenames = 
list(set(self._tables)) # unique + if len(tablenames) < 1: + raise SyntaxError, 'Set: no tables selected' + if len(tablenames) > 1: + raise SyntaxError, 'Set: no join in appengine' + return self._db[tablenames[0]]._tableobj + + def _getitem_exception(self): + (tablename, id) = (self.where.tablename, self.where.id) + fields = self._db[tablename].fields + self.colnames = ['%s.%s' % (tablename, t) for t in fields] + item = self._db[tablename].get(id) + return (item, fields, tablename, id) + + def _select_except(self): + (item, fields, tablename, id) = self._getitem_exception() + if not item: + return [] + new_item = [] + for t in fields: + if t == 'id': + new_item.append(long(id)) + else: + new_item.append(getattr(item, t)) + r = [new_item] + return Rows(self._db, r, *self.colnames) + + def select(self, *fields, **attributes): + """ + Always returns a Rows object, even if it may be empty + """ + + if isinstance(self.where, QueryException): + return self._select_except() + else: + raise SyntaxError, 'select arguments not supported' + + def count(self): + return len(self.select()) + + def delete(self): + if isinstance(self.where, QueryException): + (item, fields, tablename, id) = self._getitem_exception() + if not item: + return + self._db[tablename].delete(id) + else: + raise Exception, 'deletion not implemented' + + def update(self, **update_fields): + if isinstance(self.where, QueryException): + (item, fields, tablename, id) = self._getitem_exception() + if not item: + return + for (key, value) in update_fields.items(): + setattr(item, key, value) + self._db[tablename].update(id, **item) + else: + raise Exception, 'update not implemented' + + +def update_record( + t, + s, + id, + a, + ): + item = s.get(id) + for (key, value) in a.items(): + t[key] = value + setattr(item, key, value) + s.update(id, **item) + + +class Rows(object): + + """ + A wrapper for the return value of a select. It basically represents a table. 
+ It has an iterator and each row is represented as a dictionary. + """ + + # ## this class still needs some work to care for ID/OID + + def __init__( + self, + db, + response, + *colnames + ): + self._db = db + self.colnames = colnames + self.response = response + + def __len__(self): + return len(self.response) + + def __getitem__(self, i): + if i >= len(self.response) or i < 0: + raise SyntaxError, 'Rows: no such row: %i' % i + if len(self.response[0]) != len(self.colnames): + raise SyntaxError, 'Rows: internal error' + row = DALStorage() + for j in xrange(len(self.colnames)): + value = self.response[i][j] + if isinstance(value, unicode): + value = value.encode('utf-8') + packed = self.colnames[j].split('.') + try: + (tablename, fieldname) = packed + except: + if not '_extra' in row: + row['_extra'] = DALStorage() + row['_extra'][self.colnames[j]] = value + continue + table = self._db[tablename] + field = table[fieldname] + if not tablename in row: + row[tablename] = DALStorage() + if field.type[:9] == 'reference': + referee = field.type[10:].strip() + rid = value + row[tablename][fieldname] = rid + elif field.type == 'boolean' and value != None: + + # row[tablename][fieldname]=Set(self._db[referee].id==rid) + + if value == True or value == 'T': + row[tablename][fieldname] = True + else: + row[tablename][fieldname] = False + elif field.type == 'date' and value != None\ + and not isinstance(value, datetime.date): + (y, m, d) = [int(x) for x in + str(value).strip().split('-')] + row[tablename][fieldname] = datetime.date(y, m, d) + elif field.type == 'time' and value != None\ + and not isinstance(value, datetime.time): + time_items = [int(x) for x in + str(value).strip().split(':')[:3]] + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + row[tablename][fieldname] = datetime.time(h, mi, s) + elif field.type == 'datetime' and value != None\ + and not isinstance(value, datetime.datetime): + (y, m, d) = [int(x) for x in + 
str(value)[:10].strip().split('-')] + time_items = [int(x) for x in + str(value)[11:].strip().split(':')[:3]] + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + row[tablename][fieldname] = datetime.datetime( + y, + m, + d, + h, + mi, + s, + ) + else: + row[tablename][fieldname] = value + if fieldname == 'id': + id = row[tablename].id + row[tablename].update_record = lambda t = row[tablename], \ + s = self._db[tablename], id = id, **a: update_record(t, + s, id, a) + for (referee_table, referee_name) in \ + table._referenced_by: + s = self._db[referee_table][referee_name] + row[tablename][referee_table] = Set(self._db, s + == id) + if len(row.keys()) == 1: + return row[row.keys()[0]] + return row + + def __iter__(self): + """ + iterator over records + """ + + for i in xrange(len(self)): + yield self[i] + + def __str__(self): + """ + serializes the table into a csv file + """ + + s = cStringIO.StringIO() + writer = csv.writer(s) + writer.writerow(self.colnames) + c = len(self.colnames) + for i in xrange(len(self)): + row = [self.response[i][j] for j in xrange(c)] + for k in xrange(c): + if isinstance(row[k], unicode): + row[k] = row[k].encode('utf-8') + writer.writerow(row) + return s.getvalue() + + def xml(self): + """ + serializes the table using sqlhtml.SQLTABLE (if present) + """ + + return sqlhtml.SQLTABLE(self).xml() + + +def test_all(): + """ + How to run from web2py dir: + export PYTHONPATH=.:YOUR_PLATFORMS_APPENGINE_PATH + python gluon/contrib/memdb.py + + Setup the UTC timezone and database stubs + + >>> import os + >>> os.environ['TZ'] = 'UTC' + >>> import time + >>> if hasattr(time, 'tzset'): + ... 
time.tzset() + >>> + >>> from google.appengine.api import apiproxy_stub_map + >>> from google.appengine.api.memcache import memcache_stub + >>> apiproxy_stub_map.apiproxy = apiproxy_stub_map.APIProxyStubMap() + >>> apiproxy_stub_map.apiproxy.RegisterStub('memcache', memcache_stub.MemcacheServiceStub()) + + Create a table with all possible field types + >>> from google.appengine.api.memcache import Client + >>> db=MEMDB(Client()) + >>> tmp=db.define_table('users', Field('stringf','string',length=32,required=True), Field('booleanf','boolean',default=False), Field('passwordf','password',notnull=True), Field('blobf','blob'), Field('uploadf','upload'), Field('integerf','integer',unique=True), Field('doublef','double',unique=True,notnull=True), Field('datef','date',default=datetime.date.today()), Field('timef','time'), Field('datetimef','datetime'), migrate='test_user.table') + + Insert a field + + >>> user_id = db.users.insert(stringf='a',booleanf=True,passwordf='p',blobf='0A', uploadf=None, integerf=5,doublef=3.14, datef=datetime.date(2001,1,1), timef=datetime.time(12,30,15), datetimef=datetime.datetime(2002,2,2,12,30,15)) + >>> user_id != None + True + + Select all + + # >>> all = db().select(db.users.ALL) + + Drop the table + + # >>> db.users.drop() + + Select many entities + + >>> tmp = db.define_table(\"posts\", Field('body','text'), Field('total','integer'), Field('created_at','datetime')) + >>> many = 20 #2010 # more than 1000 single fetch limit (it can be slow) + >>> few = 5 + >>> most = many - few + >>> 0 < few < most < many + True + >>> for i in range(many): + ... f=db.posts.insert(body='', total=i,created_at=datetime.datetime(2008, 7, 6, 14, 15, 42, i)) + >>> + + # test timezones + >>> class TZOffset(datetime.tzinfo): + ... def __init__(self,offset=0): + ... self.offset = offset + ... def utcoffset(self, dt): return datetime.timedelta(hours=self.offset) + ... def dst(self, dt): return datetime.timedelta(0) + ... 
def tzname(self, dt): return 'UTC' + str(self.offset) + ... + >>> SERVER_OFFSET = -8 + >>> + >>> stamp = datetime.datetime(2008, 7, 6, 14, 15, 42, 828201) + >>> post_id = db.posts.insert(created_at=stamp,body='body1') + >>> naive_stamp = db(db.posts.id==post_id).select()[0].created_at + >>> utc_stamp=naive_stamp.replace(tzinfo=TZOffset()) + >>> server_stamp = utc_stamp.astimezone(TZOffset(SERVER_OFFSET)) + >>> stamp == naive_stamp + True + >>> utc_stamp == server_stamp + True + >>> rows = db(db.posts.id==post_id).select() + >>> len(rows) == 1 + True + >>> rows[0].body == 'body1' + True + >>> db(db.posts.id==post_id).delete() + >>> rows = db(db.posts.id==post_id).select() + >>> len(rows) == 0 + True + + >>> id = db.posts.insert(total='0') # coerce str to integer + >>> rows = db(db.posts.id==id).select() + >>> len(rows) == 1 + True + >>> rows[0].total == 0 + True + + Examples of insert, select, update, delete + + >>> tmp=db.define_table('person', Field('name'), Field('birth','date'), migrate='test_person.table') + >>> marco_id=db.person.insert(name=\"Marco\",birth='2005-06-22') + >>> person_id=db.person.insert(name=\"Massimo\",birth='1971-12-21') + >>> me=db(db.person.id==person_id).select()[0] # test select + >>> me.name + 'Massimo' + >>> db(db.person.id==person_id).update(name='massimo') # test update + >>> me = db(db.person.id==person_id).select()[0] + >>> me.name + 'massimo' + >>> str(me.birth) + '1971-12-21' + + # resave date to ensure it comes back the same + >>> me=db(db.person.id==person_id).update(birth=me.birth) # test update + >>> me = db(db.person.id==person_id).select()[0] + >>> me.birth + datetime.date(1971, 12, 21) + >>> db(db.person.id==marco_id).delete() # test delete + >>> len(db(db.person.id==marco_id).select()) + 0 + + Update a single record + + >>> me.update_record(name=\"Max\") + >>> me.name + 'Max' + >>> me = db(db.person.id == person_id).select()[0] + >>> me.name + 'Max' + + """ + +SQLField = Field +SQLTable = Table +SQLXorable = Expression 
+SQLQuery = Query +SQLSet = Set +SQLRows = Rows +SQLStorage = DALStorage + +if __name__ == '__main__': + import doctest + doctest.testmod() + + ADDED gluon/contrib/pam.py Index: gluon/contrib/pam.py ================================================================== --- /dev/null +++ gluon/contrib/pam.py @@ -0,0 +1,125 @@ +# (c) 2007 Chris AtLee <chris@atlee.ca> +# Licensed under the MIT license: +# http://www.opensource.org/licenses/mit-license.php +""" +PAM module for python + +Provides an authenticate function that will allow the caller to authenticate +a user against the Pluggable Authentication Modules (PAM) on the system. + +Implemented using ctypes, so no compilation is necessary. +""" +__all__ = ['authenticate'] + +from ctypes import CDLL, POINTER, Structure, CFUNCTYPE, cast, pointer, sizeof +from ctypes import c_void_p, c_uint, c_char_p, c_char, c_int +from ctypes.util import find_library + +LIBPAM = CDLL(find_library("pam")) +LIBC = CDLL(find_library("c")) + +CALLOC = LIBC.calloc +CALLOC.restype = c_void_p +CALLOC.argtypes = [c_uint, c_uint] + +STRDUP = LIBC.strdup +STRDUP.argstypes = [c_char_p] +STRDUP.restype = POINTER(c_char) # NOT c_char_p !!!! 
+ +# Various constants +PAM_PROMPT_ECHO_OFF = 1 +PAM_PROMPT_ECHO_ON = 2 +PAM_ERROR_MSG = 3 +PAM_TEXT_INFO = 4 + +class PamHandle(Structure): + """wrapper class for pam_handle_t""" + _fields_ = [ + ("handle", c_void_p) + ] + + def __init__(self): + Structure.__init__(self) + self.handle = 0 + +class PamMessage(Structure): + """wrapper class for pam_message structure""" + _fields_ = [ + ("msg_style", c_int), + ("msg", c_char_p), + ] + + def __repr__(self): + return "<PamMessage %i '%s'>" % (self.msg_style, self.msg) + +class PamResponse(Structure): + """wrapper class for pam_response structure""" + _fields_ = [ + ("resp", c_char_p), + ("resp_retcode", c_int), + ] + + def __repr__(self): + return "<PamResponse %i '%s'>" % (self.resp_retcode, self.resp) + +CONV_FUNC = CFUNCTYPE(c_int, + c_int, POINTER(POINTER(PamMessage)), + POINTER(POINTER(PamResponse)), c_void_p) + +class PamConv(Structure): + """wrapper class for pam_conv structure""" + _fields_ = [ + ("conv", CONV_FUNC), + ("appdata_ptr", c_void_p) + ] + +PAM_START = LIBPAM.pam_start +PAM_START.restype = c_int +PAM_START.argtypes = [c_char_p, c_char_p, POINTER(PamConv), + POINTER(PamHandle)] + +PAM_AUTHENTICATE = LIBPAM.pam_authenticate +PAM_AUTHENTICATE.restype = c_int +PAM_AUTHENTICATE.argtypes = [PamHandle, c_int] + +def authenticate(username, password, service='login'): + """Returns True if the given username and password authenticate for the + given service. Returns False otherwise + + ``username``: the username to authenticate + + ``password``: the password in plain text + + ``service``: the PAM service to authenticate against. 
+ Defaults to 'login'""" + @CONV_FUNC + def my_conv(n_messages, messages, p_response, app_data): + """Simple conversation function that responds to any + prompt where the echo is off with the supplied password""" + # Create an array of n_messages response objects + addr = CALLOC(n_messages, sizeof(PamResponse)) + p_response[0] = cast(addr, POINTER(PamResponse)) + for i in range(n_messages): + if messages[i].contents.msg_style == PAM_PROMPT_ECHO_OFF: + pw_copy = STRDUP(str(password)) + p_response.contents[i].resp = cast(pw_copy, c_char_p) + p_response.contents[i].resp_retcode = 0 + return 0 + + handle = PamHandle() + conv = PamConv(my_conv, 0) + retval = PAM_START(service, username, pointer(conv), pointer(handle)) + + if retval != 0: + # TODO: This is not an authentication error, something + # has gone wrong starting up PAM + return False + + retval = PAM_AUTHENTICATE(handle, 0) + return retval == 0 + +if __name__ == "__main__": + import getpass + print authenticate(getpass.getuser(), getpass.getpass()) + + ADDED gluon/contrib/populate.py Index: gluon/contrib/populate.py ================================================================== --- /dev/null +++ gluon/contrib/populate.py cannot compute difference between binary files ADDED gluon/contrib/pyfpdf/README Index: gluon/contrib/pyfpdf/README ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/README @@ -0,0 +1,1 @@ +Read more about this http://code.google.com/p/pyfpdf ADDED gluon/contrib/pyfpdf/__init__.py Index: gluon/contrib/pyfpdf/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/__init__.py @@ -0,0 +1,5 @@ +from fpdf import FPDF +from html import HTMLMixin +from template import Template + + ADDED gluon/contrib/pyfpdf/designer.py Index: gluon/contrib/pyfpdf/designer.py ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/designer.py @@ 
-0,0 +1,736 @@ +#!/usr/bin/python +# -*- coding: latin-1 -*- +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by the +# Free Software Foundation; either version 3, or (at your option) any later +# version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# for more details. + +"Visual Template designer for PyFPDF (using wxPython OGL library)" + +__author__ = "Mariano Reingart <reingart@gmail.com>" +__copyright__ = "Copyright (C) 2011 Mariano Reingart" +__license__ = "GPL 3.0" +__version__ = "1.01a" + +# Based on: +# * pySjetch.py wxPython sample application +# * OGL.py and other wxPython demo modules + + +import os, sys +import wx +import wx.lib.ogl as ogl +from wx.lib.wordwrap import wordwrap + +DEBUG = True + + +class CustomDialog(wx.Dialog): + "A dinamyc dialog to ask user about arbitrary fields" + + def __init__( + self, parent, ID, title, size=wx.DefaultSize, pos=wx.DefaultPosition, + style=wx.DEFAULT_DIALOG_STYLE, fields=None, data=None, + ): + + wx.Dialog.__init__ (self, parent, ID, title, pos, size, style) + + sizer = wx.BoxSizer(wx.VERTICAL) + + self.textctrls = {} + for field in fields: + box = wx.BoxSizer(wx.HORIZONTAL) + label = wx.StaticText(self, -1, field) + label.SetHelpText("This is the help text for the label") + box.Add(label, 1, wx.ALIGN_CENTRE|wx.ALL, 5) + text = wx.TextCtrl(self, -1, "", size=(80,-1)) + text.SetHelpText("Here's some help text for field #1") + if field in data: + text.SetValue(repr(data[field])) + box.Add(text, 1, wx.ALIGN_CENTRE|wx.ALL, 1) + sizer.Add(box, 0, wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.ALL, 1) + self.textctrls[field] = text + + line = wx.StaticLine(self, -1, size=(20,-1), style=wx.LI_HORIZONTAL) + sizer.Add(line, 0, 
wx.GROW|wx.ALIGN_CENTER_VERTICAL|wx.RIGHT|wx.TOP, 5) + + btnsizer = wx.StdDialogButtonSizer() + + btn = wx.Button(self, wx.ID_OK) + btn.SetHelpText("The OK button completes the dialog") + btn.SetDefault() + btnsizer.AddButton(btn) + + btn = wx.Button(self, wx.ID_CANCEL) + btn.SetHelpText("The Cancel button cancels the dialog. (Cool, huh?)") + btnsizer.AddButton(btn) + btnsizer.Realize() + + sizer.Add(btnsizer, 0, wx.ALIGN_CENTER_VERTICAL|wx.ALL, 5) + + self.SetSizer(sizer) + sizer.Fit(self) + + @classmethod + def do_input(Class, parent, title, fields, data): + dlg = Class(parent, -1, title, size=(350, 200), + style=wx.DEFAULT_DIALOG_STYLE, # & ~wx.CLOSE_BOX, + fields=fields, data=data + ) + dlg.CenterOnScreen() + while 1: + val = dlg.ShowModal() + if val == wx.ID_OK: + values = {} + for field in fields: + try: + values[field] = eval(dlg.textctrls[field].GetValue()) + except Exception, e: + msg = wx.MessageDialog(parent, unicode(e), + "Error in field %s" % field, + wx.OK | wx.ICON_INFORMATION + ) + msg.ShowModal() + msg.Destroy() + break + else: + return dict([(field, values[field]) for field in fields]) + else: + return None + + +class MyEvtHandler(ogl.ShapeEvtHandler): + "Custom Event Handler for Shapes" + def __init__(self, callback): + ogl.ShapeEvtHandler.__init__(self) + self.callback = callback + + def OnLeftClick(self, x, y, keys=0, attachment=0): + shape = self.GetShape() + canvas = shape.GetCanvas() + dc = wx.ClientDC(canvas) + canvas.PrepareDC(dc) + + if shape.Selected() and keys & ogl.KEY_SHIFT: + shape.Select(False, dc) + #canvas.Redraw(dc) + canvas.Refresh(False) + else: + redraw = False + shapeList = canvas.GetDiagram().GetShapeList() + toUnselect = [] + + for s in shapeList: + if s.Selected() and not keys & ogl.KEY_SHIFT: + # If we unselect it now then some of the objects in + # shapeList will become invalid (the control points are + # shapes too!) and bad things will happen... 
+ toUnselect.append(s) + + shape.Select(True, dc) + + if toUnselect: + for s in toUnselect: + s.Select(False, dc) + ##canvas.Redraw(dc) + canvas.Refresh(False) + + self.callback() + + def OnEndDragLeft(self, x, y, keys=0, attachment=0): + shape = self.GetShape() + ogl.ShapeEvtHandler.OnEndDragLeft(self, x, y, keys, attachment) + + if not shape.Selected(): + self.OnLeftClick(x, y, keys, attachment) + + self.callback() + + def OnSizingEndDragLeft(self, pt, x, y, keys, attch): + ogl.ShapeEvtHandler.OnSizingEndDragLeft(self, pt, x, y, keys, attch) + self.callback() + + def OnMovePost(self, dc, x, y, oldX, oldY, display): + shape = self.GetShape() + ogl.ShapeEvtHandler.OnMovePost(self, dc, x, y, oldX, oldY, display) + self.callback() + if "wxMac" in wx.PlatformInfo: + shape.GetCanvas().Refresh(False) + + def OnLeftDoubleClick(self, x, y, keys = 0, attachment = 0): + self.callback("LeftDoubleClick") + + def OnRightClick(self, *dontcare): + self.callback("RightClick") + + +class Element(object): + "Visual class that represent a placeholder in the template" + + fields = ['name', 'type', + 'x1', 'y1', 'x2', 'y2', + 'font', 'size', + 'bold', 'italic', 'underline', + 'foreground', 'background', + 'align', 'text', 'priority',] + + def __init__(self, canvas=None, frame=None, zoom=5.0, static=False, **kwargs): + self.kwargs = kwargs + self.zoom = zoom + self.frame = frame + self.canvas = canvas + self.static = static + + name = kwargs['name'] + kwargs['type'] + type = kwargs['type'] + + x, y, w, h = self.set_coordinates(kwargs['x1'], kwargs['y1'], kwargs['x2'], kwargs['y2']) + + text = kwargs['text'] + + shape = self.shape = ogl.RectangleShape(w, h) + + if not static: + shape.SetDraggable(True, True) + + shape.SetX(x) + shape.SetY(y) + #if pen: shape.SetPen(pen) + #if brush: shape.SetBrush(brush) + shape.SetBrush(wx.TRANSPARENT_BRUSH) + + if type not in ('L', 'B', 'BC'): + if not static: + pen = wx.LIGHT_GREY_PEN + else: + pen = wx.RED_PEN + shape.SetPen(pen) + + self.text = 
kwargs['text'] + + evthandler = MyEvtHandler(self.evt_callback) + evthandler.SetShape(shape) + evthandler.SetPreviousHandler(shape.GetEventHandler()) + shape.SetEventHandler(evthandler) + shape.SetCentreResize(False) + shape.SetMaintainAspectRatio(False) + + canvas.AddShape( shape ) + + @classmethod + def new(Class, parent): + data = dict(name='some_name', type='T', + x1=5.0, y1=5.0, x2=100.0, y2=10.0, + font="Arial", size=12, + bold=False, italic=False, underline=False, + foreground= 0x000000, background=0xFFFFFF, + align="L", text="", priority=0) + data = CustomDialog.do_input(parent, 'New element', Class.fields, data) + if data: + return Class(canvas=parent.canvas, frame=parent, **data) + + def edit(self): + "Edit current element (show a dialog box with all fields)" + data = self.kwargs.copy() + x1, y1, x2, y2 = self.get_coordinates() + data.update(dict(name=self.name, + text=self.text, + x1=x1, y1=y1, x2=x2, y2=y2, + )) + data = CustomDialog.do_input(self.frame, 'Edit element', self.fields, data) + if data: + self.kwargs.update(data) + self.name = data['name'] + self.text = data['text'] + x,y, w, h = self.set_coordinates(data['x1'], data['y1'], data['x2'], data['y2']) + self.shape.SetX(x) + self.shape.SetY(y) + self.shape.SetWidth(w) + self.shape.SetHeight(h) + self.canvas.Refresh(False) + self.canvas.GetDiagram().ShowAll(1) + + def edit_text(self): + "Allow text edition (i.e. 
for doubleclick)" + dlg = wx.TextEntryDialog( + self.frame, 'Text for %s' % self.name, + 'Edit Text', '') + if self.text: + dlg.SetValue(self.text) + if dlg.ShowModal() == wx.ID_OK: + self.text = dlg.GetValue().encode("latin1") + dlg.Destroy() + + def copy(self): + "Return an identical duplicate" + kwargs = self.as_dict() + element = Element(canvas=self.canvas, frame=self.frame, zoom=self.zoom, static=self.static, **kwargs) + return element + + def remove(self): + "Erases visual shape from OGL canvas (element must be deleted manually)" + self.canvas.RemoveShape(self.shape) + + def move(self, dx, dy): + "Change pdf coordinates (converting to wx internal values)" + x1, y1, x2, y2 = self.get_coordinates() + x1 += dx + x2 += dx + y1 += dy + y2 += dy + x, y, w, h = self.set_coordinates(x1, y1, x2, y2) + self.shape.SetX(x) + self.shape.SetY(y) + + def evt_callback(self, evt_type=None): + "Event dispatcher" + if evt_type=="LeftDoubleClick": + self.edit_text() + if evt_type=='RightClick': + self.edit() + + # update the status bar + x1, y1, x2, y2 = self.get_coordinates() + self.frame.SetStatusText("%s (%0.2f, %0.2f) - (%0.2f, %0.2f)" % + (self.name, x1, y1, x2, y2)) + + def get_coordinates(self): + "Convert from wx to pdf coordinates" + x, y = self.shape.GetX(), self.shape.GetY() + w, h = self.shape.GetBoundingBoxMax() + w -= 1 + h -= 1 + x1 = x/self.zoom - w/self.zoom/2.0 + x2 = x/self.zoom + w/self.zoom/2.0 + y1 = y/self.zoom - h/self.zoom/2.0 + y2 = y/self.zoom + h/self.zoom/2.0 + return x1, y1, x2, y2 + + def set_coordinates(self, x1, y1, x2, y2): + "Convert from pdf to wx coordinates" + x1 = x1 * self.zoom + x2 = x2 * self.zoom + y1 = y1 * self.zoom + y2 = y2 * self.zoom + + # shapes seems to be centred, pdf coord not + w = max(x1, x2) - min(x1, x2) + 1 + h = max(y1, y2) - min(y1, y2) + 1 + x = (min(x1, x2) + w/2.0) + y = (min(y1, y2) + h/2.0) + return x, y, w, h + + def text(self, txt=None): + if txt is not None: + if not isinstance(txt,str): + txt = str(txt) + 
self.kwargs['text'] = txt + self.shape.ClearText() + for line in txt.split('\n'): + self.shape.AddText(unicode(line, "latin1")) + self.canvas.Refresh(False) + return self.kwargs['text'] + text = property(text, text) + + def set_x(self, x): + self.shape.SetX(x) + self.canvas.Refresh(False) + self.evt_callback() + def set_y(self, y): + self.shape.SetY(y) + self.canvas.Refresh(False) + self.evt_callback() + def get_x(self): + return self.shape.GetX() + def get_y(self): + return self.shape.GetY() + + x = property(get_x, set_x) + y = property(get_y, set_y) + + def selected(self, sel=None): + if sel is not None: + print "Setting Select(%s)" % sel + self.shape.Select(sel) + return self.shape.Selected() + selected = property(selected, selected) + + def name(self, name=None): + if name is not None: + self.kwargs['name'] = name + return self.kwargs['name'] + name = property(name, name) + + def __contains__(self, k): + "Implement in keyword for searchs" + return k in self.name.lower() or self.text and k in self.text.lower() + + def as_dict(self): + "Return a dictionary representation, used by pyfpdf" + d = self.kwargs + x1, y1, x2, y2 = self.get_coordinates() + d.update({ + 'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, + 'text': self.text}) + return d + + +class AppFrame(wx.Frame): + "OGL Designer main window" + title = "PyFPDF Template Designer (wx OGL)" + + def __init__(self): + wx.Frame.__init__( self, + None, -1, self.title, + size=(640,480), + style=wx.DEFAULT_FRAME_STYLE ) + sys.excepthook = self.except_hook + self.filename = "" + # Create a toolbar: + tsize = (16,16) + self.toolbar = self.CreateToolBar(wx.TB_HORIZONTAL | wx.NO_BORDER | wx.TB_FLAT) + + artBmp = wx.ArtProvider.GetBitmap + self.toolbar.AddSimpleTool( + wx.ID_NEW, artBmp(wx.ART_NEW, wx.ART_TOOLBAR, tsize), "New") + self.toolbar.AddSimpleTool( + wx.ID_OPEN, artBmp(wx.ART_FILE_OPEN, wx.ART_TOOLBAR, tsize), "Open") + self.toolbar.AddSimpleTool( + wx.ID_SAVE, artBmp(wx.ART_FILE_SAVE, wx.ART_TOOLBAR, tsize), "Save") 
+ self.toolbar.AddSimpleTool( + wx.ID_SAVEAS, artBmp(wx.ART_FILE_SAVE_AS, wx.ART_TOOLBAR, tsize), + "Save As...") + #------- + self.toolbar.AddSeparator() + self.toolbar.AddSimpleTool( + wx.ID_UNDO, artBmp(wx.ART_UNDO, wx.ART_TOOLBAR, tsize), "Undo") + self.toolbar.AddSimpleTool( + wx.ID_REDO, artBmp(wx.ART_REDO, wx.ART_TOOLBAR, tsize), "Redo") + self.toolbar.AddSeparator() + #------- + self.toolbar.AddSimpleTool( + wx.ID_CUT, artBmp(wx.ART_CUT, wx.ART_TOOLBAR, tsize), "Remove") + self.toolbar.AddSimpleTool( + wx.ID_COPY, artBmp(wx.ART_COPY, wx.ART_TOOLBAR, tsize), "Duplicate") + self.toolbar.AddSimpleTool( + wx.ID_PASTE, artBmp(wx.ART_PASTE, wx.ART_TOOLBAR, tsize), "Insert") + self.toolbar.AddSeparator() + self.toolbar.AddSimpleTool( + wx.ID_FIND, artBmp(wx.ART_FIND, wx.ART_TOOLBAR, tsize), "Find") + self.toolbar.AddSeparator() + self.toolbar.AddSimpleTool( + wx.ID_PRINT, artBmp(wx.ART_PRINT, wx.ART_TOOLBAR, tsize), "Print") + self.toolbar.AddSimpleTool( + wx.ID_ABOUT, artBmp(wx.ART_HELP, wx.ART_TOOLBAR, tsize), "About") + + self.toolbar.Realize() + + self.toolbar.EnableTool(wx.ID_SAVEAS, False) + self.toolbar.EnableTool(wx.ID_UNDO, False) + self.toolbar.EnableTool(wx.ID_REDO, False) + + menu_handlers = [ + (wx.ID_NEW, self.do_new), + (wx.ID_OPEN, self.do_open), + (wx.ID_SAVE, self.do_save), + (wx.ID_PRINT, self.do_print), + (wx.ID_FIND, self.do_find), + (wx.ID_CUT, self.do_cut), + (wx.ID_COPY, self.do_copy), + (wx.ID_PASTE, self.do_paste), + (wx.ID_ABOUT, self.do_about), + ] + for menu_id, handler in menu_handlers: + self.Bind(wx.EVT_MENU, handler, id = menu_id) + + sizer = wx.BoxSizer(wx.VERTICAL) + # put stuff into sizer + + self.CreateStatusBar() + + canvas = self.canvas = ogl.ShapeCanvas( self ) + maxWidth = 1500 + maxHeight = 2000 + canvas.SetScrollbars(20, 20, maxWidth/20, maxHeight/20) + sizer.Add( canvas, 1, wx.GROW ) + + canvas.SetBackgroundColour("WHITE") # + + diagram = self.diagram = ogl.Diagram() + canvas.SetDiagram( diagram ) + diagram.SetCanvas( 
canvas ) + diagram.SetSnapToGrid( False ) + + # apply sizer + self.SetSizer(sizer) + self.SetAutoLayout(1) + self.Show(1) + + self.Bind(wx.EVT_CHAR_HOOK, self.on_key_event) + self.elements = [] + + def on_key_event(self, event): + """ Respond to a keypress event. + + We make the arrow keys move the selected object(s) by one pixel in + the given direction. + """ + step = 1 + if event.ControlDown(): + step = 20 + + if event.GetKeyCode() == wx.WXK_UP: + self.move_elements(0, -step) + elif event.GetKeyCode() == wx.WXK_DOWN: + self.move_elements(0, step) + elif event.GetKeyCode() == wx.WXK_LEFT: + self.move_elements(-step, 0) + elif event.GetKeyCode() == wx.WXK_RIGHT: + self.move_elements(step, 0) + elif event.GetKeyCode() == wx.WXK_DELETE: + self.do_cut() + else: + event.Skip() + + def do_new(self, evt=None): + for element in self.elements: + element.remove() + self.elements = [] + # draw paper size guides + for k, (w, h) in [('legal', (216, 356)), ('A4', (210, 297)), ('letter', (216, 279))]: + self.create_elements( + k, 'R', 0, 0, w, h, + size=70, foreground=0x808080, priority=-100, + canvas=self.canvas, frame=self, static=True) + self.diagram.ShowAll( 1 ) + + def do_open(self, evt): + dlg = wx.FileDialog( + self, message="Choose a file", + defaultDir=os.getcwd(), + defaultFile="invoice.csv", + wildcard="CSV Files (*.csv)|*.csv", + style=wx.OPEN + ) + + if dlg.ShowModal() == wx.ID_OK: + # This returns a Python list of files that were selected. 
+ self.filename = dlg.GetPaths()[0] + + dlg.Destroy() + self.SetTitle(self.filename + " - " + self.title) + + self.do_new() + tmp = [] + f = open(self.filename) + try: + filedata = f.readlines() + finally: + f.close() + for lno, linea in enumerate(filedata): + if DEBUG: print "processing line", lno, linea + args = [] + for i,v in enumerate(linea.split(";")): + if not v.startswith("'"): + v = v.replace(",",".") + else: + v = v#.decode('latin1') + if v.strip()=='': + v = None + else: + v = eval(v.strip()) + args.append(v) + tmp.append(args) + + # sort by z-order (priority) + for args in sorted(tmp, key=lambda t: t[-1]): + if DEBUG: print args + self.create_elements(*args) + self.diagram.ShowAll( 1 ) # + + return True + + def do_save(self, evt, filename=None): + try: + from time import gmtime, strftime + ts = strftime("%Y%m%d%H%M%S", gmtime()) + os.rename(self.filename, self.filename + ts + ".bak") + except Exception, e: + if DEBUG: print e + pass + + def csv_repr(v, decimal_sep="."): + if isinstance(v, float): + return ("%0.2f" % v).replace(".", decimal_sep) + else: + return repr(v) + + f = open(self.filename, "w") + try: + for element in sorted(self.elements, key=lambda e:e.name): + if element.static: + continue + d = element.as_dict() + l = [d['name'], d['type'], + d['x1'], d['y1'], d['x2'], d['y2'], + d['font'], d['size'], + d['bold'], d['italic'], d['underline'], + d['foreground'], d['background'], + d['align'], d['text'], d['priority'], + ] + f.write(";".join([csv_repr(v) for v in l])) + f.write("\n") + finally: + f.close() + + def do_print(self, evt): + # genero el renderizador con propiedades del PDF + from template import Template + t = Template(elements=[e.as_dict() for e in self.elements if not e.static]) + t.add_page() + if not t['logo'] or not os.path.exists(t['logo']): + # put a default logo so it doesn't trow an exception + logo = os.path.join(os.path.dirname(__file__), 'tutorial','logo.png') + t.set('logo', logo) + try: + t.render(self.filename 
+".pdf") + except: + if DEBUG and False: + import pdb; + pdb.pm() + else: + raise + if sys.platform=="linux2": + os.system("evince ""%s""" % self.filename +".pdf") + else: + os.startfile(self.filename +".pdf") + + def do_find(self, evt): + # busco nombre o texto + dlg = wx.TextEntryDialog( + self, 'Enter text to search for', + 'Find Text', '') + if dlg.ShowModal() == wx.ID_OK: + txt = dlg.GetValue().encode("latin1").lower() + for element in self.elements: + if txt in element: + element.selected = True + print "Found:", element.name + self.canvas.Refresh(False) + dlg.Destroy() + + def do_cut(self, evt=None): + "Delete selected elements" + new_elements = [] + for element in self.elements: + if element.selected: + print "Erasing:", element.name + element.selected = False + self.canvas.Refresh(False) + element.remove() + else: + new_elements.append(element) + self.elements = new_elements + self.canvas.Refresh(False) + self.diagram.ShowAll( 1 ) + + def do_copy(self, evt): + "Duplicate selected elements" + fields = ['qty', 'dx', 'dy'] + data = {'qty': 1, 'dx': 0.0, 'dy': 5.0} + data = CustomDialog.do_input(self, 'Copy elements', fields, data) + if data: + new_elements = [] + for i in range(1, data['qty']+1): + for element in self.elements: + if element.selected: + print "Copying:", element.name + new_element = element.copy() + name = new_element.name + if len(name)>2 and name[-2:].isdigit(): + new_element.name = name[:-2] + "%02d" % (int(name[-2:])+i) + else: + new_element.name = new_element.name + "_copy" + new_element.selected = False + new_element.move(data['dx']*i, data['dy']*i) + new_elements.append(new_element) + self.elements.extend(new_elements) + self.canvas.Refresh(False) + self.diagram.ShowAll( 1 ) + + def do_paste(self, evt): + "Insert new elements" + element = Element.new(self) + if element: + self.canvas.Refresh(False) + self.elements.append(element) + self.diagram.ShowAll( 1 ) + + def create_elements(self, name, type, x1, y1, x2, y2, + font="Arial", 
size=12, + bold=False, italic=False, underline=False, + foreground= 0x000000, background=0xFFFFFF, + align="L", text="", priority=0, canvas=None, frame=None, static=False, + **kwargs): + element = Element(name=name, type=type, x1=x1, y1=y1, x2=x2, y2=y2, + font=font, size=size, + bold=bold, italic=italic, underline=underline, + foreground= foreground, background=background, + align=align, text=text, priority=priority, + canvas=canvas or self.canvas, frame=frame or self, + static=static) + self.elements.append(element) + + def move_elements(self, x, y): + for element in self.elements: + if element.selected: + print "moving", element.name, x, y + element.x = element.x + x + element.y = element.y + y + + def do_about(self, evt): + info = wx.AboutDialogInfo() + info.Name = self.title + info.Version = __version__ + info.Copyright = __copyright__ + info.Description = ( + "Visual Template designer for PyFPDF (using wxPython OGL library)\n" + "Input files are CSV format describing the layout, separated by ;\n" + "Use toolbar buttons to open, save, print (preview) your template, " + "and there are buttons to find, add, remove or duplicate elements.\n" + "Over an element, a double left click opens edit text dialog, " + "and a right click opens edit properties dialog. \n" + "Multiple element can be selected with shift left click. 
\n" + "Use arrow keys or drag-and-drop to move elements.\n" + "For further information see project webpage:" + ) + info.WebSite = ("http://code.google.com/p/pyfpdf/wiki/Templates", + "pyfpdf Google Code Project") + info.Developers = [ __author__, ] + + info.License = wordwrap(__license__, 500, wx.ClientDC(self)) + + # Then we call wx.AboutBox giving it that info object + wx.AboutBox(info) + + def except_hook(self, type, value, trace): + import traceback + exc = traceback.format_exception(type, value, trace) + for e in exc: wx.LogError(e) + wx.LogError('Unhandled Error: %s: %s'%(str(type), str(value))) + + +app = wx.PySimpleApp() +ogl.OGLInitialize() +frame = AppFrame() +app.MainLoop() +app.Destroy() + + + ADDED gluon/contrib/pyfpdf/fpdf.py Index: gluon/contrib/pyfpdf/fpdf.py ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/fpdf.py @@ -0,0 +1,1687 @@ +#!/usr/bin/env python +# -*- coding: latin-1 -*- +# ****************************************************************************** +# * Software: FPDF for python * +# * Version: 1.54c * +# * Date: 2010-09-10 * +# * License: LGPL v3.0 * +# * * +# * Original Author (PHP): Olivier PLATHEY 2004-12-31 * +# * Ported to Python 2.4 by Max (maxpat78@yahoo.it) on 2006-05 * +# * Maintainer: Mariano Reingart (reingart@gmail.com) et al since 2008 (est.) * +# * NOTE: 'I' and 'D' destinations are disabled, and simply print to STDOUT * +# *****************************************************************************/ + +from datetime import datetime +import math +import os, sys, zlib, struct + +try: + # Check if PIL is available, necessary for JPEG support. 
+ import Image +except ImportError: + Image = None + +def substr(s, start, length=-1): + if length < 0: + length=len(s)-start + return s[start:start+length] + +def sprintf(fmt, *args): return fmt % args + +# Global variables +FPDF_VERSION='1.54b' +FPDF_FONT_DIR=os.path.join(os.path.dirname(__file__),'font') +fpdf_charwidths = {} + +class FPDF: +#Private properties +#~ page; #current page number +#~ n; #current object number +#~ offsets; #array of object offsets +#~ buffer; #buffer holding in-memory PDF +#~ pages; #array containing pages +#~ state; #current document state +#~ compress; #compression flag +#~ def_orientation; #default orientation +#~ cur_orientation; #current orientation +#~ orientation_changes; #array indicating orientation changes +#~ k; #scale factor (number of points in user unit) +#~ fw_pt,fh_pt; #dimensions of page format in points +#~ fw,fh; #dimensions of page format in user unit +#~ w_pt,h_pt; #current dimensions of page in points +#~ w,h; #current dimensions of page in user unit +#~ l_margin; #left margin +#~ t_margin; #top margin +#~ r_margin; #right margin +#~ b_margin; #page break margin +#~ c_margin; #cell margin +#~ x,y; #current position in user unit for cell positioning +#~ lasth; #height of last cell printed +#~ line_width; #line width in user unit +#~ core_fonts; #array of standard font names +#~ fonts; #array of used fonts +#~ font_files; #array of font files +#~ diffs; #array of encoding differences +#~ images; #array of used images +#~ page_links; #array of links in pages +#~ links; #array of internal links +#~ font_family; #current font family +#~ font_style; #current font style +#~ underline; #underlining flag +#~ current_font; #current font info +#~ font_size_pt; #current font size in points +#~ font_size; #current font size in user unit +#~ draw_color; #commands for drawing color +#~ fill_color; #commands for filling color +#~ text_color; #commands for text color +#~ color_flag; #indicates whether fill and text colors are 
different +#~ ws; #word spacing +#~ auto_page_break; #automatic page breaking +#~ page_break_trigger; #threshold used to trigger page breaks +#~ in_footer; #flag set when processing footer +#~ zoom_mode; #zoom display mode +#~ layout_mode; #layout display mode +#~ title; #title +#~ subject; #subject +#~ author; #author +#~ keywords; #keywords +#~ creator; #creator +#~ alias_nb_pages; #alias for total number of pages +#~ pdf_version; #PDF version number + +# ****************************************************************************** +# * * +# * Public methods * +# * * +# *******************************************************************************/ + def __init__(self, orientation='P',unit='mm',format='A4'): + #Some checks + self._dochecks() + #Initialization of properties + self.offsets={} + self.page=0 + self.n=2 + self.buffer='' + self.pages={} + self.orientation_changes={} + self.state=0 + self.fonts={} + self.font_files={} + self.diffs={} + self.images={} + self.page_links={} + self.links={} + self.in_footer=0 + self.lastw=0 + self.lasth=0 + self.font_family='' + self.font_style='' + self.font_size_pt=12 + self.underline=0 + self.draw_color='0 G' + self.fill_color='0 g' + self.text_color='0 g' + self.color_flag=0 + self.ws=0 + self.angle=0 + #Standard fonts + self.core_fonts={'courier':'Courier','courierB':'Courier-Bold','courierI':'Courier-Oblique','courierBI':'Courier-BoldOblique', + 'helvetica':'Helvetica','helveticaB':'Helvetica-Bold','helveticaI':'Helvetica-Oblique','helveticaBI':'Helvetica-BoldOblique', + 'times':'Times-Roman','timesB':'Times-Bold','timesI':'Times-Italic','timesBI':'Times-BoldItalic', + 'symbol':'Symbol','zapfdingbats':'ZapfDingbats'} + #Scale factor + if(unit=='pt'): + self.k=1 + elif(unit=='mm'): + self.k=72/25.4 + elif(unit=='cm'): + self.k=72/2.54 + elif(unit=='in'): + self.k=72 + else: + self.error('Incorrect unit: '+unit) + #Page format + if(isinstance(format,basestring)): + format=format.lower() + if(format=='a3'): + 
format=(841.89,1190.55) + elif(format=='a4'): + format=(595.28,841.89) + elif(format=='a5'): + format=(420.94,595.28) + elif(format=='letter'): + format=(612,792) + elif(format=='legal'): + format=(612,1008) + else: + self.error('Unknown page format: '+format) + self.fw_pt=format[0] + self.fh_pt=format[1] + else: + self.fw_pt=format[0]*self.k + self.fh_pt=format[1]*self.k + self.fw=self.fw_pt/self.k + self.fh=self.fh_pt/self.k + #Page orientation + orientation=orientation.lower() + if(orientation=='p' or orientation=='portrait'): + self.def_orientation='P' + self.w_pt=self.fw_pt + self.h_pt=self.fh_pt + elif(orientation=='l' or orientation=='landscape'): + self.def_orientation='L' + self.w_pt=self.fh_pt + self.h_pt=self.fw_pt + else: + self.error('Incorrect orientation: '+orientation) + self.cur_orientation=self.def_orientation + self.w=self.w_pt/self.k + self.h=self.h_pt/self.k + #Page margins (1 cm) + margin=28.35/self.k + self.set_margins(margin,margin) + #Interior cell margin (1 mm) + self.c_margin=margin/10.0 + #line width (0.2 mm) + self.line_width=.567/self.k + #Automatic page break + self.set_auto_page_break(1,2*margin) + #Full width display mode + self.set_display_mode('fullwidth') + #Enable compression + self.set_compression(1) + #Set default PDF version number + self.pdf_version='1.3' + + def set_margins(self, left,top,right=-1): + "Set left, top and right margins" + self.l_margin=left + self.t_margin=top + if(right==-1): + right=left + self.r_margin=right + + def set_left_margin(self, margin): + "Set left margin" + self.l_margin=margin + if(self.page>0 and self.x<margin): + self.x=margin + + def set_top_margin(self, margin): + "Set top margin" + self.t_margin=margin + + def set_right_margin(self, margin): + "Set right margin" + self.r_margin=margin + + def set_auto_page_break(self, auto,margin=0): + "Set auto page break mode and triggering margin" + self.auto_page_break=auto + self.b_margin=margin + self.page_break_trigger=self.h-margin + + def 
set_display_mode(self, zoom,layout='continuous'): + "Set display mode in viewer" + if(zoom=='fullpage' or zoom=='fullwidth' or zoom=='real' or zoom=='default' or not isinstance(zoom,basestring)): + self.zoom_mode=zoom + else: + self.error('Incorrect zoom display mode: '+zoom) + if(layout=='single' or layout=='continuous' or layout=='two' or layout=='default'): + self.layout_mode=layout + else: + self.error('Incorrect layout display mode: '+layout) + + def set_compression(self, compress): + "Set page compression" + self.compress=compress + + def set_title(self, title): + "Title of document" + self.title=title + + def set_subject(self, subject): + "Subject of document" + self.subject=subject + + def set_author(self, author): + "Author of document" + self.author=author + + def set_keywords(self, keywords): + "Keywords of document" + self.keywords=keywords + + def set_creator(self, creator): + "Creator of document" + self.creator=creator + + def alias_nb_pages(self, alias='{nb}'): + "Define an alias for total number of pages" + self.str_alias_nb_pages=alias + return alias + + def error(self, msg): + "Fatal error" + raise RuntimeError('FPDF error: '+msg) + + def open(self): + "Begin document" + self.state=1 + + def close(self): + "Terminate document" + if(self.state==3): + return + if(self.page==0): + self.add_page() + #Page footer + self.in_footer=1 + self.footer() + self.in_footer=0 + #close page + self._endpage() + #close document + self._enddoc() + + def add_page(self, orientation=''): + "Start a new page" + if(self.state==0): + self.open() + family=self.font_family + if self.underline: + style = self.font_style + 'U' + else: + style = self.font_style + size=self.font_size_pt + lw=self.line_width + dc=self.draw_color + fc=self.fill_color + tc=self.text_color + cf=self.color_flag + if(self.page>0): + #Page footer + self.in_footer=1 + self.footer() + self.in_footer=0 + #close page + self._endpage() + #Start new page + self._beginpage(orientation) + #Set line cap style 
to square + self._out('2 J') + #Set line width + self.line_width=lw + self._out(sprintf('%.2f w',lw*self.k)) + #Set font + if(family): + self.set_font(family,style,size) + #Set colors + self.draw_color=dc + if(dc!='0 G'): + self._out(dc) + self.fill_color=fc + if(fc!='0 g'): + self._out(fc) + self.text_color=tc + self.color_flag=cf + #Page header + self.header() + #Restore line width + if(self.line_width!=lw): + self.line_width=lw + self._out(sprintf('%.2f w',lw*self.k)) + #Restore font + if(family): + self.set_font(family,style,size) + #Restore colors + if(self.draw_color!=dc): + self.draw_color=dc + self._out(dc) + if(self.fill_color!=fc): + self.fill_color=fc + self._out(fc) + self.text_color=tc + self.color_flag=cf + + def header(self): + "Header to be implemented in your own inherited class" + pass + + def footer(self): + "Footer to be implemented in your own inherited class" + pass + + def page_no(self): + "Get current page number" + return self.page + + def set_draw_color(self, r,g=-1,b=-1): + "Set color for all stroking operations" + if((r==0 and g==0 and b==0) or g==-1): + self.draw_color=sprintf('%.3f G',r/255.0) + else: + self.draw_color=sprintf('%.3f %.3f %.3f RG',r/255.0,g/255.0,b/255.0) + if(self.page>0): + self._out(self.draw_color) + + def set_fill_color(self,r,g=-1,b=-1): + "Set color for all filling operations" + if((r==0 and g==0 and b==0) or g==-1): + self.fill_color=sprintf('%.3f g',r/255.0) + else: + self.fill_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0) + self.color_flag=(self.fill_color!=self.text_color) + if(self.page>0): + self._out(self.fill_color) + + def set_text_color(self, r,g=-1,b=-1): + "Set color for text" + if((r==0 and g==0 and b==0) or g==-1): + self.text_color=sprintf('%.3f g',r/255.0) + else: + self.text_color=sprintf('%.3f %.3f %.3f rg',r/255.0,g/255.0,b/255.0) + self.color_flag=(self.fill_color!=self.text_color) + + def get_string_width(self, s): + "Get width of a string in the current font" + 
cw=self.current_font['cw'] + w=0 + l=len(s) + for i in xrange(0, l): + w += cw.get(s[i],0) + return w*self.font_size/1000.0 + + def set_line_width(self, width): + "Set line width" + self.line_width=width + if(self.page>0): + self._out(sprintf('%.2f w',width*self.k)) + + def line(self, x1,y1,x2,y2): + "Draw a line" + self._out(sprintf('%.2f %.2f m %.2f %.2f l S',x1*self.k,(self.h-y1)*self.k,x2*self.k,(self.h-y2)*self.k)) + + def rect(self, x,y,w,h,style=''): + "Draw a rectangle" + if(style=='F'): + op='f' + elif(style=='FD' or style=='DF'): + op='B' + else: + op='S' + self._out(sprintf('%.2f %.2f %.2f %.2f re %s',x*self.k,(self.h-y)*self.k,w*self.k,-h*self.k,op)) + + def add_font(self, family,style='',fname=''): + "Add a TrueType or Type1 font" + family=family.lower() + if(fname==''): + fname=family.replace(' ','')+style.lower()+'.font' + fname=os.path.join(FPDF_FONT_DIR,fname) + if(family=='arial'): + family='helvetica' + style=style.upper() + if(style=='IB'): + style='BI' + fontkey=family+style + if fontkey in self.fonts: + self.error('Font already added: '+family+' '+style) + execfile(fname, globals(), globals()) + if 'name' not in globals(): + self.error('Could not include font definition file') + i=len(self.fonts)+1 + self.fonts[fontkey]={'i':i,'type':type,'name':name,'desc':desc,'up':up,'ut':ut,'cw':cw,'enc':enc,'file':filename} + if(diff): + #Search existing encodings + d=0 + nb=len(self.diffs) + for i in xrange(1,nb+1): + if(self.diffs[i]==diff): + d=i + break + if(d==0): + d=nb+1 + self.diffs[d]=diff + self.fonts[fontkey]['diff']=d + if(filename): + if(type=='TrueType'): + self.font_files[filename]={'length1':originalsize} + else: + self.font_files[filename]={'length1':size1,'length2':size2} + + def set_font(self, family,style='',size=0): + "Select a font; size given in points" + family=family.lower() + if(family==''): + family=self.font_family + if(family=='arial'): + family='helvetica' + elif(family=='symbol' or family=='zapfdingbats'): + style='' + 
style=style.upper() + if('U' in style): + self.underline=1 + style=style.replace('U','') + else: + self.underline=0 + if(style=='IB'): + style='BI' + if(size==0): + size=self.font_size_pt + #Test if font is already selected + if(self.font_family==family and self.font_style==style and self.font_size_pt==size): + return + #Test if used for the first time + fontkey=family+style + if fontkey not in self.fonts: + #Check if one of the standard fonts + if fontkey in self.core_fonts: + if fontkey not in fpdf_charwidths: + #Load metric file + name=os.path.join(FPDF_FONT_DIR,family) + if(family=='times' or family=='helvetica'): + name+=style.lower() + execfile(name+'.font') + if fontkey not in fpdf_charwidths: + self.error('Could not include font metric file for'+fontkey) + i=len(self.fonts)+1 + self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]} + else: + self.error('Undefined font: '+family+' '+style) + #Select it + self.font_family=family + self.font_style=style + self.font_size_pt=size + self.font_size=size/self.k + self.current_font=self.fonts[fontkey] + if(self.page>0): + self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt)) + + def set_font_size(self, size): + "Set font size in points" + if(self.font_size_pt==size): + return + self.font_size_pt=size + self.font_size=size/self.k + if(self.page>0): + self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt)) + + def add_link(self): + "Create a new internal link" + n=len(self.links)+1 + self.links[n]=(0,0) + return n + + def set_link(self, link,y=0,page=-1): + "Set destination of internal link" + if(y==-1): + y=self.y + if(page==-1): + page=self.page + self.links[link]=[page,y] + + def link(self, x,y,w,h,link): + "Put a link on the page" + if not self.page in self.page_links: + self.page_links[self.page] = [] + self.page_links[self.page] += [(x*self.k,self.h_pt-y*self.k,w*self.k,h*self.k,link),] + + 
def text(self, x,y,txt): + "Output a string" + s=sprintf('BT %.2f %.2f Td (%s) Tj ET',x*self.k,(self.h-y)*self.k,self._escape(txt)) + if(self.underline and txt!=''): + s+=' '+self._dounderline(x,y,txt) + if(self.color_flag): + s='q '+self.text_color+' '+s+' Q' + self._out(s) + + def rotate(self, angle, x=None, y=None): + if x is None: + x = self.x + if y is None: + y = self.y; + if self.angle!=0: + self._out('Q') + self.angle = angle + if angle!=0: + angle *= math.pi/180; + c = math.cos(angle); + s = math.sin(angle); + cx = x*self.k; + cy = (self.h-y)*self.k + s = sprintf('q %.5F %.5F %.5F %.5F %.2F %.2F cm 1 0 0 1 %.2F %.2F cm',c,s,-s,c,cx,cy,-cx,-cy) + self._out(s) + + def accept_page_break(self): + "Accept automatic page break or not" + return self.auto_page_break + + def cell(self, w,h=0,txt='',border=0,ln=0,align='',fill=0,link=''): + "Output a cell" + k=self.k + if(self.y+h>self.page_break_trigger and not self.in_footer and self.accept_page_break()): + #Automatic page break + x=self.x + ws=self.ws + if(ws>0): + self.ws=0 + self._out('0 Tw') + self.add_page(self.cur_orientation) + self.x=x + if(ws>0): + self.ws=ws + self._out(sprintf('%.3f Tw',ws*k)) + if(w==0): + w=self.w-self.r_margin-self.x + s='' + if(fill==1 or border==1): + if(fill==1): + if border==1: + op='B' + else: + op='f' + else: + op='S' + s=sprintf('%.2f %.2f %.2f %.2f re %s ',self.x*k,(self.h-self.y)*k,w*k,-h*k,op) + if(isinstance(border,basestring)): + x=self.x + y=self.y + if('L' in border): + s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,x*k,(self.h-(y+h))*k) + if('T' in border): + s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-y)*k,(x+w)*k,(self.h-y)*k) + if('R' in border): + s+=sprintf('%.2f %.2f m %.2f %.2f l S ',(x+w)*k,(self.h-y)*k,(x+w)*k,(self.h-(y+h))*k) + if('B' in border): + s+=sprintf('%.2f %.2f m %.2f %.2f l S ',x*k,(self.h-(y+h))*k,(x+w)*k,(self.h-(y+h))*k) + if(txt!=''): + if(align=='R'): + dx=w-self.c_margin-self.get_string_width(txt) + elif(align=='C'): + 
dx=(w-self.get_string_width(txt))/2.0 + else: + dx=self.c_margin + if(self.color_flag): + s+='q '+self.text_color+' ' + txt2=txt.replace('\\','\\\\').replace(')','\\)').replace('(','\\(') + s+=sprintf('BT %.2f %.2f Td (%s) Tj ET',(self.x+dx)*k,(self.h-(self.y+.5*h+.3*self.font_size))*k,txt2) + if(self.underline): + s+=' '+self._dounderline(self.x+dx,self.y+.5*h+.3*self.font_size,txt) + if(self.color_flag): + s+=' Q' + if(link): + self.link(self.x+dx,self.y+.5*h-.5*self.font_size,self.get_string_width(txt),self.font_size,link) + if(s): + self._out(s) + self.lasth=h + if(ln>0): + #Go to next line + self.y+=h + if(ln==1): + self.x=self.l_margin + else: + self.x+=w + + def multi_cell(self, w,h,txt,border=0,align='J',fill=0, split_only=False): + "Output text with automatic or explicit line breaks" + ret = [] # if split_only = True, returns splited text cells + cw=self.current_font['cw'] + if(w==0): + w=self.w-self.r_margin-self.x + wmax=(w-2*self.c_margin)*1000.0/self.font_size + s=txt.replace("\r",'') + nb=len(s) + if(nb>0 and s[nb-1]=="\n"): + nb-=1 + b=0 + if(border): + if(border==1): + border='LTRB' + b='LRT' + b2='LR' + else: + b2='' + if('L' in border): + b2+='L' + if('R' in border): + b2+='R' + if ('T' in border): + b=b2+'T' + else: + b=b2 + sep=-1 + i=0 + j=0 + l=0 + ns=0 + nl=1 + while(i<nb): + #Get next character + c=s[i] + if(c=="\n"): + #Explicit line break + if(self.ws>0): + self.ws=0 + self._out('0 Tw') + if not split_only: + self.cell(w,h,substr(s,j,i-j),b,2,align,fill) + else: + ret.append(substr(s,j,i-j)) + i+=1 + sep=-1 + j=i + l=0 + ns=0 + nl+=1 + if(border and nl==2): + b=b2 + continue + if(c==' '): + sep=i + ls=l + ns+=1 + l+=cw.get(c,0) + if(l>wmax): + #Automatic line break + if(sep==-1): + if(i==j): + i+=1 + if(self.ws>0): + self.ws=0 + self._out('0 Tw') + if not split_only: + self.cell(w,h,substr(s,j,i-j),b,2,align,fill) + else: + ret.append(substr(s,j,i-j)) + else: + if(align=='J'): + if ns>1: + self.ws=(wmax-ls)/1000.0*self.font_size/(ns-1) + 
else: + self.ws=0 + self._out(sprintf('%.3f Tw',self.ws*self.k)) + if not split_only: + self.cell(w,h,substr(s,j,sep-j),b,2,align,fill) + else: + ret.append(substr(s,j,sep-j)) + i=sep+1 + sep=-1 + j=i + l=0 + ns=0 + nl+=1 + if(border and nl==2): + b=b2 + else: + i+=1 + #Last chunk + if(self.ws>0): + self.ws=0 + self._out('0 Tw') + if(border and 'B' in border): + b+='B' + if not split_only: + self.cell(w,h,substr(s,j,i-j),b,2,align,fill) + else: + ret.append(substr(s,j,i-j)) + self.x=self.l_margin + return ret + + def write(self, h,txt,link=''): + "Output text in flowing mode" + cw=self.current_font['cw'] + w=self.w-self.r_margin-self.x + wmax=(w-2*self.c_margin)*1000.0/self.font_size + s=txt.replace("\r",'') + nb=len(s) + sep=-1 + i=0 + j=0 + l=0 + nl=1 + while(i<nb): + #Get next character + c=s[i] + if(c=="\n"): + #Explicit line break + self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) + i+=1 + sep=-1 + j=i + l=0 + if(nl==1): + self.x=self.l_margin + w=self.w-self.r_margin-self.x + wmax=(w-2*self.c_margin)*1000.0/self.font_size + nl+=1 + continue + if(c==' '): + sep=i + l+=cw.get(c,0) + if(l>wmax): + #Automatic line break + if(sep==-1): + if(self.x>self.l_margin): + #Move to next line + self.x=self.l_margin + self.y+=h + w=self.w-self.r_margin-self.x + wmax=(w-2*self.c_margin)*1000.0/self.font_size + i+=1 + nl+=1 + continue + if(i==j): + i+=1 + self.cell(w,h,substr(s,j,i-j),0,2,'',0,link) + else: + self.cell(w,h,substr(s,j,sep-j),0,2,'',0,link) + i=sep+1 + sep=-1 + j=i + l=0 + if(nl==1): + self.x=self.l_margin + w=self.w-self.r_margin-self.x + wmax=(w-2*self.c_margin)*1000.0/self.font_size + nl+=1 + else: + i+=1 + #Last chunk + if(i!=j): + self.cell(l/1000.0*self.font_size,h,substr(s,j),0,0,'',0,link) + + def image(self, name,x,y,w=0,h=0,type='',link=''): + "Put an image on the page" + if not name in self.images: + #First use of image, get info + if(type==''): + pos=name.rfind('.') + if(not pos): + self.error('image file has no extension and no type was specified: 
'+name) + type=substr(name,pos+1) + type=type.lower() + if(type=='jpg' or type=='jpeg'): + info=self._parsejpg(name) + elif(type=='png'): + info=self._parsepng(name) + else: + #Allow for additional formats + mtd='_parse'+type + if not hasattr(self,mtd): + self.error('Unsupported image type: '+type) + info=self.mtd(name) + info['i']=len(self.images)+1 + self.images[name]=info + else: + info=self.images[name] + #Automatic width and height calculation if needed + if(w==0 and h==0): + #Put image at 72 dpi + w=info['w']/self.k + h=info['h']/self.k + if(w==0): + w=h*info['w']/info['h'] + if(h==0): + h=w*info['h']/info['w'] + self._out(sprintf('q %.2f 0 0 %.2f %.2f %.2f cm /I%d Do Q',w*self.k,h*self.k,x*self.k,(self.h-(y+h))*self.k,info['i'])) + if(link): + self.link(x,y,w,h,link) + + def ln(self, h=''): + "Line Feed; default value is last cell height" + self.x=self.l_margin + if(isinstance(h, basestring)): + self.y+=self.lasth + else: + self.y+=h + + def get_x(self): + "Get x position" + return self.x + + def set_x(self, x): + "Set x position" + if(x>=0): + self.x=x + else: + self.x=self.w+x + + def get_y(self): + "Get y position" + return self.y + + def set_y(self, y): + "Set y position and reset x" + self.x=self.l_margin + if(y>=0): + self.y=y + else: + self.y=self.h+y + + def set_xy(self, x,y): + "Set x and y positions" + self.set_y(y) + self.set_x(x) + + def output(self, name='',dest=''): + "Output PDF to some destination" + #Finish document if necessary + if(self.state<3): + self.close() + #Normalize parameters + # if(type(dest)==type(bool())): + # if dest: + # dest='D' + # else: + # dest='F' + dest=dest.upper() + if(dest==''): + if(name==''): + name='doc.pdf' + dest='I' + else: + dest='F' + if dest=='I': + print self.buffer + elif dest=='D': + print self.buffer + elif dest=='F': + #Save to local file + f=file(name,'wb') + if(not f): + self.error('Unable to create output file: '+name) + f.write(self.buffer) + f.close() + elif dest=='S': + #Return as a string + 
return self.buffer + else: + self.error('Incorrect output destination: '+dest) + return '' + +# ****************************************************************************** +# * * +# * Protected methods * +# * * +# *******************************************************************************/ + def _dochecks(self): + #Check for locale-related bug +# if(1.1==1): +# self.error("Don\'t alter the locale before including class file"); + #Check for decimal separator + if(sprintf('%.1f',1.0)!='1.0'): + import locale + locale.setlocale(locale.LC_NUMERIC,'C') + + def _getfontpath(self): + return FPDF_FONT_DIR+'/' + + def _putpages(self): + nb=self.page + if hasattr(self,'str_alias_nb_pages'): + #Replace number of pages + for n in xrange(1,nb+1): + self.pages[n]=self.pages[n].replace(self.str_alias_nb_pages,str(nb)) + if(self.def_orientation=='P'): + w_pt=self.fw_pt + h_pt=self.fh_pt + else: + w_pt=self.fh_pt + h_pt=self.fw_pt + if self.compress: + filter='/Filter /FlateDecode ' + else: + filter='' + for n in xrange(1,nb+1): + #Page + self._newobj() + self._out('<</Type /Page') + self._out('/Parent 1 0 R') + if n in self.orientation_changes: + self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',h_pt,w_pt)) + self._out('/Resources 2 0 R') + if self.page_links and n in self.page_links: + #Links + annots='/Annots [' + for pl in self.page_links[n]: + rect=sprintf('%.2f %.2f %.2f %.2f',pl[0],pl[1],pl[0]+pl[2],pl[1]-pl[3]) + annots+='<</Type /Annot /Subtype /Link /Rect ['+rect+'] /Border [0 0 0] ' + if(isinstance(pl[4],basestring)): + annots+='/A <</S /URI /URI '+self._textstring(pl[4])+'>>>>' + else: + l=self.links[pl[4]] + if l[0] in self.orientation_changes: + h=w_pt + else: + h=h_pt + annots+=sprintf('/Dest [%d 0 R /XYZ 0 %.2f null]>>',1+2*l[0],h-l[1]*self.k) + self._out(annots+']') + self._out('/Contents '+str(self.n+1)+' 0 R>>') + self._out('endobj') + #Page content + if self.compress: + p = zlib.compress(self.pages[n]) + else: + p = self.pages[n] + self._newobj() + 
self._out('<<'+filter+'/Length '+str(len(p))+'>>') + self._putstream(p) + self._out('endobj') + #Pages root + self.offsets[1]=len(self.buffer) + self._out('1 0 obj') + self._out('<</Type /Pages') + kids='/Kids [' + for i in xrange(0,nb): + kids+=str(3+2*i)+' 0 R ' + self._out(kids+']') + self._out('/Count '+str(nb)) + self._out(sprintf('/MediaBox [0 0 %.2f %.2f]',w_pt,h_pt)) + self._out('>>') + self._out('endobj') + + def _putfonts(self): + nf=self.n + for diff in self.diffs: + #Encodings + self._newobj() + self._out('<</Type /Encoding /BaseEncoding /WinAnsiEncoding /Differences ['+self.diffs[diff]+']>>') + self._out('endobj') + for name,info in self.font_files.iteritems(): + #Font file embedding + self._newobj() + self.font_files[name]['n']=self.n + font='' + f=file(self._getfontpath()+name,'rb',1) + if(not f): + self.error('Font file not found') + font=f.read() + f.close() + compressed=(substr(name,-2)=='.z') + if(not compressed and 'length2' in info): + header=(ord(font[0])==128) + if(header): + #Strip first binary header + font=substr(font,6) + if(header and ord(font[info['length1']])==128): + #Strip second binary header + font=substr(font,0,info['length1'])+substr(font,info['length1']+6) + self._out('<</Length '+str(len(font))) + if(compressed): + self._out('/Filter /FlateDecode') + self._out('/Length1 '+str(info['length1'])) + if('length2' in info): + self._out('/Length2 '+str(info['length2'])+' /Length3 0') + self._out('>>') + self._putstream(font) + self._out('endobj') + for k,font in self.fonts.iteritems(): + #Font objects + self.fonts[k]['n']=self.n+1 + type=font['type'] + name=font['name'] + if(type=='core'): + #Standard font + self._newobj() + self._out('<</Type /Font') + self._out('/BaseFont /'+name) + self._out('/Subtype /Type1') + if(name!='Symbol' and name!='ZapfDingbats'): + self._out('/Encoding /WinAnsiEncoding') + self._out('>>') + self._out('endobj') + elif(type=='Type1' or type=='TrueType'): + #Additional Type1 or TrueType font + self._newobj() 
+ self._out('<</Type /Font') + self._out('/BaseFont /'+name) + self._out('/Subtype /'+type) + self._out('/FirstChar 32 /LastChar 255') + self._out('/Widths '+str(self.n+1)+' 0 R') + self._out('/FontDescriptor '+str(self.n+2)+' 0 R') + if(font['enc']): + if('diff' in font): + self._out('/Encoding '+str(nf+font['diff'])+' 0 R') + else: + self._out('/Encoding /WinAnsiEncoding') + self._out('>>') + self._out('endobj') + #Widths + self._newobj() + cw=font['cw'] + s='[' + for i in xrange(32,256): + # Get doesn't rise exception; returns 0 instead of None if not set + s+=str(cw.get(chr(i)) or 0)+' ' + self._out(s+']') + self._out('endobj') + #Descriptor + self._newobj() + s='<</Type /FontDescriptor /FontName /'+name + for k,v in font['desc'].iteritems(): + s+=' /'+str(k)+' '+str(v) + filename=font['file'] + if(filename): + s+=' /FontFile' + if type!='Type1': + s+='2' + s+=' '+str(self.font_files[filename]['n'])+' 0 R' + self._out(s+'>>') + self._out('endobj') + else: + #Allow for additional types + mtd='_put'+type.lower() + if(not method_exists(self,mtd)): + self.error('Unsupported font type: '+type) + self.mtd(font) + + def _putimages(self): + filter='' + if self.compress: + filter='/Filter /FlateDecode ' + for filename,info in self.images.iteritems(): + self._newobj() + self.images[filename]['n']=self.n + self._out('<</Type /XObject') + self._out('/Subtype /Image') + self._out('/Width '+str(info['w'])) + self._out('/Height '+str(info['h'])) + if(info['cs']=='Indexed'): + self._out('/ColorSpace [/Indexed /DeviceRGB '+str(len(info['pal'])/3-1)+' '+str(self.n+1)+' 0 R]') + else: + self._out('/ColorSpace /'+info['cs']) + if(info['cs']=='DeviceCMYK'): + self._out('/Decode [1 0 1 0 1 0 1 0]') + self._out('/BitsPerComponent '+str(info['bpc'])) + if 'f' in info: + self._out('/Filter /'+info['f']) + if 'parms' in info: + self._out(info['parms']) + if('trns' in info and isinstance(info['trns'],list)): + trns='' + for i in xrange(0,len(info['trns'])): + trns+=str(info['trns'][i])+' 
'+str(info['trns'][i])+' ' + self._out('/Mask ['+trns+']') + self._out('/Length '+str(len(info['data']))+'>>') + self._putstream(info['data']) + self.images[filename]['data'] = None + self._out('endobj') + #Palette + if(info['cs']=='Indexed'): + self._newobj() + if self.compress: + pal=zlib.compress(info['pal']) + else: + pal=info['pal'] + self._out('<<'+filter+'/Length '+str(len(pal))+'>>') + self._putstream(pal) + self._out('endobj') + + def _putxobjectdict(self): + for image in self.images.values(): + self._out('/I'+str(image['i'])+' '+str(image['n'])+' 0 R') + + def _putresourcedict(self): + self._out('/ProcSet [/PDF /Text /ImageB /ImageC /ImageI]') + self._out('/Font <<') + for font in self.fonts.values(): + self._out('/F'+str(font['i'])+' '+str(font['n'])+' 0 R') + self._out('>>') + self._out('/XObject <<') + self._putxobjectdict() + self._out('>>') + + def _putresources(self): + self._putfonts() + self._putimages() + #Resource dictionary + self.offsets[2]=len(self.buffer) + self._out('2 0 obj') + self._out('<<') + self._putresourcedict() + self._out('>>') + self._out('endobj') + + def _putinfo(self): + self._out('/Producer '+self._textstring('PyFPDF '+FPDF_VERSION+' http://pyfpdf.googlecode.com/')) + if hasattr(self,'title'): + self._out('/Title '+self._textstring(self.title)) + if hasattr(self,'subject'): + self._out('/Subject '+self._textstring(self.subject)) + if hasattr(self,'author'): + self._out('/Author '+self._textstring(self.author)) + if hasattr (self,'keywords'): + self._out('/Keywords '+self._textstring(self.keywords)) + if hasattr(self,'creator'): + self._out('/Creator '+self._textstring(self.creator)) + self._out('/CreationDate '+self._textstring('D:'+datetime.now().strftime('%Y%m%d%H%M%S'))) + + def _putcatalog(self): + self._out('/Type /Catalog') + self._out('/Pages 1 0 R') + if(self.zoom_mode=='fullpage'): + self._out('/OpenAction [3 0 R /Fit]') + elif(self.zoom_mode=='fullwidth'): + self._out('/OpenAction [3 0 R /FitH null]') + 
elif(self.zoom_mode=='real'): + self._out('/OpenAction [3 0 R /XYZ null null 1]') + elif(not isinstance(self.zoom_mode,basestring)): + self._out('/OpenAction [3 0 R /XYZ null null '+(self.zoom_mode/100)+']') + if(self.layout_mode=='single'): + self._out('/PageLayout /SinglePage') + elif(self.layout_mode=='continuous'): + self._out('/PageLayout /OneColumn') + elif(self.layout_mode=='two'): + self._out('/PageLayout /TwoColumnLeft') + + def _putheader(self): + self._out('%PDF-'+self.pdf_version) + + def _puttrailer(self): + self._out('/Size '+str(self.n+1)) + self._out('/Root '+str(self.n)+' 0 R') + self._out('/Info '+str(self.n-1)+' 0 R') + + def _enddoc(self): + self._putheader() + self._putpages() + self._putresources() + #Info + self._newobj() + self._out('<<') + self._putinfo() + self._out('>>') + self._out('endobj') + #Catalog + self._newobj() + self._out('<<') + self._putcatalog() + self._out('>>') + self._out('endobj') + #Cross-ref + o=len(self.buffer) + self._out('xref') + self._out('0 '+(str(self.n+1))) + self._out('0000000000 65535 f ') + for i in xrange(1,self.n+1): + self._out(sprintf('%010d 00000 n ',self.offsets[i])) + #Trailer + self._out('trailer') + self._out('<<') + self._puttrailer() + self._out('>>') + self._out('startxref') + self._out(o) + self._out('%%EOF') + self.state=3 + + def _beginpage(self, orientation): + self.page+=1 + self.pages[self.page]='' + self.state=2 + self.x=self.l_margin + self.y=self.t_margin + self.font_family='' + #Page orientation + if(not orientation): + orientation=self.def_orientation + else: + orientation=orientation[0].upper() + if(orientation!=self.def_orientation): + self.orientation_changes[self.page]=1 + if(orientation!=self.cur_orientation): + #Change orientation + if(orientation=='P'): + self.w_pt=self.fw_pt + self.h_pt=self.fh_pt + self.w=self.fw + self.h=self.fh + else: + self.w_pt=self.fh_pt + self.h_pt=self.fw_pt + self.w=self.fh + self.h=self.fw + self.page_break_trigger=self.h-self.b_margin + 
self.cur_orientation=orientation + + def _endpage(self): + #End of page contents + self.state=1 + + def _newobj(self): + #Begin a new object + self.n+=1 + self.offsets[self.n]=len(self.buffer) + self._out(str(self.n)+' 0 obj') + + def _dounderline(self, x,y,txt): + #Underline text + up=self.current_font['up'] + ut=self.current_font['ut'] + w=self.get_string_width(txt)+self.ws*txt.count(' ') + return sprintf('%.2f %.2f %.2f %.2f re f',x*self.k,(self.h-(y-up/1000.0*self.font_size))*self.k,w*self.k,-ut/1000.0*self.font_size_pt) + + def _parsejpg(self, filename): + # Extract info from a JPEG file + if Image is None: + self.error('PIL not installed') + try: + f = open(filename, 'rb') + im = Image.open(f) + except Exception, e: + self.error('Missing or incorrect image file: %s. error: %s' % (filename, str(e))) + else: + a = im.size + # We shouldn't get into here, as Jpeg is RGB=8bpp right(?), but, just in case... + bpc=8 + if im.mode == 'RGB': + colspace='DeviceRGB' + elif im.mode == 'CMYK': + colspace='DeviceCMYK' + else: + colspace='DeviceGray' + + # Read whole file from the start + f.seek(0) + data = f.read() + f.close() + return {'w':a[0],'h':a[1],'cs':colspace,'bpc':bpc,'f':'DCTDecode','data':data} + + def _parsepng(self, name): + #Extract info from a PNG file + if name.startswith("http://") or name.startswith("https://"): + import urllib + f = urllib.urlopen(name) + else: + f=open(name,'rb') + if(not f): + self.error("Can't open image file: "+name) + #Check signature + if(f.read(8)!='\x89'+'PNG'+'\r'+'\n'+'\x1a'+'\n'): + self.error('Not a PNG file: '+name) + #Read header chunk + f.read(4) + if(f.read(4)!='IHDR'): + self.error('Incorrect PNG file: '+name) + w=self._freadint(f) + h=self._freadint(f) + bpc=ord(f.read(1)) + if(bpc>8): + self.error('16-bit depth not supported: '+name) + ct=ord(f.read(1)) + if(ct==0): + colspace='DeviceGray' + elif(ct==2): + colspace='DeviceRGB' + elif(ct==3): + colspace='Indexed' + else: + self.error('Alpha channel not supported: 
'+name) + if(ord(f.read(1))!=0): + self.error('Unknown compression method: '+name) + if(ord(f.read(1))!=0): + self.error('Unknown filter method: '+name) + if(ord(f.read(1))!=0): + self.error('Interlacing not supported: '+name) + f.read(4) + parms='/DecodeParms <</Predictor 15 /Colors ' + if ct==2: + parms+='3' + else: + parms+='1' + parms+=' /BitsPerComponent '+str(bpc)+' /Columns '+str(w)+'>>' + #Scan chunks looking for palette, transparency and image data + pal='' + trns='' + data='' + n=1 + while n != None: + n=self._freadint(f) + type=f.read(4) + if(type=='PLTE'): + #Read palette + pal=f.read(n) + f.read(4) + elif(type=='tRNS'): + #Read transparency info + t=f.read(n) + if(ct==0): + trns=[ord(substr(t,1,1)),] + elif(ct==2): + trns=[ord(substr(t,1,1)),ord(substr(t,3,1)),ord(substr(t,5,1))] + else: + pos=t.find('\x00') + if(pos!=-1): + trns=[pos,] + f.read(4) + elif(type=='IDAT'): + #Read image data block + data+=f.read(n) + f.read(4) + elif(type=='IEND'): + break + else: + f.read(n+4) + if(colspace=='Indexed' and not pal): + self.error('Missing palette in '+name) + f.close() + return {'w':w,'h':h,'cs':colspace,'bpc':bpc,'f':'FlateDecode','parms':parms,'pal':pal,'trns':trns,'data':data} + + def _freadint(self, f): + #Read a 4-byte integer from file + try: + return struct.unpack('>HH',f.read(4))[1] + except: + return None + + def _textstring(self, s): + #Format a text string + return '('+self._escape(s)+')' + + def _escape(self, s): + #Add \ before \, ( and ) + return s.replace('\\','\\\\').replace(')','\\)').replace('(','\\(') + + def _putstream(self, s): + self._out('stream') + self._out(s) + self._out('endstream') + + def _out(self, s): + #Add a line to the document + if(self.state==2): + self.pages[self.page]+=s+"\n" + else: + self.buffer+=str(s)+"\n" + + def interleaved2of5(self, txt, x, y, w=1.0, h=10.0): + "Barcode I2of5 (numeric), adds a 0 if odd lenght" + narrow = w / 3.0 + wide = w + + # wide/narrow codes for the digits + bar_char={} + bar_char['0'] = 
'nnwwn' + bar_char['1'] = 'wnnnw' + bar_char['2'] = 'nwnnw' + bar_char['3'] = 'wwnnn' + bar_char['4'] = 'nnwnw' + bar_char['5'] = 'wnwnn' + bar_char['6'] = 'nwwnn' + bar_char['7'] = 'nnnww' + bar_char['8'] = 'wnnwn' + bar_char['9'] = 'nwnwn' + bar_char['A'] = 'nn' + bar_char['Z'] = 'wn' + + self.set_fill_color(0) + code = txt + # add leading zero if code-length is odd + if len(code) % 2 != 0: + code = '0' + code + + # add start and stop codes + code = 'AA' + code.lower() + 'ZA' + + for i in xrange(0, len(code), 2): + # choose next pair of digits + char_bar = code[i]; + char_space = code[i+1]; + # check whether it is a valid digit + if not char_bar in bar_char.keys(): + raise RuntimeError ('Caractr "%s" invlido para el cdigo de barras I25: ' % char_bar) + if not char_space in bar_char.keys(): + raise RuntimeError ('Caractr "%s" invlido para el cdigo de barras I25: ' % char_space) + + # create a wide/narrow-sequence (first digit=bars, second digit=spaces) + seq = '' + for s in xrange(0, len(bar_char[char_bar])): + seq += bar_char[char_bar][s] + bar_char[char_space][s] + + for bar in xrange(0, len(seq)): + # set line_width depending on value + if seq[bar] == 'n': + line_width = narrow + else: + line_width = wide + + # draw every second value, because the second digit of the pair is represented by the spaces + if bar % 2 == 0: + self.rect(x, y, line_width, h, 'F') + + x += line_width + + + def code39(self, txt, x, y, w=1.5, h=5.0): + "Barcode 3of9" + wide = w + narrow = w /3.0 + gap = narrow + + bar_char={} + bar_char['0'] = 'nnnwwnwnn' + bar_char['1'] = 'wnnwnnnnw' + bar_char['2'] = 'nnwwnnnnw' + bar_char['3'] = 'wnwwnnnnn' + bar_char['4'] = 'nnnwwnnnw' + bar_char['5'] = 'wnnwwnnnn' + bar_char['6'] = 'nnwwwnnnn' + bar_char['7'] = 'nnnwnnwnw' + bar_char['8'] = 'wnnwnnwnn' + bar_char['9'] = 'nnwwnnwnn' + bar_char['A'] = 'wnnnnwnnw' + bar_char['B'] = 'nnwnnwnnw' + bar_char['C'] = 'wnwnnwnnn' + bar_char['D'] = 'nnnnwwnnw' + bar_char['E'] = 'wnnnwwnnn' + bar_char['F'] = 
'nnwnwwnnn' + bar_char['G'] = 'nnnnnwwnw' + bar_char['H'] = 'wnnnnwwnn' + bar_char['I'] = 'nnwnnwwnn' + bar_char['J'] = 'nnnnwwwnn' + bar_char['K'] = 'wnnnnnnww' + bar_char['L'] = 'nnwnnnnww' + bar_char['M'] = 'wnwnnnnwn' + bar_char['N'] = 'nnnnwnnww' + bar_char['O'] = 'wnnnwnnwn' + bar_char['P'] = 'nnwnwnnwn' + bar_char['Q'] = 'nnnnnnwww' + bar_char['R'] = 'wnnnnnwwn' + bar_char['S'] = 'nnwnnnwwn' + bar_char['T'] = 'nnnnwnwwn' + bar_char['U'] = 'wwnnnnnnw' + bar_char['V'] = 'nwwnnnnnw' + bar_char['W'] = 'wwwnnnnnn' + bar_char['X'] = 'nwnnwnnnw' + bar_char['Y'] = 'wwnnwnnnn' + bar_char['Z'] = 'nwwnwnnnn' + bar_char['-'] = 'nwnnnnwnw' + bar_char['.'] = 'wwnnnnwnn' + bar_char[' '] = 'nwwnnnwnn' + bar_char['*'] = 'nwnnwnwnn' + bar_char['$'] = 'nwnwnwnnn' + bar_char['/'] = 'nwnwnnnwn' + bar_char['+'] = 'nwnnnwnwn' + bar_char['%'] = 'nnnwnwnwn' + + self.set_fill_color(0) + code = txt + + code = code.upper() + for i in xrange (0, len(code), 2): + char_bar = code[i]; + + if not char_bar in bar_char.keys(): + raise RuntimeError ('Caracter "%s" invlido para el cdigo de barras' % char_bar) + + seq= '' + for s in xrange(0, len(bar_char[char_bar])): + seq += bar_char[char_bar][s] + + for bar in xrange(0, len(seq)): + if seq[bar] == 'n': + line_width = narrow + else: + line_width = wide + + if bar % 2 == 0: + self.rect(x,y,line_width,h,'F') + x += line_width + x += gap + +#End of class + +# Fonts: + +fpdf_charwidths['courier']={} + +for i in xrange(0,256): + fpdf_charwidths['courier'][chr(i)]=600 + fpdf_charwidths['courierB']=fpdf_charwidths['courier'] + fpdf_charwidths['courierI']=fpdf_charwidths['courier'] + fpdf_charwidths['courierBI']=fpdf_charwidths['courier'] + +fpdf_charwidths['helvetica']={ + '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278, + 
'\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584, + ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667, + 'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944, + 'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833, + 'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556, + '\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000, + '\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333, + '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667, + '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722, + 
'\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556, + '\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500} + +fpdf_charwidths['helveticaB']={ + '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278, + '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584, + ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722, + 'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944, + 'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889, + 'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556, + '\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000, + 
'\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333, + '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722, + '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611, + '\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556 +} + +fpdf_charwidths['helveticaBI']={ + '\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278, + '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':333,'"':474,'#':556,'$':556,'%':889,'&':722,'\'':238,'(':333,')':333,'*':389,'+':584, + ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':333,';':333,'<':584,'=':584,'>':584,'?':611,'@':975,'A':722, + 'B':722,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':556,'K':722,'L':611,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944, + 
'X':667,'Y':667,'Z':611,'[':333,'\\':278,']':333,'^':584,'_':556,'`':333,'a':556,'b':611,'c':556,'d':611,'e':556,'f':333,'g':611,'h':611,'i':278,'j':278,'k':556,'l':278,'m':889, + 'n':611,'o':611,'p':611,'q':611,'r':389,'s':556,'t':333,'u':611,'v':556,'w':778,'x':556,'y':556,'z':500,'{':389,'|':280,'}':389,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':278,'\x83':556, + '\x84':500,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':278,'\x92':278,'\x93':500,'\x94':500,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000, + '\x9a':556,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':280,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333, + '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':611,'\xb6':556,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722, + '\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':556,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':611,'\xf1':611, + '\xf2':611,'\xf3':611,'\xf4':611,'\xf5':611,'\xf6':611,'\xf7':584,'\xf8':611,'\xf9':611,'\xfa':611,'\xfb':611,'\xfc':611,'\xfd':556,'\xfe':611,'\xff':556} + +fpdf_charwidths['helveticaI']={ + 
'\x00':278,'\x01':278,'\x02':278,'\x03':278,'\x04':278,'\x05':278,'\x06':278,'\x07':278,'\x08':278,'\t':278,'\n':278,'\x0b':278,'\x0c':278,'\r':278,'\x0e':278,'\x0f':278,'\x10':278,'\x11':278,'\x12':278,'\x13':278,'\x14':278,'\x15':278, + '\x16':278,'\x17':278,'\x18':278,'\x19':278,'\x1a':278,'\x1b':278,'\x1c':278,'\x1d':278,'\x1e':278,'\x1f':278,' ':278,'!':278,'"':355,'#':556,'$':556,'%':889,'&':667,'\'':191,'(':333,')':333,'*':389,'+':584, + ',':278,'-':333,'.':278,'/':278,'0':556,'1':556,'2':556,'3':556,'4':556,'5':556,'6':556,'7':556,'8':556,'9':556,':':278,';':278,'<':584,'=':584,'>':584,'?':556,'@':1015,'A':667, + 'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':722,'I':278,'J':500,'K':667,'L':556,'M':833,'N':722,'O':778,'P':667,'Q':778,'R':722,'S':667,'T':611,'U':722,'V':667,'W':944, + 'X':667,'Y':667,'Z':611,'[':278,'\\':278,']':278,'^':469,'_':556,'`':333,'a':556,'b':556,'c':500,'d':556,'e':556,'f':278,'g':556,'h':556,'i':222,'j':222,'k':500,'l':222,'m':833, + 'n':556,'o':556,'p':556,'q':556,'r':333,'s':500,'t':278,'u':556,'v':500,'w':722,'x':500,'y':500,'z':500,'{':334,'|':260,'}':334,'~':584,'\x7f':350,'\x80':556,'\x81':350,'\x82':222,'\x83':556, + '\x84':333,'\x85':1000,'\x86':556,'\x87':556,'\x88':333,'\x89':1000,'\x8a':667,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':222,'\x92':222,'\x93':333,'\x94':333,'\x95':350,'\x96':556,'\x97':1000,'\x98':333,'\x99':1000, + '\x9a':500,'\x9b':333,'\x9c':944,'\x9d':350,'\x9e':500,'\x9f':667,'\xa0':278,'\xa1':333,'\xa2':556,'\xa3':556,'\xa4':556,'\xa5':556,'\xa6':260,'\xa7':556,'\xa8':333,'\xa9':737,'\xaa':370,'\xab':556,'\xac':584,'\xad':333,'\xae':737,'\xaf':333, + '\xb0':400,'\xb1':584,'\xb2':333,'\xb3':333,'\xb4':333,'\xb5':556,'\xb6':537,'\xb7':278,'\xb8':333,'\xb9':333,'\xba':365,'\xbb':556,'\xbc':834,'\xbd':834,'\xbe':834,'\xbf':611,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667, + 
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':278,'\xcd':278,'\xce':278,'\xcf':278,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':584,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':667,'\xde':667,'\xdf':611,'\xe0':556,'\xe1':556,'\xe2':556,'\xe3':556,'\xe4':556,'\xe5':556,'\xe6':889,'\xe7':500,'\xe8':556,'\xe9':556,'\xea':556,'\xeb':556,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':556,'\xf1':556, + '\xf2':556,'\xf3':556,'\xf4':556,'\xf5':556,'\xf6':556,'\xf7':584,'\xf8':611,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500} + +fpdf_charwidths['symbol']={ + '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250, + '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':713,'#':500,'$':549,'%':833,'&':778,'\'':439,'(':333,')':333,'*':500,'+':549, + ',':250,'-':549,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':549,'=':549,'>':549,'?':444,'@':549,'A':722, + 'B':667,'C':722,'D':612,'E':611,'F':763,'G':603,'H':722,'I':333,'J':631,'K':722,'L':686,'M':889,'N':722,'O':722,'P':768,'Q':741,'R':556,'S':592,'T':611,'U':690,'V':439,'W':768, + 'X':645,'Y':795,'Z':611,'[':333,'\\':863,']':333,'^':658,'_':500,'`':500,'a':631,'b':549,'c':549,'d':494,'e':439,'f':521,'g':411,'h':603,'i':329,'j':603,'k':549,'l':549,'m':576, + 'n':521,'o':549,'p':549,'q':521,'r':549,'s':603,'t':439,'u':576,'v':713,'w':686,'x':493,'y':686,'z':494,'{':480,'|':200,'}':480,'~':549,'\x7f':0,'\x80':0,'\x81':0,'\x82':0,'\x83':0, + 
'\x84':0,'\x85':0,'\x86':0,'\x87':0,'\x88':0,'\x89':0,'\x8a':0,'\x8b':0,'\x8c':0,'\x8d':0,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0, + '\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':750,'\xa1':620,'\xa2':247,'\xa3':549,'\xa4':167,'\xa5':713,'\xa6':500,'\xa7':753,'\xa8':753,'\xa9':753,'\xaa':753,'\xab':1042,'\xac':987,'\xad':603,'\xae':987,'\xaf':603, + '\xb0':400,'\xb1':549,'\xb2':411,'\xb3':549,'\xb4':549,'\xb5':713,'\xb6':494,'\xb7':460,'\xb8':549,'\xb9':549,'\xba':549,'\xbb':549,'\xbc':1000,'\xbd':603,'\xbe':1000,'\xbf':658,'\xc0':823,'\xc1':686,'\xc2':795,'\xc3':987,'\xc4':768,'\xc5':768, + '\xc6':823,'\xc7':768,'\xc8':768,'\xc9':713,'\xca':713,'\xcb':713,'\xcc':713,'\xcd':713,'\xce':713,'\xcf':713,'\xd0':768,'\xd1':713,'\xd2':790,'\xd3':790,'\xd4':890,'\xd5':823,'\xd6':549,'\xd7':250,'\xd8':713,'\xd9':603,'\xda':603,'\xdb':1042, + '\xdc':987,'\xdd':603,'\xde':987,'\xdf':603,'\xe0':494,'\xe1':329,'\xe2':790,'\xe3':790,'\xe4':786,'\xe5':713,'\xe6':384,'\xe7':384,'\xe8':384,'\xe9':384,'\xea':384,'\xeb':384,'\xec':494,'\xed':494,'\xee':494,'\xef':494,'\xf0':0,'\xf1':329, + '\xf2':274,'\xf3':686,'\xf4':686,'\xf5':686,'\xf6':384,'\xf7':384,'\xf8':384,'\xf9':384,'\xfa':384,'\xfb':384,'\xfc':494,'\xfd':494,'\xfe':494,'\xff':0} + +fpdf_charwidths['times']={ + '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250, + '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':408,'#':500,'$':500,'%':833,'&':778,'\'':180,'(':333,')':333,'*':500,'+':564, + ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':278,';':278,'<':564,'=':564,'>':564,'?':444,'@':921,'A':722, + 
'B':667,'C':667,'D':722,'E':611,'F':556,'G':722,'H':722,'I':333,'J':389,'K':722,'L':611,'M':889,'N':722,'O':722,'P':556,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':722,'W':944, + 'X':722,'Y':722,'Z':611,'[':333,'\\':278,']':333,'^':469,'_':500,'`':333,'a':444,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':500,'i':278,'j':278,'k':500,'l':278,'m':778, + 'n':500,'o':500,'p':500,'q':500,'r':333,'s':389,'t':278,'u':500,'v':500,'w':722,'x':500,'y':500,'z':444,'{':480,'|':200,'}':480,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500, + '\x84':444,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':889,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':444,'\x94':444,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':980, + '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':200,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':564,'\xad':333,'\xae':760,'\xaf':333, + '\xb0':400,'\xb1':564,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':453,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':444,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722, + '\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':564,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':722,'\xde':556,'\xdf':500,'\xe0':444,'\xe1':444,'\xe2':444,'\xe3':444,'\xe4':444,'\xe5':444,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500, + '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':564,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':500,'\xfe':500,'\xff':500} + +fpdf_charwidths['timesB']={ + 
'\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250, + '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':555,'#':500,'$':500,'%':1000,'&':833,'\'':278,'(':333,')':333,'*':500,'+':570, + ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':930,'A':722, + 'B':667,'C':722,'D':722,'E':667,'F':611,'G':778,'H':778,'I':389,'J':500,'K':778,'L':667,'M':944,'N':722,'O':778,'P':611,'Q':778,'R':722,'S':556,'T':667,'U':722,'V':722,'W':1000, + 'X':722,'Y':722,'Z':667,'[':333,'\\':278,']':333,'^':581,'_':500,'`':333,'a':500,'b':556,'c':444,'d':556,'e':444,'f':333,'g':500,'h':556,'i':278,'j':333,'k':556,'l':278,'m':833, + 'n':556,'o':500,'p':556,'q':556,'r':444,'s':389,'t':333,'u':556,'v':500,'w':722,'x':500,'y':500,'z':444,'{':394,'|':220,'}':394,'~':520,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500, + '\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':1000,'\x8d':350,'\x8e':667,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000, + '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':444,'\x9f':722,'\xa0':250,'\xa1':333,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':300,'\xab':500,'\xac':570,'\xad':333,'\xae':747,'\xaf':333, + '\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':556,'\xb6':540,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':330,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':722,'\xc1':722,'\xc2':722,'\xc3':722,'\xc4':722,'\xc5':722, + 
'\xc6':1000,'\xc7':722,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':778,'\xd3':778,'\xd4':778,'\xd5':778,'\xd6':778,'\xd7':570,'\xd8':778,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':722,'\xde':611,'\xdf':556,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556, + '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':500,'\xfe':556,'\xff':500} + +fpdf_charwidths['timesBI']={ + '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250, + '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':389,'"':555,'#':500,'$':500,'%':833,'&':778,'\'':278,'(':333,')':333,'*':500,'+':570, + ',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':570,'=':570,'>':570,'?':500,'@':832,'A':667, + 'B':667,'C':667,'D':722,'E':667,'F':667,'G':722,'H':778,'I':389,'J':500,'K':667,'L':611,'M':889,'N':722,'O':722,'P':611,'Q':722,'R':667,'S':556,'T':611,'U':722,'V':667,'W':889, + 'X':667,'Y':611,'Z':611,'[':333,'\\':278,']':333,'^':570,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':333,'g':500,'h':556,'i':278,'j':278,'k':500,'l':278,'m':778, + 'n':556,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':556,'v':444,'w':667,'x':500,'y':444,'z':389,'{':348,'|':220,'}':348,'~':570,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500, + 
'\x84':500,'\x85':1000,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':556,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':611,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':500,'\x94':500,'\x95':350,'\x96':500,'\x97':1000,'\x98':333,'\x99':1000, + '\x9a':389,'\x9b':333,'\x9c':722,'\x9d':350,'\x9e':389,'\x9f':611,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':220,'\xa7':500,'\xa8':333,'\xa9':747,'\xaa':266,'\xab':500,'\xac':606,'\xad':333,'\xae':747,'\xaf':333, + '\xb0':400,'\xb1':570,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':576,'\xb6':500,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':300,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':667,'\xc1':667,'\xc2':667,'\xc3':667,'\xc4':667,'\xc5':667, + '\xc6':944,'\xc7':667,'\xc8':667,'\xc9':667,'\xca':667,'\xcb':667,'\xcc':389,'\xcd':389,'\xce':389,'\xcf':389,'\xd0':722,'\xd1':722,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':570,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':611,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':722,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':556, + '\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':570,'\xf8':500,'\xf9':556,'\xfa':556,'\xfb':556,'\xfc':556,'\xfd':444,'\xfe':500,'\xff':444} + +fpdf_charwidths['timesI']={ + '\x00':250,'\x01':250,'\x02':250,'\x03':250,'\x04':250,'\x05':250,'\x06':250,'\x07':250,'\x08':250,'\t':250,'\n':250,'\x0b':250,'\x0c':250,'\r':250,'\x0e':250,'\x0f':250,'\x10':250,'\x11':250,'\x12':250,'\x13':250,'\x14':250,'\x15':250, + '\x16':250,'\x17':250,'\x18':250,'\x19':250,'\x1a':250,'\x1b':250,'\x1c':250,'\x1d':250,'\x1e':250,'\x1f':250,' ':250,'!':333,'"':420,'#':500,'$':500,'%':833,'&':778,'\'':214,'(':333,')':333,'*':500,'+':675, + 
',':250,'-':333,'.':250,'/':278,'0':500,'1':500,'2':500,'3':500,'4':500,'5':500,'6':500,'7':500,'8':500,'9':500,':':333,';':333,'<':675,'=':675,'>':675,'?':500,'@':920,'A':611, + 'B':611,'C':667,'D':722,'E':611,'F':611,'G':722,'H':722,'I':333,'J':444,'K':667,'L':556,'M':833,'N':667,'O':722,'P':611,'Q':722,'R':611,'S':500,'T':556,'U':722,'V':611,'W':833, + 'X':611,'Y':556,'Z':556,'[':389,'\\':278,']':389,'^':422,'_':500,'`':333,'a':500,'b':500,'c':444,'d':500,'e':444,'f':278,'g':500,'h':500,'i':278,'j':278,'k':444,'l':278,'m':722, + 'n':500,'o':500,'p':500,'q':500,'r':389,'s':389,'t':278,'u':500,'v':444,'w':667,'x':444,'y':444,'z':389,'{':400,'|':275,'}':400,'~':541,'\x7f':350,'\x80':500,'\x81':350,'\x82':333,'\x83':500, + '\x84':556,'\x85':889,'\x86':500,'\x87':500,'\x88':333,'\x89':1000,'\x8a':500,'\x8b':333,'\x8c':944,'\x8d':350,'\x8e':556,'\x8f':350,'\x90':350,'\x91':333,'\x92':333,'\x93':556,'\x94':556,'\x95':350,'\x96':500,'\x97':889,'\x98':333,'\x99':980, + '\x9a':389,'\x9b':333,'\x9c':667,'\x9d':350,'\x9e':389,'\x9f':556,'\xa0':250,'\xa1':389,'\xa2':500,'\xa3':500,'\xa4':500,'\xa5':500,'\xa6':275,'\xa7':500,'\xa8':333,'\xa9':760,'\xaa':276,'\xab':500,'\xac':675,'\xad':333,'\xae':760,'\xaf':333, + '\xb0':400,'\xb1':675,'\xb2':300,'\xb3':300,'\xb4':333,'\xb5':500,'\xb6':523,'\xb7':250,'\xb8':333,'\xb9':300,'\xba':310,'\xbb':500,'\xbc':750,'\xbd':750,'\xbe':750,'\xbf':500,'\xc0':611,'\xc1':611,'\xc2':611,'\xc3':611,'\xc4':611,'\xc5':611, + '\xc6':889,'\xc7':667,'\xc8':611,'\xc9':611,'\xca':611,'\xcb':611,'\xcc':333,'\xcd':333,'\xce':333,'\xcf':333,'\xd0':722,'\xd1':667,'\xd2':722,'\xd3':722,'\xd4':722,'\xd5':722,'\xd6':722,'\xd7':675,'\xd8':722,'\xd9':722,'\xda':722,'\xdb':722, + '\xdc':722,'\xdd':556,'\xde':611,'\xdf':500,'\xe0':500,'\xe1':500,'\xe2':500,'\xe3':500,'\xe4':500,'\xe5':500,'\xe6':667,'\xe7':444,'\xe8':444,'\xe9':444,'\xea':444,'\xeb':444,'\xec':278,'\xed':278,'\xee':278,'\xef':278,'\xf0':500,'\xf1':500, + 
'\xf2':500,'\xf3':500,'\xf4':500,'\xf5':500,'\xf6':500,'\xf7':675,'\xf8':500,'\xf9':500,'\xfa':500,'\xfb':500,'\xfc':500,'\xfd':444,'\xfe':500,'\xff':444} + +fpdf_charwidths['zapfdingbats']={ + '\x00':0,'\x01':0,'\x02':0,'\x03':0,'\x04':0,'\x05':0,'\x06':0,'\x07':0,'\x08':0,'\t':0,'\n':0,'\x0b':0,'\x0c':0,'\r':0,'\x0e':0,'\x0f':0,'\x10':0,'\x11':0,'\x12':0,'\x13':0,'\x14':0,'\x15':0, + '\x16':0,'\x17':0,'\x18':0,'\x19':0,'\x1a':0,'\x1b':0,'\x1c':0,'\x1d':0,'\x1e':0,'\x1f':0,' ':278,'!':974,'"':961,'#':974,'$':980,'%':719,'&':789,'\'':790,'(':791,')':690,'*':960,'+':939, + ',':549,'-':855,'.':911,'/':933,'0':911,'1':945,'2':974,'3':755,'4':846,'5':762,'6':761,'7':571,'8':677,'9':763,':':760,';':759,'<':754,'=':494,'>':552,'?':537,'@':577,'A':692, + 'B':786,'C':788,'D':788,'E':790,'F':793,'G':794,'H':816,'I':823,'J':789,'K':841,'L':823,'M':833,'N':816,'O':831,'P':923,'Q':744,'R':723,'S':749,'T':790,'U':792,'V':695,'W':776, + 'X':768,'Y':792,'Z':759,'[':707,'\\':708,']':682,'^':701,'_':826,'`':815,'a':789,'b':789,'c':707,'d':687,'e':696,'f':689,'g':786,'h':787,'i':713,'j':791,'k':785,'l':791,'m':873, + 'n':761,'o':762,'p':762,'q':759,'r':759,'s':892,'t':892,'u':788,'v':784,'w':438,'x':138,'y':277,'z':415,'{':392,'|':392,'}':668,'~':668,'\x7f':0,'\x80':390,'\x81':390,'\x82':317,'\x83':317, + '\x84':276,'\x85':276,'\x86':509,'\x87':509,'\x88':410,'\x89':410,'\x8a':234,'\x8b':234,'\x8c':334,'\x8d':334,'\x8e':0,'\x8f':0,'\x90':0,'\x91':0,'\x92':0,'\x93':0,'\x94':0,'\x95':0,'\x96':0,'\x97':0,'\x98':0,'\x99':0, + '\x9a':0,'\x9b':0,'\x9c':0,'\x9d':0,'\x9e':0,'\x9f':0,'\xa0':0,'\xa1':732,'\xa2':544,'\xa3':544,'\xa4':910,'\xa5':667,'\xa6':760,'\xa7':760,'\xa8':776,'\xa9':595,'\xaa':694,'\xab':626,'\xac':788,'\xad':788,'\xae':788,'\xaf':788, + 
'\xb0':788,'\xb1':788,'\xb2':788,'\xb3':788,'\xb4':788,'\xb5':788,'\xb6':788,'\xb7':788,'\xb8':788,'\xb9':788,'\xba':788,'\xbb':788,'\xbc':788,'\xbd':788,'\xbe':788,'\xbf':788,'\xc0':788,'\xc1':788,'\xc2':788,'\xc3':788,'\xc4':788,'\xc5':788, + '\xc6':788,'\xc7':788,'\xc8':788,'\xc9':788,'\xca':788,'\xcb':788,'\xcc':788,'\xcd':788,'\xce':788,'\xcf':788,'\xd0':788,'\xd1':788,'\xd2':788,'\xd3':788,'\xd4':894,'\xd5':838,'\xd6':1016,'\xd7':458,'\xd8':748,'\xd9':924,'\xda':748,'\xdb':918, + '\xdc':927,'\xdd':928,'\xde':928,'\xdf':834,'\xe0':873,'\xe1':828,'\xe2':924,'\xe3':924,'\xe4':917,'\xe5':930,'\xe6':931,'\xe7':463,'\xe8':883,'\xe9':836,'\xea':836,'\xeb':867,'\xec':867,'\xed':696,'\xee':696,'\xef':874,'\xf0':0,'\xf1':874, + '\xf2':760,'\xf3':946,'\xf4':771,'\xf5':865,'\xf6':771,'\xf7':888,'\xf8':967,'\xf9':888,'\xfa':831,'\xfb':873,'\xfc':927,'\xfd':970,'\xfe':918,'\xff':0} + + + ADDED gluon/contrib/pyfpdf/html.py Index: gluon/contrib/pyfpdf/html.py ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/html.py @@ -0,0 +1,459 @@ +# -*- coding: latin-1 -*- + +"HTML Renderer for FPDF.py" + +__author__ = "Mariano Reingart <reingart@gmail.com>" +__copyright__ = "Copyright (C) 2010 Mariano Reingart" +__license__ = "LGPL 3.0" + +# Inspired by tuto5.py and several examples from fpdf.org, html2fpdf, etc. 
+ +from fpdf import FPDF +from HTMLParser import HTMLParser + +DEBUG = False + +def px2mm(px): + return int(px)*25.4/72.0 + +def hex2dec(color = "#000000"): + if color: + r = int(color[1:3], 16) + g = int(color[3:5], 16) + b = int(color[5:7], 16) + return r, g, b + +class HTML2FPDF(HTMLParser): + "Render basic HTML to FPDF" + + def __init__(self, pdf, image_map, **kwargs): + HTMLParser.__init__(self) + self.image_map = image_map + self.style = {} + self.pre = False + self.href = '' + self.align = '' + self.page_links = {} + self.font_list = ("times","courier", "helvetica") + self.pdf = pdf + self.r = self.g = self.b = 0 + self.indent = 0 + self.bullet = [] + self.font_face="times" # initialize font + self.color=0 # initialize font color + self.set_font(kwargs.get("font","times"), kwargs.get("fontsize",12)) + self.table = None # table attributes + self.table_col_width = None # column (header) widths + self.table_col_index = None # current column index + self.td = None # cell attributes + self.th = False # header enabled + self.tr = None + self.theader = None # table header cells + self.tfooter = None # table footer cells + self.thead = None + self.tfoot = None + self.theader_out = self.tfooter_out = False + + def width2mm(self, length): + if length[-1]=='%': + total = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin + if self.table['width'][-1]=='%': + total *= int(self.table['width'][:-1])/100.0 + return int(length[:-1]) * total / 101.0 + else: + return int(length) / 6.0 + + def handle_data(self, txt): + if self.td is not None: # drawing a table? 
+ if 'width' not in self.td and 'colspan' not in self.td: + l = [self.table_col_width[self.table_col_index]] + elif 'colspan' in self.td: + i = self.table_col_index + colspan = int(self.td['colspan']) + l = self.table_col_width[i:i+colspan] + else: + l = [self.td.get('width','240')] + w = sum([self.width2mm(lenght) for lenght in l]) + h = int(self.td.get('height', 0)) / 4 or self.h*1.30 + self.table_h = h + border = int(self.table.get('border', 0)) + if not self.th: + align = self.td.get('align', 'L')[0].upper() + border = border and 'LR' + else: + self.set_style('B',True) + border = border or 'B' + align = 'C' + bgcolor = hex2dec(self.td.get('bgcolor', self.tr.get('bgcolor', ''))) + # parsing table header/footer (drawn later): + if self.thead is not None: + self.theader.append(((w,h,txt,border,0,align), bgcolor)) + if self.tfoot is not None: + self.tfooter.append(((w,h,txt,border,0,align), bgcolor)) + # check if reached end of page, add table footer and header: + height = h + (self.tfooter and self.tfooter[0][0][1] or 0) + if self.pdf.y+height>self.pdf.page_break_trigger and not self.th: + self.output_table_footer() + self.pdf.add_page() + self.theader_out = self.tfooter_out = False + if self.tfoot is None and self.thead is None: + if not self.theader_out: + self.output_table_header() + self.box_shadow(w, h, bgcolor) + if DEBUG: print "td cell", self.pdf.x, w, txt, "*" + self.pdf.cell(w,h,txt,border,0,align) + elif self.table is not None: + # ignore anything else than td inside a table + pass + elif self.align: + if DEBUG: print "cell", txt, "*" + self.pdf.cell(0,self.h,txt,0,1,self.align[0].upper(), self.href) + else: + txt = txt.replace("\n"," ") + if self.href: + self.put_link(self.href,txt) + else: + if DEBUG: print "write", txt, "*" + self.pdf.write(self.h,txt) + + def box_shadow(self, w, h, bgcolor): + if DEBUG: print "box_shadow", w, h, bgcolor + if bgcolor: + fill_color = self.pdf.fill_color + self.pdf.set_fill_color(*bgcolor) + self.pdf.rect(self.pdf.x, 
self.pdf.y, w, h, 'F') + self.pdf.fill_color = fill_color + + def output_table_header(self): + if self.theader: + b = self.b + x = self.pdf.x + self.pdf.set_x(self.table_offset) + self.set_style('B',True) + for cell, bgcolor in self.theader: + self.box_shadow(cell[0], cell[1], bgcolor) + self.pdf.cell(*cell) + self.set_style('B',b) + self.pdf.ln(self.theader[0][0][1]) + self.pdf.set_x(self.table_offset) + #self.pdf.set_x(x) + self.theader_out = True + + def output_table_footer(self): + if self.tfooter: + x = self.pdf.x + self.pdf.set_x(self.table_offset) + #TODO: self.output_table_sep() + for cell, bgcolor in self.tfooter: + self.box_shadow(cell[0], cell[1], bgcolor) + self.pdf.cell(*cell) + self.pdf.ln(self.tfooter[0][0][1]) + self.pdf.set_x(x) + if int(self.table.get('border', 0)): + self.output_table_sep() + self.tfooter_out = True + + def output_table_sep(self): + self.pdf.set_x(self.table_offset) + x1 = self.pdf.x + y1 = self.pdf.y + w = sum([self.width2mm(lenght) for lenght in self.table_col_width]) + self.pdf.line(x1,y1,x1+w,y1) + + + def handle_starttag(self, tag, attrs): + attrs = dict(attrs) + if DEBUG: print "STARTTAG", tag, attrs + if tag=='b' or tag=='i' or tag=='u': + self.set_style(tag,1) + if tag=='a': + self.href=attrs['href'] + if tag=='br': + self.pdf.ln(5) + if tag=='p': + self.pdf.ln(5) + if attrs: + self.align=attrs['align'].lower() + if tag in ('h1', 'h2', 'h3', 'h4', 'h5', 'h6'): + k = (2, 1.5, 1.17, 1, 0.83, 0.67)[int(tag[1])] + self.pdf.ln(5*k) + self.pdf.set_text_color(150,0,0) + self.pdf.set_font_size(12 * k) + if attrs: self.align = attrs.get('align') + if tag=='hr': + self.put_line() + if tag=='pre': + self.pdf.set_font('Courier','',11) + self.pdf.set_font_size(11) + self.set_style('B',False) + self.set_style('I',False) + self.pre = True + if tag=='blockquote': + self.set_text_color(100,0,45) + self.pdf.ln(3) + if tag=='ul': + self.indent+=1 + self.bullet.append('\x95') + if tag=='ol': + self.indent+=1 + self.bullet.append(0) + if 
tag=='li': + self.pdf.ln(self.h+2) + self.pdf.set_text_color(190,0,0) + bullet = self.bullet[self.indent-1] + if not isinstance(bullet, basestring): + bullet += 1 + self.bullet[self.indent-1] = bullet + bullet = "%s. " % bullet + self.pdf.write(self.h,'%s%s ' % (' '*5*self.indent, bullet)) + self.set_text_color() + if tag=='font': + if 'color' in attrs: + self.color = hex2dec(attrs['color']) + self.set_text_color(*color) + self.color = color + if 'face' in attrs and attrs['face'].lower() in self.font_list: + face = attrs.get('face').lower() + self.pdf.set_font(face) + self.font_face = face + if 'size' in attrs: + size = int(attrs.get('size')) + self.pdf.set_font(self.font_face, size=int(size)) + self.font_size = size + if tag=='table': + self.table = dict([(k.lower(), v) for k,v in attrs.items()]) + if not 'width' in self.table: + self.table['width'] = '100%' + if self.table['width'][-1]=='%': + w = self.pdf.w - self.pdf.r_margin - self.pdf.l_margin + w *= int(self.table['width'][:-1])/100.0 + self.table_offset = (self.pdf.w-w)/2.0 + self.table_col_width = [] + self.theader_out = self.tfooter_out = False + self.theader = [] + self.tfooter = [] + self.thead = None + self.tfoot = None + self.pdf.ln() + if tag=='tr': + self.tr = dict([(k.lower(), v) for k,v in attrs.items()]) + self.table_col_index = 0 + self.pdf.set_x(self.table_offset) + if tag=='td': + self.td = dict([(k.lower(), v) for k,v in attrs.items()]) + if tag=='th': + self.td = dict([(k.lower(), v) for k,v in attrs.items()]) + self.th = True + if self.td['width']: + self.table_col_width.append(self.td['width']) + if tag=='thead': + self.thead = {} + if tag=='tfoot': + self.tfoot = {} + if tag=='img': + if 'src' in attrs: + x = self.pdf.get_x() + y = self.pdf.get_y() + w = px2mm(attrs.get('width', 0)) + h = px2mm(attrs.get('height',0)) + if self.align and self.align[0].upper() == 'C': + x = (self.pdf.w-x)/2.0 - w/2.0 + self.pdf.image(self.image_map(attrs['src']), + x, y, w, h, link=self.href) + 
self.pdf.set_x(x+w) + self.pdf.set_y(y+h) + if tag=='b' or tag=='i' or tag=='u': + self.set_style(tag, True) + if tag=='center': + self.align = 'Center' + + def handle_endtag(self, tag): + #Closing tag + if DEBUG: print "ENDTAG", tag + if tag=='h1' or tag=='h2' or tag=='h3' or tag=='h4': + self.pdf.ln(6) + self.set_font() + self.set_style() + self.align = None + if tag=='pre': + self.pdf.set_font(self.font or 'Times','',12) + self.pdf.set_font_size(12) + self.pre=False + if tag=='blockquote': + self.set_text_color(0,0,0) + self.pdf.ln(3) + if tag=='strong': + tag='b' + if tag=='em': + tag='i' + if tag=='b' or tag=='i' or tag=='u': + self.set_style(tag, False) + if tag=='a': + self.href='' + if tag=='p': + self.align='' + if tag in ('ul', 'ol'): + self.indent-=1 + self.bullet.pop() + if tag=='table': + if not self.tfooter_out: + self.output_table_footer() + self.table = None + self.th = False + self.theader = None + self.tfooter = None + self.pdf.ln() + if tag=='thead': + self.thead = None + if tag=='tfoot': + self.tfoot = None + if tag=='tbody': + # draw a line separator between table bodies + self.pdf.set_x(self.table_offset) + self.output_table_sep() + if tag=='tr': + h = self.table_h + if self.tfoot is None: + self.pdf.ln(h) + self.tr = None + if tag=='td' or tag=='th': + if self.th: + if DEBUG: print "revert style" + self.set_style('B', False) # revert style + self.table_col_index += int(self.td.get('colspan','1')) + self.td = None + self.th = False + if tag=='font': + if self.color: + self.pdf.set_text_color(0,0,0) + self.color = None + if self.font_face: + self.set_font('Times',12) + + if tag=='center': + self.align = None + + def set_font(self, face=None, size=None): + if face: + self.font_face = face + if size: + self.font_size = size + self.h = size / 72.0*25.4 + if DEBUG: print "H", self.h + self.pdf.set_font(self.font_face or 'times','',12) + self.pdf.set_font_size(self.font_size or 12) + self.set_style('u', False) + self.set_style('b', False) + 
self.set_style('i', False) + self.set_text_color() + + def set_style(self, tag=None, enable=None): + #Modify style and select corresponding font + if tag: + t = self.style.get(tag.lower()) + self.style[tag.lower()] = enable + style='' + for s in ('b','i','u'): + if self.style.get(s): + style+=s + if DEBUG: print "SET_FONT_STYLE", style + self.pdf.set_font('',style) + + def set_text_color(self, r=None, g=0, b=0): + if r is None: + self.pdf.set_text_color(self.r,self.g,self.b) + else: + self.pdf.set_text_color(r, g, b) + self.r = r + self.g = g + self.b = b + + def put_link(self, url, txt): + #Put a hyperlink + self.set_text_color(0,0,255) + self.set_style('u', True) + self.pdf.write(5,txt,url) + self.set_style('u', False) + self.set_text_color(0) + + def put_line(self): + self.pdf.ln(2) + self.pdf.line(self.pdf.get_x(),self.pdf.get_y(),self.pdf.get_x()+187,self.pdf.get_y()) + self.pdf.ln(3) + +class HTMLMixin(): + def write_html(self, text, image_map=lambda x:x, **kwargs): + "Parse HTML and convert it to PDF" + h2p = HTML2FPDF(self,image_map=image_map,**kwargs) + h2p.feed(text) + +if __name__=='__main__': + html=""" +<H1 align="center">html2fpdf</H1> +<h2>Basic usage</h2> +<p>You can now easily print text mixing different +styles : <B>bold</B>, <I>italic</I>, <U>underlined</U>, or +<B><I><U>all at once</U></I></B>!<BR>You can also insert links +on text, such as <A HREF="http://www.fpdf.org">www.fpdf.org</A>, +or on an image: click on the logo.<br> +<center> +<A HREF="http://www.fpdf.org"><img src="tutorial/logo.png" width="104" height="71"></A> +</center> +<h3>Sample List</h3> +<ul><li>option 1</li> +<ol><li>option 2</li></ol> +<li>option 3</li></ul> + +<table border="0" align="center" width="50%"> +<thead><tr><th width="30%">Header 1</th><th width="70%">header 2</th></tr></thead> +<tbody> +<tr><td>cell 1</td><td>cell 2</td></tr> +<tr><td>cell 2</td><td>cell 3</td></tr> +</tbody> +</table> + + +<table border="1"> +<thead><tr bgcolor="#A0A0A0"><th width="30%">Header 
1</th><th width="70%">header 2</th></tr></thead> +<tfoot><tr bgcolor="#E0E0E0"><td>footer 1</td><td>footer 2</td></tr></tfoot> +<tbody> +<tr><td>cell 1</td><td>cell 2</td></tr> +<tr> +<td width="30%">cell 1</td><td width="70%" bgcolor="#D0D0FF" align='right'>cell 2</td> +</tr> +</tbody> +<tbody><tr><td colspan="2">cell spanned</td></tr></tbody> +<tbody> +""" + """<tr bgcolor="#F0F0F0"> +<td>cell 3</td><td>cell 4</td> +</tr><tr bgcolor="#FFFFFF"> +<td>cell 5</td><td>cell 6</td> +</tr>""" * 200 + """ +</tbody> +</table> +""" + + class MyFPDF(FPDF, HTMLMixin): + def header(self): + self.image('tutorial/logo_pb.png',10,8,33) + self.set_font('Arial','B',15) + self.cell(80) + self.cell(30,10,'Title',1,0,'C') + self.ln(20) + + def footer(self): + self.set_y(-15) + self.set_font('Arial','I',8) + txt = 'Page %s of %s' % (self.page_no(), self.alias_nb_pages()) + self.cell(0,10,txt,0,0,'C') + + pdf=MyFPDF() + #First page + pdf.add_page() + pdf.write_html(html) + pdf.output('html.pdf','F') + + import os + os.system("evince html.pdf") + + ADDED gluon/contrib/pyfpdf/template.py Index: gluon/contrib/pyfpdf/template.py ================================================================== --- /dev/null +++ gluon/contrib/pyfpdf/template.py @@ -0,0 +1,278 @@ +# -*- coding: iso-8859-1 -*- + +"PDF Template Helper for FPDF.py" + +__author__ = "Mariano Reingart <reingart@gmail.com>" +__copyright__ = "Copyright (C) 2010 Mariano Reingart" +__license__ = "LGPL 3.0" + +import sys,os,csv +from fpdf import FPDF + +def rgb(col): + return (col // 65536), (col // 256 % 256), (col% 256) + +class Template: + def __init__(self, infile=None, elements=None, format='A4', orientation='portrait', + title='', author='', subject='', creator='', keywords=''): + if elements: + self.elements = dict([(v['name'].lower(),v) for v in elements]) + self.handlers = {'T': self.text, 'L': self.line, 'I': self.image, + 'B': self.rect, 'BC': self.barcode, } + self.pg_no = 0 + self.texts = {} + pdf = self.pdf = 
FPDF(format=format,orientation=orientation, unit="mm") + pdf.set_title(title) + pdf.set_author(author) + pdf.set_creator(creator) + pdf.set_subject(subject) + pdf.set_keywords(keywords) + + def parse_csv(self, infile, delimiter=",", decimal_sep="."): + "Parse template format csv file and create elements dict" + keys = ('name','type','x1','y1','x2','y2','font','size', + 'bold','italic','underline','foreground','background', + 'align','text','priority') + self.elements = {} + f = open(infile, 'rb') + try: + for row in csv.reader(f, delimiter=delimiter): + kargs = {} + for i,v in enumerate(row): + if not v.startswith("'") and decimal_sep!=".": + v = v.replace(decimal_sep,".") + else: + v = v + if v=='': + v = None + else: + v = eval(v.strip()) + kargs[keys[i]] = v + self.elements[kargs['name'].lower()] = kargs + finally: + f.close() + + def add_page(self): + self.pg_no += 1 + self.texts[self.pg_no] = {} + + def __setitem__(self, name, value): + if name.lower() in self.elements: + if isinstance(value,unicode): + value = value.encode("latin1","ignore") + else: + value = str(value) + self.texts[self.pg_no][name.lower()] = value + + # setitem shortcut (may be further extended) + set = __setitem__ + + def __getitem__(self, name): + if name.lower() in self.elements: + return self.texts[self.pg_no].get(name.lower(), self.elements[name.lower()]['text']) + + def split_multicell(self, text, element_name): + "Divide (\n) a string using a given element width" + pdf = self.pdf + element = self.elements[element_name.lower()] + style = "" + if element['bold']: style += "B" + if element['italic']: style += "I" + if element['underline']: style += "U" + pdf.set_font(element['font'],style,element['size']) + align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(element['align']) # D/I in spanish + if isinstance(text, unicode): + text = text.encode("latin1","ignore") + else: + text = str(text) + return pdf.multi_cell(w=element['x2']-element['x1'], + h=element['y2']-element['y1'], + 
txt=text,align=align,split_only=True) + + def render(self, outfile, dest="F"): + pdf = self.pdf + for pg in range(1, self.pg_no+1): + pdf.add_page() + pdf.set_font('Arial','B',16) + pdf.set_auto_page_break(False,margin=0) + + for element in sorted(self.elements.values(),key=lambda x: x['priority']): + #print "dib",element['type'], element['name'], element['x1'], element['y1'], element['x2'], element['y2'] + element = element.copy() + element['text'] = self.texts[pg].get(element['name'].lower(), element['text']) + if 'rotate' in element: + pdf.rotate(element['rotate'], element['x1'], element['y1']) + self.handlers[element['type'].upper()](pdf, **element) + if 'rotate' in element: + pdf.rotate(0) + + return pdf.output(outfile, dest) + + def text(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=10, + bold=False, italic=False, underline=False, align="", + foreground=0, backgroud=65535, + *args, **kwargs): + if text: + if pdf.text_color!=rgb(foreground): + pdf.set_text_color(*rgb(foreground)) + if pdf.fill_color!=rgb(backgroud): + pdf.set_fill_color(*rgb(backgroud)) + + font = font.strip().lower() + if font == 'arial black': + font = 'arial' + style = "" + for tag in 'B', 'I', 'U': + if (text.startswith("<%s>" % tag) and text.endswith("</%s>" %tag)): + text = text[3:-4] + style += tag + if bold: style += "B" + if italic: style += "I" + if underline: style += "U" + align = {'L':'L','R':'R','I':'L','D':'R','C':'C','':''}.get(align) # D/I in spanish + pdf.set_font(font,style,size) + ##m_k = 72 / 2.54 + ##h = (size/m_k) + pdf.set_xy(x1,y1) + pdf.cell(w=x2-x1,h=y2-y1,txt=text,border=0,ln=0,align=align) + #pdf.Text(x=x1,y=y1,txt=text) + + def line(self, pdf, x1=0, y1=0, x2=0, y2=0, size=0, foreground=0, *args, **kwargs): + if pdf.draw_color!=rgb(foreground): + #print "SetDrawColor", hex(foreground) + pdf.set_draw_color(*rgb(foreground)) + #print "SetLineWidth", size + pdf.set_line_width(size) + pdf.line(x1, y1, x2, y2) + + def rect(self, pdf, x1=0, y1=0, x2=0, 
y2=0, size=0, foreground=0, backgroud=65535, *args, **kwargs): + if pdf.draw_color!=rgb(foreground): + pdf.set_draw_color(*rgb(foreground)) + if pdf.fill_color!=rgb(backgroud): + pdf.set_fill_color(*rgb(backgroud)) + pdf.set_line_width(size) + pdf.rect(x1, y1, x2-x1, y2-y1) + + def image(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', *args,**kwargs): + pdf.image(text,x1,y1,w=x2-x1,h=y2-y1,type='',link='') + + def barcode(self, pdf, x1=0, y1=0, x2=0, y2=0, text='', font="arial", size=1, + foreground=0, *args, **kwargs): + if pdf.draw_color!=rgb(foreground): + pdf.set_draw_color(*rgb(foreground)) + font = font.lower().strip() + if font == 'interleaved 2of5 nt': + pdf.interleaved2of5(text,x1,y1,w=size,h=y2-y1) + + +if __name__ == "__main__": + + # generate sample invoice (according Argentina's regulations) + + import random + from decimal import Decimal + + f = Template(format="A4", + title="Sample Invoice", author="Sample Company", + subject="Sample Customer", keywords="Electronic TAX Invoice") + f.parse_csv(infile="invoice.csv", delimiter=";", decimal_sep=",") + + detail = "Lorem ipsum dolor sit amet, consectetur. 
" * 30 + items = [] + for i in range(1, 30): + ds = "Sample product %s" % i + qty = random.randint(1,10) + price = round(random.random()*100,3) + code = "%s%s%02d" % (chr(random.randint(65,90)), chr(random.randint(65,90)),i) + items.append(dict(code=code, unit='u', + qty=qty, price=price, + amount=qty*price, + ds="%s: %s" % (i,ds))) + + # divide and count lines + lines = 0 + li_items = [] + for it in items: + qty = it['qty'] + code = it['code'] + unit = it['unit'] + for ds in f.split_multicell(it['ds'], 'item_description01'): + # add item description line (without price nor amount) + li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None)) + # clean qty and code (show only at first) + unit = qty = code = None + # set last item line price and amount + li_items[-1].update(amount = it['amount'], + price = it['price']) + + obs="\n<U>Detail:</U>\n\n" + detail + for ds in f.split_multicell(obs, 'item_description01'): + li_items.append(dict(code=code, ds=ds, qty=qty, unit=unit, price=None, amount=None)) + + # calculate pages: + lines = len(li_items) + max_lines_per_page = 24 + pages = lines / (max_lines_per_page - 1) + if lines % (max_lines_per_page - 1): pages = pages + 1 + + # completo campos y hojas + for page in range(1, pages+1): + f.add_page() + f['page'] = 'Page %s of %s' % (page, pages) + if pages>1 and page<pages: + s = 'Continues on page %s' % (page+1) + else: + s = '' + f['item_description%02d' % (max_lines_per_page+1)] = s + + f["company_name"] = "Sample Company" + f["company_logo"] = "tutorial/logo.png" + f["company_header1"] = "Some Address - somewhere -" + f["company_header2"] = "http://www.example.com" + f["company_footer1"] = "Tax Code ..." + f["company_footer2"] = "Tax/VAT ID ..." + f['number'] = '0001-00001234' + f['issue_date'] = '2010-09-10' + f['due_date'] = '2099-09-10' + f['customer_name'] = "Sample Client" + f['customer_address'] = "Siempreviva 1234" + + # print line item... 
+ li = 0 + k = 0 + total = Decimal("0.00") + for it in li_items: + k = k + 1 + if k > page * (max_lines_per_page - 1): + break + if it['amount']: + total += Decimal("%.6f" % it['amount']) + if k > (page - 1) * (max_lines_per_page - 1): + li += 1 + if it['qty'] is not None: + f['item_quantity%02d' % li] = it['qty'] + if it['code'] is not None: + f['item_code%02d' % li] = it['code'] + if it['unit'] is not None: + f['item_unit%02d' % li] = it['unit'] + f['item_description%02d' % li] = it['ds'] + if it['price'] is not None: + f['item_price%02d' % li] = "%0.3f" % it['price'] + if it['amount'] is not None: + f['item_amount%02d' % li] = "%0.2f" % it['amount'] + + if pages == page: + f['net'] = "%0.2f" % (total/Decimal("1.21")) + f['vat'] = "%0.2f" % (total*(1-1/Decimal("1.21"))) + f['total_label'] = 'Total:' + else: + f['total_label'] = 'SubTotal:' + f['total'] = "%0.2f" % total + + f.render("./invoice.pdf") + if sys.platform.startswith("linux"): + os.system("evince ./invoice.pdf") + else: + os.system("./invoice.pdf") + + ADDED gluon/contrib/pymysql/LICENSE Index: gluon/contrib/pymysql/LICENSE ================================================================== --- /dev/null +++ gluon/contrib/pymysql/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2010 PyMySQL contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. ADDED gluon/contrib/pymysql/README Index: gluon/contrib/pymysql/README ================================================================== --- /dev/null +++ gluon/contrib/pymysql/README @@ -0,0 +1,37 @@ +==================== +PyMySQL Installation +==================== + +.. contents:: +.. + This package contains a pure-Python MySQL client library. + Documentation on the MySQL client/server protocol can be found here: + http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol + If you would like to run the test suite, create a ~/.my.cnf file and + a database called "test_pymysql". The goal of pymysql is to be a drop-in + replacement for MySQLdb and work on CPython 2.3+, Jython, IronPython, PyPy + and Python 3. We test for compatibility by simply changing the import + statements in the Django MySQL backend and running its unit tests as well + as running it against the MySQLdb and myconnpy unit tests. + +Requirements +------------- + ++ Python 2.4 or higher + + * http://www.python.org/ + + * 2.6 is the primary test environment. + +* MySQL 4.1 or higher + + * protocol41 support, experimental 4.0 support + +Installation +------------ + +# easy_install pymysql +# ... or ... +# python setup.py install + + ADDED gluon/contrib/pymysql/__init__.py Index: gluon/contrib/pymysql/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/__init__.py @@ -0,0 +1,132 @@ +''' +PyMySQL: A pure-Python drop-in replacement for MySQLdb. 
+ +Copyright (c) 2010 PyMySQL contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ +''' + +VERSION = (0, 4, None) + +from constants import FIELD_TYPE +from converters import escape_dict, escape_sequence, escape_string +from err import Warning, Error, InterfaceError, DataError, \ + DatabaseError, OperationalError, IntegrityError, InternalError, \ + NotSupportedError, ProgrammingError +from times import Date, Time, Timestamp, \ + DateFromTicks, TimeFromTicks, TimestampFromTicks + +import sys + +try: + frozenset +except NameError: + from sets import ImmutableSet as frozenset + try: + from sets import BaseSet as set + except ImportError: + from sets import Set as set + +threadsafety = 1 +apilevel = "2.0" +paramstyle = "format" + +class DBAPISet(frozenset): + + + def __ne__(self, other): + if isinstance(other, set): + return super(DBAPISet, self).__ne__(self, other) + else: + return other not in self + + def __eq__(self, other): + if isinstance(other, frozenset): + return frozenset.__eq__(self, other) + else: + return other in self + + def __hash__(self): + return frozenset.__hash__(self) + + +STRING = DBAPISet([FIELD_TYPE.ENUM, FIELD_TYPE.STRING, + FIELD_TYPE.VAR_STRING]) +BINARY = DBAPISet([FIELD_TYPE.BLOB, FIELD_TYPE.LONG_BLOB, + FIELD_TYPE.MEDIUM_BLOB, FIELD_TYPE.TINY_BLOB]) +NUMBER = DBAPISet([FIELD_TYPE.DECIMAL, FIELD_TYPE.DOUBLE, FIELD_TYPE.FLOAT, + FIELD_TYPE.INT24, FIELD_TYPE.LONG, FIELD_TYPE.LONGLONG, + FIELD_TYPE.TINY, FIELD_TYPE.YEAR]) +DATE = DBAPISet([FIELD_TYPE.DATE, FIELD_TYPE.NEWDATE]) +TIME = DBAPISet([FIELD_TYPE.TIME]) +TIMESTAMP = DBAPISet([FIELD_TYPE.TIMESTAMP, FIELD_TYPE.DATETIME]) +DATETIME = TIMESTAMP +ROWID = DBAPISet() + +def Binary(x): + """Return x as a binary type.""" + return str(x) + +def Connect(*args, **kwargs): + """ + Connect to the database; see connections.Connection.__init__() for + more information. 
+ """ + from connections import Connection + return Connection(*args, **kwargs) + +def get_client_info(): # for MySQLdb compatibility + return '%s.%s.%s' % VERSION + +connect = Connection = Connect + +# we include a doctored version_info here for MySQLdb compatibility +version_info = (1,2,2,"final",0) + +NULL = "NULL" + +__version__ = get_client_info() + +def thread_safe(): + return True # match MySQLdb.thread_safe() + +def install_as_MySQLdb(): + """ + After this function is called, any application that imports MySQLdb or + _mysql will unwittingly actually use + """ + sys.modules["MySQLdb"] = sys.modules["_mysql"] = sys.modules["pymysql"] + +__all__ = [ + 'BINARY', 'Binary', 'Connect', 'Connection', 'DATE', 'Date', + 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks', 'TimestampFromTicks', + 'DataError', 'DatabaseError', 'Error', 'FIELD_TYPE', 'IntegrityError', + 'InterfaceError', 'InternalError', 'MySQLError', 'NULL', 'NUMBER', + 'NotSupportedError', 'DBAPISet', 'OperationalError', 'ProgrammingError', + 'ROWID', 'STRING', 'TIME', 'TIMESTAMP', 'Warning', 'apilevel', 'connect', + 'connections', 'constants', 'converters', 'cursors', 'debug', 'escape', + 'escape_dict', 'escape_sequence', 'escape_string', 'get_client_info', + 'paramstyle', 'string_literal', 'threadsafety', 'version_info', + + "install_as_MySQLdb", + + "NULL","__version__", + ] + ADDED gluon/contrib/pymysql/charset.py Index: gluon/contrib/pymysql/charset.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/charset.py @@ -0,0 +1,175 @@ +MBLENGTH = { + 8:1, + 33:3, + 88:2, + 91:2 + } + +class Charset: + def __init__(self, id, name, collation, is_default): + self.id, self.name, self.collation = id, name, collation + self.is_default = is_default == 'Yes' + +class Charsets: + def __init__(self): + self._by_id = {} + + def add(self, c): + self._by_id[c.id] = c + + def by_id(self, id): + return self._by_id[id] + + def by_name(self, name): + for c in 
self._by_id.values(): + if c.name == name and c.is_default: + return c + +_charsets = Charsets() +""" +Generated with: + +mysql -N -s -e "select id, character_set_name, collation_name, is_default +from information_schema.collations order by id;" | python -c "import sys +for l in sys.stdin.readlines(): + id, name, collation, is_default = l.split(chr(9)) + print '_charsets.add(Charset(%s, \'%s\', \'%s\', \'%s\'))' \ + % (id, name, collation, is_default.strip()) +" + +""" +_charsets.add(Charset(1, 'big5', 'big5_chinese_ci', 'Yes')) +_charsets.add(Charset(2, 'latin2', 'latin2_czech_cs', '')) +_charsets.add(Charset(3, 'dec8', 'dec8_swedish_ci', 'Yes')) +_charsets.add(Charset(4, 'cp850', 'cp850_general_ci', 'Yes')) +_charsets.add(Charset(5, 'latin1', 'latin1_german1_ci', '')) +_charsets.add(Charset(6, 'hp8', 'hp8_english_ci', 'Yes')) +_charsets.add(Charset(7, 'koi8r', 'koi8r_general_ci', 'Yes')) +_charsets.add(Charset(8, 'latin1', 'latin1_swedish_ci', 'Yes')) +_charsets.add(Charset(9, 'latin2', 'latin2_general_ci', 'Yes')) +_charsets.add(Charset(10, 'swe7', 'swe7_swedish_ci', 'Yes')) +_charsets.add(Charset(11, 'ascii', 'ascii_general_ci', 'Yes')) +_charsets.add(Charset(12, 'ujis', 'ujis_japanese_ci', 'Yes')) +_charsets.add(Charset(13, 'sjis', 'sjis_japanese_ci', 'Yes')) +_charsets.add(Charset(14, 'cp1251', 'cp1251_bulgarian_ci', '')) +_charsets.add(Charset(15, 'latin1', 'latin1_danish_ci', '')) +_charsets.add(Charset(16, 'hebrew', 'hebrew_general_ci', 'Yes')) +_charsets.add(Charset(18, 'tis620', 'tis620_thai_ci', 'Yes')) +_charsets.add(Charset(19, 'euckr', 'euckr_korean_ci', 'Yes')) +_charsets.add(Charset(20, 'latin7', 'latin7_estonian_cs', '')) +_charsets.add(Charset(21, 'latin2', 'latin2_hungarian_ci', '')) +_charsets.add(Charset(22, 'koi8u', 'koi8u_general_ci', 'Yes')) +_charsets.add(Charset(23, 'cp1251', 'cp1251_ukrainian_ci', '')) +_charsets.add(Charset(24, 'gb2312', 'gb2312_chinese_ci', 'Yes')) +_charsets.add(Charset(25, 'greek', 'greek_general_ci', 'Yes')) 
+_charsets.add(Charset(26, 'cp1250', 'cp1250_general_ci', 'Yes')) +_charsets.add(Charset(27, 'latin2', 'latin2_croatian_ci', '')) +_charsets.add(Charset(28, 'gbk', 'gbk_chinese_ci', 'Yes')) +_charsets.add(Charset(29, 'cp1257', 'cp1257_lithuanian_ci', '')) +_charsets.add(Charset(30, 'latin5', 'latin5_turkish_ci', 'Yes')) +_charsets.add(Charset(31, 'latin1', 'latin1_german2_ci', '')) +_charsets.add(Charset(32, 'armscii8', 'armscii8_general_ci', 'Yes')) +_charsets.add(Charset(33, 'utf8', 'utf8_general_ci', 'Yes')) +_charsets.add(Charset(34, 'cp1250', 'cp1250_czech_cs', '')) +_charsets.add(Charset(35, 'ucs2', 'ucs2_general_ci', 'Yes')) +_charsets.add(Charset(36, 'cp866', 'cp866_general_ci', 'Yes')) +_charsets.add(Charset(37, 'keybcs2', 'keybcs2_general_ci', 'Yes')) +_charsets.add(Charset(38, 'macce', 'macce_general_ci', 'Yes')) +_charsets.add(Charset(39, 'macroman', 'macroman_general_ci', 'Yes')) +_charsets.add(Charset(40, 'cp852', 'cp852_general_ci', 'Yes')) +_charsets.add(Charset(41, 'latin7', 'latin7_general_ci', 'Yes')) +_charsets.add(Charset(42, 'latin7', 'latin7_general_cs', '')) +_charsets.add(Charset(43, 'macce', 'macce_bin', '')) +_charsets.add(Charset(44, 'cp1250', 'cp1250_croatian_ci', '')) +_charsets.add(Charset(47, 'latin1', 'latin1_bin', '')) +_charsets.add(Charset(48, 'latin1', 'latin1_general_ci', '')) +_charsets.add(Charset(49, 'latin1', 'latin1_general_cs', '')) +_charsets.add(Charset(50, 'cp1251', 'cp1251_bin', '')) +_charsets.add(Charset(51, 'cp1251', 'cp1251_general_ci', 'Yes')) +_charsets.add(Charset(52, 'cp1251', 'cp1251_general_cs', '')) +_charsets.add(Charset(53, 'macroman', 'macroman_bin', '')) +_charsets.add(Charset(57, 'cp1256', 'cp1256_general_ci', 'Yes')) +_charsets.add(Charset(58, 'cp1257', 'cp1257_bin', '')) +_charsets.add(Charset(59, 'cp1257', 'cp1257_general_ci', 'Yes')) +_charsets.add(Charset(63, 'binary', 'binary', 'Yes')) +_charsets.add(Charset(64, 'armscii8', 'armscii8_bin', '')) +_charsets.add(Charset(65, 'ascii', 'ascii_bin', 
'')) +_charsets.add(Charset(66, 'cp1250', 'cp1250_bin', '')) +_charsets.add(Charset(67, 'cp1256', 'cp1256_bin', '')) +_charsets.add(Charset(68, 'cp866', 'cp866_bin', '')) +_charsets.add(Charset(69, 'dec8', 'dec8_bin', '')) +_charsets.add(Charset(70, 'greek', 'greek_bin', '')) +_charsets.add(Charset(71, 'hebrew', 'hebrew_bin', '')) +_charsets.add(Charset(72, 'hp8', 'hp8_bin', '')) +_charsets.add(Charset(73, 'keybcs2', 'keybcs2_bin', '')) +_charsets.add(Charset(74, 'koi8r', 'koi8r_bin', '')) +_charsets.add(Charset(75, 'koi8u', 'koi8u_bin', '')) +_charsets.add(Charset(77, 'latin2', 'latin2_bin', '')) +_charsets.add(Charset(78, 'latin5', 'latin5_bin', '')) +_charsets.add(Charset(79, 'latin7', 'latin7_bin', '')) +_charsets.add(Charset(80, 'cp850', 'cp850_bin', '')) +_charsets.add(Charset(81, 'cp852', 'cp852_bin', '')) +_charsets.add(Charset(82, 'swe7', 'swe7_bin', '')) +_charsets.add(Charset(83, 'utf8', 'utf8_bin', '')) +_charsets.add(Charset(84, 'big5', 'big5_bin', '')) +_charsets.add(Charset(85, 'euckr', 'euckr_bin', '')) +_charsets.add(Charset(86, 'gb2312', 'gb2312_bin', '')) +_charsets.add(Charset(87, 'gbk', 'gbk_bin', '')) +_charsets.add(Charset(88, 'sjis', 'sjis_bin', '')) +_charsets.add(Charset(89, 'tis620', 'tis620_bin', '')) +_charsets.add(Charset(90, 'ucs2', 'ucs2_bin', '')) +_charsets.add(Charset(91, 'ujis', 'ujis_bin', '')) +_charsets.add(Charset(92, 'geostd8', 'geostd8_general_ci', 'Yes')) +_charsets.add(Charset(93, 'geostd8', 'geostd8_bin', '')) +_charsets.add(Charset(94, 'latin1', 'latin1_spanish_ci', '')) +_charsets.add(Charset(95, 'cp932', 'cp932_japanese_ci', 'Yes')) +_charsets.add(Charset(96, 'cp932', 'cp932_bin', '')) +_charsets.add(Charset(97, 'eucjpms', 'eucjpms_japanese_ci', 'Yes')) +_charsets.add(Charset(98, 'eucjpms', 'eucjpms_bin', '')) +_charsets.add(Charset(99, 'cp1250', 'cp1250_polish_ci', '')) +_charsets.add(Charset(128, 'ucs2', 'ucs2_unicode_ci', '')) +_charsets.add(Charset(129, 'ucs2', 'ucs2_icelandic_ci', '')) +_charsets.add(Charset(130, 
'ucs2', 'ucs2_latvian_ci', '')) +_charsets.add(Charset(131, 'ucs2', 'ucs2_romanian_ci', '')) +_charsets.add(Charset(132, 'ucs2', 'ucs2_slovenian_ci', '')) +_charsets.add(Charset(133, 'ucs2', 'ucs2_polish_ci', '')) +_charsets.add(Charset(134, 'ucs2', 'ucs2_estonian_ci', '')) +_charsets.add(Charset(135, 'ucs2', 'ucs2_spanish_ci', '')) +_charsets.add(Charset(136, 'ucs2', 'ucs2_swedish_ci', '')) +_charsets.add(Charset(137, 'ucs2', 'ucs2_turkish_ci', '')) +_charsets.add(Charset(138, 'ucs2', 'ucs2_czech_ci', '')) +_charsets.add(Charset(139, 'ucs2', 'ucs2_danish_ci', '')) +_charsets.add(Charset(140, 'ucs2', 'ucs2_lithuanian_ci', '')) +_charsets.add(Charset(141, 'ucs2', 'ucs2_slovak_ci', '')) +_charsets.add(Charset(142, 'ucs2', 'ucs2_spanish2_ci', '')) +_charsets.add(Charset(143, 'ucs2', 'ucs2_roman_ci', '')) +_charsets.add(Charset(144, 'ucs2', 'ucs2_persian_ci', '')) +_charsets.add(Charset(145, 'ucs2', 'ucs2_esperanto_ci', '')) +_charsets.add(Charset(146, 'ucs2', 'ucs2_hungarian_ci', '')) +_charsets.add(Charset(192, 'utf8', 'utf8_unicode_ci', '')) +_charsets.add(Charset(193, 'utf8', 'utf8_icelandic_ci', '')) +_charsets.add(Charset(194, 'utf8', 'utf8_latvian_ci', '')) +_charsets.add(Charset(195, 'utf8', 'utf8_romanian_ci', '')) +_charsets.add(Charset(196, 'utf8', 'utf8_slovenian_ci', '')) +_charsets.add(Charset(197, 'utf8', 'utf8_polish_ci', '')) +_charsets.add(Charset(198, 'utf8', 'utf8_estonian_ci', '')) +_charsets.add(Charset(199, 'utf8', 'utf8_spanish_ci', '')) +_charsets.add(Charset(200, 'utf8', 'utf8_swedish_ci', '')) +_charsets.add(Charset(201, 'utf8', 'utf8_turkish_ci', '')) +_charsets.add(Charset(202, 'utf8', 'utf8_czech_ci', '')) +_charsets.add(Charset(203, 'utf8', 'utf8_danish_ci', '')) +_charsets.add(Charset(204, 'utf8', 'utf8_lithuanian_ci', '')) +_charsets.add(Charset(205, 'utf8', 'utf8_slovak_ci', '')) +_charsets.add(Charset(206, 'utf8', 'utf8_spanish2_ci', '')) +_charsets.add(Charset(207, 'utf8', 'utf8_roman_ci', '')) +_charsets.add(Charset(208, 'utf8', 
'utf8_persian_ci', '')) +_charsets.add(Charset(209, 'utf8', 'utf8_esperanto_ci', '')) +_charsets.add(Charset(210, 'utf8', 'utf8_hungarian_ci', '')) + +def charset_by_name(name): + return _charsets.by_name(name) + +def charset_by_id(id): + return _charsets.by_id(id) + + ADDED gluon/contrib/pymysql/connections.py Index: gluon/contrib/pymysql/connections.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/connections.py @@ -0,0 +1,933 @@ +# Python implementation of the MySQL client-server protocol +# http://forge.mysql.com/wiki/MySQL_Internals_ClientServer_Protocol + +try: + import hashlib + sha_new = lambda *args, **kwargs: hashlib.new("sha1", *args, **kwargs) +except ImportError: + import sha + sha_new = sha.new + +import socket +try: + import ssl + SSL_ENABLED = True +except ImportError: + SSL_ENABLED = False + +import struct +import sys +import os +import ConfigParser + +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +from charset import MBLENGTH, charset_by_name, charset_by_id +from cursors import Cursor +from constants import FIELD_TYPE, FLAG +from constants import SERVER_STATUS +from constants.CLIENT import * +from constants.COMMAND import * +from util import join_bytes, byte2int, int2byte +from converters import escape_item, encoders, decoders +from err import raise_mysql_exception, Warning, Error, \ + InterfaceError, DataError, DatabaseError, OperationalError, \ + IntegrityError, InternalError, NotSupportedError, ProgrammingError + +DEBUG = False + +NULL_COLUMN = 251 +UNSIGNED_CHAR_COLUMN = 251 +UNSIGNED_SHORT_COLUMN = 252 +UNSIGNED_INT24_COLUMN = 253 +UNSIGNED_INT64_COLUMN = 254 +UNSIGNED_CHAR_LENGTH = 1 +UNSIGNED_SHORT_LENGTH = 2 +UNSIGNED_INT24_LENGTH = 3 +UNSIGNED_INT64_LENGTH = 8 + +DEFAULT_CHARSET = 'latin1' +MAX_PACKET_LENGTH = 256*256*256-1 + + +def dump_packet(data): + + def is_ascii(data): + if byte2int(data) >= 65 and byte2int(data) <= 122: #data.isalnum(): + 
return data + return '.' + print "packet length %d" % len(data) + print "method call[1]: %s" % sys._getframe(1).f_code.co_name + print "method call[2]: %s" % sys._getframe(2).f_code.co_name + print "method call[3]: %s" % sys._getframe(3).f_code.co_name + print "method call[4]: %s" % sys._getframe(4).f_code.co_name + print "method call[5]: %s" % sys._getframe(5).f_code.co_name + print "-" * 88 + dump_data = [data[i:i+16] for i in xrange(len(data)) if i%16 == 0] + for d in dump_data: + print ' '.join(map(lambda x:"%02X" % byte2int(x), d)) + \ + ' ' * (16 - len(d)) + ' ' * 2 + \ + ' '.join(map(lambda x:"%s" % is_ascii(x), d)) + print "-" * 88 + print "" + +def _scramble(password, message): + if password == None or len(password) == 0: + return int2byte(0) + if DEBUG: print 'password=' + password + stage1 = sha_new(password).digest() + stage2 = sha_new(stage1).digest() + s = sha_new() + s.update(message) + s.update(stage2) + result = s.digest() + return _my_crypt(result, stage1) + +def _my_crypt(message1, message2): + length = len(message1) + result = struct.pack('B', length) + for i in xrange(length): + x = (struct.unpack('B', message1[i:i+1])[0] ^ \ + struct.unpack('B', message2[i:i+1])[0]) + result += struct.pack('B', x) + return result + +# old_passwords support ported from libmysql/password.c +SCRAMBLE_LENGTH_323 = 8 + +class RandStruct_323(object): + def __init__(self, seed1, seed2): + self.max_value = 0x3FFFFFFFL + self.seed1 = seed1 % self.max_value + self.seed2 = seed2 % self.max_value + + def my_rnd(self): + self.seed1 = (self.seed1 * 3L + self.seed2) % self.max_value + self.seed2 = (self.seed1 + self.seed2 + 33L) % self.max_value + return float(self.seed1) / float(self.max_value) + +def _scramble_323(password, message): + hash_pass = _hash_password_323(password) + hash_message = _hash_password_323(message[:SCRAMBLE_LENGTH_323]) + hash_pass_n = struct.unpack(">LL", hash_pass) + hash_message_n = struct.unpack(">LL", hash_message) + + rand_st = 
RandStruct_323(hash_pass_n[0] ^ hash_message_n[0], + hash_pass_n[1] ^ hash_message_n[1]) + outbuf = StringIO.StringIO() + for _ in xrange(min(SCRAMBLE_LENGTH_323, len(message))): + outbuf.write(int2byte(int(rand_st.my_rnd() * 31) + 64)) + extra = int2byte(int(rand_st.my_rnd() * 31)) + out = outbuf.getvalue() + outbuf = StringIO.StringIO() + for c in out: + outbuf.write(int2byte(byte2int(c) ^ byte2int(extra))) + return outbuf.getvalue() + +def _hash_password_323(password): + nr = 1345345333L + add = 7L + nr2 = 0x12345671L + + for c in [byte2int(x) for x in password if x not in (' ', '\t')]: + nr^= (((nr & 63)+add)*c)+ (nr << 8) & 0xFFFFFFFF + nr2= (nr2 + ((nr2 << 8) ^ nr)) & 0xFFFFFFFF + add= (add + c) & 0xFFFFFFFF + + r1 = nr & ((1L << 31) - 1L) # kill sign bits + r2 = nr2 & ((1L << 31) - 1L) + + # pack + return struct.pack(">LL", r1, r2) + +def pack_int24(n): + return struct.pack('BBB', n&0xFF, (n>>8)&0xFF, (n>>16)&0xFF) + +def unpack_uint16(n): + return struct.unpack('<H', n[0:2])[0] + + +# TODO: stop using bit-shifting in these functions... +# TODO: rename to "uint" to make it clear they're unsigned... 
+def unpack_int24(n): + return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\ + (struct.unpack('B',n[2])[0] << 16) + +def unpack_int32(n): + return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0] << 8) +\ + (struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B', n[3])[0] << 24) + +def unpack_int64(n): + return struct.unpack('B',n[0])[0] + (struct.unpack('B', n[1])[0]<<8) +\ + (struct.unpack('B',n[2])[0] << 16) + (struct.unpack('B',n[3])[0]<<24)+\ + (struct.unpack('B',n[4])[0] << 32) + (struct.unpack('B',n[5])[0]<<40)+\ + (struct.unpack('B',n[6])[0] << 48) + (struct.unpack('B',n[7])[0]<<56) + +def defaulterrorhandler(connection, cursor, errorclass, errorvalue): + err = errorclass, errorvalue + if DEBUG: + raise + + if cursor: + cursor.messages.append(err) + else: + connection.messages.append(err) + del cursor + del connection + + if not issubclass(errorclass, Error): + raise Error(errorclass, errorvalue) + else: + raise errorclass, errorvalue + + +class MysqlPacket(object): + """Representation of a MySQL response packet. 
Reads in the packet + from the network socket, removes packet header and provides an interface + for reading/parsing the packet results.""" + + def __init__(self, socket): + self.__position = 0 + self.__recv_packet(socket) + del socket + + def __recv_packet(self, socket): + """Parse the packet header and read entire packet payload into buffer.""" + packet_header = socket.recv(4) + while len(packet_header) < 4: + d = socket.recv(4 - len(packet_header)) + if len(d) == 0: + raise OperationalError(2013, "Lost connection to MySQL server during query") + packet_header += d + + if DEBUG: dump_packet(packet_header) + packet_length_bin = packet_header[:3] + self.__packet_number = byte2int(packet_header[3]) + # TODO: check packet_num is correct (+1 from last packet) + + bin_length = packet_length_bin + int2byte(0) # pad little-endian number + bytes_to_read = struct.unpack('<I', bin_length)[0] + + payload_buff = [] # this is faster than cStringIO + while bytes_to_read > 0: + recv_data = socket.recv(bytes_to_read) + if len(recv_data) == 0: + raise OperationalError(2013, "Lost connection to MySQL server during query") + if DEBUG: dump_packet(recv_data) + payload_buff.append(recv_data) + bytes_to_read -= len(recv_data) + self.__data = join_bytes(payload_buff) + + def packet_number(self): return self.__packet_number + + def get_all_data(self): return self.__data + + def read(self, size): + """Read the first 'size' bytes in packet and advance cursor past them.""" + result = self.peek(size) + self.advance(size) + return result + + def read_all(self): + """Read all remaining data in the packet. + + (Subsequent read() or peek() will return errors.) 
+ """ + result = self.__data[self.__position:] + self.__position = None # ensure no subsequent read() or peek() + return result + + def advance(self, length): + """Advance the cursor in data buffer 'length' bytes.""" + new_position = self.__position + length + if new_position < 0 or new_position > len(self.__data): + raise Exception('Invalid advance amount (%s) for cursor. ' + 'Position=%s' % (length, new_position)) + self.__position = new_position + + def rewind(self, position=0): + """Set the position of the data buffer cursor to 'position'.""" + if position < 0 or position > len(self.__data): + raise Exception("Invalid position to rewind cursor to: %s." % position) + self.__position = position + + def peek(self, size): + """Look at the first 'size' bytes in packet without moving cursor.""" + result = self.__data[self.__position:(self.__position+size)] + if len(result) != size: + error = ('Result length not requested length:\n' + 'Expected=%s. Actual=%s. Position: %s. Data Length: %s' + % (size, len(result), self.__position, len(self.__data))) + if DEBUG: + print error + self.dump() + raise AssertionError(error) + return result + + def get_bytes(self, position, length=1): + """Get 'length' bytes starting at 'position'. + + Position is start of payload (first four packet header bytes are not + included) starting at index '0'. + + No error checking is done. If requesting outside end of buffer + an empty string (or string shorter than 'length') may be returned! + """ + return self.__data[position:(position+length)] + + def read_length_coded_binary(self): + """Read a 'Length Coded Binary' number from the data buffer. + + Length coded numbers can be anywhere from 1 to 9 bytes depending + on the value of the first byte. 
+ """ + c = byte2int(self.read(1)) + if c == NULL_COLUMN: + return None + if c < UNSIGNED_CHAR_COLUMN: + return c + elif c == UNSIGNED_SHORT_COLUMN: + return unpack_uint16(self.read(UNSIGNED_SHORT_LENGTH)) + elif c == UNSIGNED_INT24_COLUMN: + return unpack_int24(self.read(UNSIGNED_INT24_LENGTH)) + elif c == UNSIGNED_INT64_COLUMN: + # TODO: what was 'longlong'? confirm it wasn't used? + return unpack_int64(self.read(UNSIGNED_INT64_LENGTH)) + + def read_length_coded_string(self): + """Read a 'Length Coded String' from the data buffer. + + A 'Length Coded String' consists first of a length coded + (unsigned, positive) integer represented in 1-9 bytes followed by + that many bytes of binary data. (For example "cat" would be "3cat".) + """ + length = self.read_length_coded_binary() + if length is None: + return None + return self.read(length) + + def is_ok_packet(self): + return byte2int(self.get_bytes(0)) == 0 + + def is_eof_packet(self): + return byte2int(self.get_bytes(0)) == 254 # 'fe' + + def is_resultset_packet(self): + field_count = byte2int(self.get_bytes(0)) + return field_count >= 1 and field_count <= 250 + + def is_error_packet(self): + return byte2int(self.get_bytes(0)) == 255 + + def check_error(self): + if self.is_error_packet(): + self.rewind() + self.advance(1) # field_count == error (we already know that) + errno = unpack_uint16(self.read(2)) + if DEBUG: print "errno = %d" % errno + raise_mysql_exception(self.__data) + + def dump(self): + dump_packet(self.__data) + + +class FieldDescriptorPacket(MysqlPacket): + """A MysqlPacket that represents a specific column's metadata in the result. + + Parsing is automatically done and the results are exported via public + attributes on the class such as: db, table_name, name, length, type_code. + """ + + def __init__(self, *args): + MysqlPacket.__init__(self, *args) + self.__parse_field_descriptor() + + def __parse_field_descriptor(self): + """Parse the 'Field Descriptor' (Metadata) packet. 
+ + This is compatible with MySQL 4.1+ (not compatible with MySQL 4.0). + """ + self.catalog = self.read_length_coded_string() + self.db = self.read_length_coded_string() + self.table_name = self.read_length_coded_string() + self.org_table = self.read_length_coded_string() + self.name = self.read_length_coded_string() + self.org_name = self.read_length_coded_string() + self.advance(1) # non-null filler + self.charsetnr = struct.unpack('<H', self.read(2))[0] + self.length = struct.unpack('<I', self.read(4))[0] + self.type_code = byte2int(self.read(1)) + self.flags = struct.unpack('<H', self.read(2))[0] + self.scale = byte2int(self.read(1)) # "decimals" + self.advance(2) # filler (always 0x00) + + # 'default' is a length coded binary and is still in the buffer? + # not used for normal result sets... + + def description(self): + """Provides a 7-item tuple compatible with the Python PEP249 DB Spec.""" + desc = [] + desc.append(self.name) + desc.append(self.type_code) + desc.append(None) # TODO: display_length; should this be self.length? + desc.append(self.get_column_length()) # 'internal_size' + desc.append(self.get_column_length()) # 'precision' # TODO: why!?!? + desc.append(self.scale) + + # 'null_ok' -- can this be True/False rather than 1/0? + # if so just do: desc.append(bool(self.flags % 2 == 0)) + if self.flags % 2 == 0: + desc.append(1) + else: + desc.append(0) + return tuple(desc) + + def get_column_length(self): + if self.type_code == FIELD_TYPE.VAR_STRING: + mblen = MBLENGTH.get(self.charsetnr, 1) + return self.length // mblen + return self.length + + def __str__(self): + return ('%s %s.%s.%s, type=%s' + % (self.__class__, self.db, self.table_name, self.name, + self.type_code)) + + +class Connection(object): + """ + Representation of a socket with a mysql server. 
+ + The proper way to get an instance of this class is to call + connect().""" + errorhandler = defaulterrorhandler + + def __init__(self, host="localhost", user=None, passwd="", + db=None, port=3306, unix_socket=None, + charset='', sql_mode=None, + read_default_file=None, conv=decoders, use_unicode=None, + client_flag=0, cursorclass=Cursor, init_command=None, + connect_timeout=None, ssl=None, read_default_group=None, + compress=None, named_pipe=None): + """ + Establish a connection to the MySQL database. Accepts several + arguments: + + host: Host where the database server is located + user: Username to log in as + passwd: Password to use. + db: Database to use, None to not use a particular one. + port: MySQL port to use, default is usually OK. + unix_socket: Optionally, you can use a unix socket rather than TCP/IP. + charset: Charset you want to use. + sql_mode: Default SQL_MODE to use. + read_default_file: Specifies my.cnf file to read these parameters from under the [client] section. + conv: Decoders dictionary to use instead of the default one. This is used to provide custom marshalling of types. See converters. + use_unicode: Whether or not to default to unicode strings. This option defaults to true for Py3k. + client_flag: Custom flags to send to MySQL. Find potential values in constants.CLIENT. + cursorclass: Custom cursor class to use. + init_command: Initial SQL statement to run when connection is established. + connect_timeout: Timeout before throwing an exception when connecting. + ssl: A dict of arguments similar to mysql_ssl_set()'s parameters. For now the capath and cipher arguments are not supported. + read_default_group: Group to read from in the configuration file. 
+ compress; Not supported + named_pipe: Not supported + """ + + if use_unicode is None and sys.version_info[0] > 2: + use_unicode = True + + if compress or named_pipe: + raise NotImplementedError, "compress and named_pipe arguments are not supported" + + if ssl and (ssl.has_key('capath') or ssl.has_key('cipher')): + raise NotImplementedError, 'ssl options capath and cipher are not supported' + + self.ssl = False + if ssl: + if not SSL_ENABLED: + raise NotImplementedError, "ssl module not found" + self.ssl = True + client_flag |= SSL + for k in ('key', 'cert', 'ca'): + v = None + if ssl.has_key(k): + v = ssl[k] + setattr(self, k, v) + + if read_default_group and not read_default_file: + if sys.platform.startswith("win"): + read_default_file = "c:\\my.ini" + else: + read_default_file = "/etc/my.cnf" + + if read_default_file: + if not read_default_group: + read_default_group = "client" + + cfg = ConfigParser.RawConfigParser() + cfg.read(os.path.expanduser(read_default_file)) + + def _config(key, default): + try: + return cfg.get(read_default_group,key) + except: + return default + + user = _config("user",user) + passwd = _config("password",passwd) + host = _config("host", host) + db = _config("db",db) + unix_socket = _config("socket",unix_socket) + port = _config("port", port) + charset = _config("default-character-set", charset) + + self.host = host + self.port = port + self.user = user + self.password = passwd + self.db = db + self.unix_socket = unix_socket + if charset: + self.charset = charset + self.use_unicode = True + else: + self.charset = DEFAULT_CHARSET + self.use_unicode = False + + if use_unicode: + self.use_unicode = use_unicode + + client_flag |= CAPABILITIES + client_flag |= MULTI_STATEMENTS + if self.db: + client_flag |= CONNECT_WITH_DB + self.client_flag = client_flag + + self.cursorclass = cursorclass + self.connect_timeout = connect_timeout + + self._connect() + + self.messages = [] + self.set_charset(charset) + self.encoders = encoders + 
self.decoders = conv + + self._affected_rows = 0 + self.host_info = "Not connected" + + self.autocommit(False) + + if sql_mode is not None: + c = self.cursor() + c.execute("SET sql_mode=%s", (sql_mode,)) + + self.commit() + + if init_command is not None: + c = self.cursor() + c.execute(init_command) + + self.commit() + + + def close(self): + ''' Send the quit message and close the socket ''' + send_data = struct.pack('<i',1) + int2byte(COM_QUIT) + self.socket.send(send_data) + self.socket.close() + self.socket = None + + def autocommit(self, value): + ''' Set whether or not to commit after every execute() ''' + try: + self._execute_command(COM_QUERY, "SET AUTOCOMMIT = %s" % \ + self.escape(value)) + self.read_packet() + except: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + + def commit(self): + ''' Commit changes to stable storage ''' + try: + self._execute_command(COM_QUERY, "COMMIT") + self.read_packet() + except: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + + def rollback(self): + ''' Roll back the current transaction ''' + try: + self._execute_command(COM_QUERY, "ROLLBACK") + self.read_packet() + except: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + + def escape(self, obj): + ''' Escape whatever value you pass to it ''' + return escape_item(obj, self.charset) + + def literal(self, obj): + ''' Alias for escape() ''' + return escape_item(obj, self.charset) + + def cursor(self): + ''' Create a new cursor to execute queries with ''' + return self.cursorclass(self) + + def __enter__(self): + ''' Context manager that returns a Cursor ''' + return self.cursor() + + def __exit__(self, exc, value, traceback): + ''' On successful exit, commit. On exception, rollback. 
''' + if exc: + self.rollback() + else: + self.commit() + + # The following methods are INTERNAL USE ONLY (called from Cursor) + def query(self, sql): + self._execute_command(COM_QUERY, sql) + self._affected_rows = self._read_query_result() + return self._affected_rows + + def next_result(self): + self._affected_rows = self._read_query_result() + return self._affected_rows + + def affected_rows(self): + return self._affected_rows + + def kill(self, thread_id): + arg = struct.pack('<I', thread_id) + try: + self._execute_command(COM_PROCESS_KILL, arg) + except: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + return + pkt = self.read_packet() + return pkt.is_ok_packet() + + def ping(self, reconnect=True): + ''' Check if the server is alive ''' + try: + self._execute_command(COM_PING, "") + except: + if reconnect: + self._connect() + return self.ping(False) + else: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + return + + pkt = self.read_packet() + return pkt.is_ok_packet() + + def set_charset(self, charset): + try: + if charset: + self._execute_command(COM_QUERY, "SET NAMES %s" % + self.escape(charset)) + self.read_packet() + self.charset = charset + except: + exc,value,tb = sys.exc_info() + self.errorhandler(None, exc, value) + + def _connect(self): + try: + if self.unix_socket and (self.host == 'localhost' or self.host == '127.0.0.1'): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + t = sock.gettimeout() + sock.settimeout(self.connect_timeout) + sock.connect(self.unix_socket) + sock.settimeout(t) + self.host_info = "Localhost via UNIX socket" + if DEBUG: print 'connected using unix_socket' + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + t = sock.gettimeout() + sock.settimeout(self.connect_timeout) + sock.connect((self.host, self.port)) + sock.settimeout(t) + self.host_info = "socket %s:%d" % (self.host, self.port) + if DEBUG: print 'connected using socket' + self.socket = sock + 
self._get_server_information() + self._request_authentication() + except socket.error, e: + raise OperationalError(2003, "Can't connect to MySQL server on %r (%d)" % (self.host, e.args[0])) + + def read_packet(self, packet_type=MysqlPacket): + """Read an entire "mysql packet" in its entirety from the network + and return a MysqlPacket type that represents the results.""" + + # TODO: is socket.recv(small_number) significantly slower than + # socket.recv(large_number)? if so, maybe we should buffer + # the socket.recv() (though that obviously makes memory management + # more complicated. + packet = packet_type(self.socket) + packet.check_error() + return packet + + def _read_query_result(self): + result = MySQLResult(self) + result.read() + self._result = result + return result.affected_rows + + def _send_command(self, command, sql): + #send_data = struct.pack('<i', len(sql) + 1) + command + sql + # could probably be more efficient, at least it's correct + if not self.socket: + self.errorhandler(None, InterfaceError, "(0, '')") + + if isinstance(sql, unicode): + sql = sql.encode(self.charset) + + buf = int2byte(command) + sql + pckt_no = 0 + while len(buf) >= MAX_PACKET_LENGTH: + header = struct.pack('<i', MAX_PACKET_LENGTH)[:-1]+int2byte(pckt_no) + send_data = header + buf[:MAX_PACKET_LENGTH] + self.socket.send(send_data) + if DEBUG: dump_packet(send_data) + buf = buf[MAX_PACKET_LENGTH:] + pckt_no += 1 + header = struct.pack('<i', len(buf))[:-1]+int2byte(pckt_no) + self.socket.send(header+buf) + + + #sock = self.socket + #sock.send(send_data) + + # + + def _execute_command(self, command, sql): + self._send_command(command, sql) + + def _request_authentication(self): + self._send_authentication() + + def _send_authentication(self): + sock = self.socket + self.client_flag |= CAPABILITIES + if self.server_version.startswith('5'): + self.client_flag |= MULTI_RESULTS + + if self.user is None: + raise ValueError, "Did not specify a username" + + charset_id = 
charset_by_name(self.charset).id + self.user = self.user.encode(self.charset) + + data_init = struct.pack('<i', self.client_flag) + struct.pack("<I", 1) + \ + int2byte(charset_id) + int2byte(0)*23 + + next_packet = 1 + + if self.ssl: + data = pack_int24(len(data_init)) + int2byte(next_packet) + data_init + next_packet += 1 + + if DEBUG: dump_packet(data) + + sock.send(data) + sock = self.socket = ssl.wrap_socket(sock, keyfile=self.key, + certfile=self.cert, + ssl_version=ssl.PROTOCOL_TLSv1, + cert_reqs=ssl.CERT_REQUIRED, + ca_certs=self.ca) + + data = data_init + self.user+int2byte(0) + _scramble(self.password.encode(self.charset), self.salt) + + if self.db: + self.db = self.db.encode(self.charset) + data += self.db + int2byte(0) + + data = pack_int24(len(data)) + int2byte(next_packet) + data + next_packet += 2 + + if DEBUG: dump_packet(data) + + sock.send(data) + + auth_packet = MysqlPacket(sock) + auth_packet.check_error() + if DEBUG: auth_packet.dump() + + # if old_passwords is enabled the packet will be 1 byte long and + # have the octet 254 + + if auth_packet.is_eof_packet(): + # send legacy handshake + #raise NotImplementedError, "old_passwords are not supported. Check to see if mysqld was started with --old-passwords, if old-passwords=1 in a my.cnf file, or if there are some short hashes in your mysql.user table." + # TODO: is this the correct charset? 
+ data = _scramble_323(self.password.encode(self.charset), self.salt.encode(self.charset)) + int2byte(0) + data = pack_int24(len(data)) + int2byte(next_packet) + data + + sock.send(data) + auth_packet = MysqlPacket(sock) + auth_packet.check_error() + if DEBUG: auth_packet.dump() + + + # _mysql support + def thread_id(self): + return self.server_thread_id[0] + + def character_set_name(self): + return self.charset + + def get_host_info(self): + return self.host_info + + def get_proto_info(self): + return self.protocol_version + + def _get_server_information(self): + sock = self.socket + i = 0 + packet = MysqlPacket(sock) + data = packet.get_all_data() + + if DEBUG: dump_packet(data) + #packet_len = byte2int(data[i:i+1]) + #i += 4 + self.protocol_version = byte2int(data[i:i+1]) + + i += 1 + server_end = data.find(int2byte(0), i) + # TODO: is this the correct charset? should it be default_charset? + self.server_version = data[i:server_end].decode(self.charset) + + i = server_end + 1 + self.server_thread_id = struct.unpack('<h', data[i:i+2]) + + i += 4 + self.salt = data[i:i+8] + + i += 9 + if len(data) >= i + 1: + i += 1 + + self.server_capabilities = struct.unpack('<h', data[i:i+2])[0] + + i += 1 + self.server_language = byte2int(data[i:i+1]) + self.server_charset = charset_by_id(self.server_language).name + + i += 16 + if len(data) >= i+12-1: + rest_salt = data[i:i+12] + self.salt += rest_salt + + def get_server_info(self): + return self.server_version + + Warning = Warning + Error = Error + InterfaceError = InterfaceError + DatabaseError = DatabaseError + DataError = DataError + OperationalError = OperationalError + IntegrityError = IntegrityError + InternalError = InternalError + ProgrammingError = ProgrammingError + NotSupportedError = NotSupportedError + +# TODO: move OK and EOF packet parsing/logic into a proper subclass +# of MysqlPacket like has been done with FieldDescriptorPacket. 
class MySQLResult(object):

    def __init__(self, connection):
        from weakref import proxy
        # Hold only a weak proxy so a result object never keeps its
        # connection alive.
        self.connection = proxy(connection)
        self.affected_rows = None
        self.insert_id = None
        self.server_status = 0
        self.warning_count = 0
        self.message = None
        self.field_count = 0
        self.description = None
        self.rows = None
        self.has_next = None

    def read(self):
        """Read the first response packet and dispatch on its type."""
        self.first_packet = self.connection.read_packet()

        # TODO: use classes for different packet types?
        if self.first_packet.is_ok_packet():
            self._read_ok_packet()
        else:
            self._read_result_packet()

    def _read_ok_packet(self):
        """Decode an OK packet: affected rows, insert id, status, message."""
        pkt = self.first_packet
        pkt.advance(1)  # field_count (always '0')
        self.affected_rows = pkt.read_length_coded_binary()
        self.insert_id = pkt.read_length_coded_binary()
        self.server_status = struct.unpack('<H', pkt.read(2))[0]
        self.warning_count = struct.unpack('<H', pkt.read(2))[0]
        self.message = pkt.read_all()

    def _read_result_packet(self):
        """Decode a result-set header, then column metadata and row data."""
        self.field_count = byte2int(self.first_packet.read(1))
        self._get_descriptions()
        self._read_rowdata_packet()

    # TODO: implement this as an iteratable so that it is more
    # memory efficient and lower-latency to client...
+ def _read_rowdata_packet(self): + """Read a rowdata packet for each data row in the result set.""" + rows = [] + while True: + packet = self.connection.read_packet() + if packet.is_eof_packet(): + self.warning_count = packet.read(2) + server_status = struct.unpack('<h', packet.read(2))[0] + self.has_next = (server_status + & SERVER_STATUS.SERVER_MORE_RESULTS_EXISTS) + break + + row = [] + for field in self.fields: + if field.type_code in self.connection.decoders: + converter = self.connection.decoders[field.type_code] + + if DEBUG: print "DEBUG: field=%s, converter=%s" % (field, converter) + data = packet.read_length_coded_string() + converted = None + if data != None: + converted = converter(self.connection, field, data) + + row.append(converted) + + rows.append(tuple(row)) + + self.affected_rows = len(rows) + self.rows = tuple(rows) + if DEBUG: self.rows + + def _get_descriptions(self): + """Read a column descriptor packet for each column in the result.""" + self.fields = [] + description = [] + for i in xrange(self.field_count): + field = self.connection.read_packet(FieldDescriptorPacket) + self.fields.append(field) + description.append(field.description()) + + eof_packet = self.connection.read_packet() + assert eof_packet.is_eof_packet(), 'Protocol error, expecting EOF' + self.description = tuple(description) + ADDED gluon/contrib/pymysql/constants/CLIENT.py Index: gluon/contrib/pymysql/constants/CLIENT.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/CLIENT.py @@ -0,0 +1,20 @@ + +LONG_PASSWORD = 1 +FOUND_ROWS = 1 << 1 +LONG_FLAG = 1 << 2 +CONNECT_WITH_DB = 1 << 3 +NO_SCHEMA = 1 << 4 +COMPRESS = 1 << 5 +ODBC = 1 << 6 +LOCAL_FILES = 1 << 7 +IGNORE_SPACE = 1 << 8 +PROTOCOL_41 = 1 << 9 +INTERACTIVE = 1 << 10 +SSL = 1 << 11 +IGNORE_SIGPIPE = 1 << 12 +TRANSACTIONS = 1 << 13 +SECURE_CONNECTION = 1 << 15 +MULTI_STATEMENTS = 1 << 16 +MULTI_RESULTS = 1 << 17 +CAPABILITIES = 
LONG_PASSWORD|LONG_FLAG|TRANSACTIONS| \ + PROTOCOL_41|SECURE_CONNECTION ADDED gluon/contrib/pymysql/constants/COMMAND.py Index: gluon/contrib/pymysql/constants/COMMAND.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/COMMAND.py @@ -0,0 +1,23 @@ + +COM_SLEEP = 0x00 +COM_QUIT = 0x01 +COM_INIT_DB = 0x02 +COM_QUERY = 0x03 +COM_FIELD_LIST = 0x04 +COM_CREATE_DB = 0x05 +COM_DROP_DB = 0x06 +COM_REFRESH = 0x07 +COM_SHUTDOWN = 0x08 +COM_STATISTICS = 0x09 +COM_PROCESS_INFO = 0x0a +COM_CONNECT = 0x0b +COM_PROCESS_KILL = 0x0c +COM_DEBUG = 0x0d +COM_PING = 0x0e +COM_TIME = 0x0f +COM_DELAYED_INSERT = 0x10 +COM_CHANGE_USER = 0x11 +COM_BINLOG_DUMP = 0x12 +COM_TABLE_DUMP = 0x13 +COM_CONNECT_OUT = 0x14 +COM_REGISTER_SLAVE = 0x15 ADDED gluon/contrib/pymysql/constants/ER.py Index: gluon/contrib/pymysql/constants/ER.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/ER.py @@ -0,0 +1,472 @@ + +ERROR_FIRST = 1000 +HASHCHK = 1000 +NISAMCHK = 1001 +NO = 1002 +YES = 1003 +CANT_CREATE_FILE = 1004 +CANT_CREATE_TABLE = 1005 +CANT_CREATE_DB = 1006 +DB_CREATE_EXISTS = 1007 +DB_DROP_EXISTS = 1008 +DB_DROP_DELETE = 1009 +DB_DROP_RMDIR = 1010 +CANT_DELETE_FILE = 1011 +CANT_FIND_SYSTEM_REC = 1012 +CANT_GET_STAT = 1013 +CANT_GET_WD = 1014 +CANT_LOCK = 1015 +CANT_OPEN_FILE = 1016 +FILE_NOT_FOUND = 1017 +CANT_READ_DIR = 1018 +CANT_SET_WD = 1019 +CHECKREAD = 1020 +DISK_FULL = 1021 +DUP_KEY = 1022 +ERROR_ON_CLOSE = 1023 +ERROR_ON_READ = 1024 +ERROR_ON_RENAME = 1025 +ERROR_ON_WRITE = 1026 +FILE_USED = 1027 +FILSORT_ABORT = 1028 +FORM_NOT_FOUND = 1029 +GET_ERRNO = 1030 +ILLEGAL_HA = 1031 +KEY_NOT_FOUND = 1032 +NOT_FORM_FILE = 1033 +NOT_KEYFILE = 1034 +OLD_KEYFILE = 1035 +OPEN_AS_READONLY = 1036 +OUTOFMEMORY = 1037 +OUT_OF_SORTMEMORY = 1038 +UNEXPECTED_EOF = 1039 +CON_COUNT_ERROR = 1040 +OUT_OF_RESOURCES = 1041 +BAD_HOST_ERROR = 1042 +HANDSHAKE_ERROR = 1043 
+DBACCESS_DENIED_ERROR = 1044 +ACCESS_DENIED_ERROR = 1045 +NO_DB_ERROR = 1046 +UNKNOWN_COM_ERROR = 1047 +BAD_NULL_ERROR = 1048 +BAD_DB_ERROR = 1049 +TABLE_EXISTS_ERROR = 1050 +BAD_TABLE_ERROR = 1051 +NON_UNIQ_ERROR = 1052 +SERVER_SHUTDOWN = 1053 +BAD_FIELD_ERROR = 1054 +WRONG_FIELD_WITH_GROUP = 1055 +WRONG_GROUP_FIELD = 1056 +WRONG_SUM_SELECT = 1057 +WRONG_VALUE_COUNT = 1058 +TOO_LONG_IDENT = 1059 +DUP_FIELDNAME = 1060 +DUP_KEYNAME = 1061 +DUP_ENTRY = 1062 +WRONG_FIELD_SPEC = 1063 +PARSE_ERROR = 1064 +EMPTY_QUERY = 1065 +NONUNIQ_TABLE = 1066 +INVALID_DEFAULT = 1067 +MULTIPLE_PRI_KEY = 1068 +TOO_MANY_KEYS = 1069 +TOO_MANY_KEY_PARTS = 1070 +TOO_LONG_KEY = 1071 +KEY_COLUMN_DOES_NOT_EXITS = 1072 +BLOB_USED_AS_KEY = 1073 +TOO_BIG_FIELDLENGTH = 1074 +WRONG_AUTO_KEY = 1075 +READY = 1076 +NORMAL_SHUTDOWN = 1077 +GOT_SIGNAL = 1078 +SHUTDOWN_COMPLETE = 1079 +FORCING_CLOSE = 1080 +IPSOCK_ERROR = 1081 +NO_SUCH_INDEX = 1082 +WRONG_FIELD_TERMINATORS = 1083 +BLOBS_AND_NO_TERMINATED = 1084 +TEXTFILE_NOT_READABLE = 1085 +FILE_EXISTS_ERROR = 1086 +LOAD_INFO = 1087 +ALTER_INFO = 1088 +WRONG_SUB_KEY = 1089 +CANT_REMOVE_ALL_FIELDS = 1090 +CANT_DROP_FIELD_OR_KEY = 1091 +INSERT_INFO = 1092 +UPDATE_TABLE_USED = 1093 +NO_SUCH_THREAD = 1094 +KILL_DENIED_ERROR = 1095 +NO_TABLES_USED = 1096 +TOO_BIG_SET = 1097 +NO_UNIQUE_LOGFILE = 1098 +TABLE_NOT_LOCKED_FOR_WRITE = 1099 +TABLE_NOT_LOCKED = 1100 +BLOB_CANT_HAVE_DEFAULT = 1101 +WRONG_DB_NAME = 1102 +WRONG_TABLE_NAME = 1103 +TOO_BIG_SELECT = 1104 +UNKNOWN_ERROR = 1105 +UNKNOWN_PROCEDURE = 1106 +WRONG_PARAMCOUNT_TO_PROCEDURE = 1107 +WRONG_PARAMETERS_TO_PROCEDURE = 1108 +UNKNOWN_TABLE = 1109 +FIELD_SPECIFIED_TWICE = 1110 +INVALID_GROUP_FUNC_USE = 1111 +UNSUPPORTED_EXTENSION = 1112 +TABLE_MUST_HAVE_COLUMNS = 1113 +RECORD_FILE_FULL = 1114 +UNKNOWN_CHARACTER_SET = 1115 +TOO_MANY_TABLES = 1116 +TOO_MANY_FIELDS = 1117 +TOO_BIG_ROWSIZE = 1118 +STACK_OVERRUN = 1119 +WRONG_OUTER_JOIN = 1120 +NULL_COLUMN_IN_INDEX = 1121 +CANT_FIND_UDF = 1122 
+CANT_INITIALIZE_UDF = 1123 +UDF_NO_PATHS = 1124 +UDF_EXISTS = 1125 +CANT_OPEN_LIBRARY = 1126 +CANT_FIND_DL_ENTRY = 1127 +FUNCTION_NOT_DEFINED = 1128 +HOST_IS_BLOCKED = 1129 +HOST_NOT_PRIVILEGED = 1130 +PASSWORD_ANONYMOUS_USER = 1131 +PASSWORD_NOT_ALLOWED = 1132 +PASSWORD_NO_MATCH = 1133 +UPDATE_INFO = 1134 +CANT_CREATE_THREAD = 1135 +WRONG_VALUE_COUNT_ON_ROW = 1136 +CANT_REOPEN_TABLE = 1137 +INVALID_USE_OF_NULL = 1138 +REGEXP_ERROR = 1139 +MIX_OF_GROUP_FUNC_AND_FIELDS = 1140 +NONEXISTING_GRANT = 1141 +TABLEACCESS_DENIED_ERROR = 1142 +COLUMNACCESS_DENIED_ERROR = 1143 +ILLEGAL_GRANT_FOR_TABLE = 1144 +GRANT_WRONG_HOST_OR_USER = 1145 +NO_SUCH_TABLE = 1146 +NONEXISTING_TABLE_GRANT = 1147 +NOT_ALLOWED_COMMAND = 1148 +SYNTAX_ERROR = 1149 +DELAYED_CANT_CHANGE_LOCK = 1150 +TOO_MANY_DELAYED_THREADS = 1151 +ABORTING_CONNECTION = 1152 +NET_PACKET_TOO_LARGE = 1153 +NET_READ_ERROR_FROM_PIPE = 1154 +NET_FCNTL_ERROR = 1155 +NET_PACKETS_OUT_OF_ORDER = 1156 +NET_UNCOMPRESS_ERROR = 1157 +NET_READ_ERROR = 1158 +NET_READ_INTERRUPTED = 1159 +NET_ERROR_ON_WRITE = 1160 +NET_WRITE_INTERRUPTED = 1161 +TOO_LONG_STRING = 1162 +TABLE_CANT_HANDLE_BLOB = 1163 +TABLE_CANT_HANDLE_AUTO_INCREMENT = 1164 +DELAYED_INSERT_TABLE_LOCKED = 1165 +WRONG_COLUMN_NAME = 1166 +WRONG_KEY_COLUMN = 1167 +WRONG_MRG_TABLE = 1168 +DUP_UNIQUE = 1169 +BLOB_KEY_WITHOUT_LENGTH = 1170 +PRIMARY_CANT_HAVE_NULL = 1171 +TOO_MANY_ROWS = 1172 +REQUIRES_PRIMARY_KEY = 1173 +NO_RAID_COMPILED = 1174 +UPDATE_WITHOUT_KEY_IN_SAFE_MODE = 1175 +KEY_DOES_NOT_EXITS = 1176 +CHECK_NO_SUCH_TABLE = 1177 +CHECK_NOT_IMPLEMENTED = 1178 +CANT_DO_THIS_DURING_AN_TRANSACTION = 1179 +ERROR_DURING_COMMIT = 1180 +ERROR_DURING_ROLLBACK = 1181 +ERROR_DURING_FLUSH_LOGS = 1182 +ERROR_DURING_CHECKPOINT = 1183 +NEW_ABORTING_CONNECTION = 1184 +DUMP_NOT_IMPLEMENTED = 1185 +FLUSH_MASTER_BINLOG_CLOSED = 1186 +INDEX_REBUILD = 1187 +MASTER = 1188 +MASTER_NET_READ = 1189 +MASTER_NET_WRITE = 1190 +FT_MATCHING_KEY_NOT_FOUND = 1191 +LOCK_OR_ACTIVE_TRANSACTION = 1192 
+UNKNOWN_SYSTEM_VARIABLE = 1193 +CRASHED_ON_USAGE = 1194 +CRASHED_ON_REPAIR = 1195 +WARNING_NOT_COMPLETE_ROLLBACK = 1196 +TRANS_CACHE_FULL = 1197 +SLAVE_MUST_STOP = 1198 +SLAVE_NOT_RUNNING = 1199 +BAD_SLAVE = 1200 +MASTER_INFO = 1201 +SLAVE_THREAD = 1202 +TOO_MANY_USER_CONNECTIONS = 1203 +SET_CONSTANTS_ONLY = 1204 +LOCK_WAIT_TIMEOUT = 1205 +LOCK_TABLE_FULL = 1206 +READ_ONLY_TRANSACTION = 1207 +DROP_DB_WITH_READ_LOCK = 1208 +CREATE_DB_WITH_READ_LOCK = 1209 +WRONG_ARGUMENTS = 1210 +NO_PERMISSION_TO_CREATE_USER = 1211 +UNION_TABLES_IN_DIFFERENT_DIR = 1212 +LOCK_DEADLOCK = 1213 +TABLE_CANT_HANDLE_FT = 1214 +CANNOT_ADD_FOREIGN = 1215 +NO_REFERENCED_ROW = 1216 +ROW_IS_REFERENCED = 1217 +CONNECT_TO_MASTER = 1218 +QUERY_ON_MASTER = 1219 +ERROR_WHEN_EXECUTING_COMMAND = 1220 +WRONG_USAGE = 1221 +WRONG_NUMBER_OF_COLUMNS_IN_SELECT = 1222 +CANT_UPDATE_WITH_READLOCK = 1223 +MIXING_NOT_ALLOWED = 1224 +DUP_ARGUMENT = 1225 +USER_LIMIT_REACHED = 1226 +SPECIFIC_ACCESS_DENIED_ERROR = 1227 +LOCAL_VARIABLE = 1228 +GLOBAL_VARIABLE = 1229 +NO_DEFAULT = 1230 +WRONG_VALUE_FOR_VAR = 1231 +WRONG_TYPE_FOR_VAR = 1232 +VAR_CANT_BE_READ = 1233 +CANT_USE_OPTION_HERE = 1234 +NOT_SUPPORTED_YET = 1235 +MASTER_FATAL_ERROR_READING_BINLOG = 1236 +SLAVE_IGNORED_TABLE = 1237 +INCORRECT_GLOBAL_LOCAL_VAR = 1238 +WRONG_FK_DEF = 1239 +KEY_REF_DO_NOT_MATCH_TABLE_REF = 1240 +OPERAND_COLUMNS = 1241 +SUBQUERY_NO_1_ROW = 1242 +UNKNOWN_STMT_HANDLER = 1243 +CORRUPT_HELP_DB = 1244 +CYCLIC_REFERENCE = 1245 +AUTO_CONVERT = 1246 +ILLEGAL_REFERENCE = 1247 +DERIVED_MUST_HAVE_ALIAS = 1248 +SELECT_REDUCED = 1249 +TABLENAME_NOT_ALLOWED_HERE = 1250 +NOT_SUPPORTED_AUTH_MODE = 1251 +SPATIAL_CANT_HAVE_NULL = 1252 +COLLATION_CHARSET_MISMATCH = 1253 +SLAVE_WAS_RUNNING = 1254 +SLAVE_WAS_NOT_RUNNING = 1255 +TOO_BIG_FOR_UNCOMPRESS = 1256 +ZLIB_Z_MEM_ERROR = 1257 +ZLIB_Z_BUF_ERROR = 1258 +ZLIB_Z_DATA_ERROR = 1259 +CUT_VALUE_GROUP_CONCAT = 1260 +WARN_TOO_FEW_RECORDS = 1261 +WARN_TOO_MANY_RECORDS = 1262 +WARN_NULL_TO_NOTNULL = 1263 
+WARN_DATA_OUT_OF_RANGE = 1264 +WARN_DATA_TRUNCATED = 1265 +WARN_USING_OTHER_HANDLER = 1266 +CANT_AGGREGATE_2COLLATIONS = 1267 +DROP_USER = 1268 +REVOKE_GRANTS = 1269 +CANT_AGGREGATE_3COLLATIONS = 1270 +CANT_AGGREGATE_NCOLLATIONS = 1271 +VARIABLE_IS_NOT_STRUCT = 1272 +UNKNOWN_COLLATION = 1273 +SLAVE_IGNORED_SSL_PARAMS = 1274 +SERVER_IS_IN_SECURE_AUTH_MODE = 1275 +WARN_FIELD_RESOLVED = 1276 +BAD_SLAVE_UNTIL_COND = 1277 +MISSING_SKIP_SLAVE = 1278 +UNTIL_COND_IGNORED = 1279 +WRONG_NAME_FOR_INDEX = 1280 +WRONG_NAME_FOR_CATALOG = 1281 +WARN_QC_RESIZE = 1282 +BAD_FT_COLUMN = 1283 +UNKNOWN_KEY_CACHE = 1284 +WARN_HOSTNAME_WONT_WORK = 1285 +UNKNOWN_STORAGE_ENGINE = 1286 +WARN_DEPRECATED_SYNTAX = 1287 +NON_UPDATABLE_TABLE = 1288 +FEATURE_DISABLED = 1289 +OPTION_PREVENTS_STATEMENT = 1290 +DUPLICATED_VALUE_IN_TYPE = 1291 +TRUNCATED_WRONG_VALUE = 1292 +TOO_MUCH_AUTO_TIMESTAMP_COLS = 1293 +INVALID_ON_UPDATE = 1294 +UNSUPPORTED_PS = 1295 +GET_ERRMSG = 1296 +GET_TEMPORARY_ERRMSG = 1297 +UNKNOWN_TIME_ZONE = 1298 +WARN_INVALID_TIMESTAMP = 1299 +INVALID_CHARACTER_STRING = 1300 +WARN_ALLOWED_PACKET_OVERFLOWED = 1301 +CONFLICTING_DECLARATIONS = 1302 +SP_NO_RECURSIVE_CREATE = 1303 +SP_ALREADY_EXISTS = 1304 +SP_DOES_NOT_EXIST = 1305 +SP_DROP_FAILED = 1306 +SP_STORE_FAILED = 1307 +SP_LILABEL_MISMATCH = 1308 +SP_LABEL_REDEFINE = 1309 +SP_LABEL_MISMATCH = 1310 +SP_UNINIT_VAR = 1311 +SP_BADSELECT = 1312 +SP_BADRETURN = 1313 +SP_BADSTATEMENT = 1314 +UPDATE_LOG_DEPRECATED_IGNORED = 1315 +UPDATE_LOG_DEPRECATED_TRANSLATED = 1316 +QUERY_INTERRUPTED = 1317 +SP_WRONG_NO_OF_ARGS = 1318 +SP_COND_MISMATCH = 1319 +SP_NORETURN = 1320 +SP_NORETURNEND = 1321 +SP_BAD_CURSOR_QUERY = 1322 +SP_BAD_CURSOR_SELECT = 1323 +SP_CURSOR_MISMATCH = 1324 +SP_CURSOR_ALREADY_OPEN = 1325 +SP_CURSOR_NOT_OPEN = 1326 +SP_UNDECLARED_VAR = 1327 +SP_WRONG_NO_OF_FETCH_ARGS = 1328 +SP_FETCH_NO_DATA = 1329 +SP_DUP_PARAM = 1330 +SP_DUP_VAR = 1331 +SP_DUP_COND = 1332 +SP_DUP_CURS = 1333 +SP_CANT_ALTER = 1334 +SP_SUBSELECT_NYI = 1335 
+STMT_NOT_ALLOWED_IN_SF_OR_TRG = 1336 +SP_VARCOND_AFTER_CURSHNDLR = 1337 +SP_CURSOR_AFTER_HANDLER = 1338 +SP_CASE_NOT_FOUND = 1339 +FPARSER_TOO_BIG_FILE = 1340 +FPARSER_BAD_HEADER = 1341 +FPARSER_EOF_IN_COMMENT = 1342 +FPARSER_ERROR_IN_PARAMETER = 1343 +FPARSER_EOF_IN_UNKNOWN_PARAMETER = 1344 +VIEW_NO_EXPLAIN = 1345 +FRM_UNKNOWN_TYPE = 1346 +WRONG_OBJECT = 1347 +NONUPDATEABLE_COLUMN = 1348 +VIEW_SELECT_DERIVED = 1349 +VIEW_SELECT_CLAUSE = 1350 +VIEW_SELECT_VARIABLE = 1351 +VIEW_SELECT_TMPTABLE = 1352 +VIEW_WRONG_LIST = 1353 +WARN_VIEW_MERGE = 1354 +WARN_VIEW_WITHOUT_KEY = 1355 +VIEW_INVALID = 1356 +SP_NO_DROP_SP = 1357 +SP_GOTO_IN_HNDLR = 1358 +TRG_ALREADY_EXISTS = 1359 +TRG_DOES_NOT_EXIST = 1360 +TRG_ON_VIEW_OR_TEMP_TABLE = 1361 +TRG_CANT_CHANGE_ROW = 1362 +TRG_NO_SUCH_ROW_IN_TRG = 1363 +NO_DEFAULT_FOR_FIELD = 1364 +DIVISION_BY_ZERO = 1365 +TRUNCATED_WRONG_VALUE_FOR_FIELD = 1366 +ILLEGAL_VALUE_FOR_TYPE = 1367 +VIEW_NONUPD_CHECK = 1368 +VIEW_CHECK_FAILED = 1369 +PROCACCESS_DENIED_ERROR = 1370 +RELAY_LOG_FAIL = 1371 +PASSWD_LENGTH = 1372 +UNKNOWN_TARGET_BINLOG = 1373 +IO_ERR_LOG_INDEX_READ = 1374 +BINLOG_PURGE_PROHIBITED = 1375 +FSEEK_FAIL = 1376 +BINLOG_PURGE_FATAL_ERR = 1377 +LOG_IN_USE = 1378 +LOG_PURGE_UNKNOWN_ERR = 1379 +RELAY_LOG_INIT = 1380 +NO_BINARY_LOGGING = 1381 +RESERVED_SYNTAX = 1382 +WSAS_FAILED = 1383 +DIFF_GROUPS_PROC = 1384 +NO_GROUP_FOR_PROC = 1385 +ORDER_WITH_PROC = 1386 +LOGGING_PROHIBIT_CHANGING_OF = 1387 +NO_FILE_MAPPING = 1388 +WRONG_MAGIC = 1389 +PS_MANY_PARAM = 1390 +KEY_PART_0 = 1391 +VIEW_CHECKSUM = 1392 +VIEW_MULTIUPDATE = 1393 +VIEW_NO_INSERT_FIELD_LIST = 1394 +VIEW_DELETE_MERGE_VIEW = 1395 +CANNOT_USER = 1396 +XAER_NOTA = 1397 +XAER_INVAL = 1398 +XAER_RMFAIL = 1399 +XAER_OUTSIDE = 1400 +XAER_RMERR = 1401 +XA_RBROLLBACK = 1402 +NONEXISTING_PROC_GRANT = 1403 +PROC_AUTO_GRANT_FAIL = 1404 +PROC_AUTO_REVOKE_FAIL = 1405 +DATA_TOO_LONG = 1406 +SP_BAD_SQLSTATE = 1407 +STARTUP = 1408 +LOAD_FROM_FIXED_SIZE_ROWS_TO_VAR = 1409 
+CANT_CREATE_USER_WITH_GRANT = 1410 +WRONG_VALUE_FOR_TYPE = 1411 +TABLE_DEF_CHANGED = 1412 +SP_DUP_HANDLER = 1413 +SP_NOT_VAR_ARG = 1414 +SP_NO_RETSET = 1415 +CANT_CREATE_GEOMETRY_OBJECT = 1416 +FAILED_ROUTINE_BREAK_BINLOG = 1417 +BINLOG_UNSAFE_ROUTINE = 1418 +BINLOG_CREATE_ROUTINE_NEED_SUPER = 1419 +EXEC_STMT_WITH_OPEN_CURSOR = 1420 +STMT_HAS_NO_OPEN_CURSOR = 1421 +COMMIT_NOT_ALLOWED_IN_SF_OR_TRG = 1422 +NO_DEFAULT_FOR_VIEW_FIELD = 1423 +SP_NO_RECURSION = 1424 +TOO_BIG_SCALE = 1425 +TOO_BIG_PRECISION = 1426 +M_BIGGER_THAN_D = 1427 +WRONG_LOCK_OF_SYSTEM_TABLE = 1428 +CONNECT_TO_FOREIGN_DATA_SOURCE = 1429 +QUERY_ON_FOREIGN_DATA_SOURCE = 1430 +FOREIGN_DATA_SOURCE_DOESNT_EXIST = 1431 +FOREIGN_DATA_STRING_INVALID_CANT_CREATE = 1432 +FOREIGN_DATA_STRING_INVALID = 1433 +CANT_CREATE_FEDERATED_TABLE = 1434 +TRG_IN_WRONG_SCHEMA = 1435 +STACK_OVERRUN_NEED_MORE = 1436 +TOO_LONG_BODY = 1437 +WARN_CANT_DROP_DEFAULT_KEYCACHE = 1438 +TOO_BIG_DISPLAYWIDTH = 1439 +XAER_DUPID = 1440 +DATETIME_FUNCTION_OVERFLOW = 1441 +CANT_UPDATE_USED_TABLE_IN_SF_OR_TRG = 1442 +VIEW_PREVENT_UPDATE = 1443 +PS_NO_RECURSION = 1444 +SP_CANT_SET_AUTOCOMMIT = 1445 +MALFORMED_DEFINER = 1446 +VIEW_FRM_NO_USER = 1447 +VIEW_OTHER_USER = 1448 +NO_SUCH_USER = 1449 +FORBID_SCHEMA_CHANGE = 1450 +ROW_IS_REFERENCED_2 = 1451 +NO_REFERENCED_ROW_2 = 1452 +SP_BAD_VAR_SHADOW = 1453 +TRG_NO_DEFINER = 1454 +OLD_FILE_FORMAT = 1455 +SP_RECURSION_LIMIT = 1456 +SP_PROC_TABLE_CORRUPT = 1457 +SP_WRONG_NAME = 1458 +TABLE_NEEDS_UPGRADE = 1459 +SP_NO_AGGREGATE = 1460 +MAX_PREPARED_STMT_COUNT_REACHED = 1461 +VIEW_RECURSIVE = 1462 +NON_GROUPING_FIELD_USED = 1463 +TABLE_CANT_HANDLE_SPKEYS = 1464 +NO_TRIGGERS_ON_SYSTEM_SCHEMA = 1465 +USERNAME = 1466 +HOSTNAME = 1467 +WRONG_STRING_LENGTH = 1468 +ERROR_LAST = 1468 ADDED gluon/contrib/pymysql/constants/FIELD_TYPE.py Index: gluon/contrib/pymysql/constants/FIELD_TYPE.py ================================================================== --- /dev/null +++ 
gluon/contrib/pymysql/constants/FIELD_TYPE.py @@ -0,0 +1,32 @@ + + +DECIMAL = 0 +TINY = 1 +SHORT = 2 +LONG = 3 +FLOAT = 4 +DOUBLE = 5 +NULL = 6 +TIMESTAMP = 7 +LONGLONG = 8 +INT24 = 9 +DATE = 10 +TIME = 11 +DATETIME = 12 +YEAR = 13 +NEWDATE = 14 +VARCHAR = 15 +BIT = 16 +NEWDECIMAL = 246 +ENUM = 247 +SET = 248 +TINY_BLOB = 249 +MEDIUM_BLOB = 250 +LONG_BLOB = 251 +BLOB = 252 +VAR_STRING = 253 +STRING = 254 +GEOMETRY = 255 + +CHAR = TINY +INTERVAL = ENUM ADDED gluon/contrib/pymysql/constants/FLAG.py Index: gluon/contrib/pymysql/constants/FLAG.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/FLAG.py @@ -0,0 +1,15 @@ +NOT_NULL = 1 +PRI_KEY = 2 +UNIQUE_KEY = 4 +MULTIPLE_KEY = 8 +BLOB = 16 +UNSIGNED = 32 +ZEROFILL = 64 +BINARY = 128 +ENUM = 256 +AUTO_INCREMENT = 512 +TIMESTAMP = 1024 +SET = 2048 +PART_KEY = 16384 +GROUP = 32767 +UNIQUE = 65536 ADDED gluon/contrib/pymysql/constants/SERVER_STATUS.py Index: gluon/contrib/pymysql/constants/SERVER_STATUS.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/SERVER_STATUS.py @@ -0,0 +1,12 @@ + +SERVER_STATUS_IN_TRANS = 1 +SERVER_STATUS_AUTOCOMMIT = 2 +SERVER_MORE_RESULTS_EXISTS = 8 +SERVER_QUERY_NO_GOOD_INDEX_USED = 16 +SERVER_QUERY_NO_INDEX_USED = 32 +SERVER_STATUS_CURSOR_EXISTS = 64 +SERVER_STATUS_LAST_ROW_SENT = 128 +SERVER_STATUS_DB_DROPPED = 256 +SERVER_STATUS_NO_BACKSLASH_ESCAPES = 512 +SERVER_STATUS_METADATA_CHANGED = 1024 + ADDED gluon/contrib/pymysql/constants/__init__.py Index: gluon/contrib/pymysql/constants/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/constants/__init__.py ADDED gluon/contrib/pymysql/converters.py Index: gluon/contrib/pymysql/converters.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/converters.py @@ -0,0 +1,347 @@ +import re 
+import datetime +import time + +from constants import FIELD_TYPE, FLAG +from charset import charset_by_id + +try: + set +except NameError: + try: + from sets import BaseSet as set + except ImportError: + from sets import Set as set + +ESCAPE_REGEX = re.compile(r"[\0\n\r\032\'\"\\]") +ESCAPE_MAP = {'\0': '\\0', '\n': '\\n', '\r': '\\r', '\032': '\\Z', + '\'': '\\\'', '"': '\\"', '\\': '\\\\'} + +def escape_item(val, charset): + if type(val) in [tuple, list, set]: + return escape_sequence(val, charset) + if type(val) is dict: + return escape_dict(val, charset) + if hasattr(val, "decode") and not isinstance(val, unicode): + # deal with py3k bytes + val = val.decode(charset) + encoder = encoders[type(val)] + val = encoder(val) + if type(val) is str: + return val + val = val.encode(charset) + return val + +def escape_dict(val, charset): + n = {} + for k, v in val.items(): + quoted = escape_item(v, charset) + n[k] = quoted + return n + +def escape_sequence(val, charset): + n = [] + for item in val: + quoted = escape_item(item, charset) + n.append(quoted) + return tuple(n) + +def escape_set(val, charset): + val = map(lambda x: escape_item(x, charset), val) + return ','.join(val) + +def escape_bool(value): + return str(int(value)) + +def escape_object(value): + return str(value) + +escape_int = escape_long = escape_object + +def escape_float(value): + return ('%.15g' % value) + +def escape_string(value): + return ("'%s'" % ESCAPE_REGEX.sub( + lambda match: ESCAPE_MAP.get(match.group(0)), value)) + +def escape_unicode(value): + return escape_string(value) + +def escape_None(value): + return 'NULL' + +def escape_timedelta(obj): + seconds = int(obj.seconds) % 60 + minutes = int(obj.seconds // 60) % 60 + hours = int(obj.seconds // 3600) % 24 + int(obj.days) * 24 + return escape_string('%02d:%02d:%02d' % (hours, minutes, seconds)) + +def escape_time(obj): + s = "%02d:%02d:%02d" % (int(obj.hour), int(obj.minute), + int(obj.second)) + if obj.microsecond: + s += ".%f" % 
obj.microsecond + + return escape_string(s) + +def escape_datetime(obj): + return escape_string(obj.strftime("%Y-%m-%d %H:%M:%S")) + +def escape_date(obj): + return escape_string(obj.strftime("%Y-%m-%d")) + +def escape_struct_time(obj): + return escape_datetime(datetime.datetime(*obj[:6])) + +def convert_datetime(connection, field, obj): + """Returns a DATETIME or TIMESTAMP column value as a datetime object: + + >>> datetime_or_None('2007-02-25 23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + >>> datetime_or_None('2007-02-25T23:06:20') + datetime.datetime(2007, 2, 25, 23, 6, 20) + + Illegal values are returned as None: + + >>> datetime_or_None('2007-02-31T23:06:20') is None + True + >>> datetime_or_None('0000-00-00 00:00:00') is None + True + + """ + if not isinstance(obj, unicode): + obj = obj.decode(connection.charset) + if ' ' in obj: + sep = ' ' + elif 'T' in obj: + sep = 'T' + else: + return convert_date(connection, field, obj) + + try: + ymd, hms = obj.split(sep, 1) + return datetime.datetime(*[ int(x) for x in ymd.split('-')+hms.split(':') ]) + except ValueError: + return convert_date(connection, field, obj) + +def convert_timedelta(connection, field, obj): + """Returns a TIME column as a timedelta object: + + >>> timedelta_or_None('25:06:17') + datetime.timedelta(1, 3977) + >>> timedelta_or_None('-25:06:17') + datetime.timedelta(-2, 83177) + + Illegal values are returned as None: + + >>> timedelta_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. 
+ """ + from math import modf + try: + if not isinstance(obj, unicode): + obj = obj.decode(connection.charset) + hours, minutes, seconds = tuple([int(x) for x in obj.split(':')]) + tdelta = datetime.timedelta( + hours = int(hours), + minutes = int(minutes), + seconds = int(seconds), + microseconds = int(modf(float(seconds))[0]*1000000), + ) + return tdelta + except ValueError: + return None + +def convert_time(connection, field, obj): + """Returns a TIME column as a time object: + + >>> time_or_None('15:06:17') + datetime.time(15, 6, 17) + + Illegal values are returned as None: + + >>> time_or_None('-25:06:17') is None + True + >>> time_or_None('random crap') is None + True + + Note that MySQL always returns TIME columns as (+|-)HH:MM:SS, but + can accept values as (+|-)DD HH:MM:SS. The latter format will not + be parsed correctly by this function. + + Also note that MySQL's TIME column corresponds more closely to + Python's timedelta and not time. However if you want TIME columns + to be treated as time-of-day and not a time offset, then you can + use set this function as the converter for FIELD_TYPE.TIME. + """ + from math import modf + try: + hour, minute, second = obj.split(':') + return datetime.time(hour=int(hour), minute=int(minute), + second=int(second), + microsecond=int(modf(float(second))[0]*1000000)) + except ValueError: + return None + +def convert_date(connection, field, obj): + """Returns a DATE column as a date object: + + >>> date_or_None('2007-02-26') + datetime.date(2007, 2, 26) + + Illegal values are returned as None: + + >>> date_or_None('2007-02-31') is None + True + >>> date_or_None('0000-00-00') is None + True + + """ + try: + if not isinstance(obj, unicode): + obj = obj.decode(connection.charset) + return datetime.date(*[ int(x) for x in obj.split('-', 2) ]) + except ValueError: + return None + +def convert_mysql_timestamp(connection, field, timestamp): + """Convert a MySQL TIMESTAMP to a Timestamp object. 
+ + MySQL >= 4.1 returns TIMESTAMP in the same format as DATETIME: + + >>> mysql_timestamp_converter('2007-02-25 22:32:17') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + MySQL < 4.1 uses a big string of numbers: + + >>> mysql_timestamp_converter('20070225223217') + datetime.datetime(2007, 2, 25, 22, 32, 17) + + Illegal values are returned as None: + + >>> mysql_timestamp_converter('2007-02-31 22:32:17') is None + True + >>> mysql_timestamp_converter('00000000000000') is None + True + + """ + if not isinstance(timestamp, unicode): + timestamp = timestamp.decode(connection.charset) + + if timestamp[4] == '-': + return convert_datetime(connection, field, timestamp) + timestamp += "0"*(14-len(timestamp)) # padding + year, month, day, hour, minute, second = \ + int(timestamp[:4]), int(timestamp[4:6]), int(timestamp[6:8]), \ + int(timestamp[8:10]), int(timestamp[10:12]), int(timestamp[12:14]) + try: + return datetime.datetime(year, month, day, hour, minute, second) + except ValueError: + return None + +def convert_set(s): + return set(s.split(",")) + +def convert_bit(connection, field, b): + #b = "\x00" * (8 - len(b)) + b # pad w/ zeroes + #return struct.unpack(">Q", b)[0] + # + # the snippet above is right, but MySQLdb doesn't process bits, + # so we shouldn't either + return b + +def convert_characters(connection, field, data): + field_charset = charset_by_id(field.charsetnr).name + if field.flags & FLAG.SET: + return convert_set(data.decode(field_charset)) + if field.flags & FLAG.BINARY: + return data + + if connection.use_unicode: + data = data.decode(field_charset) + elif connection.charset != field_charset: + data = data.decode(field_charset) + data = data.encode(connection.charset) + else: + data = data.decode(connection.charset) + return data + +def convert_int(connection, field, data): + return int(data) + +def convert_long(connection, field, data): + return long(data) + +def convert_float(connection, field, data): + return float(data) + +encoders = { + bool: 
escape_bool, + int: escape_int, + long: escape_long, + float: escape_float, + str: escape_string, + unicode: escape_unicode, + tuple: escape_sequence, + list:escape_sequence, + set:escape_sequence, + dict:escape_dict, + type(None):escape_None, + datetime.date: escape_date, + datetime.datetime : escape_datetime, + datetime.timedelta : escape_timedelta, + datetime.time : escape_time, + time.struct_time : escape_struct_time, + } + +decoders = { + FIELD_TYPE.BIT: convert_bit, + FIELD_TYPE.TINY: convert_int, + FIELD_TYPE.SHORT: convert_int, + FIELD_TYPE.LONG: convert_long, + FIELD_TYPE.FLOAT: convert_float, + FIELD_TYPE.DOUBLE: convert_float, + FIELD_TYPE.DECIMAL: convert_float, + FIELD_TYPE.NEWDECIMAL: convert_float, + FIELD_TYPE.LONGLONG: convert_long, + FIELD_TYPE.INT24: convert_int, + FIELD_TYPE.YEAR: convert_int, + FIELD_TYPE.TIMESTAMP: convert_mysql_timestamp, + FIELD_TYPE.DATETIME: convert_datetime, + FIELD_TYPE.TIME: convert_timedelta, + FIELD_TYPE.DATE: convert_date, + FIELD_TYPE.SET: convert_set, + FIELD_TYPE.BLOB: convert_characters, + FIELD_TYPE.TINY_BLOB: convert_characters, + FIELD_TYPE.MEDIUM_BLOB: convert_characters, + FIELD_TYPE.LONG_BLOB: convert_characters, + FIELD_TYPE.STRING: convert_characters, + FIELD_TYPE.VAR_STRING: convert_characters, + FIELD_TYPE.VARCHAR: convert_characters, + #FIELD_TYPE.BLOB: str, + #FIELD_TYPE.STRING: str, + #FIELD_TYPE.VAR_STRING: str, + #FIELD_TYPE.VARCHAR: str + } +conversions = decoders # for MySQLdb compatibility + +try: + # python version > 2.3 + from decimal import Decimal + def convert_decimal(connection, field, data): + return Decimal(data) + decoders[FIELD_TYPE.DECIMAL] = convert_decimal + decoders[FIELD_TYPE.NEWDECIMAL] = convert_decimal + + def escape_decimal(obj): + return unicode(obj) + encoders[Decimal] = escape_decimal + +except ImportError: + pass + ADDED gluon/contrib/pymysql/cursors.py Index: gluon/contrib/pymysql/cursors.py ================================================================== --- /dev/null 
+++ gluon/contrib/pymysql/cursors.py @@ -0,0 +1,251 @@ +# -*- coding: utf-8 -*- +import struct +import re + +try: + import cStringIO as StringIO +except ImportError: + import StringIO + +from err import Warning, Error, InterfaceError, DataError, \ + DatabaseError, OperationalError, IntegrityError, InternalError, \ + NotSupportedError, ProgrammingError + +insert_values = re.compile(r'\svalues\s*(\(.+\))', re.IGNORECASE) + +class Cursor(object): + ''' + This is the object you use to interact with the database. + ''' + def __init__(self, connection): + ''' + Do not create an instance of a Cursor yourself. Call + connections.Connection.cursor(). + ''' + from weakref import proxy + self.connection = proxy(connection) + self.description = None + self.rownumber = 0 + self.rowcount = -1 + self.arraysize = 1 + self._executed = None + self.messages = [] + self.errorhandler = connection.errorhandler + self._has_next = None + self._rows = () + + def __del__(self): + ''' + When this gets GC'd close it. + ''' + self.close() + + def close(self): + ''' + Closing a cursor just exhausts all remaining data. 
+ ''' + if not self.connection: + return + try: + while self.nextset(): + pass + except: + pass + + self.connection = None + + def _get_db(self): + if not self.connection: + self.errorhandler(self, ProgrammingError, "cursor closed") + return self.connection + + def _check_executed(self): + if not self._executed: + self.errorhandler(self, ProgrammingError, "execute() first") + + def setinputsizes(self, *args): + """Does nothing, required by DB API.""" + + def setoutputsizes(self, *args): + """Does nothing, required by DB API.""" + + def nextset(self): + ''' Get the next query set ''' + if self._executed: + self.fetchall() + del self.messages[:] + + if not self._has_next: + return None + connection = self._get_db() + connection.next_result() + self._do_get_result() + return True + + def execute(self, query, args=None): + ''' Execute a query ''' + from sys import exc_info + + conn = self._get_db() + charset = conn.charset + del self.messages[:] + + # TODO: make sure that conn.escape is correct + + if args is not None: + query = query % conn.escape(args) + + if isinstance(query, unicode): + query = query.encode(charset) + + result = 0 + try: + result = self._query(query) + except: + exc, value, tb = exc_info() + del tb + self.messages.append((exc,value)) + self.errorhandler(self, exc, value) + + self._executed = query + return result + + def executemany(self, query, args): + ''' Run several data against one query ''' + del self.messages[:] + conn = self._get_db() + if not args: + return + charset = conn.charset + if isinstance(query, unicode): + query = query.encode(charset) + + self.rowcount = sum([ self.execute(query, arg) for arg in args ]) + return self.rowcount + + + def callproc(self, procname, args=()): + """Execute stored procedure procname with args + + procname -- string, name of procedure to execute on server + + args -- Sequence of parameters to use with procedure + + Returns the original args. 
+ + Compatibility warning: PEP-249 specifies that any modified + parameters must be returned. This is currently impossible + as they are only available by storing them in a server + variable and then retrieved by a query. Since stored + procedures return zero or more result sets, there is no + reliable way to get at OUT or INOUT parameters via callproc. + The server variables are named @_procname_n, where procname + is the parameter above and n is the position of the parameter + (from zero). Once all result sets generated by the procedure + have been fetched, you can issue a SELECT @_procname_0, ... + query using .execute() to get any OUT or INOUT values. + + Compatibility warning: The act of calling a stored procedure + itself creates an empty result set. This appears after any + result sets generated by the procedure. This is non-standard + behavior with respect to the DB-API. Be sure to use nextset() + to advance through all result sets; otherwise you may get + disconnected. + """ + conn = self._get_db() + for index, arg in enumerate(args): + q = "SET @_%s_%d=%s" % (procname, index, conn.escape(arg)) + if isinstance(q, unicode): + q = q.encode(conn.charset) + self._query(q) + self.nextset() + + q = "CALL %s(%s)" % (procname, + ','.join(['@_%s_%d' % (procname, i) + for i in range(len(args))])) + if isinstance(q, unicode): + q = q.encode(conn.charset) + self._query(q) + self._executed = q + + return args + + def fetchone(self): + ''' Fetch the next row ''' + self._check_executed() + if self._rows is None or self.rownumber >= len(self._rows): + return None + result = self._rows[self.rownumber] + self.rownumber += 1 + return result + + def fetchmany(self, size=None): + ''' Fetch several rows ''' + self._check_executed() + end = self.rownumber + (size or self.arraysize) + result = self._rows[self.rownumber:end] + if self._rows is None: + return None + self.rownumber = min(end, len(self._rows)) + return result + + def fetchall(self): + ''' Fetch all the rows ''' + 
self._check_executed() + if self._rows is None: + return None + if self.rownumber: + result = self._rows[self.rownumber:] + else: + result = self._rows + self.rownumber = len(self._rows) + return result + + def scroll(self, value, mode='relative'): + self._check_executed() + if mode == 'relative': + r = self.rownumber + value + elif mode == 'absolute': + r = value + else: + self.errorhandler(self, ProgrammingError, + "unknown scroll mode %s" % mode) + + if r < 0 or r >= len(self._rows): + self.errorhandler(self, IndexError, "out of range") + self.rownumber = r + + def _query(self, q): + conn = self._get_db() + self._last_executed = q + conn.query(q) + self._do_get_result() + return self.rowcount + + def _do_get_result(self): + conn = self._get_db() + self.rowcount = conn._result.affected_rows + + self.rownumber = 0 + self.description = conn._result.description + self.lastrowid = conn._result.insert_id + self._rows = conn._result.rows + self._has_next = conn._result.has_next + conn._result = None + + def __iter__(self): + self._check_executed() + result = self.rownumber and self._rows[self.rownumber:] or self._rows + return iter(result) + + Warning = Warning + Error = Error + InterfaceError = InterfaceError + DatabaseError = DatabaseError + DataError = DataError + OperationalError = OperationalError + IntegrityError = IntegrityError + InternalError = InternalError + ProgrammingError = ProgrammingError + NotSupportedError = NotSupportedError + ADDED gluon/contrib/pymysql/err.py Index: gluon/contrib/pymysql/err.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/err.py @@ -0,0 +1,141 @@ +import struct + + +try: + Exception, Warning +except ImportError: + try: + from exceptions import Exception, Warning + except ImportError: + import sys + e = sys.modules['exceptions'] + Exception = e.Exception + Warning = e.Warning + +from constants import ER + +class MySQLError(Exception): + + """Exception related to operation 
with MySQL.""" + + +class Warning(Warning, MySQLError): + + """Exception raised for important warnings like data truncations + while inserting, etc.""" + +class Error(MySQLError): + + """Exception that is the base class of all other error exceptions + (not Warning).""" + + +class InterfaceError(Error): + + """Exception raised for errors that are related to the database + interface rather than the database itself.""" + + +class DatabaseError(Error): + + """Exception raised for errors that are related to the + database.""" + + +class DataError(DatabaseError): + + """Exception raised for errors that are due to problems with the + processed data like division by zero, numeric value out of range, + etc.""" + + +class OperationalError(DatabaseError): + + """Exception raised for errors that are related to the database's + operation and not necessarily under the control of the programmer, + e.g. an unexpected disconnect occurs, the data source name is not + found, a transaction could not be processed, a memory allocation + error occurred during processing, etc.""" + + +class IntegrityError(DatabaseError): + + """Exception raised when the relational integrity of the database + is affected, e.g. a foreign key check fails, duplicate key, + etc.""" + + +class InternalError(DatabaseError): + + """Exception raised when the database encounters an internal + error, e.g. the cursor is not valid anymore, the transaction is + out of sync, etc.""" + + +class ProgrammingError(DatabaseError): + + """Exception raised for programming errors, e.g. table not found + or already exists, syntax error in the SQL statement, wrong number + of parameters specified, etc.""" + + +class NotSupportedError(DatabaseError): + + """Exception raised in case a method or database API was used + which is not supported by the database, e.g. 
requesting a + .rollback() on a connection that does not support transaction or + has transactions turned off.""" + + +error_map = {} + +def _map_error(exc, *errors): + for error in errors: + error_map[error] = exc + +_map_error(ProgrammingError, ER.DB_CREATE_EXISTS, ER.SYNTAX_ERROR, + ER.PARSE_ERROR, ER.NO_SUCH_TABLE, ER.WRONG_DB_NAME, + ER.WRONG_TABLE_NAME, ER.FIELD_SPECIFIED_TWICE, + ER.INVALID_GROUP_FUNC_USE, ER.UNSUPPORTED_EXTENSION, + ER.TABLE_MUST_HAVE_COLUMNS, ER.CANT_DO_THIS_DURING_AN_TRANSACTION) +_map_error(DataError, ER.WARN_DATA_TRUNCATED, ER.WARN_NULL_TO_NOTNULL, + ER.WARN_DATA_OUT_OF_RANGE, ER.NO_DEFAULT, ER.PRIMARY_CANT_HAVE_NULL, + ER.DATA_TOO_LONG, ER.DATETIME_FUNCTION_OVERFLOW) +_map_error(IntegrityError, ER.DUP_ENTRY, ER.NO_REFERENCED_ROW, + ER.NO_REFERENCED_ROW_2, ER.ROW_IS_REFERENCED, ER.ROW_IS_REFERENCED_2, + ER.CANNOT_ADD_FOREIGN) +_map_error(NotSupportedError, ER.WARNING_NOT_COMPLETE_ROLLBACK, + ER.NOT_SUPPORTED_YET, ER.FEATURE_DISABLED, ER.UNKNOWN_STORAGE_ENGINE) + +del _map_error, ER + + +def _get_error_info(data): + errno = struct.unpack('<h', data[1:3])[0] + if data[3] == "#": + # version 4.1 + sqlstate = data[4:9].decode("utf8") + errorvalue = data[9:].decode("utf8") + return (errno, sqlstate, errorvalue) + else: + # version 4.0 + return (errno, None, data[3:].decode("utf8")) + +def _check_mysql_exception(errinfo): + errno, sqlstate, errorvalue = errinfo + errorclass = error_map.get(errno, None) + if errorclass: + raise errorclass, (errno,errorvalue) + + # couldn't find the right error number + raise InternalError, (errno, errorvalue) + +def raise_mysql_exception(data): + errinfo = _get_error_info(data) + _check_mysql_exception(errinfo) + + + + + + ADDED gluon/contrib/pymysql/tests/__init__.py Index: gluon/contrib/pymysql/tests/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/tests/__init__.py @@ -0,0 +1,7 @@ +from pymysql.tests.test_issues import * +from 
pymysql.tests.test_example import * +from pymysql.tests.test_basic import * + +if __name__ == "__main__": + import unittest + unittest.main() ADDED gluon/contrib/pymysql/tests/base.py Index: gluon/contrib/pymysql/tests/base.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/tests/base.py @@ -0,0 +1,19 @@ +import pymysql +import unittest + +class PyMySQLTestCase(unittest.TestCase): + databases = [ + {"host":"localhost","user":"root", + "passwd":"","db":"test_pymysql", "use_unicode": True}, + {"host":"localhost","user":"root","passwd":"","db":"test_pymysql2"}] + + def setUp(self): + self.connections = [] + + for params in self.databases: + self.connections.append(pymysql.connect(**params)) + + def tearDown(self): + for connection in self.connections: + connection.close() + ADDED gluon/contrib/pymysql/tests/test_basic.py Index: gluon/contrib/pymysql/tests/test_basic.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/tests/test_basic.py @@ -0,0 +1,141 @@ +from pymysql.tests import base +from pymysql import util + +import time +import datetime + +class TestConversion(base.PyMySQLTestCase): + def test_datatypes(self): + """ test every data type """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table test_datatypes (b bit, i int, l bigint, f real, s varchar(32), u varchar(32), bb blob, d date, dt datetime, ts timestamp, td time, t time, st datetime)") + try: + # insert values + v = (True, -3, 123456789012, 5.7, "hello'\" world", u"Espa\xc3\xb1ol", "binary\x00data".encode(conn.charset), datetime.date(1988,2,2), datetime.datetime.now(), datetime.timedelta(5,6), datetime.time(16,32), time.localtime()) + c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", v) + c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes") + r = c.fetchone() + 
self.assertEqual(util.int2byte(1), r[0]) + self.assertEqual(v[1:8], r[1:8]) + # mysql throws away microseconds so we need to check datetimes + # specially. additionally times are turned into timedeltas. + self.assertEqual(datetime.datetime(*v[8].timetuple()[:6]), r[8]) + self.assertEqual(v[9], r[9]) # just timedeltas + self.assertEqual(datetime.timedelta(0, 60 * (v[10].hour * 60 + v[10].minute)), r[10]) + self.assertEqual(datetime.datetime(*v[-1][:6]), r[-1]) + + c.execute("delete from test_datatypes") + + # check nulls + c.execute("insert into test_datatypes (b,i,l,f,s,u,bb,d,dt,td,t,st) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", [None] * 12) + c.execute("select b,i,l,f,s,u,bb,d,dt,td,t,st from test_datatypes") + r = c.fetchone() + self.assertEqual(tuple([None] * 12), r) + + c.execute("delete from test_datatypes") + + # check sequence type + c.execute("insert into test_datatypes (i, l) values (2,4), (6,8), (10,12)") + c.execute("select l from test_datatypes where i in %s order by i", ((2,6),)) + r = c.fetchall() + self.assertEqual(((4,),(8,)), r) + finally: + c.execute("drop table test_datatypes") + + def test_dict(self): + """ test dict escaping """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table test_dict (a integer, b integer, c integer)") + try: + c.execute("insert into test_dict (a,b,c) values (%(a)s, %(b)s, %(c)s)", {"a":1,"b":2,"c":3}) + c.execute("select a,b,c from test_dict") + self.assertEqual((1,2,3), c.fetchone()) + finally: + c.execute("drop table test_dict") + + def test_big_blob(self): + """ test tons of data """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table test_big_blob (b blob)") + try: + data = "pymysql" * 1024 + c.execute("insert into test_big_blob (b) values (%s)", (data,)) + c.execute("select b from test_big_blob") + self.assertEqual(data.encode(conn.charset), c.fetchone()[0]) + finally: + c.execute("drop table test_big_blob") + +class TestCursor(base.PyMySQLTestCase): + # this test 
case does not work quite right yet, however, + # we substitute in None for the erroneous field which is + # compatible with the DB-API 2.0 spec and has not broken + # any unit tests for anything we've tried. + + #def test_description(self): + # """ test description attribute """ + # # result is from MySQLdb module + # r = (('Host', 254, 11, 60, 60, 0, 0), + # ('User', 254, 16, 16, 16, 0, 0), + # ('Password', 254, 41, 41, 41, 0, 0), + # ('Select_priv', 254, 1, 1, 1, 0, 0), + # ('Insert_priv', 254, 1, 1, 1, 0, 0), + # ('Update_priv', 254, 1, 1, 1, 0, 0), + # ('Delete_priv', 254, 1, 1, 1, 0, 0), + # ('Create_priv', 254, 1, 1, 1, 0, 0), + # ('Drop_priv', 254, 1, 1, 1, 0, 0), + # ('Reload_priv', 254, 1, 1, 1, 0, 0), + # ('Shutdown_priv', 254, 1, 1, 1, 0, 0), + # ('Process_priv', 254, 1, 1, 1, 0, 0), + # ('File_priv', 254, 1, 1, 1, 0, 0), + # ('Grant_priv', 254, 1, 1, 1, 0, 0), + # ('References_priv', 254, 1, 1, 1, 0, 0), + # ('Index_priv', 254, 1, 1, 1, 0, 0), + # ('Alter_priv', 254, 1, 1, 1, 0, 0), + # ('Show_db_priv', 254, 1, 1, 1, 0, 0), + # ('Super_priv', 254, 1, 1, 1, 0, 0), + # ('Create_tmp_table_priv', 254, 1, 1, 1, 0, 0), + # ('Lock_tables_priv', 254, 1, 1, 1, 0, 0), + # ('Execute_priv', 254, 1, 1, 1, 0, 0), + # ('Repl_slave_priv', 254, 1, 1, 1, 0, 0), + # ('Repl_client_priv', 254, 1, 1, 1, 0, 0), + # ('Create_view_priv', 254, 1, 1, 1, 0, 0), + # ('Show_view_priv', 254, 1, 1, 1, 0, 0), + # ('Create_routine_priv', 254, 1, 1, 1, 0, 0), + # ('Alter_routine_priv', 254, 1, 1, 1, 0, 0), + # ('Create_user_priv', 254, 1, 1, 1, 0, 0), + # ('Event_priv', 254, 1, 1, 1, 0, 0), + # ('Trigger_priv', 254, 1, 1, 1, 0, 0), + # ('ssl_type', 254, 0, 9, 9, 0, 0), + # ('ssl_cipher', 252, 0, 65535, 65535, 0, 0), + # ('x509_issuer', 252, 0, 65535, 65535, 0, 0), + # ('x509_subject', 252, 0, 65535, 65535, 0, 0), + # ('max_questions', 3, 1, 11, 11, 0, 0), + # ('max_updates', 3, 1, 11, 11, 0, 0), + # ('max_connections', 3, 1, 11, 11, 0, 0), + # ('max_user_connections', 3, 1, 11, 11, 0, 
0)) + # conn = self.connections[0] + # c = conn.cursor() + # c.execute("select * from mysql.user") + # + # self.assertEqual(r, c.description) + + def test_fetch_no_result(self): + """ test a fetchone() with no rows """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table test_nr (b varchar(32))") + try: + data = "pymysql" + c.execute("insert into test_nr (b) values (%s)", (data,)) + self.assertEqual(None, c.fetchone()) + finally: + c.execute("drop table test_nr") + +__all__ = ["TestConversion","TestCursor"] + +if __name__ == "__main__": + import unittest + unittest.main() ADDED gluon/contrib/pymysql/tests/test_example.py Index: gluon/contrib/pymysql/tests/test_example.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/tests/test_example.py @@ -0,0 +1,32 @@ +import pymysql +from pymysql.tests import base + +class TestExample(base.PyMySQLTestCase): + def test_example(self): + conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='', db='mysql') + + + cur = conn.cursor() + + cur.execute("SELECT Host,User FROM user") + + # print cur.description + + # r = cur.fetchall() + # print r + # ...or... 
+ u = False + + for r in cur.fetchall(): + u = u or conn.user in r + + self.assertTrue(u) + + cur.close() + conn.close() + +__all__ = ["TestExample"] + +if __name__ == "__main__": + import unittest + unittest.main() ADDED gluon/contrib/pymysql/tests/test_issues.py Index: gluon/contrib/pymysql/tests/test_issues.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/tests/test_issues.py @@ -0,0 +1,240 @@ +import pymysql +from pymysql.tests import base + +import sys + +try: + import imp + reload = imp.reload +except AttributeError: + pass + +import datetime + +class TestOldIssues(base.PyMySQLTestCase): + def test_issue_3(self): + """ undefined methods datetime_or_None, date_or_None """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table issue3 (d date, t time, dt datetime, ts timestamp)") + try: + c.execute("insert into issue3 (d, t, dt, ts) values (%s,%s,%s,%s)", (None, None, None, None)) + c.execute("select d from issue3") + self.assertEqual(None, c.fetchone()[0]) + c.execute("select t from issue3") + self.assertEqual(None, c.fetchone()[0]) + c.execute("select dt from issue3") + self.assertEqual(None, c.fetchone()[0]) + c.execute("select ts from issue3") + self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime)) + finally: + c.execute("drop table issue3") + + def test_issue_4(self): + """ can't retrieve TIMESTAMP fields """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table issue4 (ts timestamp)") + try: + c.execute("insert into issue4 (ts) values (now())") + c.execute("select ts from issue4") + self.assertTrue(isinstance(c.fetchone()[0], datetime.datetime)) + finally: + c.execute("drop table issue4") + + def test_issue_5(self): + """ query on information_schema.tables fails """ + con = self.connections[0] + cur = con.cursor() + cur.execute("select * from information_schema.tables") + + def test_issue_6(self): + """ exception: TypeError: ord() expected a 
character, but string of length 0 found """ + conn = pymysql.connect(host="localhost",user="root",passwd="",db="mysql") + c = conn.cursor() + c.execute("select * from user") + conn.close() + + def test_issue_8(self): + """ Primary Key and Index error when selecting data """ + conn = self.connections[0] + c = conn.cursor() + c.execute("""CREATE TABLE `test` (`station` int(10) NOT NULL DEFAULT '0', `dh` +datetime NOT NULL DEFAULT '0000-00-00 00:00:00', `echeance` int(1) NOT NULL +DEFAULT '0', `me` double DEFAULT NULL, `mo` double DEFAULT NULL, PRIMARY +KEY (`station`,`dh`,`echeance`)) ENGINE=MyISAM DEFAULT CHARSET=latin1;""") + try: + self.assertEqual(0, c.execute("SELECT * FROM test")) + c.execute("ALTER TABLE `test` ADD INDEX `idx_station` (`station`)") + self.assertEqual(0, c.execute("SELECT * FROM test")) + finally: + c.execute("drop table test") + + def test_issue_9(self): + """ sets DeprecationWarning in Python 2.6 """ + try: + reload(pymysql) + except DeprecationWarning: + self.fail() + + def test_issue_10(self): + """ Allocate a variable to return when the exception handler is permissive """ + conn = self.connections[0] + conn.errorhandler = lambda cursor, errorclass, errorvalue: None + cur = conn.cursor() + cur.execute( "create table t( n int )" ) + cur.execute( "create table t( n int )" ) + + def test_issue_13(self): + """ can't handle large result fields """ + conn = self.connections[0] + cur = conn.cursor() + cur.execute("create table issue13 (t text)") + try: + # ticket says 18k + size = 18*1024 + cur.execute("insert into issue13 (t) values (%s)", ("x" * size,)) + cur.execute("select t from issue13") + # use assert_ so that obscenely huge error messages don't print + r = cur.fetchone()[0] + self.assert_("x" * size == r) + finally: + cur.execute("drop table issue13") + + def test_issue_14(self): + """ typo in converters.py """ + self.assertEqual('1', pymysql.converters.escape_item(1, "utf8")) + self.assertEqual('1', pymysql.converters.escape_item(1L, 
"utf8")) + + self.assertEqual('1', pymysql.converters.escape_object(1)) + self.assertEqual('1', pymysql.converters.escape_object(1L)) + + def test_issue_15(self): + """ query should be expanded before perform character encoding """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table issue15 (t varchar(32))") + try: + c.execute("insert into issue15 (t) values (%s)", (u'\xe4\xf6\xfc')) + c.execute("select t from issue15") + self.assertEqual(u'\xe4\xf6\xfc', c.fetchone()[0]) + finally: + c.execute("drop table issue15") + + def test_issue_16(self): + """ Patch for string and tuple escaping """ + conn = self.connections[0] + c = conn.cursor() + c.execute("create table issue16 (name varchar(32) primary key, email varchar(32))") + try: + c.execute("insert into issue16 (name, email) values ('pete', 'floydophone')") + c.execute("select email from issue16 where name=%s", ("pete",)) + self.assertEqual("floydophone", c.fetchone()[0]) + finally: + c.execute("drop table issue16") + + def test_issue_17(self): + """ could not connect mysql use passwod """ + conn = self.connections[0] + host = self.databases[0]["host"] + db = self.databases[0]["db"] + c = conn.cursor() + # grant access to a table to a user with a password + try: + c.execute("create table issue17 (x varchar(32) primary key)") + c.execute("insert into issue17 (x) values ('hello, world!')") + c.execute("grant all privileges on %s.issue17 to 'issue17user'@'%%' identified by '1234'" % db) + conn.commit() + + conn2 = pymysql.connect(host=host, user="issue17user", passwd="1234", db=db) + c2 = conn2.cursor() + c2.execute("select x from issue17") + self.assertEqual("hello, world!", c2.fetchone()[0]) + finally: + c.execute("drop table issue17") + +def _uni(s, e): + # hack for py3 + if sys.version_info[0] > 2: + return unicode(bytes(s, sys.getdefaultencoding()), e) + else: + return unicode(s, e) + +class TestNewIssues(base.PyMySQLTestCase): + def test_issue_34(self): + try: + 
pymysql.connect(host="localhost", port=1237, user="root") + self.fail() + except pymysql.OperationalError, e: + self.assertEqual(2003, e.args[0]) + except: + self.fail() + + def test_issue_33(self): + conn = pymysql.connect(host="localhost", user="root", db=self.databases[0]["db"], charset="utf8") + c = conn.cursor() + try: + c.execute(_uni("create table hei\xc3\x9fe (name varchar(32))", "utf8")) + c.execute(_uni("insert into hei\xc3\x9fe (name) values ('Pi\xc3\xb1ata')", "utf8")) + c.execute(_uni("select name from hei\xc3\x9fe", "utf8")) + self.assertEqual(_uni("Pi\xc3\xb1ata","utf8"), c.fetchone()[0]) + finally: + c.execute(_uni("drop table hei\xc3\x9fe", "utf8")) + + # Will fail without manual intervention: + #def test_issue_35(self): + # + # conn = self.connections[0] + # c = conn.cursor() + # print "sudo killall -9 mysqld within the next 10 seconds" + # try: + # c.execute("select sleep(10)") + # self.fail() + # except pymysql.OperationalError, e: + # self.assertEqual(2013, e.args[0]) + + def test_issue_36(self): + conn = self.connections[0] + c = conn.cursor() + # kill connections[0] + original_count = c.execute("show processlist") + kill_id = None + for id,user,host,db,command,time,state,info in c.fetchall(): + if info == "show processlist": + kill_id = id + break + # now nuke the connection + conn.kill(kill_id) + # make sure this connection has broken + try: + c.execute("show tables") + self.fail() + except: + pass + # check the process list from the other connection + self.assertEqual(original_count - 1, self.connections[1].cursor().execute("show processlist")) + del self.connections[0] + + def test_issue_37(self): + conn = self.connections[0] + c = conn.cursor() + self.assertEqual(1, c.execute("SELECT @foo")) + self.assertEqual((None,), c.fetchone()) + self.assertEqual(0, c.execute("SET @foo = 'bar'")) + c.execute("set @foo = 'bar'") + + def test_issue_38(self): + conn = self.connections[0] + c = conn.cursor() + datum = "a" * 1024 * 1023 # reduced size for 
most default mysql installs + + try: + c.execute("create table issue38 (id integer, data mediumblob)") + c.execute("insert into issue38 values (1, %s)", datum) + finally: + c.execute("drop table issue38") +__all__ = ["TestOldIssues", "TestNewIssues"] + +if __name__ == "__main__": + import unittest + unittest.main() ADDED gluon/contrib/pymysql/times.py Index: gluon/contrib/pymysql/times.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/times.py @@ -0,0 +1,17 @@ +from time import localtime +from datetime import date, datetime, time, timedelta + +Date = date +Time = time +TimeDelta = timedelta +Timestamp = datetime + +def DateFromTicks(ticks): + return date(*localtime(ticks)[:3]) + +def TimeFromTicks(ticks): + return time(*localtime(ticks)[3:6]) + +def TimestampFromTicks(ticks): + return datetime(*localtime(ticks)[:6]) + ADDED gluon/contrib/pymysql/util.py Index: gluon/contrib/pymysql/util.py ================================================================== --- /dev/null +++ gluon/contrib/pymysql/util.py @@ -0,0 +1,20 @@ +import struct + +def byte2int(b): + if isinstance(b, int): + return b + else: + return struct.unpack("!B", b)[0] + +def int2byte(i): + return struct.pack("!B", i) + +def join_bytes(bs): + if len(bs) == 0: + return "" + else: + rv = bs[0] + for b in bs[1:]: + rv += b + return rv + ADDED gluon/contrib/pyrtf/Constants.py Index: gluon/contrib/pyrtf/Constants.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/Constants.py @@ -0,0 +1,158 @@ +class ViewKind : + """An integer (0-5) that represents the view mode of the document.""" + + NONE = 0 + PageLayout = 1 + Outline = 2 + MasterDocument = 3 + Normal = 4 + OnlineLayout = 5 + + DEFAULT = PageLayout + + def _IsValid( cls, value ) : + return value in [ 0, 1, 2, 3, 4, 5 ] + IsValid = classmethod( _IsValid ) + +class ViewScale : + """Zoom level of the document; the N argument is a value 
representing a percentage (the default is 100).""" + + def _IsValid( cls, value ) : + return value is None or (0 < value < 101) + IsValid = classmethod( _IsValid ) + +class ViewZoomKind : + """An integer (0 to 2) that represents the zoom kind of the document.""" + + NONE = 0 + FullPage = 1 + BestFit = 2 + + def _IsValid( cls, value ) : + return value in [ None, 0, 1, 2 ] + IsValid = classmethod( _IsValid ) + + +class Languages : + NoLanguage = 1024 + Albanian = 1052 + Arabic = 1025 + Bahasa = 1057 + BelgianDutch = 2067 + BelgianFrench = 2060 + BrazilianPortuguese = 1046 + Bulgarian = 1026 + Catalan = 1027 + CroatoSerbianLatin = 1050 + Czech = 1029 + Danish = 1030 + Dutch = 1043 + EnglishAustralian = 3081 + EnglishUK = 2057 + EnglishUS = 1033 + Finnish = 1035 + French = 1036 + FrenchCanadian = 3084 + German = 1031 + Greek = 1032 + Hebrew = 1037 + Hungarian = 1038 + Icelandic = 1039 + Italian = 1040 + Japanese = 1041 + Korean = 1042 + NorwegianBokmal = 1044 + NorwegianNynorsk = 2068 + Polish = 1045 + Portuguese = 2070 + RhaetoRomanic = 1047 + Romanian = 1048 + Russian = 1049 + SerboCroatianCyrillic = 2074 + SimplifiedChinese = 2052 + Slovak = 1051 + SpanishCastilian = 1034 + SpanishMexican = 2058 + Swedish = 1053 + SwissFrench = 4108 + SwissGerman = 2055 + SwissItalian = 2064 + Thai = 1054 + TraditionalChinese = 1028 + Turkish = 1055 + Urdu = 1056 + SesothoSotho = 1072 + Afrikaans = 1078 + Zulu = 1077 + Xhosa = 1076 + Venda = 1075 + Tswana = 1074 + Tsonga = 1073 + FarsiPersian = 1065 + + Codes = [ 1024, + 1052, + 1025, + 1057, + 2067, + 2060, + 1046, + 1026, + 1027, + 1050, + 1029, + 1030, + 1043, + 3081, + 2057, + 1033, + 1035, + 1036, + 3084, + 1031, + 1032, + 1037, + 1038, + 1039, + 1040, + 1041, + 1042, + 1044, + 2068, + 1045, + 2070, + 1047, + 1048, + 1049, + 2074, + 2052, + 1051, + 1034, + 2058, + 1053, + 4108, + 2055, + 2064, + 1054, + 1028, + 1055, + 1056, + 1072, + 1078, + 1077, + 1076, + 1075, + 1074, + 1073, + 1065 ] + + # make it Australian as that is 
what I use most of the time + DEFAULT = EnglishAustralian + + def _IsValid( cls, value ) : + return value in cls.Codes + IsValid = classmethod( _IsValid ) + +if __name__ == '__main__' : + PrintHexTable() + ADDED gluon/contrib/pyrtf/Elements.py Index: gluon/contrib/pyrtf/Elements.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/Elements.py @@ -0,0 +1,757 @@ +from types import IntType, FloatType, LongType, StringTypes +from copy import deepcopy +from binascii import hexlify + +from Constants import * +from Styles import * + +class UnhandledParamError( Exception ) : + def __init__( self, param ) : + Exception.__init__( self, "Don't know what to do with param %s" % param ) + +# red green blue +StandardColours = Colours() +StandardColours.append( Colour( 'Black', 0, 0, 0 ) ) +StandardColours.append( Colour( 'Blue', 0, 0, 255 ) ) +StandardColours.append( Colour( 'Turquoise', 0, 255, 255 ) ) +StandardColours.append( Colour( 'Green', 0, 255, 0 ) ) +StandardColours.append( Colour( 'Pink', 255, 0, 255 ) ) +StandardColours.append( Colour( 'Red', 255, 0, 0 ) ) +StandardColours.append( Colour( 'Yellow', 255, 255, 0 ) ) +StandardColours.append( Colour( 'White', 255, 255, 255 ) ) +StandardColours.append( Colour( 'Blue Dark', 0, 0, 128 ) ) +StandardColours.append( Colour( 'Teal', 0, 128, 128 ) ) +StandardColours.append( Colour( 'Green Dark', 0, 128, 0 ) ) +StandardColours.append( Colour( 'Violet', 128, 0, 128 ) ) +StandardColours.append( Colour( 'Red Dark', 128, 0, 0 ) ) +StandardColours.append( Colour( 'Yellow Dark', 128, 128, 0 ) ) +StandardColours.append( Colour( 'Grey Dark', 128, 128, 128 ) ) +StandardColours.append( Colour( 'Grey', 192, 192, 192 ) ) + +StandardFonts = Fonts() +StandardFonts.append( Font( 'Arial' , 'swiss' , 0, 2, '020b0604020202020204' ) ) +StandardFonts.append( Font( 'Arial Black' , 'swiss' , 0, 2, '020b0a04020102020204' ) ) +StandardFonts.append( Font( 'Arial Narrow' , 'swiss' , 0, 2, 
'020b0506020202030204' ) ) +StandardFonts.append( Font( 'Bitstream Vera Sans Mono', 'modern', 0, 1, '020b0609030804020204' ) ) +StandardFonts.append( Font( 'Bitstream Vera Sans' , 'swiss' , 0, 2, '020b0603030804020204' ) ) +StandardFonts.append( Font( 'Bitstream Vera Serif' , 'roman' , 0, 2, '02060603050605020204' ) ) +StandardFonts.append( Font( 'Book Antiqua' , 'roman' , 0, 2, '02040602050305030304' ) ) +StandardFonts.append( Font( 'Bookman Old Style' , 'roman' , 0, 2, '02050604050505020204' ) ) +StandardFonts.append( Font( 'Castellar' , 'roman' , 0, 2, '020a0402060406010301' ) ) +StandardFonts.append( Font( 'Century Gothic' , 'swiss' , 0, 2, '020b0502020202020204' ) ) +StandardFonts.append( Font( 'Comic Sans MS' , 'script', 0, 2, '030f0702030302020204' ) ) +StandardFonts.append( Font( 'Courier New' , 'modern', 0, 1, '02070309020205020404' ) ) +StandardFonts.append( Font( 'Franklin Gothic Medium' , 'swiss' , 0, 2, '020b0603020102020204' ) ) +StandardFonts.append( Font( 'Garamond' , 'roman' , 0, 2, '02020404030301010803' ) ) +StandardFonts.append( Font( 'Georgia' , 'roman' , 0, 2, '02040502050405020303' ) ) +StandardFonts.append( Font( 'Haettenschweiler' , 'swiss' , 0, 2, '020b0706040902060204' ) ) +StandardFonts.append( Font( 'Impact' , 'swiss' , 0, 2, '020b0806030902050204' ) ) +StandardFonts.append( Font( 'Lucida Console' , 'modern', 0, 1, '020b0609040504020204' ) ) +StandardFonts.append( Font( 'Lucida Sans Unicode' , 'swiss' , 0, 2, '020b0602030504020204' ) ) +StandardFonts.append( Font( 'Microsoft Sans Serif' , 'swiss' , 0, 2, '020b0604020202020204' ) ) +StandardFonts.append( Font( 'Monotype Corsiva' , 'script', 0, 2, '03010101010201010101' ) ) +StandardFonts.append( Font( 'Palatino Linotype' , 'roman' , 0, 2, '02040502050505030304' ) ) +StandardFonts.append( Font( 'Papyrus' , 'script', 0, 2, '03070502060502030205' ) ) +StandardFonts.append( Font( 'Sylfaen' , 'roman' , 0, 2, '010a0502050306030303' ) ) +StandardFonts.append( Font( 'Symbol' , 'roman' , 2, 2, 
'05050102010706020507' ) ) +StandardFonts.append( Font( 'Tahoma' , 'swiss' , 0, 2, '020b0604030504040204' ) ) +StandardFonts.append( Font( 'Times New Roman' , 'roman' , 0, 2, '02020603050405020304' ) ) +StandardFonts.append( Font( 'Trebuchet MS' , 'swiss' , 0, 2, '020b0603020202020204' ) ) +StandardFonts.append( Font( 'Verdana' , 'swiss' , 0, 2, '020b0604030504040204' ) ) + +StandardFonts.Castellar.SetAlternate( StandardFonts.Georgia ) + +""" +Found the following definition at http://www.pbdr.com/vbtips/gen/convtwip.htm + +Twips are screen-independent units used to ensure that the placement and +proportion of screen elements in your screen application are the same on all +display systems. A twip is a unit of screen measurement equal to 1/20 of a +printer's point. The conversion between twips and +inches/centimeters/millimeters is as follows: + +There are approximately 1440 twips to a inch (the length of a screen item +measuring one inch when printed). + +As there are 2.54 centimeters to 1 inch, then there are approximately 567 +twips to a centimeter (the length of a screen item measuring one centimeter +when printed). 
+ +Or in millimeters, as there are 25.4 millimeters to 1 inch, therefore there +are approximately 56.7 twips to a millimeter (the length of a screen item +measuring one millimeter when printed).""" + +# Width default is 12240, Height default is 15840 +StandardPaper = Papers() +StandardPaper.append( Paper( 'LETTER' , 1, 'Letter 8 1/2 x 11 in' , 12240, 15840 ) ) +StandardPaper.append( Paper( 'LETTERSMALL' , 2, 'Letter Small 8 1/2 x 11 in' , 12240, 15840 ) ) +StandardPaper.append( Paper( 'TABLOID' , 3, 'Tabloid 11 x 17 in' , 15840, 24480 ) ) +StandardPaper.append( Paper( 'LEDGER' , 4, 'Ledger 17 x 11 in' , 24480, 15840 ) ) +StandardPaper.append( Paper( 'LEGAL' , 5, 'Legal 8 1/2 x 14 in' , 12240, 20160 ) ) +StandardPaper.append( Paper( 'STATEMENT' , 6, 'Statement 5 1/2 x 8 1/2 in' , 7920, 12240 ) ) +StandardPaper.append( Paper( 'EXECUTIVE' , 7, 'Executive 7 1/4 x 10 1/2 in' , 10440, 15120 ) ) +StandardPaper.append( Paper( 'A3' , 8, 'A3 297 x 420 mm' , 16838, 23811 ) ) +StandardPaper.append( Paper( 'A4' , 9, 'A4 210 x 297 mm' , 11907, 16838 ) ) +StandardPaper.append( Paper( 'A4SMALL' , 10, 'A4 Small 210 x 297 mm' , 11907, 16838 ) ) +StandardPaper.append( Paper( 'A5' , 11, 'A5 148 x 210 mm' , 8391, 11907 ) ) +StandardPaper.append( Paper( 'B4' , 12, 'B4 (JIS) 250 x 354' , 14175, 20072 ) ) +StandardPaper.append( Paper( 'B5' , 13, 'B5 (JIS) 182 x 257 mm' , 10319, 14572 ) ) +StandardPaper.append( Paper( 'FOLIO' , 14, 'Folio 8 1/2 x 13 in' , 12240, 18720 ) ) +StandardPaper.append( Paper( 'QUARTO' , 15, 'Quarto 215 x 275 mm' , 12191, 15593 ) ) +StandardPaper.append( Paper( '10X14' , 16, '10x14 in' , 14400, 20160 ) ) +StandardPaper.append( Paper( '11X17' , 17, '11x17 in' , 15840, 24480 ) ) +StandardPaper.append( Paper( 'NOTE' , 18, 'Note 8 1/2 x 11 in' , 12240, 15840 ) ) +StandardPaper.append( Paper( 'ENV_9' , 19, 'Envelope #9 3 7/8 x 8 7/8' , 5580, 12780 ) ) +StandardPaper.append( Paper( 'ENV_10' , 20, 'Envelope #10 4 1/8 x 9 1/2' , 5940, 13680 ) ) +StandardPaper.append( 
Paper( 'ENV_11' , 21, 'Envelope #11 4 1/2 x 10 3/8' , 6480, 14940 ) ) +StandardPaper.append( Paper( 'ENV_12' , 22, 'Envelope #12 4 3/4 x 11' , 6840, 15840 ) ) +StandardPaper.append( Paper( 'ENV_14' , 23, 'Envelope #14 5 x 11 1/2' , 7200, 16560 ) ) +StandardPaper.append( Paper( 'CSHEET' , 24, 'C size sheet 18 x 24 in' , 29520, 34560 ) ) +StandardPaper.append( Paper( 'DSHEET' , 25, 'D size sheet 22 x 34 in' , 31680, 48960 ) ) +StandardPaper.append( Paper( 'ESHEET' , 26, 'E size sheet 34 x 44 in' , 48960, 63360 ) ) +StandardPaper.append( Paper( 'ENV_DL' , 27, 'Envelope DL 110 x 220mm' , 6237, 12474 ) ) +StandardPaper.append( Paper( 'ENV_C5' , 28, 'Envelope C5 162 x 229 mm' , 9185, 12984 ) ) +StandardPaper.append( Paper( 'ENV_C3' , 29, 'Envelope C3 324 x 458 mm' , 18371, 25969 ) ) +StandardPaper.append( Paper( 'ENV_C4' , 30, 'Envelope C4 229 x 324 mm' , 12984, 18371 ) ) +StandardPaper.append( Paper( 'ENV_C6' , 31, 'Envelope C6 114 x 162 mm' , 6464, 9185 ) ) +StandardPaper.append( Paper( 'ENV_C65' , 32, 'Envelope C65 114 x 229 mm' , 6464, 12984 ) ) +StandardPaper.append( Paper( 'ENV_B4' , 33, 'Envelope B4 250 x 353 mm' , 14175, 20015 ) ) +StandardPaper.append( Paper( 'ENV_B5' , 34, 'Envelope B5 176 x 250 mm' , 9979, 14175 ) ) +StandardPaper.append( Paper( 'ENV_B6' , 35, 'Envelope B6 176 x 125 mm' , 9979, 7088 ) ) +StandardPaper.append( Paper( 'ENV_ITALY' , 36, 'Envelope 110 x 230 mm' , 6237, 13041 ) ) +StandardPaper.append( Paper( 'ENV_MONARCH' , 37, 'Envelope Monarch 3.875 x 7.5 in' , 5580, 10800 ) ) +StandardPaper.append( Paper( 'ENV_PERSONAL' , 38, '6 3/4 Envelope 3 5/8 x 6 1/2 in' , 5220, 9360 ) ) +StandardPaper.append( Paper( 'FANFOLD_US' , 39, 'US Std Fanfold 14 7/8 x 11 in' , 21420, 15840 ) ) +StandardPaper.append( Paper( 'FANFOLD_STD_GERMAN' , 40, 'German Std Fanfold 8 1/2 x 12 in' , 12240, 17280 ) ) +StandardPaper.append( Paper( 'FANFOLD_LGL_GERMAN' , 41, 'German Legal Fanfold 8 1/2 x 13 in' , 12240, 18720 ) ) + +# +# Finally a StyleSheet in which all of this 
stuff is put together +# +class StyleSheet : + def __init__( self, colours=None, fonts=None ) : + + self.Colours = colours or deepcopy( StandardColours ) + self.Fonts = fonts or deepcopy( StandardFonts ) + + self.TextStyles = AttributedList() + self.ParagraphStyles = AttributedList() + +class Section( list ) : + NONE = 1 + COLUMN = 2 + PAGE = 3 + EVEN = 4 + ODD = 5 + BREAK_TYPES = [ NONE, COLUMN, PAGE, EVEN, ODD ] + + def __init__( self, paper=None, margins=None, break_type=None, headery=None, footery=None, landscape=None, first_page_number=None ) : + super( Section, self ).__init__() + + self.Paper = paper or StandardPaper.A4 + self.SetMargins( margins ) + + self.Header = [] + self.Footer = [] + self.FirstHeader = [] + self.FirstFooter = [] + + self.SetBreakType( break_type or self.NONE ) + self.SetHeaderY( headery ) + self.SetFooterY( footery ) + self.SetLandscape( landscape ) + self.SetFirstPageNumber( first_page_number ) + + def TwipsToRightMargin( self ) : + return self.Paper.Width - ( self.Margins.Left + self.Margins.Right ) + + def SetMargins( self, value ) : + self.Margins = value or MarginsPropertySet( top=1000, left=1200, bottom=1000, right=1200 ) + self.Width = self.Paper.Width - ( self.Margins.Left + self.Margins.Right ) + + def SetBreakType( self, value ) : + assert value in self.BREAK_TYPES + self.BreakType = value + return self + + def SetHeaderY( self, value ) : + self.HeaderY = value + return self + + def SetFooterY( self, value ) : + self.FooterY = value + return self + + def SetLandscape( self, value ) : + self.Landscape = False + if value : self.Landscape = True + return self + + def SetFirstPageNumber( self, value ) : + self.FirstPageNumber = value + return self + +def MakeDefaultStyleSheet( ) : + result = StyleSheet() + + NormalText = TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) ) + + ps = ParagraphStyle( 'Normal', + NormalText.Copy(), + ParagraphPropertySet( space_before = 60, + space_after = 60 ) ) + result.ParagraphStyles.append( ps 
) + + ps = ParagraphStyle( 'Normal Short', + NormalText.Copy() ) + result.ParagraphStyles.append( ps ) + + NormalText.TextPropertySet.SetSize( 32 ) + ps = ParagraphStyle( 'Heading 1', + NormalText.Copy(), + ParagraphPropertySet( space_before = 240, + space_after = 60 ) ) + result.ParagraphStyles.append( ps ) + + NormalText.TextPropertySet.SetSize( 24 ).SetBold( True ) + ps = ParagraphStyle( 'Heading 2', + NormalText.Copy(), + ParagraphPropertySet( space_before = 240, + space_after = 60 ) ) + result.ParagraphStyles.append( ps ) + + # Add some more in that are based on the normal template but that + # have some indenting set that makes them suitable for doing numbered + normal_numbered = result.ParagraphStyles.Normal.Copy() + normal_numbered.SetName( 'Normal Numbered' ) + normal_numbered.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 ) + normal_numbered.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH ) + + result.ParagraphStyles.append( normal_numbered ) + + normal_numbered2 = result.ParagraphStyles.Normal.Copy() + normal_numbered2.SetName( 'Normal Numbered 2' ) + normal_numbered2.ParagraphPropertySet.SetFirstLineIndent( TabPropertySet.DEFAULT_WIDTH * -1 ) + normal_numbered2.ParagraphPropertySet.SetLeftIndent ( TabPropertySet.DEFAULT_WIDTH * 2 ) + + result.ParagraphStyles.append( normal_numbered2 ) + + ## LIST STYLES + for idx, indent in [ (1, TabPS.DEFAULT_WIDTH ), + (2, TabPS.DEFAULT_WIDTH * 2), + (3, TabPS.DEFAULT_WIDTH * 3) ] : + indent = TabPropertySet.DEFAULT_WIDTH + ps = ParagraphStyle( 'List %s' % idx, + TextStyle( TextPropertySet( result.Fonts.Arial, 22 ) ), + ParagraphPropertySet( space_before = 60, + space_after = 60, + first_line_indent = -indent, + left_indent = indent) ) + result.ParagraphStyles.append( ps ) + + return result + +class TAB : pass +class LINE : pass + +class RawCode : + def __init__( self, data ) : + self.Data = data + +PAGE_NUMBER = RawCode( r'{\field{\fldinst page}}' ) +TOTAL_PAGES = 
RawCode( r'{\field{\fldinst numpages}}' ) +SECTION_PAGES = RawCode( r'{\field{\fldinst sectionpages}}' ) +ARIAL_BULLET = RawCode( r'{\f2\'95}' ) + +def _get_jpg_dimensions( fin ): + """ + converted from: http://dev.w3.org/cvsweb/Amaya/libjpeg/rdjpgcom.c?rev=1.2 + """ + + M_SOF0 = chr( 0xC0 ) # /* Start Of Frame N */ + M_SOF1 = chr( 0xC1 ) # /* N indicates which compression process */ + M_SOF2 = chr( 0xC2 ) # /* Only SOF0-SOF2 are now in common use */ + M_SOF3 = chr( 0xC3 ) # + M_SOF5 = chr( 0xC5 ) # /* NB: codes C4 and CC are NOT SOF markers */ + M_SOF6 = chr( 0xC6 ) # + M_SOF7 = chr( 0xC7 ) # + M_SOF9 = chr( 0xC9 ) # + M_SOF10 = chr( 0xCA ) # + M_SOF11 = chr( 0xCB ) # + M_SOF13 = chr( 0xCD ) # + M_SOF14 = chr( 0xCE ) # + M_SOF15 = chr( 0xCF ) # + M_SOI = chr( 0xD8 ) # /* Start Of Image (beginning of datastream) */ + M_EOI = chr( 0xD9 ) # /* End Of Image (end of datastream) */ + + M_FF = chr( 0xFF ) + + MARKERS = [ M_SOF0, M_SOF1, M_SOF2, M_SOF3, + M_SOF5, M_SOF6, M_SOF7, M_SOF9, + M_SOF10,M_SOF11, M_SOF13, M_SOF14, + M_SOF15 ] + + def get_length() : + b1 = fin.read( 1 ) + b2 = fin.read( 1 ) + return (ord(b1) << 8) + ord(b2) + + def next_marker() : + # markers come straight after an 0xFF so skip everything + # up to the first 0xFF that we find + while fin.read(1) != M_FF : + pass + + # there can be more than one 0xFF as they can be used + # for padding so we are now looking for the first byte + # that isn't an 0xFF, this will be the marker + while True : + result = fin.read(1) + if result != M_FF : + return result + + raise Exception( 'Invalid JPEG' ) + + # BODY OF THE FUNCTION + if not ((fin.read(1) == M_FF) and (fin.read(1) == M_SOI)) : + raise Exception( 'Invalid Jpeg' ) + + while True : + marker = next_marker() + + # the marker is always followed by two bytes representing the length of the data field + length = get_length () + if length < 2 : raise Exception( "Erroneous JPEG marker length" ) + + # if it is a compression process marker then it will contain the 
dimension of the image + if marker in MARKERS : + # the next byte is the data precision, just skip it + fin.read(1) + + # bingo + image_height = get_length() + image_width = get_length() + return image_width, image_height + + # just skip whatever data it contains + fin.read( length - 2 ) + + raise Exception( 'Invalid JPEG, end of stream reached' ) + + +_PNG_HEADER = '\x89\x50\x4e' +def _get_png_dimensions( data ) : + if data[0:3] != _PNG_HEADER : + raise Exception( 'Invalid PNG image' ) + + width = (ord(data[18]) * 256) + (ord(data[19])) + height = (ord(data[22]) * 256) + (ord(data[23])) + return width, height + +def _get_emf_dimensions( fin ): + import struct + def get_DWORD(): + return struct.unpack("<L",fin.read(4))[0] + def get_LONG(): + return struct.unpack("<l",fin.read(4))[0] + def get_WORD(): + return struct.unpack("<H",fin.read(2))[0] + class Empty: + pass + header = Empty() + header.RecordType = get_DWORD() # Record type + header.RecordSize = get_DWORD() # Size of the record in bytes + header.BoundsLeft = get_LONG() # Left inclusive bounds + header.BoundsTop = get_LONG() # Top inclusive bounds + header.BoundsRight = get_LONG() # Right inclusive bounds + header.BoundsBottom = get_LONG() # Bottom inclusive bounds + header.FrameLeft = get_LONG() # Left side of inclusive picture frame + header.FrameTop = get_LONG() # Top side of inclusive picture frame + header.FrameRight = get_LONG() # Right side of inclusive picture frame + header.FrameBottom = get_LONG() # Bottom side of inclusive picture frame + header.Signature = get_DWORD() # Signature ID (always 0x464D4520) + header.Version = get_DWORD() # Version of the metafile + header.Size = get_DWORD() # Size of the metafile in bytes + header.NumOfRecords = get_DWORD() # Number of records in the metafile + header.NumOfHandles = get_WORD() # Number of handles in the handle table + header.Reserved = get_WORD() # Not used (always 0) + header.SizeOfDescrip = get_DWORD() # Size of description string in WORDs + 
header.OffsOfDescrip = get_DWORD() # Offset of description string in metafile + header.NumPalEntries = get_DWORD() # Number of color palette entries + header.WidthDevPixels = get_LONG() # Width of reference device in pixels + header.HeightDevPixels = get_LONG() # Height of reference device in pixels + header.WidthDevMM = get_LONG() # Width of reference device in millimeters + header.HeightDevMM = get_LONG() # Height of reference device in millimeters + + if 0: + klist = header.__dict__.keys() + klist.sort() + for k in klist: + print "%20s:%s" % (k,header.__dict__[k]) + + dw = header.FrameRight-header.FrameLeft + dh = header.FrameBottom-header.FrameTop + + # convert from 0.01mm units to 1/72in units + return int(dw * 72.0/2540.0), int(dh * 72.0/2540.0) + +class Image( RawCode ) : + + # Need to add in the width and height in twips as it crashes + # word xp with these values. Still working out the most + # efficient way of getting these values. + # \picscalex100\picscaley100\piccropl0\piccropr0\piccropt0\piccropb0 + # picwgoal900\pichgoal281 + + PNG_LIB = 'pngblip' + JPG_LIB = 'jpegblip' + EMF_LIB = 'emfblip' + PICT_TYPES = { 'png' : PNG_LIB, + 'jpg' : JPG_LIB, + 'emf' : EMF_LIB} + + def __init__( self, infile, **kwargs ) : + + if hasattr( infile, 'read' ): + fin = infile + if 'datatype' not in kwargs.keys(): + msg = "If passing in a file object, you must also specify type='xxx' where xxx is one of %s" % self.PICT_TYPES.keys() + raise ValueError,msg + file_name = kwargs.pop('datatype') + else: + fin = file( infile, 'rb' ) + file_name = infile + + pict_type = self.PICT_TYPES[ file_name[ -3 : ].lower() ] + if pict_type == self.PNG_LIB : + width, height = _get_png_dimensions( fin.read( 100 ) ) + elif pict_type == self.JPG_LIB : + width, height = _get_jpg_dimensions( fin ) + elif pict_type == self.EMF_LIB : + width, height = _get_emf_dimensions( fin ) + + + # if user specified height or width but not both, then + # scale unspecified dimension to maintain aspect ratio + + 
if ('width' in kwargs) and ('height' not in kwargs): + height = int(height * float(kwargs['width'])/width) + elif ('height' in kwargs) and ('width' not in kwargs): + width = int(width * float(kwargs['height'])/height) + + width = kwargs.pop('width',width) + height = kwargs.pop('height', height) + + codes = [ pict_type, + 'picwgoal%s' % (width * 20), + 'pichgoal%s' % (height * 20) ] + # let user specify global scaling + scale = kwargs.pop('scale',100) + + for kwarg, code, default in [ ( 'scale_x', 'scalex', scale ), + ( 'scale_y', 'scaley', scale ), + ( 'crop_left', 'cropl', '0' ), + ( 'crop_right', 'cropr', '0' ), + ( 'crop_top', 'cropt', '0' ), + ( 'crop_bottom', 'cropb', '0' ) ] : + codes.append( 'pic%s%s' % ( code, kwargs.pop( kwarg, default ) ) ) + + + # reset back to the start of the file to get all of it and now + # turn it into hex. + fin.seek( 0, 0 ) + image = hexlify( fin.read() ) + fin.close() + data = [] + for i in range( 0, len( image ), 128 ) : + data.append( image[ i : i + 128 ] ) + + data = r'{\pict{\%s}%s}' % ( '\\'.join( codes ), '\n'.join( data ) ) + RawCode.__init__( self, data ) + + def ToRawCode( self, var_name ) : + return '%s = RawCode( """%s""" )' % ( var_name, self.Data ) + +class Text : + def __init__( self, *params ) : + self.Data = None + self.Style = None + self.Properties = None + self.Shading = None + + for param in params : + if isinstance( param, TextStyle ) : self.Style = param + elif isinstance( param, TextPS ) : self.Properties = param + elif isinstance( param, ShadingPS ) : self.Shading = param + else : + # otherwise let the rendering custom handler sort it out itself + self.Data = param + + def SetData( self, value ) : + self.Data = value + +class Inline( list ) : + def __init__( self, *params ) : + super( Inline, self ).__init__() + + self.Style = None + self.Properties = None + self.Shading = None + + self._append = super( Inline, self ).append + + for param in params : + if isinstance( param, TextStyle ) : self.Style = param 
+ elif isinstance( param, TextPS ) : self.Properties = param + elif isinstance( param, ShadingPS ) : self.Shading = param + else : + # otherwise we add to it to our list of elements and let + # the rendering custom handler sort it out itself. + self.append( param ) + + def append( self, *params ) : + # filter out any that are explicitly None + [ self._append( param ) for param in params if param is not None ] + +class Paragraph( list ) : + def __init__( self, *params ) : + super( Paragraph, self ).__init__() + + self.Style = None + self.Properties = None + self.Frame = None + self.Shading = None + + self._append = super( Paragraph, self ).append + + for param in params : + if isinstance( param, ParagraphStyle ) : self.Style = param + elif isinstance( param, ParagraphPS ) : self.Properties = param + elif isinstance( param, FramePS ) : self.Frame = param + elif isinstance( param, ShadingPS ) : self.Shading = param + else : + # otherwise we add to it to our list of elements and let + # the rendering custom handler sort it out itself. 
+ self.append( param ) + + def append( self, *params ) : + # filter out any that are explicitly None + [ self._append( param ) for param in params if param is not None ] + + def insert( self, index, value ) : + if value is not None : + super( Paragraph, self ).insert( index, value ) + +class Table : + LEFT = 1 + RIGHT = 2 + CENTER = 3 + ALIGNMENT = [ LEFT, RIGHT, CENTER ] + + NO_WRAPPING = 1 + WRAP_AROUND = 2 + WRAPPING = [ NO_WRAPPING, WRAP_AROUND ] + + # trrh height of row, 0 means automatically adjust, use negative for an absolute + # trgaph is half of the space between a table cell in width, reduce this one + # to get a really tiny column + + def __init__( self, *column_widths, **kwargs ) : + + self.Rows = [] + + self.SetAlignment ( kwargs.pop( 'alignment', self.LEFT ) ) + self.SetLeftOffset ( kwargs.pop( 'left_offset', None ) ) + self.SetGapBetweenCells( kwargs.pop( 'gap_between_cells', None ) ) + self.SetColumnWidths ( *column_widths ) + + assert not kwargs, 'invalid keyword args %s' % kwargs + + def SetAlignment( self, value ) : + assert value is None or value in self.ALIGNMENT + self.Alignment = value or self.LEFT + return self + + def SetLeftOffset( self, value ) : + self.LeftOffset = value + return self + + def SetGapBetweenCells( self, value ) : + self.GapBetweenCells = value + return self + + def SetColumnWidths( self, *column_widths ) : + self.ColumnWidths = column_widths + self.ColumnCount = len( column_widths ) + return self + + def AddRow( self, *cells ) : + height = None + if isinstance( cells[ 0 ], (IntType, FloatType, LongType) ): + height = int( cells[ 0 ] ) + cells = cells[ 1 : ] + + # make sure all of the spans add up to the number of columns + # otherwise the table will get corrupted + if self.ColumnCount != sum( [ cell.Span for cell in cells ] ) : + raise Exception( 'ColumnCount != the total of this row\'s cell.Spans.' 
) + + self.Rows.append( ( height, cells ) ) + + append = AddRow + +class Cell( list ) : + + """ + \clvertalt Text is top-aligned in cell (the default). + \clvertalc Text is centered vertically in cell. + \clvertalb Text is bottom-aligned in cell. + \cltxlrtb Vertical text aligned left (direction bottom up). + \cltxtbrl Vertical text aligned right (direction top down). + """ + + ALIGN_TOP = 1 + ALIGN_CENTER = 2 + ALIGN_BOTTOM = 3 + + FLOW_LR_TB = 1 + FLOW_RL_TB = 2 + FLOW_LR_BT = 3 + FLOW_VERTICAL_LR_TB = 4 + FLOW_VERTICAL_TB_RL = 5 + + def __init__( self, *params, **kwargs ) : + super( Cell, self ).__init__() + + self.SetFrame ( None ) + self.SetMargins( None ) + + self.SetAlignment( kwargs.get( 'alignment', self.ALIGN_TOP ) ) + self.SetFlow ( kwargs.get( 'flow' , self.FLOW_LR_TB ) ) + self.SetSpan ( kwargs.get( 'span', 1 ) ) + + self.SetStartVerticalMerge( kwargs.get( 'start_vertical_merge', False ) ) + self.SetVerticalMerge ( kwargs.get( 'vertical_merge', False ) ) + + self._append = super( Cell, self ).append + + for param in params : + if isinstance( param, StringType ) : self.append ( param ) + elif isinstance( param, Paragraph ) : self.append ( param ) + elif isinstance( param, FramePS ) : self.SetFrame ( param ) + elif isinstance( param, MarginsPS ) : self.SetMargins( param ) + + def SetFrame( self, value ) : + self.Frame = value + return self + + def SetMargins( self, value ) : + self.Margins = value + return self + + def SetAlignment( self, value ) : + assert value in [ self.ALIGN_TOP, self.ALIGN_CENTER, self.ALIGN_BOTTOM ] #, self.ALIGN_TEXT_TOP_DOWN, self.ALIGN_TEXT_BOTTOM_UP ] + self.Alignment = value + + def SetFlow( self, value ) : + assert value in [ self.FLOW_LR_TB, self.FLOW_RL_TB, self.FLOW_LR_BT, self.FLOW_VERTICAL_LR_TB, self.FLOW_VERTICAL_TB_RL ] + self.Flow = value + + def SetSpan( self, value ) : + # must be a positive integer + self.Span = int( max( value, 1 ) ) + return self + + def SetStartVerticalMerge( self, value ) : + 
self.StartVerticalMerge = False + if value : + self.StartVerticalMerge = True + return self + + def SetVerticalMerge( self, value ) : + self.VerticalMerge = False + if value : + self.VerticalMerge = True + return self + + def append( self, *params ) : + [ self._append( param ) for param in params ] + +class Document : + def __init__( self, style_sheet=None, default_language=None, view_kind=None, view_zoom_kind=None, view_scale=None ) : + self.StyleSheet = style_sheet or MakeDefaultStyleSheet() + self.Sections = AttributedList( Section ) + + self.SetTitle( None ) + + self.DefaultLanguage = default_language or Languages.DEFAULT + self.ViewKind = view_kind or ViewKind.DEFAULT + self.ViewZoomKind = view_zoom_kind + self.ViewScale = view_scale + + def NewSection( self, *params, **kwargs ) : + result = Section( *params, **kwargs ) + self.Sections.append( result ) + return result + + def SetTitle( self, value ) : + self.Title = value + return self + + def Copy( self ) : + result = Document( style_sheet = self.StyleSheet.Copy(), + default_language = self.DefaultLanguage, + view_kind = self.ViewKind, + view_zoom_kind = self.ViewZoomKind, + view_scale = self.ViewScale ) + result.SetTitle( self.Title ) + result.Sections = self.Sections.Copy() + + return result + +def TEXT( *params, **kwargs ) : + text_props = TextPropertySet() + text_props.SetFont ( kwargs.get( 'font', None ) ) + text_props.SetSize ( kwargs.get( 'size', None ) ) + text_props.SetBold ( kwargs.get( 'bold', False ) ) + text_props.SetItalic ( kwargs.get( 'italic', False ) ) + text_props.SetUnderline( kwargs.get( 'underline', False ) ) + text_props.SetColour ( kwargs.get( 'colour', None ) ) + + if len( params ) == 1 : + return Text( params[ 0 ], text_props ) + + result = Inline( text_props ) + apply( result.append, params ) + return result + +def B( *params ) : + text_props = TextPropertySet( bold=True ) + + if len( params ) == 1 : + return Text( params[ 0 ], text_props ) + + result = Inline( text_props ) + apply( 
result.append, params ) + return result + +def I( *params ) : + text_props = TextPropertySet( italic=True ) + + if len( params ) == 1 : + return Text( params[ 0 ], text_props ) + + result = Inline( text_props ) + apply( result.append, params ) + return result + +def U( *params ) : + text_props = TextPropertySet( underline=True ) + + if len( params ) == 1 : + return Text( params[ 0 ], text_props ) + + result = Inline( text_props ) + apply( result.append, params ) + return result + ADDED gluon/contrib/pyrtf/PropertySets.py Index: gluon/contrib/pyrtf/PropertySets.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/PropertySets.py @@ -0,0 +1,489 @@ +""" +PropertySets group common attributes together, each property set is used to control a specific part of the rendering. + +PropertySets can be used in different elements of the document. + +For example the FramePropertySet is used in paragraphs, tables, cells, etc. + +The TextPropertySet can be used for text or in a Paragraph Style. 
+ +""" + +from types import StringType +from copy import deepcopy + + +# +# We need some basic Type like fonts, colours and paper definitions +# +def MakeAttributeName( value ) : + assert value and type( value ) is StringType + value = value.replace( ' ', '' ) + return value + +class AttributedList( list ) : + def __init__( self, accepted_type=None ) : + super( AttributedList, self ).__init__() + self.AcceptedType = accepted_type + self._append = super( AttributedList, self ).append + + def append( self, *values ) : + for value in values : + if self.AcceptedType : assert isinstance( value, self.AcceptedType ) + + self._append( value ) + + name = getattr( value, 'Name', None ) + if name : + name = MakeAttributeName( value.Name ) + setattr( self, name, value ) + + def __deepcopy__( self, memo ) : + result = self.__class__() + result.append( *self[:] ) + return result + +class Colour : + def __init__( self, name, red, green, blue ) : + self.SetName ( name ) + self.SetRed ( red ) + self.SetGreen( green ) + self.SetBlue ( blue ) + + def SetName( self, value ) : + self.Name = value + return self + + def SetRed( self, value ) : + self.Red = value + return self + + def SetGreen( self, value ) : + self.Green = value + return self + + def SetBlue( self, value ) : + self.Blue = value + return self + +class Colours( AttributedList ) : + def __init__( self ) : + super( Colours, self ).__init__( Colour ) + +class Font : + def __init__( self, name, family, character_set = 0, pitch = None, panose = None, alternate = None ) : + self.SetName ( name ) + self.SetFamily ( family ) + self.SetCharacterSet( character_set ) + self.SetPitch ( pitch ) + self.SetPanose ( panose ) + self.SetAlternate ( alternate ) + + def SetName( self, value ) : + self.Name = value + return self + + def SetFamily( self, value ) : + self.Family = value + return self + + def SetCharacterSet( self, value ) : + self.CharacterSet = value + return self + + def SetPitch( self, value ) : + self.Pitch = value + return 
self + + def SetPanose( self, value ) : + self.Panose = value + return self + + def SetAlternate( self, value ) : + self.Alternate = value + return self + +class Fonts( AttributedList ) : + def __init__( self ) : + super( Fonts, self ).__init__( Font ) + +class Paper : + def __init__( self, name, code, description, width, height ) : + self.SetName ( name ) + self.SetCode ( code ) + self.SetDescription( description ) + self.SetWidth ( width ) + self.SetHeight ( height ) + + def SetName( self, value ) : + self.Name = value + return self + + def SetCode( self, value ) : + self.Code = value + return self + + def SetDescription( self, value ) : + self.Description = value + return self + + def SetWidth( self, value ) : + self.Width = value + return self + + def SetHeight( self, value ) : + self.Height = value + return self + +class Papers( AttributedList ) : + def __init__( self ) : + super( Papers, self ).__init__( Paper ) + +# +# Then we have property sets which represent different aspects of Styles +# +class MarginsPropertySet : + def __init__( self, top=None, left=None, bottom=None, right=None ) : + self.SetTop ( top ) + self.SetLeft ( left ) + self.SetBottom( bottom ) + self.SetRight ( right ) + + def SetTop( self, value ) : + self.Top = value + return self + + def SetLeft( self, value ) : + self.Left = value + return self + + def SetBottom( self, value ) : + self.Bottom = value + return self + + def SetRight( self, value ) : + self.Right = value + return self + +class ShadingPropertySet : + HORIZONTAL = 1 + VERTICAL = 2 + FORWARD_DIAGONAL = 3 + BACKWARD_DIAGONAL = 4 + VERTICAL_CROSS = 5 + DIAGONAL_CROSS = 6 + DARK_HORIZONTAL = 7 + DARK_VERTICAL = 8 + DARK_FORWARD_DIAGONAL = 9 + DARK_BACKWARD_DIAGONAL = 10 + DARK_VERTICAL_CROSS = 11 + DARK_DIAGONAL_CROSS = 12 + PATTERNS = [ HORIZONTAL, + VERTICAL, + FORWARD_DIAGONAL, + BACKWARD_DIAGONAL, + VERTICAL_CROSS, + DIAGONAL_CROSS, + DARK_HORIZONTAL, + DARK_VERTICAL, + DARK_FORWARD_DIAGONAL, + DARK_BACKWARD_DIAGONAL, + 
DARK_VERTICAL_CROSS, + DARK_DIAGONAL_CROSS ] + + def __init__( self, shading=None, pattern=None, foreground=None, background=None ) : + self.SetShading ( shading ) + self.SetForeground( foreground ) + self.SetBackground( background ) + self.SetPattern ( pattern ) + + def __deepcopy__( self, memo ) : + return ShadingPropertySet( self.Shading, + self.Foreground, + self.Background, + self.Pattern ) + + def SetShading( self, value ) : + self.Shading = value + return self + + def SetPattern( self, value ) : + assert value is None or value in self.PATTERNS + self.Pattern = value + return self + + def SetForeground( self, value ) : + assert not value or isinstance( value, Colour ) + self.Foreground = value + return self + + def SetBackground( self, value ) : + assert not value or isinstance( value, Colour ) + self.Background = value + return self + + +class BorderPropertySet : + SINGLE = 1 + DOUBLE = 2 + SHADOWED = 3 + DOUBLED = 4 + DOTTED = 5 + DASHED = 6 + HAIRLINE = 7 + STYLES = [ SINGLE, DOUBLE, SHADOWED, DOUBLED, DOTTED, DASHED, HAIRLINE ] + + def __init__( self, width=None, style=None, colour=None, spacing=None ) : + self.SetWidth ( width ) + self.SetStyle ( style or self.SINGLE ) + self.SetColour ( colour ) + self.SetSpacing( spacing ) + + def SetWidth( self, value ) : + self.Width = value + return self + + def SetStyle( self, value ) : + assert value is None or value in self.STYLES + self.Style = value + return self + + def SetColour( self, value ) : + assert value is None or isinstance( value, Colour ) + self.Colour = value + return self + + def SetSpacing( self, value ) : + self.Spacing = value + return self + +class FramePropertySet : + def __init__( self, top=None, left=None, bottom=None, right=None ) : + self.SetTop ( top ) + self.SetLeft ( left ) + self.SetBottom( bottom ) + self.SetRight ( right ) + + def SetTop( self, value ) : + assert value is None or isinstance( value, BorderPropertySet ) + self.Top = value + return self + + def SetLeft( self, value ) : 
+ assert value is None or isinstance( value, BorderPropertySet ) + self.Left = value + return self + + def SetBottom( self, value ) : + assert value is None or isinstance( value, BorderPropertySet ) + self.Bottom = value + return self + + def SetRight( self, value ) : + assert value is None or isinstance( value, BorderPropertySet ) + self.Right = value + return self + +class TabPropertySet : + DEFAULT_WIDTH = 720 + + LEFT = 1 + RIGHT = 2 + CENTER = 3 + DECIMAL = 4 + ALIGNMENT = [ LEFT, RIGHT, CENTER, DECIMAL ] + + DOTS = 1 + HYPHENS = 2 + UNDERLINE = 3 + THICK_LINE = 4 + EQUAL_SIGN = 5 + LEADERS = [ DOTS, HYPHENS, UNDERLINE, THICK_LINE, EQUAL_SIGN ] + + def __init__( self, width=None, alignment=None, leader=None ) : + self.SetWidth ( width ) + self.SetAlignment( alignment or self.LEFT ) + self.SetLeader ( leader ) + + def SetWidth( self, value ) : + self.Width = value + return self + + def SetAlignment( self, value ) : + assert value in self.ALIGNMENT + self.Alignment = value + return self + + def SetLeader( self, value ) : + assert not value or value in self.LEADERS + self.Leader = value + return self + +class TextPropertySet : + + def __init__( self, font=None, size=None, bold=None, italic=None, underline=None, colour=None, frame=None, expansion=None ) : + self.SetFont ( font ) + self.SetSize ( size ) + + self.SetBold ( bold or False ) + self.SetItalic ( italic or False ) + self.SetUnderline ( underline or False ) + + self.SetColour( colour ) + self.SetFrame ( frame ) + + self.SetStrikeThrough ( False ) + self.SetDottedUnderline( False ) + self.SetDoubleUnderline( False ) + self.SetWordUnderline ( False ) + self.SetExpansion ( expansion ) + + def Copy( self ) : + return deepcopy( self ) + + def __deepcopy__( self, memo ) : + # the font must remain a reference to the same font that we are looking at + # so we want to stop the recursiveness at this point and return an object + # with the right references. 
+ result = TextPropertySet( self.Font, + self.Size, + self.Bold, + self.Italic, + self.Underline, + self.Colour, + deepcopy( self.Frame, memo ) ) + result.SetStrikeThrough( self.StrikeThrough ) + return result + + def SetFont( self, value ) : + assert not value or isinstance( value, Font ) + self.Font = value + return self + + def SetSize( self, value ) : + self.Size = value + return self + + def SetBold( self, value ) : + self.Bold = False + if value : self.Bold = True + return self + + def SetItalic( self, value ) : + self.Italic = False + if value : self.Italic = True + return self + + def SetUnderline( self, value ) : + self.Underline = False + if value : self.Underline = True + return self + + def SetColour( self, value ) : + assert value is None or isinstance( value, Colour ) + self.Colour = value + return self + + def SetFrame( self, value ) : + assert value is None or isinstance( value, BorderPropertySet ) + self.Frame = value + return self + + def SetStrikeThrough( self, value ) : + self.StrikeThrough = False + if value : self.StrikeThrough = True + return self + + def SetDottedUnderline( self, value ) : + self.DottedUnderline = False + if value : self.DottedUnderline = True + return self + + def SetDoubleUnderline( self, value ) : + self.DoubleUnderline = False + if value : self.DoubleUnderline = True + return self + + def SetWordUnderline( self, value ) : + self.WordUnderline = False + if value : self.WordUnderline = True + return self + + def SetExpansion( self, value ) : + self.Expansion = value + return self + +class ParagraphPropertySet : + LEFT = 1 + RIGHT = 2 + CENTER = 3 + JUSTIFY = 4 + DISTRIBUTE = 5 + ALIGNMENT = [ LEFT, RIGHT, CENTER, JUSTIFY, DISTRIBUTE ] + + def __init__( self, alignment=None, space_before=None, space_after=None, tabs=None, first_line_indent=None, left_indent=None, right_indent=None, page_break_before=None ) : + self.SetAlignment ( alignment or self.LEFT ) + self.SetSpaceBefore( space_before ) + self.SetSpaceAfter ( 
space_after ) + + self.Tabs = [] + if tabs : apply( self.SetTabs, tabs ) + + self.SetFirstLineIndent( first_line_indent or None ) + self.SetLeftIndent ( left_indent or None ) + self.SetRightIndent ( right_indent or None ) + + self.SetPageBreakBefore( page_break_before ) + + self.SetSpaceBetweenLines( None ) + + def Copy( self ) : + return deepcopy( self ) + + def SetAlignment( self, value ) : + assert not value or value in self.ALIGNMENT + self.Alignment = value or self.LEFT + return self + + def SetSpaceBefore( self, value ) : + self.SpaceBefore = value + return self + + def SetSpaceAfter( self, value ) : + self.SpaceAfter = value + return self + + def SetTabs( self, *params ) : + self.Tabs = params + return self + + def SetFirstLineIndent( self, value ) : + self.FirstLineIndent = value + return self + + def SetLeftIndent( self, value ) : + self.LeftIndent = value + return self + + def SetRightIndent( self, value ) : + self.RightIndent = value + return self + + def SetSpaceBetweenLines( self, value ) : + self.SpaceBetweenLines = value + return self + + def SetPageBreakBefore( self, value ) : + self.PageBreakBefore = False + if value : self.PageBreakBefore = True + return self + +# Some short cuts to make the code a bit easier to read +MarginsPS = MarginsPropertySet +ShadingPS = ShadingPropertySet +BorderPS = BorderPropertySet +FramePS = FramePropertySet +TabPS = TabPropertySet +TextPS = TextPropertySet +ParagraphPS = ParagraphPropertySet + ADDED gluon/contrib/pyrtf/README Index: gluon/contrib/pyrtf/README ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/README @@ -0,0 +1,19 @@ +Version 0.46 + +Added EMF support. + +Added more sophisticated scaling options. + +See examples2.py for both. + +Grant Edwards, grante@users.sourceforge.net + + + +Version 0.45 + +Finally, image support!!! Handles PNGs and JPGs. + +See examples2.py for the gory details. 
+ +Simon Cusack, scusack@sourceforge.net ADDED gluon/contrib/pyrtf/Renderer.py Index: gluon/contrib/pyrtf/Renderer.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/Renderer.py @@ -0,0 +1,639 @@ +from types import StringType, ListType, TupleType +from copy import deepcopy +from Elements import * + +DEFAULT_TAB_WIDTH = 720 + +ParagraphAlignmentMap = { ParagraphPropertySet.LEFT : 'ql', + ParagraphPropertySet.RIGHT : 'qr', + ParagraphPropertySet.CENTER : 'qc', + ParagraphPropertySet.JUSTIFY : 'qj', + ParagraphPropertySet.DISTRIBUTE : 'qd' } + +TabAlignmentMap = { TabPropertySet.LEFT : '', + TabPropertySet.RIGHT : 'tqr', + TabPropertySet.CENTER : 'tqc', + TabPropertySet.DECIMAL : 'tqdec' } + +TableAlignmentMap = { Table.LEFT : 'trql', + Table.RIGHT : 'trqr', + Table.CENTER : 'trqc' } + +CellAlignmentMap = { Cell.ALIGN_TOP : '', # clvertalt + Cell.ALIGN_CENTER : 'clvertalc', + Cell.ALIGN_BOTTOM : 'clvertalb' } + +CellFlowMap = { Cell.FLOW_LR_TB : '', # cltxlrtb, Text in a cell flows from left to right and top to bottom (default) + Cell.FLOW_RL_TB : 'cltxtbrl', # Text in a cell flows right to left and top to bottom + Cell.FLOW_LR_BT : 'cltxbtlr', # Text in a cell flows left to right and bottom to top + Cell.FLOW_VERTICAL_LR_TB : 'cltxlrtbv', # Text in a cell flows left to right and top to bottom, vertical + Cell.FLOW_VERTICAL_TB_RL : 'cltxtbrlv' } # Text in a cell flows top to bottom and right to left, vertical + +ShadingPatternMap = { ShadingPropertySet.HORIZONTAL : 'bghoriz', + ShadingPropertySet.VERTICAL : 'bgvert', + ShadingPropertySet.FORWARD_DIAGONAL : 'bgfdiag', + ShadingPropertySet.BACKWARD_DIAGONAL : 'bgbdiag', + ShadingPropertySet.VERTICAL_CROSS : 'bgcross', + ShadingPropertySet.DIAGONAL_CROSS : 'bgdcross', + ShadingPropertySet.DARK_HORIZONTAL : 'bgdkhoriz', + ShadingPropertySet.DARK_VERTICAL : 'bgdkvert', + ShadingPropertySet.DARK_FORWARD_DIAGONAL : 'bgdkfdiag', + 
ShadingPropertySet.DARK_BACKWARD_DIAGONAL : 'bgdkbdiag', + ShadingPropertySet.DARK_VERTICAL_CROSS : 'bgdkcross', + ShadingPropertySet.DARK_DIAGONAL_CROSS : 'bgdkdcross' } + +TabLeaderMap = { TabPropertySet.DOTS : 'tldot', + TabPropertySet.HYPHENS : 'tlhyph', + TabPropertySet.UNDERLINE : 'tlul', + TabPropertySet.THICK_LINE : 'tlth', + TabPropertySet.EQUAL_SIGN : 'tleq' } + +BorderStyleMap = { BorderPropertySet.SINGLE : 'brdrs', + BorderPropertySet.DOUBLE : 'brdrth', + BorderPropertySet.SHADOWED : 'brdrsh', + BorderPropertySet.DOUBLED : 'brdrdb', + BorderPropertySet.DOTTED : 'brdrdot', + BorderPropertySet.DASHED : 'brdrdash', + BorderPropertySet.HAIRLINE : 'brdrhair' } + +SectionBreakTypeMap = { Section.NONE : 'sbknone', + Section.COLUMN : 'sbkcol', + Section.PAGE : 'sbkpage', + Section.EVEN : 'sbkeven', + Section.ODD : 'sbkodd' } + +class Settings( list ) : + def __init__( self ) : + super( Settings, self ).__init__() + self._append = super( Settings, self ).append + + def append( self, value, mask=None, fallback=None ) : + if (value is not 0) and value in [ False, None, '' ] : + if fallback : self._append( self, fallback ) + + else : + if mask : + if value is True : + value = mask + else : + value = mask % value + self._append( value ) + + def Join( self ) : + if self : return r'\%s' % '\\'.join( self ) + return '' + + def __repr__( self ) : + return self.Join() + +class Renderer : + def __init__( self, write_custom_element_callback=None ) : + self.character_style_map = {} + self.paragraph_style_map = {} + self.WriteCustomElement = write_custom_element_callback + + # + # All of the Rend* Functions populate a Settings object with values + # + def _RendPageProperties( self, section, settings, in_section ) : + # this one is different from the others as it takes the settings from a + if in_section : + #paper_size_code = 'psz%s' + paper_width_code = 'pgwsxn%s' + paper_height_code = 'pghsxn%s' + landscape = 'lndscpsxn' + margin_suffix = 'sxn' + + else : + 
#paper_size_code = 'psz%s' + paper_width_code = 'paperw%s' + paper_height_code = 'paperh%s' + landscape = 'landscape' + margin_suffix = '' + + #settings.append( section.Paper.Code, paper_size_code ) + settings.append( section.Paper.Width, paper_width_code ) + settings.append( section.Paper.Height, paper_height_code ) + + if section.Landscape : + settings.append( landscape ) + + if section.FirstPageNumber : + settings.append( section.FirstPageNumber, 'pgnstarts%s' ) + settings.append( 'pgnrestart' ) + + self._RendMarginsPropertySet( section.Margins, settings, margin_suffix ) + + def _RendShadingPropertySet( self, shading_props, settings, prefix='' ) : + if not shading_props : return + + settings.append( shading_props.Shading, prefix + 'shading%s' ) + settings.append( ShadingPatternMap.get( shading_props.Pattern, False ) ) + + settings.append( self._colour_map.get( shading_props.Foreground, False ), prefix + 'cfpat%s' ) + settings.append( self._colour_map.get( shading_props.Background, False ), prefix + 'cbpat%s' ) + + def _RendBorderPropertySet( self, edge_props, settings ) : + settings.append( BorderStyleMap[ edge_props.Style ] ) + settings.append( edge_props.Width , 'brdrw%s' ) + settings.append( self._colour_map.get( edge_props.Colour, False ), 'brdrcf%s' ) + settings.append( edge_props.Spacing or False , 'brsp%s' ) + + def _RendFramePropertySet( self, frame_props, settings, tag_prefix='' ) : + if not frame_props : return + + if frame_props.Top : + settings.append( tag_prefix + 'brdrt' ) + self._RendBorderPropertySet( frame_props.Top, settings ) + + if frame_props.Left : + settings.append( tag_prefix + 'brdrl' ) + self._RendBorderPropertySet( frame_props.Left, settings ) + + if frame_props.Bottom : + settings.append( tag_prefix + 'brdrb' ) + self._RendBorderPropertySet( frame_props.Bottom, settings ) + + if frame_props.Right : + settings.append( tag_prefix + 'brdrr' ) + self._RendBorderPropertySet( frame_props.Right, settings ) + + def _RendMarginsPropertySet( 
self, margin_props, settings, suffix='' ) : + if not margin_props : return + + settings.append( margin_props.Top, 'margt' + suffix + '%s' ) + settings.append( margin_props.Left, 'margl' + suffix + '%s' ) + settings.append( margin_props.Bottom, 'margb' + suffix + '%s' ) + settings.append( margin_props.Right, 'margr' + suffix + '%s' ) + + def _RendParagraphPropertySet( self, paragraph_props, settings ) : + if not paragraph_props : return + settings.append( ParagraphAlignmentMap[ paragraph_props.Alignment ] ) + + settings.append( paragraph_props.SpaceBefore, 'sb%s' ) + settings.append( paragraph_props.SpaceAfter, 'sa%s' ) + + # then we have to find out all of the tabs + width = 0 + for tab in paragraph_props.Tabs : + settings.append( TabAlignmentMap[ tab.Alignment ] ) + settings.append( TabLeaderMap.get( tab.Leader, '' ) ) + + width += tab.Width or DEFAULT_TAB_WIDTH + settings.append( 'tx%s' % width ) + + settings.append( paragraph_props.PageBreakBefore, 'pagebb' ) + + settings.append( paragraph_props.FirstLineIndent, 'fi%s' ) + settings.append( paragraph_props.LeftIndent, 'li%s' ) + settings.append( paragraph_props.RightIndent, 'ri%s' ) + + if paragraph_props.SpaceBetweenLines : + if paragraph_props.SpaceBetweenLines < 0 : + settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult0' ) + else : + settings.append( paragraph_props.SpaceBetweenLines, r'sl%s\slmult1' ) + + def _RendTextPropertySet( self, text_props, settings ) : + if not text_props : return + + if text_props.Expansion : + settings.append( text_props.Expansion, 'expndtw%s' ) + + settings.append( text_props.Bold, 'b' ) + settings.append( text_props.Italic, 'i' ) + settings.append( text_props.Underline, 'ul' ) + settings.append( text_props.DottedUnderline, 'uld' ) + settings.append( text_props.DoubleUnderline, 'uldb' ) + settings.append( text_props.WordUnderline, 'ulw' ) + + settings.append( self._font_map.get( text_props.Font, False ), 'f%s' ) + settings.append( text_props.Size, 'fs%s' ) + 
settings.append( self._colour_map.get( text_props.Colour, False ), 'cf%s' ) + + if text_props.Frame : + frame = text_props.Frame + settings.append( 'chbrdr' ) + settings.append( BorderStyleMap[ frame.Style ] ) + settings.append( frame.Width , 'brdrw%s' ) + settings.append( self._colour_map.get( frame.Colour, False ), 'brdrcf%s' ) + + # + # All of the Write* functions will write to the internal file object + # + # the _ ones probably don't need to be used by anybody outside + # but the other ones like WriteTextElement could be used in the Custom + # callback. + def Write( self, document, fout ) : + # write all of the standard stuff based upon the first document + self._doc = document + self._fout = fout + self._WriteDocument () + self._WriteColours () + self._WriteFonts () + self._WriteStyleSheet() + + settings = Settings() + self._RendPageProperties( self._doc.Sections[ 0 ], settings, in_section=False ) + self._write( repr( settings ) ) + + # handle the simplest case first, we don't need to do anymore mucking around + # with section headers, etc we can just rip the document out + if len( document.Sections ) == 1 : + self._WriteSection( document.Sections[ 0 ], + is_first = True, + add_header = False ) + + else : + for section_idx, section in enumerate( document.Sections ) : + is_first = section_idx == 0 + add_header = True + self._WriteSection( section, is_first, add_header ) + + self._write( '}' ) + + del self._fout, self._doc, self._CurrentStyle + + def _write( self, data, *params ) : + #---------------------------------- + # begin modification + # by Herbert Weinhandl + # to convert accented characters + # to their rtf-compatible form + #for c in range( 128, 256 ) : + # data = data.replace( chr(c), "\'%x" % c) + # end modification + # + # This isn't the right place for this as it is going to do + # this loop for all sorts of writes, including settings, control codes, etc. 
+ # + # I will create a def _WriteText (or something) method that is used when the + # actual string that is to be viewed in the document is written, this can then + # do the final accented character check. + # + # I left it here so that I remember to do the right thing when I have time + #---------------------------------- + + if params : data = data % params + self._fout.write( data ) + + def _WriteDocument( self ) : + settings = Settings() + + assert Languages.IsValid ( self._doc.DefaultLanguage ) + assert ViewKind.IsValid ( self._doc.ViewKind ) + assert ViewZoomKind.IsValid( self._doc.ViewZoomKind ) + assert ViewScale.IsValid ( self._doc.ViewScale ) + + settings.append( self._doc.DefaultLanguage, 'deflang%s' ) + settings.append( self._doc.ViewKind , 'viewkind%s' ) + settings.append( self._doc.ViewZoomKind , 'viewzk%s' ) + settings.append( self._doc.ViewScale , 'viewscale%s' ) + + self._write( "{\\rtf1\\ansi\\ansicpg1252\\deff0%s\n" % settings ) + + def _WriteColours( self ) : + self._write( r"{\colortbl ;" ) + + self._colour_map = {} + offset = 0 + for colour in self._doc.StyleSheet.Colours : + self._write( r'\red%s\green%s\blue%s;', colour.Red, colour.Green, colour.Blue ) + self._colour_map[ colour ] = offset + 1 + offset += 1 + self._write( "}\n" ) + + def _WriteFonts( self ) : + self._write( r'{\fonttbl' ) + + self._font_map = {} + offset = 0 + for font in self._doc.StyleSheet.Fonts : + pitch = '' + panose = '' + alternate = '' + if font.Pitch : pitch = r'\fprq%s' % font.Pitch + if font.Panose : panose = r'{\*\panose %s}' % font.Panose + if font.Alternate : alternate = r'{\*\falt %s}' % font.Alternate.Name + + self._write( r'{\f%s\f%s%s\fcharset%s%s %s%s;}', + offset, + font.Family, + pitch, + font.CharacterSet, + panose, + font.Name, + alternate ) + + self._font_map[ font ] = offset + offset += 1 + + self._write( "}\n" ) + + def _WriteStyleSheet( self ) : + self._write( r"{\stylesheet" ) + + # TO DO: character styles, does anybody actually use them? 
+ + offset_map = {} + for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) : + offset_map[ style ] = idx + + # paragraph styles + self.paragraph_style_map = {} + for idx, style in enumerate( self._doc.StyleSheet.ParagraphStyles ) : + + if idx == 0 : + default = style + else : + self._write( '\n' ) + + settings = Settings() + + # paragraph properties + self._RendParagraphPropertySet( style.ParagraphPropertySet, settings ) + self._RendFramePropertySet ( style.FramePropertySet, settings ) + self._RendShadingPropertySet ( style.ShadingPropertySet, settings ) + + # text properties + self._RendTextPropertySet ( style.TextStyle.TextPropertySet, settings ) + self._RendShadingPropertySet( style.TextStyle.ShadingPropertySet, settings ) + + # have to take + based_on = '\\sbasedon%s' % offset_map.get( style.BasedOn, 0 ) + next = '\\snext%s' % offset_map.get( style.Next, 0 ) + + inln = '\\s%s%s' % ( idx, settings ) + self._write( "{%s%s%s %s;}", inln, based_on, next, style.Name ) + + self.paragraph_style_map[ style ] = inln + + # if now style is specified for the first paragraph to be written, this one + # will be used + self._CurrentStyle = self.paragraph_style_map[ default ] + + self._write( "}\n" ) + + def _WriteSection( self, section, is_first, add_header ) : + + def WriteHF( hf, rtfword ) : + #if not hf : return + + # if we don't have anything in the header/footer then include + # a blank paragraph, this stops it from picking up the header/footer + # from the previous section + # if not hf : hf = [ Paragraph( '' ) ] + if not hf : hf = [] + + self._write( '{\\%s' % rtfword ) + self._WriteElements( hf ) + self._write( '}\n' ) + + settings = Settings() + + if not is_first : + # we need to finish off the preceding section + # and reset all of our defaults back to standard + settings.append( 'sect' ) + + # reset to our defaults + settings.append( 'sectd' ) + + if add_header : + settings.append( SectionBreakTypeMap[ section.BreakType ] ) + 
self._RendPageProperties( section, settings, in_section=True ) + + settings.append( section.HeaderY, 'headery%s' ) + settings.append( section.FooterY, 'footery%s' ) + + # write all of these out now as we need to do a write elements in the + # next section + self._write( repr( settings ) ) + + # finally after all that has settled down we can do the + # headers and footers + if section.FirstHeader or section.FirstFooter : + # include the titlepg flag if the first page has a special format + self._write( r'\titlepg' ) + WriteHF( section.FirstHeader, 'headerf' ) + WriteHF( section.FirstFooter, 'footerf' ) + + WriteHF( section.Header, 'header' ) + WriteHF( section.Footer, 'footer' ) + + # and at last the contents of the section that actually appear on the page + self._WriteElements( section ) + + def _WriteElements( self, elements ) : + new_line = '' + for element in elements : + self._write( new_line ) + new_line = '\n' + + clss = element.__class__ + + if clss == Paragraph : + self.WriteParagraphElement( element ) + + elif clss == Table : + self.WriteTableElement( element ) + + elif clss == StringType : + self.WriteParagraphElement( Paragraph( element ) ) + + elif clss in [ RawCode, Image ] : + self.WriteRawCode( element ) + + #elif clss == List : + # self._HandleListElement( element ) + + elif self.WriteCustomElement : + self.WriteCustomElement( self, element ) + + else : + raise Exception( "Don't know how to handle elements of type %s" % clss ) + + def WriteParagraphElement( self, paragraph_elem, tag_prefix='', tag_suffix=r'\par', opening='{', closing='}' ) : + + # the tag_prefix and the tag_suffix take care of paragraphs in tables. A + # paragraph in a table requires and extra tag at the front (intbl) and we + # don't want the ending tag everytime. We want it for all paragraphs but + # the last. 
+ + overrides = Settings() + self._RendParagraphPropertySet( paragraph_elem.Properties, overrides ) + self._RendFramePropertySet ( paragraph_elem.Frame, overrides ) + self._RendShadingPropertySet ( paragraph_elem.Shading, overrides ) + + # when writing the RTF the style is carried from the previous paragraph to the next, + # so if the currently written paragraph has a style then make it the current one, + # otherwise leave it as it was + self._CurrentStyle = self.paragraph_style_map.get( paragraph_elem.Style, self._CurrentStyle ) + + self._write( r'%s\pard\plain%s %s%s ' % ( opening, tag_prefix, self._CurrentStyle, overrides ) ) + + for element in paragraph_elem : + + if isinstance( element, StringType ) : + self._write( element ) + + elif isinstance( element, RawCode ) : + self._write( element.Data ) + + elif isinstance( element, Text ) : + self.WriteTextElement( element ) + + elif isinstance( element, Inline ) : + self.WriteInlineElement( element ) + + elif element == TAB : + self._write( r'\tab ' ) + + elif element == LINE : + self._write( r'\line ' ) + + elif self.WriteCustomElement : + self.WriteCustomElement( self, element ) + + else : + raise Exception( 'Don\'t know how to handle %s' % element ) + + self._write( tag_suffix + closing ) + + def WriteRawCode( self, raw_elem ) : + self._write( raw_elem.Data ) + + def WriteTextElement( self, text_elem ) : + overrides = Settings() + + self._RendTextPropertySet ( text_elem.Properties, overrides ) + self._RendShadingPropertySet( text_elem.Shading, overrides, 'ch' ) + + # write the wrapper and then let the custom handler have a go + if overrides : self._write( '{%s ' % repr( overrides ) ) + + # if the data is just a string then we can now write it + if isinstance( text_elem.Data, StringType ) : + self._write( text_elem.Data or '' ) + + elif text_elem.Data == TAB : + self._write( r'\tab ' ) + + else : + self.WriteCustomElement( self, text_elem.Data ) + + if overrides : self._write( '}' ) + + def WriteInlineElement( 
self, inline_elem ) : + overrides = Settings() + + self._RendTextPropertySet ( inline_elem.Properties, overrides ) + self._RendShadingPropertySet( inline_elem.Shading, overrides, 'ch' ) + + # write the wrapper and then let the custom handler have a go + if overrides : self._write( '{%s ' % repr( overrides ) ) + + for element in inline_elem : + # if the data is just a string then we can now write it + if isinstance( element, StringType ) : + self._write( element ) + + elif isinstance( element, RawCode ) : + self._write( element.Data ) + + elif element == TAB : + self._write( r'\tab ' ) + + elif element == LINE : + self._write( r'\line ' ) + + else : + self.WriteCustomElement( self, element ) + + if overrides : self._write( '}' ) + + def WriteText( self, text ) : + self._write( text or '' ) + + def WriteTableElement( self, table_elem ) : + + vmerge = [ False ] * table_elem.ColumnCount + for height, cells in table_elem.Rows : + + # calculate the right hand edge of the cells taking into account the spans + offset = table_elem.LeftOffset or 0 + cellx = [] + cell_idx = 0 + for cell in cells : + cellx.append( offset + sum( table_elem.ColumnWidths[ : cell_idx + cell.Span ] ) ) + cell_idx += cell.Span + + self._write( r'{\trowd' ) + + settings = Settings() + + # the spec says that this value is mandatory and I think that 108 is the default value + # so I'll take care of it here + settings.append( table_elem.GapBetweenCells or 108, 'trgaph%s' ) + settings.append( TableAlignmentMap[ table_elem.Alignment ] ) + settings.append( height, 'trrh%s' ) + settings.append( table_elem.LeftOffset, 'trleft%s' ) + + width = table_elem.LeftOffset or 0 + for idx, cell in enumerate( cells ) : + self._RendFramePropertySet ( cell.Frame, settings, 'cl' ) + + # cells don't have margins so I don't know why I was doing this + # I think it might have an affect in some versions of some WPs. 
+ #self._RendMarginsPropertySet( cell.Margins, settings, 'cl' ) + + # if we are starting to merge or if this one is the first in what is + # probably a series of merges then start the vertical merging + if cell.StartVerticalMerge or (cell.VerticalMerge and not vmerge[ idx ]) : + settings.append( 'clvmgf' ) + vmerge[ idx ] = True + + elif cell.VerticalMerge : + #..continuing a merge + settings.append( 'clvmrg' ) + + else : + #..no merging going on so make sure that it is off + vmerge[ idx ] = False + + # for any cell in the next row that is covered by this span we + # need to run off the vertical merging as we don't want them + # merging up into this spanned cell + for vmerge_idx in range( idx + 1, idx + cell.Span - 1 ) : + vmerge[ vmerge_idx ] = False + + settings.append( CellAlignmentMap[ cell.Alignment ] ) + settings.append( CellFlowMap[ cell.Flow ] ) + + # this terminates the definition of a cell and represents the right most edge of the cell from the left margin + settings.append( cellx[ idx ], 'cellx%s' ) + + self._write( repr( settings ) ) + + for cell in cells : + if len( cell ) : + last_idx = len( cell ) - 1 + for element_idx, element in enumerate( cell ) : + # wrap plain strings in paragraph tags + if isinstance( element, StringType ) : + element = Paragraph( element ) + + # don't forget the prefix or else word crashes and does all sorts of strange things + if element_idx == last_idx : + self.WriteParagraphElement( element, tag_prefix=r'\intbl', tag_suffix='', opening='', closing='' ) + + else : + self.WriteParagraphElement( element, tag_prefix=r'\intbl', opening='', closing='' ) + + self._write( r'\cell' ) + + else : + self._write( r'\pard\intbl\cell' ) + + self._write( '\\row}\n' ) + ADDED gluon/contrib/pyrtf/Styles.py Index: gluon/contrib/pyrtf/Styles.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/Styles.py @@ -0,0 +1,94 @@ +""" +A Styles is a collection of PropertySets that can be applied to 
a particular RTF element. + +At present there are only two, Text and Paragraph but ListStyles will be added soon too. + + +""" + +from PropertySets import * + +class TextStyle : + def __init__( self, text_props, name=None, shading_props=None ) : + self.SetTextPropertySet ( text_props ) + self.SetName ( name ) + self.SetShadingPropertySet( shading_props ) + + def Copy( self ) : + return deepcopy( self ) + + def SetName( self, value ) : + self.Name = value + return self + + def SetTextPropertySet( self, value ) : + assert isinstance( value, TextPropertySet ) + self.TextPropertySet = value + return self + + def SetShadingPropertySet( self, value ) : + assert value is None or isinstance( value, ShadingPropertySet ) + self.ShadingPropertySet = value or ShadingPropertySet() + return self + +class ParagraphStyle : + def __init__( self, name, text_style, paragraph_props=None, frame_props=None, shading_props=None ) : + + # A style must have Font and a Font Size but the Text property set doesn't + # make these mandatory so that they can be used for overrides so at this point + # we need to make sure that that we have these values set + if not text_style.TextPropertySet.Font : raise Exception( 'Paragraph Styles must have a Font specified.' ) + if not text_style.TextPropertySet.Size : raise Exception( 'Paragraph Styles must have a Font Size specified.' 
) + + self.SetName ( name ) + self.SetTextStyle ( text_style ) + self.SetParagraphPropertySet( paragraph_props ) + self.SetFramePropertySet ( frame_props ) + self.SetShadingPropertySet ( shading_props ) + + self.SetBasedOn( None ) + self.SetNext ( None ) + + def Copy( self ) : + return deepcopy( self ) + + def SetName( self, value ) : + self.Name = value + return self + + def SetTextStyle( self, value ) : + assert isinstance( value, TextStyle ) + self.TextStyle = value + return self + + def SetParagraphPropertySet( self, value ) : + assert value is None or isinstance( value, ParagraphPropertySet ) + self.ParagraphPropertySet = value or ParagraphPropertySet() + return self + + def SetFramePropertySet( self, value ) : + assert value is None or isinstance( value, FramePropertySet ) + self.FramePropertySet = value or FramePropertySet() + return self + + def SetShadingPropertySet( self, value ) : + """Set the background shading for the paragraph.""" + + assert value is None or isinstance( value, ShadingPropertySet ) + self.ShadingPropertySet = value or ShadingPropertySet() + return self + + def SetBasedOn( self, value ) : + """Set the Paragraph Style that this one is based on.""" + + assert not value or isinstance( value, ParagraphStyle ) + self.BasedOn = value + return self + + def SetNext( self, value ) : + """Set the Paragraph Style that should follow this one.""" + + assert not value or isinstance( value, ParagraphStyle ) + self.Next = value + return self + ADDED gluon/contrib/pyrtf/__init__.py Index: gluon/contrib/pyrtf/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/pyrtf/__init__.py @@ -0,0 +1,12 @@ +from PropertySets import * +from Elements import * +from Styles import * +from Renderer import * + +def dumps(doc): + import cStringIO + s=cStringIO.StringIO() + r=Renderer() + r.Write(doc,s) + return s.getvalue() + ADDED gluon/contrib/pysimplesoap/__init__.py Index: gluon/contrib/pysimplesoap/__init__.py 
================================================================== --- /dev/null +++ gluon/contrib/pysimplesoap/__init__.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +"Contributed modules" + ADDED gluon/contrib/pysimplesoap/client.py Index: gluon/contrib/pysimplesoap/client.py ================================================================== --- /dev/null +++ gluon/contrib/pysimplesoap/client.py @@ -0,0 +1,689 @@ +#!/usr/bin/python +# -*- coding: latin-1 -*- +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation; either version 3, or (at your option) any later +# version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# for more details. + +"Pythonic simple SOAP Client implementation" + +__author__ = "Mariano Reingart (reingart@gmail.com)" +__copyright__ = "Copyright (C) 2008 Mariano Reingart" +__license__ = "LGPL 3.0" +__version__ = "1.02c" + +import urllib +try: + import httplib2 + Http = httplib2.Http +except ImportError: + import urllib2 + class Http(): # wrapper to use when httplib2 not available + def request(self, url, method, body, headers): + f = urllib2.urlopen(urllib2.Request(url, body, headers)) + return f.info(), f.read() + + +from simplexml import SimpleXMLElement, TYPE_MAP, OrderedDict + +class SoapFault(RuntimeError): + def __init__(self,faultcode,faultstring): + self.faultcode = faultcode + self.faultstring = faultstring + +# soap protocol specification & namespace +soap_namespaces = dict( + soap11="http://schemas.xmlsoap.org/soap/envelope/", + soap="http://schemas.xmlsoap.org/soap/envelope/", + soapenv="http://schemas.xmlsoap.org/soap/envelope/", + soap12="http://www.w3.org/2003/05/soap-env", +) + +class 
SoapClient(object): + "Simple SOAP Client (s�mil PHP)" + def __init__(self, location = None, action = None, namespace = None, + cert = None, trace = False, exceptions = True, proxy = None, ns=False, + soap_ns=None, wsdl = None, cache = False): + self.certssl = cert + self.keyssl = None + self.location = location # server location (url) + self.action = action # SOAP base action + self.namespace = namespace # message + self.trace = trace # show debug messages + self.exceptions = exceptions # lanzar execpiones? (Soap Faults) + self.xml_request = self.xml_response = '' + if not soap_ns and not ns: + self.__soap_ns = 'soap' # 1.1 + elif not soap_ns and ns: + self.__soap_ns = 'soapenv' # 1.2 + else: + self.__soap_ns = soap_ns + + # parse wsdl url + self.services = wsdl and self.wsdl(wsdl, debug=trace, cache=cache) + self.service_port = None # service port for late binding + + if not proxy: + self.http = Http() + else: + import socks + ##httplib2.debuglevel=4 + self.http = httplib2.Http(proxy_info = httplib2.ProxyInfo( + proxy_type=socks.PROXY_TYPE_HTTP, **proxy)) + #if self.certssl: # esto funciona para validar al server? 
+ # self.http.add_certificate(self.keyssl, self.keyssl, self.certssl) + self.__ns = ns # namespace prefix or False to not use it + if not ns: + self.__xml = """<?xml version="1.0" encoding="UTF-8"?> +<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:xsd="http://www.w3.org/2001/XMLSchema" + xmlns:%(soap_ns)s="%(soap_uri)s"> +<%(soap_ns)s:Body> + <%(method)s xmlns="%(namespace)s"> + </%(method)s> +</%(soap_ns)s:Body> +</%(soap_ns)s:Envelope>""" + else: + self.__xml = """<?xml version="1.0" encoding="UTF-8"?> +<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" xmlns:%(ns)s="%(namespace)s"> +<%(soap_ns)s:Header/> +<%(soap_ns)s:Body> + <%(ns)s:%(method)s> + </%(ns)s:%(method)s> +</%(soap_ns)s:Body> +</%(soap_ns)s:Envelope>""" + + def __getattr__(self, attr): + "Return a pseudo-method that can be called" + if not self.services: # not using WSDL? + return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs) + else: # using WSDL: + return lambda self=self, *args, **kwargs: self.wsdl_call(attr,*args,**kwargs) + + def call(self, method, *args, **kwargs): + "Prepare xml request and make SOAP call, returning a SimpleXMLElement" + #TODO: method != input_message + # Basic SOAP request: + xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns, + soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns]) + request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns) + # serialize parameters + if kwargs: + parameters = kwargs.items() + else: + parameters = args + if parameters and isinstance(parameters[0], SimpleXMLElement): + # merge xmlelement parameter ("raw" - already marshalled) + for param in parameters[0].children(): + getattr(request,method).import_node(param) + else: + # marshall parameters: + for k,v in parameters: # dict: tag=valor + getattr(request,method).marshall(k,v) + self.xml_request = request.as_xml() + self.xml_response = self.send(method, 
self.xml_request) + response = SimpleXMLElement(self.xml_response, namespace=self.namespace) + if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False): + raise SoapFault(unicode(response.faultcode), unicode(response.faultstring)) + return response + + def send(self, method, xml): + "Send SOAP request using HTTP" + if self.location == 'test': return + location = "%s" % self.location #?op=%s" % (self.location, method) + if self.services: + soap_action = self.action + else: + soap_action = self.action+method + headers={ + 'Content-type': 'text/xml; charset="UTF-8"', + 'Content-length': str(len(xml)), + "SOAPAction": "\"%s\"" % (soap_action) + } + if self.trace: + print "-"*80 + print "POST %s" % location + print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()]) + print u"\n%s" % xml.decode("utf8","ignore") + response, content = self.http.request( + location,"POST", body=xml, headers=headers ) + self.response = response + self.content = content + if self.trace: + print + print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()]) + print content#.decode("utf8","ignore") + print "="*80 + return content + + def get_operation(self, method): + # try to find operation in wsdl file + soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11' + if not self.service_port: + for service_name, service in self.services.items(): + for port_name, port in [port for port in service['ports'].items()]: + if port['soap_ver'] == soap_ver: + self.service_port = service_name, port_name + break + else: + raise RuntimeError("Cannot determine service in WSDL: " + "SOAP version: %s" % soap_ver) + else: + port = self.services[self.service_port[0]]['ports'][self.service_port[1]] + self.location = port['location'] + operation = port['operations'].get(unicode(method)) + if not operation: + raise RuntimeError("Operation %s not found in WSDL: " + "Service/Port Type: %s" % + (method, self.service_port)) + return operation + + def wsdl_call(self, method, *args, 
**kwargs): + "Pre and post process SOAP call, input and output parameters using WSDL" + soap_uri = soap_namespaces[self.__soap_ns] + operation = self.get_operation(method) + # get i/o type declarations: + input = operation['input'] + output = operation['output'] + if 'action' in operation: + self.action = operation['action'] + # sort parameters (same order as xsd:sequence) + def sort_dict(od, d): + if isinstance(od, dict): + ret = OrderedDict() + for k in od.keys(): + v = d.get(k) + if v: + if isinstance(v, dict): + v = sort_dict(od[k], v) + elif isinstance(v, list): + v = [sort_dict(od[k][0], v1) + for v1 in v] + ret[str(k)] = v + return ret + else: + return d + if input and kwargs: + params = sort_dict(input.values()[0], kwargs).items() + method = input.keys()[0] + #elif not input: + #TODO: no message! (see wsmtxca.dummy) + else: + params = kwargs and kwargs.items() + # call remote procedure + response = self.call(method, *params) + # parse results: + resp = response('Body',ns=soap_uri).children().unmarshall(output) + return resp and resp.values()[0] # pass Response tag children + + def help(self, method): + "Return operation documentation and invocation/returned value example" + operation = self.get_operation(method) + input = operation['input'].values() + input = input and input[0] + output = operation['output'].values()[0] + return u"%s(%s)\n -> %s:\n\n%s" % ( + method, + input and ", ".join("%s=%s" % (k,repr(v)) for k,v + in input.items()) or "", + output and output or "", + operation.get("documentation",""), + ) + + def wsdl(self, url, debug=False, cache=False): + "Parse Web Service Description v1.1" + soap_ns = { + "http://schemas.xmlsoap.org/wsdl/soap/": 'soap11', + "http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12', + } + wsdl_uri="http://schemas.xmlsoap.org/wsdl/" + xsd_uri="http://www.w3.org/2001/XMLSchema" + xsi_uri="http://www.w3.org/2001/XMLSchema-instance" + + get_local_name = lambda s: str((':' in s) and s.split(':')[1] or s) + + REVERSE_TYPE_MAP 
= dict([(v,k) for k,v in TYPE_MAP.items()]) + + def fetch(url): + "Fetch a document from a URL, save it locally if cache enabled" + import os, hashlib + # make md5 hash of the url for caching... + filename = "%s.xml" % hashlib.md5(url).hexdigest() + if isinstance(cache, basestring): + filename = os.path.join(cache, filename) + if cache and os.path.exists(filename): + if debug: print "Reading file %s" % (filename, ) + f = open(filename, "r") + xml = f.read() + f.close() + else: + if debug: print "Fetching url %s" % (url, ) + f = urllib.urlopen(url) + xml = f.read() + if cache: + if debug: print "Writing file %s" % (filename, ) + f = open(filename, "w") + f.write(xml) + f.close() + return xml + + # Open uri and read xml: + xml = fetch(url) + # Parse WSDL XML: + wsdl = SimpleXMLElement(xml, namespace=wsdl_uri) + + # detect soap prefix and uri (xmlns attributes of <definitions>) + xsd_ns = None + soap_uris = {} + for k, v in wsdl[:]: + if v in soap_ns and k.startswith("xmlns:"): + soap_uris[get_local_name(k)] = v + if v== xsd_uri and k.startswith("xmlns:"): + xsd_ns = get_local_name(k) + + # Extract useful data: + self.namespace = wsdl['targetNamespace'] + self.documentation = unicode(wsdl('documentation', error=False) or '') + + services = {} + bindings = {} # binding_name: binding + operations = {} # operation_name: operation + port_type_bindings = {} # port_type_name: binding + messages = {} # message: element + elements = {} # element: type def + + for service in wsdl.service: + service_name=service['name'] + if not service_name: + continue # empty service? 
+ if debug: print "Processing service", service_name + serv = services.setdefault(service_name, {'ports': {}}) + serv['documentation']=service['documentation'] or '' + for port in service.port: + binding_name = get_local_name(port['binding']) + address = port('address', ns=soap_uris.values(), error=False) + location = address and address['location'] or None + soap_uri = address and soap_uris.get(address.get_prefix()) + soap_ver = soap_uri and soap_ns.get(soap_uri) + bindings[binding_name] = {'service_name': service_name, + 'location': location, + 'soap_uri': soap_uri, 'soap_ver': soap_ver, + } + serv['ports'][port['name']] = bindings[binding_name] + + for binding in wsdl.binding: + binding_name = binding['name'] + if debug: print "Processing binding", service_name + soap_binding = binding('binding', ns=soap_uris.values(), error=False) + transport = soap_binding and soap_binding['transport'] or None + port_type_name = get_local_name(binding['type']) + bindings[binding_name].update({ + 'port_type_name': port_type_name, + 'transport': transport, 'operations': {}, + }) + port_type_bindings[port_type_name] = bindings[binding_name] + for operation in binding.operation: + op_name = operation['name'] + op = operation('operation',ns=soap_uris.values(), error=False) + action = op and op['soapAction'] + d = operations.setdefault(op_name, {}) + bindings[binding_name]['operations'][op_name] = d + d.update({'name': op_name}) + #if action: #TODO: separe operation_binding from operation + if action: + d["action"] = action + + #TODO: cleanup element/schema/types parsing: + def process_element(element_name, node): + "Parse and define simple element types" + if debug: print "Processing element", element_name + for tag in node: + if tag.get_local_name() in ("annotation", "documentation"): + continue + elif tag.get_local_name() in ('element', 'restriction'): + if debug: print element_name,"has not children!",tag + children = tag # element "alias"? 
+ alias = True + elif tag.children(): + children = tag.children() + alias = False + else: + if debug: print element_name,"has not children!",tag + continue #TODO: abstract? + d = OrderedDict() + for e in children: + t = e['type'] + if not t: + t = e['base'] # complexContent (extension)! + if not t: + t = 'anyType' # no type given! + t = t.split(":") + if len(t)>1: + ns, type_name = t + else: + ns, type_name = None, t[0] + if element_name == type_name: + continue # prevent infinite recursion + uri = ns and e.get_namespace_uri(ns) or xsd_uri + if uri==xsd_uri: + # look for the type, None == any + fn = REVERSE_TYPE_MAP.get(unicode(type_name), None) + else: + # complex type, postprocess later + fn = elements.setdefault(unicode(type_name), OrderedDict()) + if e['name'] is not None and not alias: + e_name = unicode(e['name']) + d[e_name] = fn + else: + if debug: print "complexConent/simpleType/element", element_name, "=", type_name + d[None] = fn + if e['maxOccurs']=="unbounded": + # it's an array... TODO: compound arrays? + d.array = True + if e is not None and e.get_local_name() == 'extension' and e.children(): + # extend base element: + process_element(element_name, e.children()) + elements.setdefault(element_name, OrderedDict()).update(d) + + # check axis2 namespace at schema types attributes + self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace) + + imported_schemas = {} + + def preprocess_schema(schema): + "Find schema elements and complex types" + for element in schema.children(): + if element.get_local_name() in ('import', ): + schema_namespace = element['namespace'] + schema_location = element['schemaLocation'] + if schema_location is None: + if debug: print "Schema location not provided for %s!" % (schema_namespace, ) + continue + if schema_location in imported_schemas: + if debug: print "Schema %s already imported!" 
% (schema_location, ) + continue + imported_schemas[schema_location] = schema_namespace + if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location) + # Open uri and read xml: + xml = fetch(schema_location) + # Parse imported XML schema (recursively): + imported_schema = SimpleXMLElement(xml, namespace=xsd_uri) + preprocess_schema(imported_schema) + + if element.get_local_name() in ('element', 'complexType', "simpleType"): + element_name = unicode(element['name']) + if debug: print "Parsing Element %s: %s" % (element.get_local_name(),element_name) + if element.get_local_name() == 'complexType': + children = element.children() + elif element.get_local_name() == 'simpleType': + children = element("restriction", ns=xsd_uri) + elif element.get_local_name() == 'element' and element['type']: + children = element + else: + children = element.children() + if children: + children = children.children() + elif element.get_local_name() == 'element': + children = element + if children: + process_element(element_name, children) + + def postprocess_element(elements): + "Fix unresolved references (elements referenced before its definition, thanks .net)" + for k,v in elements.items(): + if isinstance(v, OrderedDict): + if v.array: + elements[k] = [v] # convert arrays to python lists + if v!=elements: #TODO: fix recursive elements + postprocess_element(v) + if None in v and v[None]: # extension base? 
+ if isinstance(v[None], dict): + for i, kk in enumerate(v[None]): + # extend base -keep orginal order- + elements[k].insert(kk, v[None][kk], i) + del v[None] + else: # "alias", just replace + if debug: print "Replacing ", k , " = ", v[None] + elements[k] = v[None] + #break + if isinstance(v, list): + for n in v: # recurse list + postprocess_element(n) + + + # process current wsdl schema: + for schema in wsdl.types("schema", ns=xsd_uri): + preprocess_schema(schema) + + postprocess_element(elements) + + for message in wsdl.message: + if debug: print "Processing message", message['name'] + part = message('part', error=False) + element = {} + if part: + element_name = part['element'] + if not element_name: + element_name = part['type'] # some uses type instead + element_name = get_local_name(element_name) + element = {element_name: elements.get(element_name)} + messages[message['name']] = element + + for port_type in wsdl.portType: + port_type_name = port_type['name'] + if debug: print "Processing port type", port_type_name + binding = port_type_bindings[port_type_name] + + for operation in port_type.operation: + op_name = operation['name'] + op = operations[op_name] + op['documentation'] = unicode(operation('documentation', error=False) or '') + if binding['soap_ver']: + #TODO: separe operation_binding from operation (non SOAP?) 
+ input = get_local_name(operation.input['message']) + output = get_local_name(operation.output['message']) + op['input'] = messages[input] + op['output'] = messages[output] + + if debug: + import pprint + pprint.pprint(services) + + return services + +def parse_proxy(proxy_str): + "Parses proxy address user:pass@host:port into a dict suitable for httplib2" + proxy_dict = {} + if proxy_str is None: + return + if "@" in proxy_str: + user_pass, host_port = proxy_str.split("@") + else: + user_pass, host_port = "", proxy_str + if ":" in host_port: + host, port = host_port.split(":") + proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port) + if ":" in user_pass: + proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":") + return proxy_dict + + +if __name__=="__main__": + import sys + + if '--web2py' in sys.argv: + # test local sample webservice exposed by web2py + from client import SoapClient + if not '--wsdl' in sys.argv: + client = SoapClient( + location = "http://127.0.0.1:8000/webservices/sample/call/soap", + action = 'http://127.0.0.1:8000/webservices/sample/call/soap', # SOAPAction + namespace = "http://127.0.0.1:8000/webservices/sample/call/soap", + soap_ns='soap', trace = True, ns = False, exceptions=True) + else: + client = SoapClient(wsdl="http://127.0.0.1:8000/webservices/sample/call/soap?WSDL",trace=True) + response = client.Dummy() + print 'dummy', response + response = client.Echo(value='hola') + print 'echo', repr(response) + response = client.AddIntegers(a=1,b=2) + if not '--wsdl' in sys.argv: + result = response.AddResult # manully convert returned type + print int(result) + else: + result = response['AddResult'] + print result, type(result), "auto-unmarshalled" + + if '--raw' in sys.argv: + # raw (unmarshalled parameter) local sample webservice exposed by web2py + from client import SoapClient + client = SoapClient( + location = "http://127.0.0.1:8000/webservices/sample/call/soap", + action = 
'http://127.0.0.1:8000/webservices/sample/call/soap', # SOAPAction + namespace = "http://127.0.0.1:8000/webservices/sample/call/soap", + soap_ns='soap', trace = True, ns = False) + params = SimpleXMLElement("""<?xml version="1.0" encoding="UTF-8"?><AddIntegers><a>3</a><b>2</b></AddIntegers>""") # manully convert returned type + response = client.call('AddIntegers',params) + result = response.AddResult + print int(result) # manully convert returned type + + if '--ctg' in sys.argv: + # test AFIP Agriculture webservice + client = SoapClient( + location = "https://fwshomo.afip.gov.ar/wsctg/services/CTGService", + action = 'http://impl.service.wsctg.afip.gov.ar/CTGService/', # SOAPAction + namespace = "http://impl.service.wsctg.afip.gov.ar/CTGService/", + trace = True, + ns = True) + response = client.dummy() + result = response.dummyResponse + print str(result.appserver) + print str(result.dbserver) + print str(result.authserver) + + if '--wsfe' in sys.argv: + # Demo & Test (AFIP Electronic Invoice): + ta_file = open("TA.xml") + try: + ta_string = ta_file.read() # read access ticket (wsaa.py) + finally: + ta_file.close() + ta = SimpleXMLElement(ta_string) + token = str(ta.credentials.token) + sign = str(ta.credentials.sign) + cuit = long(20267565393) + id = 1234 + cbte =199 + client = SoapClient( + location = "https://wswhomo.afip.gov.ar/wsfe/service.asmx", + action = 'http://ar.gov.afip.dif.facturaelectronica/', # SOAPAction + namespace = "http://ar.gov.afip.dif.facturaelectronica/", + trace = True) + results = client.FERecuperaQTYRequest( + argAuth= {"Token": token, "Sign": sign, "cuit":long(cuit)} + ) + if int(results.FERecuperaQTYRequestResult.RError.percode) != 0: + print "Percode: %s" % results.FERecuperaQTYRequestResult.RError.percode + print "MSGerror: %s" % results.FERecuperaQTYRequestResult.RError.perrmsg + else: + print int(results.FERecuperaQTYRequestResult.qty.value) + + if '--feriados' in sys.argv: + # Demo & Test: Argentina Holidays (Ministerio del 
Interior): + # this webservice seems disabled + from datetime import datetime, timedelta + client = SoapClient( + location = "http://webservices.mininterior.gov.ar/Feriados/Service.svc", + action = 'http://tempuri.org/IMyService/', # SOAPAction + namespace = "http://tempuri.org/FeriadoDS.xsd", + trace = True) + dt1 = datetime.today() - timedelta(days=60) + dt2 = datetime.today() + timedelta(days=60) + feriadosXML = client.FeriadosEntreFechasas_xml(dt1=dt1.isoformat(), dt2=dt2.isoformat()); + print feriadosXML + + if '--wsdl-parse' in sys.argv: + client = SoapClient() + # Test PySimpleSOAP WSDL + client.wsdl("file:C:/test.wsdl", debug=True) + # Test Java Axis WSDL: + client.wsdl('https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl',debug=True) + # Test .NET 2.0 WSDL: + client.wsdl('https://wswhomo.afip.gov.ar/wsfe/service.asmx?WSDL',debug=True) + client.wsdl('https://wswhomo.afip.gov.ar/wsfex/service.asmx?WSDL',debug=True) + client.wsdl('https://testdia.afip.gov.ar/Dia/Ws/wDigDepFiel/wDigDepFiel.asmx?WSDL',debug=True) + # Test JBoss WSDL: + client.wsdl('https://fwshomo.afip.gov.ar/wsctg/services/CTGService?wsdl',debug=True) + client.wsdl('https://wsaahomo.afip.gov.ar/ws/services/LoginCms?wsdl',debug=True) + + if '--wsdl-client' in sys.argv: + client = SoapClient(wsdl='https://wswhomo.afip.gov.ar/wsfex/service.asmx?WSDL',trace=True) + results = client.FEXDummy() + print results['FEXDummyResult']['AppServer'] + print results['FEXDummyResult']['DbServer'] + print results['FEXDummyResult']['AuthServer'] + ta_file = open("TA.xml") + try: + ta_string = ta_file.read() # read access ticket (wsaa.py) + finally: + ta_file.close() + ta = SimpleXMLElement(ta_string) + token = str(ta.credentials.token) + sign = str(ta.credentials.sign) + response = client.FEXGetCMP( + Auth={"Token": token, "Sign": sign, "Cuit": 20267565393}, + Cmp={"Tipo_cbte": 19, "Punto_vta": 1, "Cbte_nro": 1}) + result = response['FEXGetCMPResult'] + if False: print result + if 'FEXErr' in result: + print 
"FEXError:", result['FEXErr']['ErrCode'], result['FEXErr']['ErrCode'] + cbt = result['FEXResultGet'] + print cbt['Cae'] + FEX_event = result['FEXEvents'] + print FEX_event['EventCode'], FEX_event['EventMsg'] + + if '--wsdl-ctg' in sys.argv: + client = SoapClient(wsdl='https://fwshomo.afip.gov.ar/wsctg/services/CTGService?wsdl', + trace=True, ns = "ctg") + results = client.dummy() + print results + print results['DummyResponse']['appserver'] + print results['DummyResponse']['dbserver'] + print results['DummyResponse']['authserver'] + ta_file = open("TA.xml") + try: + ta_string = ta_file.read() # read access ticket (wsaa.py) + finally: + ta_file.close() + ta = SimpleXMLElement(ta_string) + token = str(ta.credentials.token) + sign = str(ta.credentials.sign) + print client.help("obtenerProvincias") + response = client.obtenerProvincias(auth={"token":token, "sign":sign, "cuitRepresentado":20267565393}) + print "response=",response + for ret in response: + print ret['return']['codigoProvincia'], ret['return']['descripcionProvincia'].encode("latin1") + prueba = dict(numeroCartaDePorte=512345678, codigoEspecie=23, + cuitRemitenteComercial=20267565393, cuitDestino=20267565393, cuitDestinatario=20267565393, + codigoLocalidadOrigen=3058, codigoLocalidadDestino=3059, + codigoCosecha='0910', pesoNetoCarga=1000, cantHoras=1, + patenteVehiculo='CZO985', cuitTransportista=20267565393, + numeroCTG="43816783", transaccion='10000001681', observaciones='', + ) + + response = client.solicitarCTG( + auth={"token": token, "sign": sign, "cuitRepresentado": 20267565393}, + solicitarCTGRequest= prueba) + + print response['return']['numeroCTG'] + + ##print parse_proxy(None) + ##print parse_proxy("host:1234") + ##print parse_proxy("user:pass@host:1234") + ##sys.exit(0) + ADDED gluon/contrib/pysimplesoap/server.py Index: gluon/contrib/pysimplesoap/server.py ================================================================== --- /dev/null +++ gluon/contrib/pysimplesoap/server.py @@ -0,0 +1,455 
@@ +#!/usr/bin/python +# -*- coding: latin-1 -*- +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation; either version 3, or (at your option) any later +# version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# for more details. + +"Simple SOAP Server implementation" + +__author__ = "Mariano Reingart (reingart@gmail.com)" +__copyright__ = "Copyright (C) 2010 Mariano Reingart" +__license__ = "LGPL 3.0" +__version__ = "1.02c" + +from simplexml import SimpleXMLElement, TYPE_MAP, DateTime, Date, Decimal + +DEBUG = False + + +class SoapDispatcher(object): + "Simple Dispatcher for SOAP Server" + + def __init__(self, name, documentation='', action='', location='', + namespace=None, prefix=False, + soap_uri="http://schemas.xmlsoap.org/soap/envelope/", + soap_ns='soap', + **kwargs): + self.methods = {} + self.name = name + self.documentation = documentation + self.action = action # base SoapAction + self.location = location + self.namespace = namespace # targetNamespace + self.prefix = prefix + self.soap_ns = soap_ns + self.soap_uri = soap_uri + + def register_function(self, name, fn, returns=None, args=None, doc=None): + self.methods[name] = fn, returns, args, doc or getattr(fn,"__doc__","") + + def dispatch(self, xml, action=None): + "Receive and proccess SOAP call" + # default values: + prefix = self.prefix + ret = fault = None + soap_ns, soap_uri = self.soap_ns, self.soap_uri + soap_fault_code = 'VersionMismatch' + + try: + request = SimpleXMLElement(xml, namespace=self.namespace) + + # detect soap prefix and uri (xmlns attributes of Envelope) + for k, v in request[:]: + if v in ("http://schemas.xmlsoap.org/soap/envelope/", + 
"http://www.w3.org/2003/05/soap-env",): + soap_ns = request.attributes()[k].localName + soap_uri = request.attributes()[k].value + + soap_fault_code = 'Client' + + # parse request message and get local method + method = request('Body', ns=soap_uri).children()(0) + if action: + # method name = action + name = action[len(self.action)+1:-1] + prefix = self.prefix + if not action or not name: + # method name = input message name + name = method.get_local_name() + prefix = method.get_prefix() + + if DEBUG: print "dispatch method", name + function, returns_types, args_types, doc = self.methods[name] + + # de-serialize parameters (if type definitions given) + if args_types: + args = method.children().unmarshall(args_types) + elif args_types is None: + args = {'request':method} # send raw request + else: + args = {} # no parameters + + soap_fault_code = 'Server' + # execute function + ret = function(**args) + if DEBUG: print ret + + except Exception, e: + import sys + etype, evalue, etb = sys.exc_info() + if DEBUG: + import traceback + detail = ''.join(traceback.format_exception(etype, evalue, etb)) + detail += '\n\nXML REQUEST\n\n' + xml + else: + detail = None + fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__), + 'faultstring': unicode(evalue), + 'detail': detail} + + # build response message + if not prefix: + xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>""" + else: + xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s" + xmlns:%(prefix)s="%(namespace)s"/>""" + + xml = xml % {'namespace': self.namespace, 'prefix': prefix, + 'soap_ns': soap_ns, 'soap_uri': soap_uri} + + response = SimpleXMLElement(xml, namespace=self.namespace, + prefix=prefix) + + response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance" + response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema" + + body = response.add_child("%s:Body" % soap_ns, ns=False) + if fault: + # generate a Soap Fault (with the python exception) + 
body.marshall("%s:Fault" % soap_ns, fault, ns=False) + else: + # return normal value + res = body.add_child("%sResponse" % name, ns=prefix) + if not prefix: + res['xmlns'] = self.namespace # add target namespace + + # serialize returned values (response) if type definition available + if returns_types: + if not isinstance(ret, dict): + res.marshall(returns_types.keys()[0], ret, ) + else: + for k,v in ret.items(): + res.marshall(k, v) + elif returns_types is None: + # merge xmlelement returned + res.import_node(ret) + + return response.as_xml() + + # Introspection functions: + + def list_methods(self): + "Return a list of aregistered operations" + return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()] + + def help(self, method=None): + "Generate sample request and response messages" + (function, returns, args, doc) = self.methods[method] + xml = """ +<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> +<soap:Body><%(method)s xmlns="%(namespace)s"/></soap:Body> +</soap:Envelope>""" % {'method':method, 'namespace':self.namespace} + request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix) + if args: + items = args.items() + elif args is None: + items = [('value', None)] + else: + items = [] + for k,v in items: + request(method).marshall(k, v, add_comments=True, ns=False) + + xml = """ +<soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> +<soap:Body><%(method)sResponse xmlns="%(namespace)s"/></soap:Body> +</soap:Envelope>""" % {'method':method, 'namespace':self.namespace} + response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix) + if returns: + items = returns.items() + elif args is None: + items = [('value', None)] + else: + items = [] + for k,v in items: + response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False) + + return request.as_xml(pretty=True), response.as_xml(pretty=True), doc + + + def wsdl(self): + "Generate Web Service 
Description v1.1" + xml = """<?xml version="1.0"?> +<wsdl:definitions name="%(name)s" + targetNamespace="%(namespace)s" + xmlns:tns="%(namespace)s" + xmlns:soap="http://schemas.xmlsoap.org/wsdl/soap/" + xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/" + xmlns:xsd="http://www.w3.org/2001/XMLSchema"> + <wsdl:documentation xmlns:wsdl="http://schemas.xmlsoap.org/wsdl/">%(documentation)s</wsdl:documentation> + + <wsdl:types> + <xsd:schema targetNamespace="%(namespace)s" + elementFormDefault="qualified" + xmlns:xsd="http://www.w3.org/2001/XMLSchema"> + </xsd:schema> + </wsdl:types> + +</wsdl:definitions> +""" % {'namespace': self.namespace, 'name': self.name, 'documentation': self.documentation} + wsdl = SimpleXMLElement(xml) + + for method, (function, returns, args, doc) in self.methods.items(): + # create elements: + + def parse_element(name, values, array=False, complex=False): + if not complex: + element = wsdl('wsdl:types')('xsd:schema').add_child('xsd:element') + complex = element.add_child("xsd:complexType") + else: + complex = wsdl('wsdl:types')('xsd:schema').add_child('xsd:complexType') + element = complex + element['name'] = name + if values: + items = values + elif values is None: + items = [('value', None)] + else: + items = [] + if not array and items: + all = complex.add_child("xsd:all") + elif items: + all = complex.add_child("xsd:sequence") + for k,v in items: + e = all.add_child("xsd:element") + e['name'] = k + if array: + e[:]={'minOccurs': "0", 'maxOccurs': "unbounded"} + if v in TYPE_MAP.keys(): + t='xsd:%s' % TYPE_MAP[v] + elif v is None: + t='xsd:anyType' + elif isinstance(v, list): + n="ArrayOf%s%s" % (name, k) + l = [] + for d in v: + l.extend(d.items()) + parse_element(n, l, array=True, complex=True) + t = "tns:%s" % n + elif isinstance(v, dict): + n="%s%s" % (name, k) + parse_element(n, v.items(), complex=True) + t = "tns:%s" % n + e.add_attribute('type', t) + + parse_element("%s" % method, args and args.items()) + parse_element("%sResponse" % 
method, returns and returns.items()) + + # create messages: + for m,e in ('Input',''), ('Output','Response'): + message = wsdl.add_child('wsdl:message') + message['name'] = "%s%s" % (method, m) + part = message.add_child("wsdl:part") + part[:] = {'name': 'parameters', + 'element': 'tns:%s%s' % (method,e)} + + # create ports + portType = wsdl.add_child('wsdl:portType') + portType['name'] = "%sPortType" % self.name + for method, (function, returns, args, doc) in self.methods.items(): + op = portType.add_child('wsdl:operation') + op['name'] = method + if doc: + op.add_child("wsdl:documentation", doc) + input = op.add_child("wsdl:input") + input['message'] = "tns:%sInput" % method + output = op.add_child("wsdl:output") + output['message'] = "tns:%sOutput" % method + + # create bindings + binding = wsdl.add_child('wsdl:binding') + binding['name'] = "%sBinding" % self.name + binding['type'] = "tns:%sPortType" % self.name + soapbinding = binding.add_child('soap:binding') + soapbinding['style'] = "document" + soapbinding['transport'] = "http://schemas.xmlsoap.org/soap/http" + for method in self.methods.keys(): + op = binding.add_child('wsdl:operation') + op['name'] = method + soapop = op.add_child('soap:operation') + soapop['soapAction'] = self.action + method + soapop['style'] = 'document' + input = op.add_child("wsdl:input") + ##input.add_attribute('name', "%sInput" % method) + soapbody = input.add_child("soap:body") + soapbody["use"] = "literal" + output = op.add_child("wsdl:output") + ##output.add_attribute('name', "%sOutput" % method) + soapbody = output.add_child("soap:body") + soapbody["use"] = "literal" + + service = wsdl.add_child('wsdl:service') + service["name"] = "%sService" % self.name + service.add_child('wsdl:documentation', text=self.documentation) + port=service.add_child('wsdl:port') + port["name"] = "%s" % self.name + port["binding"] = "tns:%sBinding" % self.name + soapaddress = port.add_child('soap:address') + soapaddress["location"] = self.location + 
return wsdl.as_xml(pretty=True) + + +from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer +class SOAPHandler(BaseHTTPRequestHandler): + def do_GET(self): + "User viewable help information and wsdl" + args = self.path[1:].split("?") + print "serving", args + if self.path != "/" and args[0] not in self.server.dispatcher.methods.keys(): + self.send_error(404, "Method not found: %s" % args[0]) + else: + if self.path == "/": + # return wsdl if no method supplied + response = self.server.dispatcher.wsdl() + else: + # return supplied method help (?request or ?response messages) + req, res, doc = self.server.dispatcher.help(args[0]) + if len(args)==1 or args[1]=="request": + response = req + else: + response = res + self.send_response(200) + self.send_header("Content-type", "text/xml") + self.end_headers() + self.wfile.write(response) + + def do_POST(self): + "SOAP POST gateway" + self.send_response(200) + self.send_header("Content-type", "text/xml") + self.end_headers() + request = self.rfile.read(int(self.headers.getheader('content-length'))) + response = self.server.dispatcher.dispatch(request) + self.wfile.write(response) + + +if __name__=="__main__": + import sys + + dispatcher = SoapDispatcher( + name = "PySimpleSoapSample", + location = "http://localhost:8008/", + action = 'http://localhost:8008/', # SOAPAction + namespace = "http://example.com/pysimplesoapsamle/", prefix="ns0", + documentation = 'Example soap service using PySimpleSoap', + trace = True, + ns = True) + + def adder(p,c, dt=None): + "Add several values" + print c[0]['d'],c[1]['d'], + import datetime + dt = dt + datetime.timedelta(365) + return {'ab': p['a']+p['b'], 'dd': c[0]['d']+c[1]['d'], 'dt': dt} + + def dummy(in0): + "Just return input" + return in0 + + def echo(request): + "Copy request->response (generic, any type)" + return request.value + + dispatcher.register_function('Adder', adder, + returns={'AddResult': {'ab': int, 'dd': str } }, + args={'p': {'a': int,'b': int}, 'dt': Date, 
'c': [{'d': Decimal}]}) + + dispatcher.register_function('Dummy', dummy, + returns={'out0': str}, + args={'in0': str}) + + dispatcher.register_function('Echo', echo) + + if '--local' in sys.argv: + + wsdl=dispatcher.wsdl() + print wsdl + testfile = open("C:/test.wsdl","w") + try: + testfile.write(wsdl) + finally: + testfile.close() + # dummy local test (clasic soap dialect) + xml = """<?xml version="1.0" encoding="UTF-8"?> + <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/"> + <soap:Body> + <Adder xmlns="http://example.com/sample.wsdl"> + <p><a>1</a><b>2</b></p><c><d>5000000.1</d><d>.2</d></c><dt>20100724</dt> + </Adder> + </soap:Body> + </soap:Envelope>""" + + print dispatcher.dispatch(xml) + + # dummy local test (modern soap dialect, SoapUI) + xml = """ +<soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/" xmlns:pys="http://example.com/pysimplesoapsamle/"> + <soapenv:Header/> + <soapenv:Body> + <pys:Adder> + <pys:p><pys:a>9</pys:a><pys:b>3</pys:b></pys:p> + <pys:dt>19690720<!--1969-07-20T21:28:00--></pys:dt> + <pys:c><pys:d>10.001</pys:d><pys:d>5.02</pys:d></pys:c> + </pys:Adder> + </soapenv:Body> +</soapenv:Envelope> + """ + print dispatcher.dispatch(xml) + + # echo local test (generic soap service) + xml = """<?xml version="1.0" encoding="UTF-8"?> + <soap:Envelope xmlns:soap="http://schemas.xmlsoap.org/soap/envelope/" + xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" + xmlns:xsd="http://www.w3.org/2001/XMLSchema"> + <soap:Body> + <Echo xmlns="http://example.com/sample.wsdl"> + <value xsi:type="xsd:string">Hello world</value> + </Echo> + </soap:Body> + </soap:Envelope>""" + + print dispatcher.dispatch(xml) + + + for method, doc in dispatcher.list_methods(): + request, response, doc = dispatcher.help(method) + ##print request + ##print response + + if '--serve' in sys.argv: + print "Starting server..." 
+ httpd = HTTPServer(("", 8008), SOAPHandler) + httpd.dispatcher = dispatcher + httpd.serve_forever() + + if '--consume' in sys.argv: + from client import SoapClient + client = SoapClient( + location = "http://localhost:8008/", + action = 'http://localhost:8008/', # SOAPAction + namespace = "http://example.com/sample.wsdl", + soap_ns='soap', + trace = True, + ns = False) + response = client.Adder(p={'a':1,'b':2},dt='20100724',c=[{'d':'1.20'},{'d':'2.01'}]) + result = response.AddResult + print int(result.ab) + print str(result.dd) + + + ADDED gluon/contrib/pysimplesoap/simplexml.py Index: gluon/contrib/pysimplesoap/simplexml.py ================================================================== --- /dev/null +++ gluon/contrib/pysimplesoap/simplexml.py @@ -0,0 +1,416 @@ +#!/usr/bin/python +# -*- coding: latin-1 -*- +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation; either version 3, or (at your option) any later +# version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# for more details. 
+ +"Simple XML manipulation" + +__author__ = "Mariano Reingart (reingart@gmail.com)" +__copyright__ = "Copyright (C) 2008/009 Mariano Reingart" +__license__ = "LGPL 3.0" +__version__ = "1.02c" + +import xml.dom.minidom +from decimal import Decimal +import datetime +import time + +DEBUG = False + +# Functions to serialize/unserialize special immutable types: +datetime_u = lambda s: datetime.datetime.strptime(s, "%Y-%m-%dT%H:%M:%S") +datetime_m = lambda dt: dt.isoformat('T') +date_u = lambda s: datetime.datetime.strptime(s[0:10], "%Y-%m-%d").date() +date_m = lambda d: d.strftime("%Y-%m-%d") +time_u = lambda s: datetime.datetime.strptime(s, "%H:%M:%S").time() +time_m = lambda d: d.strftime("%H%M%S") +bool_u = lambda s: {'0':False, 'false': False, '1': True, 'true': True}[s] + +# aliases: +class Alias(): + def __init__(self, py_type, xml_type): + self.py_type, self.xml_type = py_type, xml_type + def __call__(self, value): + return self.py_type(value) + def __repr__(self): + return "<alias '%s' for '%s'>" % (self.xml_type, self.py_type) + +byte = Alias(str,'byte') +short = Alias(int,'short') +double = Alias(float,'double') +integer = Alias(long,'integer') +DateTime = datetime.datetime +Date = datetime.date +Time = datetime.time + +# Define convertion function (python type): xml schema type +TYPE_MAP = {str:'string',unicode:'string', + bool:'boolean', short:'short', byte:'byte', + int:'int', long:'long', integer:'integer', + float:'float', double:'double', + Decimal:'decimal', + datetime.datetime:'dateTime', datetime.date:'date', + } +TYPE_MARSHAL_FN = {datetime.datetime:datetime_m, datetime.date:date_m,} +TYPE_UNMARSHAL_FN = {datetime.datetime:datetime_u, datetime.date:date_u, + bool:bool_u, + } + + +class OrderedDict(dict): + "Minimal ordered dictionary for xsd:sequences" + def __init__(self): + self.__keys = [] + self.array = False + def __setitem__(self, key, value): + if key not in self.__keys: + self.__keys.append(key) + dict.__setitem__(self, key, value) + def 
insert(self, key, value, index=0): + if key not in self.__keys: + self.__keys.insert(index, key) + dict.__setitem__(self, key, value) + def __delitem__(self, key): + if key in self.__keys: + self.__keys.remove(key) + dict.__delitem__(self, key) + def __iter__(self): + return iter(self.__keys) + def keys(self): + return self.__keys + def items(self): + return [(key, self[key]) for key in self.__keys] + def update(self, other): + for k,v in other.items(): + self[k] = v + if isinstance(other, OrderedDict): + self.array = other.array + def __str__(self): + return "*%s*" % dict.__str__(self) + def __repr__(self): + s= "*{%s}*" % ", ".join(['%s: %s' % (repr(k),repr(v)) for k,v in self.items()]) + if self.array and False: + s = "[%s]" % s + return s + + +class SimpleXMLElement(object): + "Simple XML manipulation (simil PHP)" + + def __init__(self, text = None, elements = None, document = None, namespace = None, prefix=None): + self.__ns = namespace + self.__prefix = prefix + if text: + try: + self.__document = xml.dom.minidom.parseString(text) + except: + if DEBUG: print text + raise + self.__elements = [self.__document.documentElement] + else: + self.__elements = elements + self.__document = document + + def add_child(self,name,text=None,ns=True): + "Adding a child tag to a node" + if not ns or not self.__ns: + if DEBUG: print "adding %s" % (name) + element = self.__document.createElement(name) + else: + if DEBUG: print "adding %s ns %s %s" % (name, self.__ns,ns) + if self.__prefix: + element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, name)) + else: + element = self.__document.createElementNS(self.__ns, name) + if text: + if isinstance(text, unicode): + element.appendChild(self.__document.createTextNode(text)) + else: + element.appendChild(self.__document.createTextNode(str(text))) + self._element.appendChild(element) + return SimpleXMLElement( + elements=[element], + document=self.__document, + namespace=self.__ns, + prefix=self.__prefix) + + 
def __setattr__(self, tag, text): + "Add text child tag node (short form)" + if tag.startswith("_"): + object.__setattr__(self, tag, text) + else: + if DEBUG: print "__setattr__(%s,%s)" % (tag, text) + self.add_child(tag,text) + + def add_comment(self, data): + "Add an xml comment to this child" + comment = self.__document.createComment(data) + self._element.appendChild(comment) + + def as_xml(self,filename=None,pretty=False): + "Return the XML representation of the document" + if not pretty: + return self.__document.toxml('UTF-8') + else: + return self.__document.toprettyxml(encoding='UTF-8') + + def __repr__(self): + "Return the XML representation of this tag" + return self._element.toxml('UTF-8') + + def get_name(self): + "Return the tag name of this node" + return self._element.tagName + + def get_local_name(self): + "Return the tag loca name (prefix:name) of this node" + return self._element.localName + + def get_prefix(self): + "Return the namespace prefix of this node" + return self._element.prefix + + def get_namespace_uri(self, ns): + "Return the namespace uri for a prefix" + v = self.__document.documentElement.attributes['xmlns:%s' % ns] + return v.value + + def attributes(self): + "Return a dict of attributes for this tag" + #TODO: use slice syntax [:]? 
+ return self._element.attributes + + def __getitem__(self, item): + "Return xml tag attribute value or a slice of attributes (iter)" + if DEBUG: print "__getitem__(%s)" % item + if isinstance(item,basestring): + if self._element.hasAttribute(item): + return self._element.attributes[item].value + elif isinstance(item, slice): + # return a list with name:values + return self._element.attributes.items()[item] + else: + # return element by index (position) + element = self.__elements[item] + return SimpleXMLElement( + elements=[element], + document=self.__document, + namespace=self.__ns, + prefix=self.__prefix) + + def add_attribute(self, name, value): + "Set an attribute value from a string" + self._element.setAttribute(name, value) + + def __setitem__(self, item, value): + "Set an attribute value" + if isinstance(item,basestring): + self.add_attribute(item, value) + elif isinstance(item, slice): + # set multiple attributes at once + for k, v in value.items(): + self.add_attribute(k, v) + + def __call__(self, tag=None, ns=None, children=False, error=True): + "Search (even in child nodes) and return a child tag by name" + try: + if tag is None: + # if no name given, iterate over siblings (same level) + return self.__iter__() + if children: + # future: filter children? by ns? 
+ return self.children() + elements = None + if isinstance(tag, int): + # return tag by index + elements=[self.__elements[tag]] + if ns and not elements: + for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ): + if DEBUG: print "searching %s by ns=%s" % (tag,ns_uri) + elements = self._element.getElementsByTagNameNS(ns_uri, tag) + if elements: + break + if self.__ns and not elements: + if DEBUG: print "searching %s by ns=%s" % (tag, self.__ns) + elements = self._element.getElementsByTagNameNS(self.__ns, tag) + if not elements: + if DEBUG: print "searching %s " % (tag) + elements = self._element.getElementsByTagName(tag) + if not elements: + if DEBUG: print self._element.toxml() + if error: + raise AttributeError("No elements found") + else: + return + return SimpleXMLElement( + elements=elements, + document=self.__document, + namespace=self.__ns, + prefix=self.__prefix) + except AttributeError, e: + raise AttributeError("Tag not found: %s (%s)" % (tag, str(e))) + + def __getattr__(self, tag): + "Shortcut for __call__" + return self.__call__(tag) + + def __iter__(self): + "Iterate over xml tags at this level" + try: + for __element in self.__elements: + yield SimpleXMLElement( + elements=[__element], + document=self.__document, + namespace=self.__ns, + prefix=self.__prefix) + except: + raise + + def __dir__(self): + "List xml children tags names" + return [node.tagName for node + in self._element.childNodes + if node.nodeType != node.TEXT_NODE] + + def children(self): + "Return xml children tags element" + elements=[__element for __element in self._element.childNodes + if __element.nodeType == __element.ELEMENT_NODE] + if not elements: + return None + #raise IndexError("Tag %s has no children" % self._element.tagName) + return SimpleXMLElement( + elements=elements, + document=self.__document, + namespace=self.__ns, + prefix=self.__prefix) + + def __len__(self): + "Return elements count" + return len(self.__elements) + + def __contains__( self, item): + 
"Search for a tag name in this element or child nodes" + return self._element.getElementsByTagName(item) + + def __unicode__(self): + "Returns the unicode text nodes of the current element" + if self._element.childNodes: + rc = u"" + for node in self._element.childNodes: + if node.nodeType == node.TEXT_NODE: + rc = rc + node.data + return rc + return '' + + def __str__(self): + "Returns the str text nodes of the current element" + return unicode(self).encode("utf8","ignore") + + def __int__(self): + "Returns the integer value of the current element" + return int(self.__str__()) + + def __float__(self): + "Returns the float value of the current element" + try: + return float(self.__str__()) + except: + raise IndexError(self._element.toxml()) + + _element = property(lambda self: self.__elements[0]) + + def unmarshall(self, types): + "Convert to python values the current serialized xml element" + # types is a dict of {tag name: convertion function} + # example: types={'p': {'a': int,'b': int}, 'c': [{'d':str}]} + # expected xml: <p><a>1</a><b>2</b></p><c><d>hola</d><d>chau</d> + # returnde value: {'p': {'a':1,'b':2}, `'c':[{'d':'hola'},{'d':'chau'}]} + d = {} + for node in self(): + name = str(node.get_local_name()) + try: + fn = types[name] + except (KeyError, ), e: + raise TypeError("Tag: %s invalid" % (name,)) + if isinstance(fn,list): + value = [] + children = node.children() + for child in children and children() or []: + value.append(child.unmarshall(fn[0])) + elif isinstance(fn,dict): + children = node.children() + value = children and children.unmarshall(fn) + else: + if fn is None: # xsd:anyType not unmarshalled + value = node + elif str(node) or fn == str: + try: + # get special desserialization function (if any) + fn = TYPE_UNMARSHAL_FN.get(fn,fn) + value = fn(unicode(node)) + except (ValueError, TypeError), e: + raise ValueError("Tag: %s: %s" % (name, unicode(e))) + else: + value = None + d[name] = value + return d + + def marshall(self, name, value, 
add_child=True, add_comments=False, ns=False): + "Analize python value and add the serialized XML element using tag name" + if isinstance(value, dict): # serialize dict (<key>value</key>) + child = add_child and self.add_child(name,ns=ns) or self + for k,v in value.items(): + child.marshall(k, v, add_comments=add_comments, ns=ns) + elif isinstance(value, tuple): # serialize tuple (<key>value</key>) + child = add_child and self.add_child(name,ns=ns) or self + for k,v in value: + getattr(self,name).marshall(k, v, add_comments=add_comments, ns=ns) + elif isinstance(value, list): # serialize lists + child=self.add_child(name,ns=ns) + if add_comments: + child.add_comment("Repetitive array of:") + for t in value: + child.marshall(name,t, False, add_comments=add_comments, ns=ns) + elif isinstance(value, basestring): # do not convert strings or unicodes + self.add_child(name,value,ns=ns) + elif value is None: # sent a empty tag? + self.add_child(name,ns=ns) + elif value in TYPE_MAP.keys(): + # add commented placeholders for simple tipes (for examples/help only) + child = self.add_child(name,ns=ns) + child.add_comment(TYPE_MAP[value]) + else: # the rest of object types are converted to string + # get special serialization function (if any) + fn = TYPE_MARSHAL_FN.get(type(value),str) + self.add_child(name,fn(value),ns=ns) + + def import_node(self, other): + x = self.__document.importNode(other._element, True) # deep copy + self._element.appendChild(x) + + +if __name__ == "__main__": + span = SimpleXMLElement('<span><a href="python.org.ar">pyar</a><prueba><i>1</i><float>1.5</float></prueba></span>') + assert str(span.a)==str(span('a'))==str(span.a(0))=="pyar" + assert span.a['href']=="python.org.ar" + assert int(span.prueba.i)==1 and float(span.prueba.float)==1.5 + span1 = SimpleXMLElement('<span><a href="google.com">google</a><a>yahoo</a><a>hotmail</a></span>') + assert [str(a) for a in span1.a()] == ['google', 'yahoo', 'hotmail'] + span1.add_child('a','altavista') + span1.b 
= "ex msn" + d = {'href':'http://www.bing.com/', 'alt': 'Bing'} + span1.b[:] = d + assert sorted([(k,v) for k,v in span1.b[:]]) == sorted(d.items()) + print span1.as_xml() + assert 'b' in span1 + span.import_node(span1) + print span.as_xml() + ADDED gluon/contrib/rss2.py Index: gluon/contrib/rss2.py ================================================================== --- /dev/null +++ gluon/contrib/rss2.py @@ -0,0 +1,590 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""PyRSS2Gen - A Python library for generating RSS 2.0 feeds.""" + +__name__ = 'PyRSS2Gen' +__version__ = (1, 0, 0) +__author__ = 'Andrew Dalke <dalke@dalkescientific.com>' + +_generator_name = __name__ + '-' + '.'.join(map(str, __version__)) + +import datetime +import cStringIO + +# Could make this the base class; will need to add 'publish' + + +class WriteXmlMixin: + + def write_xml(self, outfile, encoding='iso-8859-1'): + from xml.sax import saxutils + handler = saxutils.XMLGenerator(outfile, encoding) + handler.startDocument() + self.publish(handler) + handler.endDocument() + + def to_xml(self, encoding='iso-8859-1'): + try: + import cStringIO as StringIO + except ImportError: + import StringIO + f = StringIO.StringIO() + self.write_xml(f, encoding) + return f.getvalue() + + +def _element( + handler, + name, + obj, + d={}, + ): + if isinstance(obj, basestring) or obj is None: + + # special-case handling to make the API easier + # to use for the common case. + + handler.startElement(name, d) + if obj is not None: + handler.characters(obj) + handler.endElement(name) + else: + + # It better know how to emit the correct XML. + + obj.publish(handler) + + +def _opt_element(handler, name, obj): + if obj is None: + return + _element(handler, name, obj) + + +def _format_date(dt): + """convert a datetime into an RFC 822 formatted date + + Input date must be in GMT. 
+ """ + + # Looks like: + # Sat, 07 Sep 2002 00:00:01 GMT + # Can't use strftime because that's locale dependent + # + # Isn't there a standard way to do this for Python? The + # rfc822 and email.Utils modules assume a timestamp. The + # following is based on the rfc822 module. + + return '%s, %02d %s %04d %02d:%02d:%02d GMT' % ( + [ + 'Mon', + 'Tue', + 'Wed', + 'Thu', + 'Fri', + 'Sat', + 'Sun', + ][dt.weekday()], + dt.day, + [ + 'Jan', + 'Feb', + 'Mar', + 'Apr', + 'May', + 'Jun', + 'Jul', + 'Aug', + 'Sep', + 'Oct', + 'Nov', + 'Dec', + ][dt.month - 1], + dt.year, + dt.hour, + dt.minute, + dt.second, + ) + + +## +# A couple simple wrapper objects for the fields which +# take a simple value other than a string. + + +class IntElement: + + """implements the 'publish' API for integers + + Takes the tag name and the integer value to publish. + + (Could be used for anything which uses str() to be published + to text for XML.) + """ + + element_attrs = {} + + def __init__(self, name, val): + self.name = name + self.val = val + + def publish(self, handler): + handler.startElement(self.name, self.element_attrs) + handler.characters(str(self.val)) + handler.endElement(self.name) + + +class DateElement: + + """implements the 'publish' API for a datetime.datetime + + Takes the tag name and the datetime to publish. + + Converts the datetime to RFC 2822 timestamp (4-digit year). 
+ """ + + def __init__(self, name, dt): + self.name = name + self.dt = dt + + def publish(self, handler): + _element(handler, self.name, _format_date(self.dt)) + + +# ### + + +class Category: + + """Publish a category element""" + + def __init__(self, category, domain=None): + self.category = category + self.domain = domain + + def publish(self, handler): + d = {} + if self.domain is not None: + d['domain'] = self.domain + _element(handler, 'category', self.category, d) + + +class Cloud: + + """Publish a cloud""" + + def __init__( + self, + domain, + port, + path, + registerProcedure, + protocol, + ): + self.domain = domain + self.port = port + self.path = path + self.registerProcedure = registerProcedure + self.protocol = protocol + + def publish(self, handler): + _element(handler, 'cloud', None, { + 'domain': self.domain, + 'port': str(self.port), + 'path': self.path, + 'registerProcedure': self.registerProcedure, + 'protocol': self.protocol, + }) + + +class Image: + + """Publish a channel Image""" + + element_attrs = {} + + def __init__( + self, + url, + title, + link, + width=None, + height=None, + description=None, + ): + self.url = url + self.title = title + self.link = link + self.width = width + self.height = height + self.description = description + + def publish(self, handler): + handler.startElement('image', self.element_attrs) + + _element(handler, 'url', self.url) + _element(handler, 'title', self.title) + _element(handler, 'link', self.link) + + width = self.width + if isinstance(width, int): + width = IntElement('width', width) + _opt_element(handler, 'width', width) + + height = self.height + if isinstance(height, int): + height = IntElement('height', height) + _opt_element(handler, 'height', height) + + _opt_element(handler, 'description', self.description) + + handler.endElement('image') + + +class Guid: + + """Publish a guid + + Defaults to being a permalink, which is the assumption if it's + omitted. Hence strings are always permalinks. 
+ """ + + def __init__(self, guid, isPermaLink=1): + self.guid = guid + self.isPermaLink = isPermaLink + + def publish(self, handler): + d = {} + if self.isPermaLink: + d['isPermaLink'] = 'true' + else: + d['isPermaLink'] = 'false' + _element(handler, 'guid', self.guid, d) + + +class TextInput: + + """Publish a textInput + + Apparently this is rarely used. + """ + + element_attrs = {} + + def __init__( + self, + title, + description, + name, + link, + ): + self.title = title + self.description = description + self.name = name + self.link = link + + def publish(self, handler): + handler.startElement('textInput', self.element_attrs) + _element(handler, 'title', self.title) + _element(handler, 'description', self.description) + _element(handler, 'name', self.name) + _element(handler, 'link', self.link) + handler.endElement('textInput') + + +class Enclosure: + + """Publish an enclosure""" + + def __init__( + self, + url, + length, + type, + ): + self.url = url + self.length = length + self.type = type + + def publish(self, handler): + _element(handler, 'enclosure', None, + {'url': self.url, 'length': str(self.length), 'type': self.type}) + + +class Source: + + """Publish the item's original source, used by aggregators""" + + def __init__(self, name, url): + self.name = name + self.url = url + + def publish(self, handler): + _element(handler, 'source', self.name, {'url': self.url}) + + +class SkipHours: + + """Publish the skipHours + + This takes a list of hours, as integers. + """ + + element_attrs = {} + + def __init__(self, hours): + self.hours = hours + + def publish(self, handler): + if self.hours: + handler.startElement('skipHours', self.element_attrs) + for hour in self.hours: + _element(handler, 'hour', str(hour)) + handler.endElement('skipHours') + + +class SkipDays: + + """Publish the skipDays + + This takes a list of days as strings. 
+ """ + + element_attrs = {} + + def __init__(self, days): + self.days = days + + def publish(self, handler): + if self.days: + handler.startElement('skipDays', self.element_attrs) + for day in self.days: + _element(handler, 'day', day) + handler.endElement('skipDays') + + +class RSS2(WriteXmlMixin): + + """The main RSS class. + + Stores the channel attributes, with the \"category\" elements under + \".categories\" and the RSS items under \".items\". + """ + + rss_attrs = {'version': '2.0'} + element_attrs = {} + + def __init__( + self, + title, + link, + description, + language=None, + copyright=None, + managingEditor=None, + webMaster=None, + pubDate=None, + lastBuildDate=None, + categories=None, + generator=_generator_name, + docs='http://blogs.law.harvard.edu/tech/rss', + cloud=None, + ttl=None, + image=None, + rating=None, + textInput=None, + skipHours=None, + skipDays=None, + items=None, + ): + + self.title = title + self.link = link + self.description = description + self.language = language + self.copyright = copyright + self.managingEditor = managingEditor + + self.webMaster = webMaster + self.pubDate = pubDate + self.lastBuildDate = lastBuildDate + + if categories is None: + categories = [] + self.categories = categories + self.generator = generator + self.docs = docs + self.cloud = cloud + self.ttl = ttl + self.image = image + self.rating = rating + self.textInput = textInput + self.skipHours = skipHours + self.skipDays = skipDays + + if items is None: + items = [] + self.items = items + + def publish(self, handler): + handler.startElement('rss', self.rss_attrs) + handler.startElement('channel', self.element_attrs) + _element(handler, 'title', self.title) + _element(handler, 'link', self.link) + _element(handler, 'description', self.description) + + self.publish_extensions(handler) + + _opt_element(handler, 'language', self.language) + _opt_element(handler, 'copyright', self.copyright) + _opt_element(handler, 'managingEditor', self.managingEditor) + 
_opt_element(handler, 'webMaster', self.webMaster) + + pubDate = self.pubDate + if isinstance(pubDate, datetime.datetime): + pubDate = DateElement('pubDate', pubDate) + _opt_element(handler, 'pubDate', pubDate) + + lastBuildDate = self.lastBuildDate + if isinstance(lastBuildDate, datetime.datetime): + lastBuildDate = DateElement('lastBuildDate', lastBuildDate) + _opt_element(handler, 'lastBuildDate', lastBuildDate) + + for category in self.categories: + if isinstance(category, basestring): + category = Category(category) + category.publish(handler) + + _opt_element(handler, 'generator', self.generator) + _opt_element(handler, 'docs', self.docs) + + if self.cloud is not None: + self.cloud.publish(handler) + + ttl = self.ttl + if isinstance(self.ttl, int): + ttl = IntElement('ttl', ttl) + _opt_element(handler, 'tt', ttl) + + if self.image is not None: + self.image.publish(handler) + + _opt_element(handler, 'rating', self.rating) + if self.textInput is not None: + self.textInput.publish(handler) + if self.skipHours is not None: + self.skipHours.publish(handler) + if self.skipDays is not None: + self.skipDays.publish(handler) + + for item in self.items: + item.publish(handler) + + handler.endElement('channel') + handler.endElement('rss') + + def publish_extensions(self, handler): + + # Derived classes can hook into this to insert + # output after the three required fields. 
+ + pass + + +class RSSItem(WriteXmlMixin): + + """Publish an RSS Item""" + + element_attrs = {} + + def __init__( + self, + title=None, + link=None, + description=None, + author=None, + categories=None, + comments=None, + enclosure=None, + guid=None, + pubDate=None, + source=None, + ): + + if title is None and description is None: + raise TypeError( + "RSSItem must define at least one of 'title' or 'description'") + self.title = title + self.link = link + self.description = description + self.author = author + if categories is None: + categories = [] + self.categories = categories + self.comments = comments + self.enclosure = enclosure + self.guid = guid + self.pubDate = pubDate + self.source = source + + # It sure does get tedious typing these names three times... + + def publish(self, handler): + handler.startElement('item', self.element_attrs) + _opt_element(handler, 'title', self.title) + _opt_element(handler, 'link', self.link) + self.publish_extensions(handler) + _opt_element(handler, 'description', self.description) + _opt_element(handler, 'author', self.author) + + for category in self.categories: + if isinstance(category, basestring): + category = Category(category) + category.publish(handler) + + _opt_element(handler, 'comments', self.comments) + if self.enclosure is not None: + self.enclosure.publish(handler) + _opt_element(handler, 'guid', self.guid) + + pubDate = self.pubDate + if isinstance(pubDate, datetime.datetime): + pubDate = DateElement('pubDate', pubDate) + _opt_element(handler, 'pubDate', pubDate) + + if self.source is not None: + self.source.publish(handler) + + handler.endElement('item') + + def publish_extensions(self, handler): + + # Derived classes can hook into this to insert + # output after the title and link elements + + pass + + +def dumps(rss, encoding='utf-8'): + s = cStringIO.StringIO() + rss.write_xml(s, encoding) + return s.getvalue() + + +def test(): + rss = RSS2(title='web2py feed', link='http://www.web2py.com', + 
description='About web2py', + lastBuildDate=datetime.datetime.now(), + items=[RSSItem(title='web2py and PyRSS2Gen-0.0', + link='http://www.web2py.com/examples/simple_examples/getrss', + description='web2py can now make rss feeds!', + guid=Guid('http://www.web2py.com/'), + pubDate=datetime.datetime(2007, 11, 14, 10, 30))]) + return dumps(rss) + + +if __name__ == '__main__': + print test() + + ADDED gluon/contrib/shell.py Index: gluon/contrib/shell.py ================================================================== --- /dev/null +++ gluon/contrib/shell.py @@ -0,0 +1,268 @@ +#!/usr/bin/python +# +# Copyright 2007 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Modified by Massimo Di Pierro so it works with and without GAE with web2py +# the modified version of this file is still released under the original Apache license +# and it is not released under the web2py license. +# +# This should be compatible with the Apache license since it states: +# "For the purposes of this License, Derivative Works shall not include works +# that remain separable from, or merely link (or bind by name) to the interfaces of, +# the Work and Derivative Works thereof." +# +# In fact this file is Apache-licensed and it is separable from the rest of web2py. + + +""" +An interactive, stateful AJAX shell that runs Python code on the server. 
+""" + +import logging +import new +import os +import cPickle +import sys +import traceback +import types +import wsgiref.handlers +import StringIO +import threading +locker = threading.RLock() + +# Set to True if stack traces should be shown in the browser, etc. +_DEBUG = True + +# The entity kind for shell historys. Feel free to rename to suit your app. +_HISTORY_KIND = '_Shell_History' + +# Types that can't be pickled. +UNPICKLABLE_TYPES = ( + types.ModuleType, + types.TypeType, + types.ClassType, + types.FunctionType, + ) + +# Unpicklable statements to seed new historys with. +INITIAL_UNPICKLABLES = [ + 'import logging', + 'import os', + 'import sys', + ] + + +class History: + """A shell history. Stores the history's globals. + + Each history globals is stored in one of two places: + + If the global is picklable, it's stored in the parallel globals and + global_names list properties. (They're parallel lists to work around the + unfortunate fact that the datastore can't store dictionaries natively.) + + If the global is not picklable (e.g. modules, classes, and functions), or if + it was created by the same statement that created an unpicklable global, + it's not stored directly. Instead, the statement is stored in the + unpicklables list property. On each request, before executing the current + statement, the unpicklable statements are evaluated to recreate the + unpicklable globals. + + The unpicklable_names property stores all of the names of globals that were + added by unpicklable statements. When we pickle and store the globals after + executing a statement, we skip the ones in unpicklable_names. + + Using Text instead of string is an optimization. We don't query on any of + these properties, so they don't need to be indexed. + """ + global_names = [] + globals = [] + unpicklable_names = [] + unpicklables = [] + + def set_global(self, name, value): + """Adds a global, or updates it if it already exists. 
+ + Also removes the global from the list of unpicklable names. + + Args: + name: the name of the global to remove + value: any picklable value + """ + blob = cPickle.dumps(value) + + if name in self.global_names: + index = self.global_names.index(name) + self.globals[index] = blob + else: + self.global_names.append(name) + self.globals.append(blob) + + self.remove_unpicklable_name(name) + + def remove_global(self, name): + """Removes a global, if it exists. + + Args: + name: string, the name of the global to remove + """ + if name in self.global_names: + index = self.global_names.index(name) + del self.global_names[index] + del self.globals[index] + + def globals_dict(self): + """Returns a dictionary view of the globals. + """ + return dict((name, cPickle.loads(val)) + for name, val in zip(self.global_names, self.globals)) + + def add_unpicklable(self, statement, names): + """Adds a statement and list of names to the unpicklables. + + Also removes the names from the globals. + + Args: + statement: string, the statement that created new unpicklable global(s). + names: list of strings; the names of the globals created by the statement. + """ + self.unpicklables.append(statement) + + for name in names: + self.remove_global(name) + if name not in self.unpicklable_names: + self.unpicklable_names.append(name) + + def remove_unpicklable_name(self, name): + """Removes a name from the list of unpicklable names, if it exists. + + Args: + name: string, the name of the unpicklable global to remove + """ + if name in self.unpicklable_names: + self.unpicklable_names.remove(name) + +def represent(obj): + """Returns a string representing the given object's value, which should allow the + code below to determine whether the object changes over time. + """ + try: + return cPickle.dumps(obj) + except: + return repr(obj) + +def run(history, statement, env={}): + """ + Evaluates a python statement in a given history and returns the result. 
+ """ + history.unpicklables = INITIAL_UNPICKLABLES + + # extract the statement to be run + if not statement: + return '' + + # the python compiler doesn't like network line endings + statement = statement.replace('\r\n', '\n') + + # add a couple newlines at the end of the statement. this makes + # single-line expressions such as 'class Foo: pass' evaluate happily. + statement += '\n\n' + + + # log and compile the statement up front + try: + logging.info('Compiling and evaluating:\n%s' % statement) + compiled = compile(statement, '<string>', 'single') + except: + return str(traceback.format_exc()) + + # create a dedicated module to be used as this statement's __main__ + statement_module = new.module('__main__') + + # use this request's __builtin__, since it changes on each request. + # this is needed for import statements, among other things. + import __builtin__ + statement_module.__builtins__ = __builtin__ + + # load the history from the datastore + history = History() + + # swap in our custom module for __main__. then unpickle the history + # globals, run the statement, and re-pickle the history globals, all + # inside it. + old_main = sys.modules.get('__main__') + output = StringIO.StringIO() + try: + sys.modules['__main__'] = statement_module + statement_module.__name__ = '__main__' + statement_module.__dict__.update(env) + + # re-evaluate the unpicklables + for code in history.unpicklables: + exec code in statement_module.__dict__ + + # re-initialize the globals + for name, val in history.globals_dict().items(): + try: + statement_module.__dict__[name] = val + except: + msg = 'Dropping %s since it could not be unpickled.\n' % name + output.write(msg) + logging.warning(msg + traceback.format_exc()) + history.remove_global(name) + + # run! 
+ old_globals = dict((key,represent(value)) for key,value in statement_module.__dict__.items()) + try: + old_stdout, old_stderr = sys.stdout, sys.stderr + try: + sys.stderr = sys.stdout = output + locker.acquire() + exec compiled in statement_module.__dict__ + finally: + locker.release() + sys.stdout, sys.stderr = old_stdout, old_stderr + except: + output.write(str(traceback.format_exc())) + return output.getvalue() + + # extract the new globals that this statement added + new_globals = {} + for name, val in statement_module.__dict__.items(): + if name not in old_globals or represent(val) != old_globals[name]: + new_globals[name] = val + + if True in [isinstance(val, UNPICKLABLE_TYPES) + for val in new_globals.values()]: + # this statement added an unpicklable global. store the statement and + # the names of all of the globals it added in the unpicklables. + history.add_unpicklable(statement, new_globals.keys()) + logging.debug('Storing this statement as an unpicklable.') + else: + # this statement didn't add any unpicklables. pickle and store the + # new globals back into the datastore. 
+ for name, val in new_globals.items(): + if not name.startswith('__'): + history.set_global(name, val) + + finally: + sys.modules['__main__'] = old_main + return output.getvalue() + +if __name__=='__main__': + history=History() + while True: print run(history, raw_input('>>> ')).rstrip() + + ADDED gluon/contrib/simplejson/LICENSE.txt Index: gluon/contrib/simplejson/LICENSE.txt ================================================================== --- /dev/null +++ gluon/contrib/simplejson/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2006 Bob Ippolito + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. ADDED gluon/contrib/simplejson/__init__.py Index: gluon/contrib/simplejson/__init__.py ================================================================== --- /dev/null +++ gluon/contrib/simplejson/__init__.py @@ -0,0 +1,440 @@ +r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of +JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data +interchange format. 
+ +:mod:`simplejson` exposes an API familiar to users of the standard library +:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained +version of the :mod:`json` library contained in Python 2.6, but maintains +compatibility with Python 2.4 and Python 2.5 and (currently) has +significant performance advantages, even without using the optional C +extension for speedups. + +Encoding basic Python object hierarchies:: + + >>> import simplejson as json + >>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}]) + '["foo", {"bar": ["baz", null, 1.0, 2]}]' + >>> print json.dumps("\"foo\bar") + "\"foo\bar" + >>> print json.dumps(u'\u1234') + "\u1234" + >>> print json.dumps('\\') + "\\" + >>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True) + {"a": 0, "b": 0, "c": 0} + >>> from StringIO import StringIO + >>> io = StringIO() + >>> json.dump(['streaming API'], io) + >>> io.getvalue() + '["streaming API"]' + +Compact encoding:: + + >>> import simplejson as json + >>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':')) + '[1,2,3,{"4":5,"6":7}]' + +Pretty printing:: + + >>> import simplejson as json + >>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ') + >>> print '\n'.join([l.rstrip() for l in s.splitlines()]) + { + "4": 5, + "6": 7 + } + +Decoding JSON:: + + >>> import simplejson as json + >>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}] + >>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj + True + >>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar' + True + >>> from StringIO import StringIO + >>> io = StringIO('["streaming API"]') + >>> json.load(io)[0] == 'streaming API' + True + +Specializing JSON object decoding:: + + >>> import simplejson as json + >>> def as_complex(dct): + ... if '__complex__' in dct: + ... return complex(dct['real'], dct['imag']) + ... return dct + ... + >>> json.loads('{"__complex__": true, "real": 1, "imag": 2}', + ... 
object_hook=as_complex) + (1+2j) + >>> from decimal import Decimal + >>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1') + True + +Specializing JSON object encoding:: + + >>> import simplejson as json + >>> def encode_complex(obj): + ... if isinstance(obj, complex): + ... return [obj.real, obj.imag] + ... raise TypeError(repr(o) + " is not JSON serializable") + ... + >>> json.dumps(2 + 1j, default=encode_complex) + '[2.0, 1.0]' + >>> json.JSONEncoder(default=encode_complex).encode(2 + 1j) + '[2.0, 1.0]' + >>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j)) + '[2.0, 1.0]' + + +Using simplejson.tool from the shell to validate and pretty-print:: + + $ echo '{"json":"obj"}' | python -m simplejson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m simplejson.tool + Expecting property name: line 1 column 2 (char 2) +""" +__version__ = '2.1.3' +__all__ = [ + 'dump', 'dumps', 'load', 'loads', + 'JSONDecoder', 'JSONDecodeError', 'JSONEncoder', + 'OrderedDict', +] + +__author__ = 'Bob Ippolito <bob@redivi.com>' + +from decimal import Decimal + +from decoder import JSONDecoder, JSONDecodeError +from encoder import JSONEncoder +def _import_OrderedDict(): + import collections + try: + return collections.OrderedDict + except AttributeError: + import ordered_dict + return ordered_dict.OrderedDict +OrderedDict = _import_OrderedDict() + +def _import_c_make_encoder(): + try: + raise ImportError # because assumes simplejson in path + from simplejson._speedups import make_encoder + return make_encoder + except ImportError: + return None + +_default_encoder = JSONEncoder( + skipkeys=False, + ensure_ascii=True, + check_circular=True, + allow_nan=True, + indent=None, + separators=None, + encoding='utf-8', + default=None, + use_decimal=False, +) + +def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=False, **kw): + 
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a + ``.write()``-supporting file-like object). + + If ``skipkeys`` is true then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the some chunks written to ``fp`` + may be ``unicode`` instances, subject to normal Python ``str`` to + ``unicode`` coercion rules. Unless ``fp.write()`` explicitly + understands ``unicode`` (as in ``codecs.getwriter()``) this is likely + to cause an error. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). + + If ``allow_nan`` is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) + in strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If *indent* is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. 
+ + If *use_decimal* is true (default: ``False``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. + + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not use_decimal + and not kw): + iterable = _default_encoder.iterencode(obj) + else: + if cls is None: + cls = JSONEncoder + iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, + default=default, use_decimal=use_decimal, **kw).iterencode(obj) + # could accelerate with writelines in some versions of Python, at + # a debuggability cost + for chunk in iterable: + fp.write(chunk) + + +def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True, + allow_nan=True, cls=None, indent=None, separators=None, + encoding='utf-8', default=None, use_decimal=False, **kw): + """Serialize ``obj`` to a JSON formatted ``str``. + + If ``skipkeys`` is false then ``dict`` keys that are not basic types + (``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``) + will be skipped instead of raising a ``TypeError``. + + If ``ensure_ascii`` is false, then the return value will be a + ``unicode`` instance subject to normal Python ``str`` to ``unicode`` + coercion rules instead of being escaped to an ASCII ``str``. + + If ``check_circular`` is false, then the circular reference check + for container types will be skipped and a circular reference will + result in an ``OverflowError`` (or worse). 
+ + If ``allow_nan`` is false, then it will be a ``ValueError`` to + serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in + strict compliance of the JSON specification, instead of using the + JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``). + + If ``indent`` is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If ``separators`` is an ``(item_separator, dict_separator)`` tuple + then it will be used instead of the default ``(', ', ': ')`` separators. + ``(',', ':')`` is the most compact JSON representation. + + ``encoding`` is the character encoding for str instances, default is UTF-8. + + ``default(obj)`` is a function that should return a serializable version + of obj or raise TypeError. The default simply raises TypeError. + + If *use_decimal* is true (default: ``False``) then decimal.Decimal + will be natively serialized to JSON with full precision. + + To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the + ``.default()`` method to serialize additional types), specify it with + the ``cls`` kwarg. 
+ + """ + # cached encoder + if (not skipkeys and ensure_ascii and + check_circular and allow_nan and + cls is None and indent is None and separators is None and + encoding == 'utf-8' and default is None and not use_decimal + and not kw): + return _default_encoder.encode(obj) + if cls is None: + cls = JSONEncoder + return cls( + skipkeys=skipkeys, ensure_ascii=ensure_ascii, + check_circular=check_circular, allow_nan=allow_nan, indent=indent, + separators=separators, encoding=encoding, default=default, + use_decimal=use_decimal, **kw).encode(obj) + + +_default_decoder = JSONDecoder(encoding=None, object_hook=None, + object_pairs_hook=None) + + +def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, object_pairs_hook=None, + use_decimal=False, **kw): + """Deserialize ``fp`` (a ``.read()``-supporting file-like object containing + a JSON document) to a Python object. + + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). 
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
        parse_int=None, parse_constant=None, object_pairs_hook=None,
        use_decimal=False, **kw):
    """Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
    document) to a Python object.

    *encoding* names the codec used to interpret :class:`str` input
    (``'utf-8'`` when unspecified); it is ignored for :class:`unicode`
    input.  Only ASCII-superset encodings are supported — anything else
    should be decoded to :class:`unicode` before being passed in.

    *object_hook* receives each decoded JSON object (a :class:`dict`) and
    its return value is used instead; *object_pairs_hook* receives the
    ordered list of (key, value) pairs instead and wins over *object_hook*
    when both are given (useful e.g. for :func:`collections.OrderedDict`).

    *parse_float*, *parse_int* and *parse_constant* override the default
    conversions (``float``, ``int``, and the NaN/Infinity/-Infinity lookup
    respectively) for the corresponding JSON tokens.

    If *use_decimal* is true (default ``False``) floats are parsed with
    ``decimal.Decimal``, mirroring the ``dump`` side.  A custom
    ``JSONDecoder`` subclass may be supplied via *cls*.
    """
    # Fast path: nothing customised, so the shared module-level decoder
    # can be reused without constructing a new JSONDecoder.
    customised = (cls is not None or encoding is not None
                  or object_hook is not None or object_pairs_hook is not None
                  or parse_float is not None or parse_int is not None
                  or parse_constant is not None or use_decimal or kw)
    if not customised:
        return _default_decoder.decode(s)
    if cls is None:
        cls = JSONDecoder
    # Forward only the hooks the caller actually supplied.
    for name, hook in (('object_hook', object_hook),
                       ('object_pairs_hook', object_pairs_hook),
                       ('parse_float', parse_float),
                       ('parse_int', parse_int),
                       ('parse_constant', parse_constant)):
        if hook is not None:
            kw[name] = hook
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
    return cls(encoding=encoding, **kw).decode(s)


def _toggle_speedups(enabled):
    """Switch the package between its C accelerators and the pure-Python
    implementations, then rebuild the module-level default codec objects so
    they pick up the newly selected functions."""
    import decoder as dec
    import encoder as enc
    import scanner as scan
    c_make_encoder = _import_c_make_encoder()
    if enabled:
        # Prefer each C helper, falling back to Python where unavailable.
        dec.scanstring = dec.c_scanstring or dec.py_scanstring
        enc.c_make_encoder = c_make_encoder
        enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii
                                       or enc.py_encode_basestring_ascii)
        scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
    else:
        dec.scanstring = dec.py_scanstring
        enc.c_make_encoder = None
        enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
        scan.make_scanner = scan.py_make_scanner
    dec.make_scanner = scan.make_scanner
    global _default_decoder
    _default_decoder = JSONDecoder(
        encoding=None,
        object_hook=None,
        object_pairs_hook=None,
    )
    global _default_encoder
    _default_encoder = JSONEncoder(
        skipkeys=False,
        ensure_ascii=True,
        check_circular=True,
        allow_nan=True,
        indent=None,
        separators=None,
        encoding='utf-8',
        default=None,
    )
gluon/contrib/simplejson/decoder.py @@ -0,0 +1,423 @@ +"""Implementation of JSONDecoder +""" +import re +import sys +import struct + +from scanner import make_scanner +def _import_c_scanstring(): + try: + raise ImportError # because assumes simplejson in path + from simplejson._speedups import scanstring + return scanstring + except ImportError: + return None +c_scanstring = _import_c_scanstring() + +__all__ = ['JSONDecoder'] + +FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL + +def _floatconstants(): + _BYTES = '7FF80000000000007FF0000000000000'.decode('hex') + # The struct module in Python 2.4 would get frexp() out of range here + # when an endian is specified in the format string. Fixed in Python 2.5+ + if sys.byteorder != 'big': + _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1] + nan, inf = struct.unpack('dd', _BYTES) + return nan, inf, -inf + +NaN, PosInf, NegInf = _floatconstants() + + +class JSONDecodeError(ValueError): + """Subclass of ValueError with the following additional properties: + + msg: The unformatted error message + doc: The JSON document being parsed + pos: The start index of doc where parsing failed + end: The end index of doc where parsing failed (may be None) + lineno: The line corresponding to pos + colno: The column corresponding to pos + endlineno: The line corresponding to end (may be None) + endcolno: The column corresponding to end (may be None) + + """ + def __init__(self, msg, doc, pos, end=None): + ValueError.__init__(self, errmsg(msg, doc, pos, end=end)) + self.msg = msg + self.doc = doc + self.pos = pos + self.end = end + self.lineno, self.colno = linecol(doc, pos) + if end is not None: + self.endlineno, self.endcolno = linecol(doc, end) + else: + self.endlineno, self.endcolno = None, None + + +def linecol(doc, pos): + lineno = doc.count('\n', 0, pos) + 1 + if lineno == 1: + colno = pos + else: + colno = pos - doc.rindex('\n', 0, pos) + return lineno, colno + + +def errmsg(msg, doc, pos, end=None): + # Note that this function is called 
from _speedups + lineno, colno = linecol(doc, pos) + if end is None: + #fmt = '{0}: line {1} column {2} (char {3})' + #return fmt.format(msg, lineno, colno, pos) + fmt = '%s: line %d column %d (char %d)' + return fmt % (msg, lineno, colno, pos) + endlineno, endcolno = linecol(doc, end) + #fmt = '{0}: line {1} column {2} - line {3} column {4} (char {5} - {6})' + #return fmt.format(msg, lineno, colno, endlineno, endcolno, pos, end) + fmt = '%s: line %d column %d - line %d column %d (char %d - %d)' + return fmt % (msg, lineno, colno, endlineno, endcolno, pos, end) + + +_CONSTANTS = { + '-Infinity': NegInf, + 'Infinity': PosInf, + 'NaN': NaN, +} + +STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS) +BACKSLASH = { + '"': u'"', '\\': u'\\', '/': u'/', + 'b': u'\b', 'f': u'\f', 'n': u'\n', 'r': u'\r', 't': u'\t', +} + +DEFAULT_ENCODING = "utf-8" + +def py_scanstring(s, end, encoding=None, strict=True, + _b=BACKSLASH, _m=STRINGCHUNK.match): + """Scan the string s for a JSON string. End is the index of the + character in s after the quote that started the JSON string. + Unescapes all valid JSON string escape sequences and raises ValueError + on attempt to decode an invalid string. If strict is False then literal + control characters are allowed in the string. 
+ + Returns a tuple of the decoded string and the index of the character in s + after the end quote.""" + if encoding is None: + encoding = DEFAULT_ENCODING + chunks = [] + _append = chunks.append + begin = end - 1 + while 1: + chunk = _m(s, end) + if chunk is None: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + end = chunk.end() + content, terminator = chunk.groups() + # Content is contains zero or more unescaped string characters + if content: + if not isinstance(content, unicode): + content = unicode(content, encoding) + _append(content) + # Terminator is the end of string, a literal control character, + # or a backslash denoting that an escape sequence follows + if terminator == '"': + break + elif terminator != '\\': + if strict: + msg = "Invalid control character %r at" % (terminator,) + #msg = "Invalid control character {0!r} at".format(terminator) + raise JSONDecodeError(msg, s, end) + else: + _append(terminator) + continue + try: + esc = s[end] + except IndexError: + raise JSONDecodeError( + "Unterminated string starting at", s, begin) + # If not a unicode escape sequence, must be in the lookup table + if esc != 'u': + try: + char = _b[esc] + except KeyError: + msg = "Invalid \\escape: " + repr(esc) + raise JSONDecodeError(msg, s, end) + end += 1 + else: + # Unicode escape sequence + esc = s[end + 1:end + 5] + next_end = end + 5 + if len(esc) != 4: + msg = "Invalid \\uXXXX escape" + raise JSONDecodeError(msg, s, end) + uni = int(esc, 16) + # Check for surrogate pair on UCS-4 systems + if 0xd800 <= uni <= 0xdbff and sys.maxunicode > 65535: + msg = "Invalid \\uXXXX\\uXXXX surrogate pair" + if not s[end + 5:end + 7] == '\\u': + raise JSONDecodeError(msg, s, end) + esc2 = s[end + 7:end + 11] + if len(esc2) != 4: + raise JSONDecodeError(msg, s, end) + uni2 = int(esc2, 16) + uni = 0x10000 + (((uni - 0xd800) << 10) | (uni2 - 0xdc00)) + next_end += 6 + char = unichr(uni) + end = next_end + # Append the unescaped character + _append(char) 
+ return u''.join(chunks), end + + +# Use speedup if available +scanstring = c_scanstring or py_scanstring + +WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS) +WHITESPACE_STR = ' \t\n\r' + +def JSONObject((s, end), encoding, strict, scan_once, object_hook, + object_pairs_hook, memo=None, + _w=WHITESPACE.match, _ws=WHITESPACE_STR): + # Backwards compatibility + if memo is None: + memo = {} + memo_get = memo.setdefault + pairs = [] + # Use a slice to prevent IndexError from being raised, the following + # check will raise a more specific ValueError if the string is empty + nextchar = s[end:end + 1] + # Normally we expect nextchar == '"' + if nextchar != '"': + if nextchar in _ws: + end = _w(s, end).end() + nextchar = s[end:end + 1] + # Trivial empty object + if nextchar == '}': + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + 1 + pairs = {} + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + 1 + elif nextchar != '"': + raise JSONDecodeError("Expecting property name", s, end) + end += 1 + while True: + key, end = scanstring(s, end, encoding, strict) + key = memo_get(key, key) + + # To skip some function call overhead we optimize the fast paths where + # the JSON key separator is ": " or just ":". 
+ if s[end:end + 1] != ':': + end = _w(s, end).end() + if s[end:end + 1] != ':': + raise JSONDecodeError("Expecting : delimiter", s, end) + + end += 1 + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + try: + value, end = scan_once(s, end) + except StopIteration: + raise JSONDecodeError("Expecting object", s, end) + pairs.append((key, value)) + + try: + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + end += 1 + + if nextchar == '}': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting , delimiter", s, end - 1) + + try: + nextchar = s[end] + if nextchar in _ws: + end += 1 + nextchar = s[end] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end] + except IndexError: + nextchar = '' + + end += 1 + if nextchar != '"': + raise JSONDecodeError("Expecting property name", s, end - 1) + + if object_pairs_hook is not None: + result = object_pairs_hook(pairs) + return result, end + pairs = dict(pairs) + if object_hook is not None: + pairs = object_hook(pairs) + return pairs, end + +def JSONArray((s, end), scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR): + values = [] + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + # Look-ahead for trivial empty array + if nextchar == ']': + return values, end + 1 + _append = values.append + while True: + try: + value, end = scan_once(s, end) + except StopIteration: + raise JSONDecodeError("Expecting object", s, end) + _append(value) + nextchar = s[end:end + 1] + if nextchar in _ws: + end = _w(s, end + 1).end() + nextchar = s[end:end + 1] + end += 1 + if nextchar == ']': + break + elif nextchar != ',': + raise JSONDecodeError("Expecting , delimiter", s, end) + + try: + if s[end] in _ws: + end += 1 + if s[end] in _ws: + end = _w(s, end + 1).end() + except IndexError: + pass + + return values, end + 
+class JSONDecoder(object): + """Simple JSON <http://json.org> decoder + + Performs the following translations in decoding by default: + + +---------------+-------------------+ + | JSON | Python | + +===============+===================+ + | object | dict | + +---------------+-------------------+ + | array | list | + +---------------+-------------------+ + | string | unicode | + +---------------+-------------------+ + | number (int) | int, long | + +---------------+-------------------+ + | number (real) | float | + +---------------+-------------------+ + | true | True | + +---------------+-------------------+ + | false | False | + +---------------+-------------------+ + | null | None | + +---------------+-------------------+ + + It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as + their corresponding ``float`` values, which is outside the JSON spec. + + """ + + def __init__(self, encoding=None, object_hook=None, parse_float=None, + parse_int=None, parse_constant=None, strict=True, + object_pairs_hook=None): + """ + *encoding* determines the encoding used to interpret any + :class:`str` objects decoded by this instance (``'utf-8'`` by + default). It has no effect when decoding :class:`unicode` objects. + + Note that currently only encodings that are a superset of ASCII work, + strings of other encodings should be passed in as :class:`unicode`. + + *object_hook*, if specified, will be called with the result of every + JSON object decoded and its return value will be used in place of the + given :class:`dict`. This can be used to provide custom + deserializations (e.g. to support JSON-RPC class hinting). + + *object_pairs_hook* is an optional function that will be called with + the result of any object literal decode with an ordered list of pairs. + The return value of *object_pairs_hook* will be used instead of the + :class:`dict`. 
This feature can be used to implement custom decoders + that rely on the order that the key and value pairs are decoded (for + example, :func:`collections.OrderedDict` will remember the order of + insertion). If *object_hook* is also defined, the *object_pairs_hook* + takes priority. + + *parse_float*, if specified, will be called with the string of every + JSON float to be decoded. By default, this is equivalent to + ``float(num_str)``. This can be used to use another datatype or parser + for JSON floats (e.g. :class:`decimal.Decimal`). + + *parse_int*, if specified, will be called with the string of every + JSON int to be decoded. By default, this is equivalent to + ``int(num_str)``. This can be used to use another datatype or parser + for JSON integers (e.g. :class:`float`). + + *parse_constant*, if specified, will be called with one of the + following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This + can be used to raise an exception if invalid JSON numbers are + encountered. + + *strict* controls the parser's behavior when it encounters an + invalid control character in a string. The default setting of + ``True`` means that unescaped control characters are parse errors, if + ``False`` then control characters will be allowed in strings. 
+ + """ + self.encoding = encoding + self.object_hook = object_hook + self.object_pairs_hook = object_pairs_hook + self.parse_float = parse_float or float + self.parse_int = parse_int or int + self.parse_constant = parse_constant or _CONSTANTS.__getitem__ + self.strict = strict + self.parse_object = JSONObject + self.parse_array = JSONArray + self.parse_string = scanstring + self.memo = {} + self.scan_once = make_scanner(self) + + def decode(self, s, _w=WHITESPACE.match): + """Return the Python representation of ``s`` (a ``str`` or ``unicode`` + instance containing a JSON document) + + """ + obj, end = self.raw_decode(s, idx=_w(s, 0).end()) + end = _w(s, end).end() + if end != len(s): + raise JSONDecodeError("Extra data", s, end, len(s)) + return obj + + def raw_decode(self, s, idx=0): + """Decode a JSON document from ``s`` (a ``str`` or ``unicode`` + beginning with a JSON document) and return a 2-tuple of the Python + representation and the index in ``s`` where the document ended. + + This can be used to decode a JSON document from a string that may + have extraneous data at the end. 
+ + """ + try: + obj, end = self.scan_once(s, idx) + except StopIteration: + raise JSONDecodeError("No JSON object could be decoded", s, idx) + return obj, end + ADDED gluon/contrib/simplejson/encoder.py Index: gluon/contrib/simplejson/encoder.py ================================================================== --- /dev/null +++ gluon/contrib/simplejson/encoder.py @@ -0,0 +1,503 @@ +"""Implementation of JSONEncoder +""" +import re +from decimal import Decimal + +def _import_speedups(): + try: + raise ImportError # because assumes simplejson in path + from simplejson import _speedups + return _speedups.encode_basestring_ascii, _speedups.make_encoder + except ImportError: + return None, None +c_encode_basestring_ascii, c_make_encoder = _import_speedups() + +from decoder import PosInf + +ESCAPE = re.compile(r'[\x00-\x1f\\"\b\f\n\r\t]') +ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])') +HAS_UTF8 = re.compile(r'[\x80-\xff]') +ESCAPE_DCT = { + '\\': '\\\\', + '"': '\\"', + '\b': '\\b', + '\f': '\\f', + '\n': '\\n', + '\r': '\\r', + '\t': '\\t', +} +for i in range(0x20): + #ESCAPE_DCT.setdefault(chr(i), '\\u{0:04x}'.format(i)) + ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,)) + +FLOAT_REPR = repr + +def encode_basestring(s): + """Return a JSON representation of a Python string + + """ + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + return ESCAPE_DCT[match.group(0)] + return u'"' + ESCAPE.sub(replace, s) + u'"' + + +def py_encode_basestring_ascii(s): + """Return an ASCII-only JSON representation of a Python string + + """ + if isinstance(s, str) and HAS_UTF8.search(s) is not None: + s = s.decode('utf-8') + def replace(match): + s = match.group(0) + try: + return ESCAPE_DCT[s] + except KeyError: + n = ord(s) + if n < 0x10000: + #return '\\u{0:04x}'.format(n) + return '\\u%04x' % (n,) + else: + # surrogate pair + n -= 0x10000 + s1 = 0xd800 | ((n >> 10) & 0x3ff) + s2 = 0xdc00 | (n & 0x3ff) + #return 
'\\u{0:04x}\\u{1:04x}'.format(s1, s2) + return '\\u%04x\\u%04x' % (s1, s2) + return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"' + + +encode_basestring_ascii = ( + c_encode_basestring_ascii or py_encode_basestring_ascii) + +class JSONEncoder(object): + """Extensible JSON <http://json.org> encoder for Python data structures. + + Supports the following objects and types by default: + + +-------------------+---------------+ + | Python | JSON | + +===================+===============+ + | dict | object | + +-------------------+---------------+ + | list, tuple | array | + +-------------------+---------------+ + | str, unicode | string | + +-------------------+---------------+ + | int, long, float | number | + +-------------------+---------------+ + | True | true | + +-------------------+---------------+ + | False | false | + +-------------------+---------------+ + | None | null | + +-------------------+---------------+ + + To extend this to recognize other objects, subclass and implement a + ``.default()`` method with another method that returns a serializable + object for ``o`` if possible, otherwise it should call the superclass + implementation (to raise ``TypeError``). + + """ + item_separator = ', ' + key_separator = ': ' + def __init__(self, skipkeys=False, ensure_ascii=True, + check_circular=True, allow_nan=True, sort_keys=False, + indent=None, separators=None, encoding='utf-8', default=None, + use_decimal=False): + """Constructor for JSONEncoder, with sensible defaults. + + If skipkeys is false, then it is a TypeError to attempt + encoding of keys that are not str, int, long, float or None. If + skipkeys is True, such items are simply skipped. + + If ensure_ascii is true, the output is guaranteed to be str + objects with all incoming unicode characters escaped. If + ensure_ascii is false, the output will be unicode object. 
+ + If check_circular is true, then lists, dicts, and custom encoded + objects will be checked for circular references during encoding to + prevent an infinite recursion (which would cause an OverflowError). + Otherwise, no such check takes place. + + If allow_nan is true, then NaN, Infinity, and -Infinity will be + encoded as such. This behavior is not JSON specification compliant, + but is consistent with most JavaScript based encoders and decoders. + Otherwise, it will be a ValueError to encode such floats. + + If sort_keys is true, then the output of dictionaries will be + sorted by key; this is useful for regression tests to ensure + that JSON serializations can be compared on a day-to-day basis. + + If indent is a string, then JSON array elements and object members + will be pretty-printed with a newline followed by that string repeated + for each level of nesting. ``None`` (the default) selects the most compact + representation without any newlines. For backwards compatibility with + versions of simplejson earlier than 2.1.0, an integer is also accepted + and is converted to a string with that many spaces. + + If specified, separators should be a (item_separator, key_separator) + tuple. The default is (', ', ': '). To get the most compact JSON + representation you should specify (',', ':') to eliminate whitespace. + + If specified, default is a function that gets called for objects + that can't otherwise be serialized. It should return a JSON encodable + version of the object or raise a ``TypeError``. + + If encoding is not None, then all input strings will be + transformed into unicode using that encoding prior to JSON-encoding. + The default is UTF-8. + + If use_decimal is true (not the default), ``decimal.Decimal`` will + be supported directly by the encoder. For the inverse, decode JSON + with ``parse_float=decimal.Decimal``. 
+ + """ + + self.skipkeys = skipkeys + self.ensure_ascii = ensure_ascii + self.check_circular = check_circular + self.allow_nan = allow_nan + self.sort_keys = sort_keys + self.use_decimal = use_decimal + if isinstance(indent, (int, long)): + indent = ' ' * indent + self.indent = indent + if separators is not None: + self.item_separator, self.key_separator = separators + if default is not None: + self.default = default + self.encoding = encoding + + def default(self, o): + """Implement this method in a subclass such that it returns + a serializable object for ``o``, or calls the base implementation + (to raise a ``TypeError``). + + For example, to support arbitrary iterators, you could + implement default like this:: + + def default(self, o): + try: + iterable = iter(o) + except TypeError: + pass + else: + return list(iterable) + return JSONEncoder.default(self, o) + + """ + raise TypeError(repr(o) + " is not JSON serializable") + + def encode(self, o): + """Return a JSON string representation of a Python data structure. + + >>> from simplejson import JSONEncoder + >>> JSONEncoder().encode({"foo": ["bar", "baz"]}) + '{"foo": ["bar", "baz"]}' + + """ + # This is for extremely simple cases and benchmarks. + if isinstance(o, basestring): + if isinstance(o, str): + _encoding = self.encoding + if (_encoding is not None + and not (_encoding == 'utf-8')): + o = o.decode(_encoding) + if self.ensure_ascii: + return encode_basestring_ascii(o) + else: + return encode_basestring(o) + # This doesn't pass the iterator directly to ''.join() because the + # exceptions aren't as detailed. The list call should be roughly + # equivalent to the PySequence_Fast that ''.join() would do. 
+ chunks = self.iterencode(o, _one_shot=True) + if not isinstance(chunks, (list, tuple)): + chunks = list(chunks) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + """Encode the given object and yield each string + representation as available. + + For example:: + + for chunk in JSONEncoder().iterencode(bigobject): + mysocket.write(chunk) + + """ + if self.check_circular: + markers = {} + else: + markers = None + if self.ensure_ascii: + _encoder = encode_basestring_ascii + else: + _encoder = encode_basestring + if self.encoding != 'utf-8': + def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding): + if isinstance(o, str): + o = o.decode(_encoding) + return _orig_encoder(o) + + def floatstr(o, allow_nan=self.allow_nan, + _repr=FLOAT_REPR, _inf=PosInf, _neginf=-PosInf): + # Check for specials. Note that this type of test is processor + # and/or platform-specific, so do tests which don't depend on + # the internals. + + if o != o: + text = 'NaN' + elif o == _inf: + text = 'Infinity' + elif o == _neginf: + text = '-Infinity' + else: + return _repr(o) + + if not allow_nan: + raise ValueError( + "Out of range float values are not JSON compliant: " + + repr(o)) + + return text + + + key_memo = {} + if (_one_shot and c_make_encoder is not None + and self.indent is None): + _iterencode = c_make_encoder( + markers, self.default, _encoder, self.indent, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, self.allow_nan, key_memo, self.use_decimal) + else: + _iterencode = _make_iterencode( + markers, self.default, _encoder, self.indent, floatstr, + self.key_separator, self.item_separator, self.sort_keys, + self.skipkeys, _one_shot, self.use_decimal) + try: + return _iterencode(o, 0) + finally: + key_memo.clear() + + +class JSONEncoderForHTML(JSONEncoder): + """An encoder that produces JSON safe to embed in HTML. 
+ + To embed JSON content in, say, a script tag on a web page, the + characters &, < and > should be escaped. They cannot be escaped + with the usual entities (e.g. &) because they are not expanded + within <script> tags. + """ + + def encode(self, o): + # Override JSONEncoder.encode because it has hacks for + # performance that make things more complicated. + chunks = self.iterencode(o, True) + if self.ensure_ascii: + return ''.join(chunks) + else: + return u''.join(chunks) + + def iterencode(self, o, _one_shot=False): + chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot) + for chunk in chunks: + chunk = chunk.replace('&', '\\u0026') + chunk = chunk.replace('<', '\\u003c') + chunk = chunk.replace('>', '\\u003e') + yield chunk + + +def _make_iterencode(markers, _default, _encoder, _indent, _floatstr, + _key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot, + _use_decimal, + ## HACK: hand-optimized bytecode; turn globals into locals + False=False, + True=True, + ValueError=ValueError, + basestring=basestring, + Decimal=Decimal, + dict=dict, + float=float, + id=id, + int=int, + isinstance=isinstance, + list=list, + long=long, + str=str, + tuple=tuple, + ): + + def _iterencode_list(lst, _current_indent_level): + if not lst: + yield '[]' + return + if markers is not None: + markerid = id(lst) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = lst + buf = '[' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + separator = _item_separator + newline_indent + buf += newline_indent + else: + newline_indent = None + separator = _item_separator + first = True + for value in lst: + if first: + first = False + else: + buf = separator + if isinstance(value, basestring): + yield buf + _encoder(value) + elif value is None: + yield buf + 'null' + elif value is True: + yield buf + 'true' + elif value is False: + yield buf + 'false' + elif 
isinstance(value, (int, long)): + yield buf + str(value) + elif isinstance(value, float): + yield buf + _floatstr(value) + elif _use_decimal and isinstance(value, Decimal): + yield buf + str(value) + else: + yield buf + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + for chunk in chunks: + yield chunk + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield ']' + if markers is not None: + del markers[markerid] + + def _iterencode_dict(dct, _current_indent_level): + if not dct: + yield '{}' + return + if markers is not None: + markerid = id(dct) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = dct + yield '{' + if _indent is not None: + _current_indent_level += 1 + newline_indent = '\n' + (_indent * _current_indent_level) + item_separator = _item_separator + newline_indent + yield newline_indent + else: + newline_indent = None + item_separator = _item_separator + first = True + if _sort_keys: + items = dct.items() + items.sort(key=lambda kv: kv[0]) + else: + items = dct.iteritems() + for key, value in items: + if isinstance(key, basestring): + pass + # JavaScript is weakly typed for these, so it makes sense to + # also allow them. Many encoders seem to do something like this. 
+ elif isinstance(key, float): + key = _floatstr(key) + elif key is True: + key = 'true' + elif key is False: + key = 'false' + elif key is None: + key = 'null' + elif isinstance(key, (int, long)): + key = str(key) + elif _skipkeys: + continue + else: + raise TypeError("key " + repr(key) + " is not a string") + if first: + first = False + else: + yield item_separator + yield _encoder(key) + yield _key_separator + if isinstance(value, basestring): + yield _encoder(value) + elif value is None: + yield 'null' + elif value is True: + yield 'true' + elif value is False: + yield 'false' + elif isinstance(value, (int, long)): + yield str(value) + elif isinstance(value, float): + yield _floatstr(value) + elif _use_decimal and isinstance(value, Decimal): + yield str(value) + else: + if isinstance(value, (list, tuple)): + chunks = _iterencode_list(value, _current_indent_level) + elif isinstance(value, dict): + chunks = _iterencode_dict(value, _current_indent_level) + else: + chunks = _iterencode(value, _current_indent_level) + for chunk in chunks: + yield chunk + if newline_indent is not None: + _current_indent_level -= 1 + yield '\n' + (_indent * _current_indent_level) + yield '}' + if markers is not None: + del markers[markerid] + + def _iterencode(o, _current_indent_level): + if isinstance(o, basestring): + yield _encoder(o) + elif o is None: + yield 'null' + elif o is True: + yield 'true' + elif o is False: + yield 'false' + elif isinstance(o, (int, long)): + yield str(o) + elif isinstance(o, float): + yield _floatstr(o) + elif isinstance(o, (list, tuple)): + for chunk in _iterencode_list(o, _current_indent_level): + yield chunk + elif isinstance(o, dict): + for chunk in _iterencode_dict(o, _current_indent_level): + yield chunk + elif _use_decimal and isinstance(o, Decimal): + yield str(o) + else: + if markers is not None: + markerid = id(o) + if markerid in markers: + raise ValueError("Circular reference detected") + markers[markerid] = o + o = _default(o) + for chunk 
in _iterencode(o, _current_indent_level): + yield chunk + if markers is not None: + del markers[markerid] + + return _iterencode + ADDED gluon/contrib/simplejson/ordered_dict.py Index: gluon/contrib/simplejson/ordered_dict.py ================================================================== --- /dev/null +++ gluon/contrib/simplejson/ordered_dict.py @@ -0,0 +1,120 @@ +"""Drop-in replacement for collections.OrderedDict by Raymond Hettinger + +http://code.activestate.com/recipes/576693/ + +""" +from UserDict import DictMixin + +# Modified from original to support Python 2.4, see +# http://code.google.com/p/simplejson/issues/detail?id=53 +try: + all +except NameError: + def all(seq): + for elem in seq: + if not elem: + return False + return True + +class OrderedDict(dict, DictMixin): + + def __init__(self, *args, **kwds): + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__end + except AttributeError: + self.clear() + self.update(*args, **kwds) + + def clear(self): + self.__end = end = [] + end += [None, end, end] # sentinel node for doubly linked list + self.__map = {} # key --> [key, prev, next] + dict.clear(self) + + def __setitem__(self, key, value): + if key not in self: + end = self.__end + curr = end[1] + curr[2] = end[1] = self.__map[key] = [key, curr, end] + dict.__setitem__(self, key, value) + + def __delitem__(self, key): + dict.__delitem__(self, key) + key, prev, next = self.__map.pop(key) + prev[2] = next + next[1] = prev + + def __iter__(self): + end = self.__end + curr = end[2] + while curr is not end: + yield curr[0] + curr = curr[2] + + def __reversed__(self): + end = self.__end + curr = end[1] + while curr is not end: + yield curr[0] + curr = curr[1] + + def popitem(self, last=True): + if not self: + raise KeyError('dictionary is empty') + # Modified from original to support Python 2.4, see + # http://code.google.com/p/simplejson/issues/detail?id=53 + if last: + key = reversed(self).next() + else: 
+ key = iter(self).next() + value = self.pop(key) + return key, value + + def __reduce__(self): + items = [[k, self[k]] for k in self] + tmp = self.__map, self.__end + del self.__map, self.__end + inst_dict = vars(self).copy() + self.__map, self.__end = tmp + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def keys(self): + return list(self) + + setdefault = DictMixin.setdefault + update = DictMixin.update + pop = DictMixin.pop + values = DictMixin.values + items = DictMixin.items + iterkeys = DictMixin.iterkeys + itervalues = DictMixin.itervalues + iteritems = DictMixin.iteritems + + def __repr__(self): + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + + def copy(self): + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + d = cls() + for key in iterable: + d[key] = value + return d + + def __eq__(self, other): + if isinstance(other, OrderedDict): + return len(self)==len(other) and \ + all(p==q for p, q in zip(self.items(), other.items())) + return dict.__eq__(self, other) + + def __ne__(self, other): + return not self == other + ADDED gluon/contrib/simplejson/scanner.py Index: gluon/contrib/simplejson/scanner.py ================================================================== --- /dev/null +++ gluon/contrib/simplejson/scanner.py @@ -0,0 +1,79 @@ +"""JSON token scanner +""" +import re +def _import_c_make_scanner(): + try: + raise ImportError # because assumes simplejson in path + from simplejson._speedups import make_scanner + return make_scanner + except ImportError: + return None +c_make_scanner = _import_c_make_scanner() + +__all__ = ['make_scanner'] + +NUMBER_RE = re.compile( + r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?', + (re.VERBOSE | re.MULTILINE | re.DOTALL)) + +def py_make_scanner(context): + parse_object = context.parse_object + parse_array = context.parse_array + parse_string = 
context.parse_string + match_number = NUMBER_RE.match + encoding = context.encoding + strict = context.strict + parse_float = context.parse_float + parse_int = context.parse_int + parse_constant = context.parse_constant + object_hook = context.object_hook + object_pairs_hook = context.object_pairs_hook + memo = context.memo + + def _scan_once(string, idx): + try: + nextchar = string[idx] + except IndexError: + raise StopIteration + + if nextchar == '"': + return parse_string(string, idx + 1, encoding, strict) + elif nextchar == '{': + return parse_object((string, idx + 1), encoding, strict, + _scan_once, object_hook, object_pairs_hook, memo) + elif nextchar == '[': + return parse_array((string, idx + 1), _scan_once) + elif nextchar == 'n' and string[idx:idx + 4] == 'null': + return None, idx + 4 + elif nextchar == 't' and string[idx:idx + 4] == 'true': + return True, idx + 4 + elif nextchar == 'f' and string[idx:idx + 5] == 'false': + return False, idx + 5 + + m = match_number(string, idx) + if m is not None: + integer, frac, exp = m.groups() + if frac or exp: + res = parse_float(integer + (frac or '') + (exp or '')) + else: + res = parse_int(integer) + return res, m.end() + elif nextchar == 'N' and string[idx:idx + 3] == 'NaN': + return parse_constant('NaN'), idx + 3 + elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity': + return parse_constant('Infinity'), idx + 8 + elif nextchar == '-' and string[idx:idx + 9] == '-Infinity': + return parse_constant('-Infinity'), idx + 9 + else: + raise StopIteration + + def scan_once(string, idx): + try: + return _scan_once(string, idx) + finally: + memo.clear() + + return scan_once + +make_scanner = c_make_scanner or py_make_scanner + ADDED gluon/contrib/simplejson/tool.py Index: gluon/contrib/simplejson/tool.py ================================================================== --- /dev/null +++ gluon/contrib/simplejson/tool.py @@ -0,0 +1,43 @@ +r"""Command-line tool to validate and pretty-print JSON + +Usage:: + + $ 
echo '{"json":"obj"}' | python -m simplejson.tool + { + "json": "obj" + } + $ echo '{ 1.2:3.4}' | python -m simplejson.tool + Expecting property name: line 1 column 2 (char 2) + +""" +import sys +import simplejson as json + +def main(): + if len(sys.argv) == 1: + infile = sys.stdin + outfile = sys.stdout + elif len(sys.argv) == 2: + infile = open(sys.argv[1], 'rb') + outfile = sys.stdout + elif len(sys.argv) == 3: + infile = open(sys.argv[1], 'rb') + outfile = open(sys.argv[2], 'wb') + else: + raise SystemExit(sys.argv[0] + " [infile [outfile]]") + try: + try: + obj = json.load(infile, + object_pairs_hook=json.OrderedDict, + use_decimal=True) + except ValueError, e: + raise SystemExit(e) + json.dump(obj, outfile, sort_keys=True, indent=' ', use_decimal=True) + outfile.write('\n') + finally: + infile.close() + outfile.close() + +if __name__ == '__main__': + main() + ADDED gluon/contrib/simplejsonrpc.py Index: gluon/contrib/simplejsonrpc.py ================================================================== --- /dev/null +++ gluon/contrib/simplejsonrpc.py @@ -0,0 +1,147 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by the +# Free Software Foundation; either version 3, or (at your option) any later +# version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY +# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# for more details. 
+ +"Pythonic simple JSON RPC Client implementation" + +__author__ = "Mariano Reingart (reingart@gmail.com)" +__copyright__ = "Copyright (C) 2011 Mariano Reingart" +__license__ = "LGPL 3.0" +__version__ = "0.04" + + +import urllib +from xmlrpclib import Transport, SafeTransport +from cStringIO import StringIO +import random +import sys +try: + import gluon.contrib.simplejson as json # try web2py json serializer +except ImportError: + try: + import json # try stdlib (py2.6) + except: + import simplejson as json # try external module + + +class JSONRPCError(RuntimeError): + "Error object for remote procedure call fail" + def __init__(self, code, message): + self.code = code + self.message = message + def __unicode__(self): + return u"%s: %s" % (self.code, self.message) + def __str__(self): + return self.__unicode__().encode("ascii","ignore") + + +class JSONDummyParser: + "json wrapper for xmlrpclib parser interfase" + def __init__(self): + self.buf = StringIO() + def feed(self, data): + self.buf.write(data) + def close(self): + return self.buf.getvalue() + + +class JSONTransportMixin: + "json wrapper for xmlrpclib transport interfase" + + def send_content(self, connection, request_body): + connection.putheader("Content-Type", "application/json") + connection.putheader("Content-Length", str(len(request_body))) + connection.endheaders() + if request_body: + connection.send(request_body) + # todo: add gzip compression + + def getparser(self): + # get parser and unmarshaller + parser = JSONDummyParser() + return parser, parser + + +class JSONTransport(JSONTransportMixin, Transport): + pass + +class JSONSafeTransport(JSONTransportMixin, SafeTransport): + pass + + +class ServerProxy(object): + "JSON RPC Simple Client Service Proxy" + + def __init__(self, uri, transport=None, encoding=None, verbose=0): + self.location = uri # server location (url) + self.trace = verbose # show debug messages + self.exceptions = True # raise errors? 
(JSONRPCError) + self.timeout = None + self.json_request = self.json_response = '' + + type, uri = urllib.splittype(uri) + if type not in ("http", "https"): + raise IOError, "unsupported JSON-RPC protocol" + self.__host, self.__handler = urllib.splithost(uri) + + if transport is None: + if type == "https": + transport = JSONSafeTransport() + else: + transport = JSONTransport() + self.__transport = transport + self.__encoding = encoding + self.__verbose = verbose + + def __getattr__(self, attr): + "pseudo method that can be called" + return lambda *args: self.call(attr, *args) + + def call(self, method, *args): + "JSON RPC communication (method invocation)" + + # build data sent to the service + request_id = random.randint(0, sys.maxint) + data = {'id': request_id, 'method': method, 'params': args, } + request = json.dumps(data) + + # make HTTP request (retry if connection is lost) + response = self.__transport.request( + self.__host, + self.__handler, + request, + verbose=self.__verbose + ) + + # store plain request and response for further debugging + self.json_request = request + self.json_response = response + + # parse json data coming from service + # {'version': '1.1', 'id': id, 'result': result, 'error': None} + response = json.loads(response) + + if response['id'] != request_id: + raise JSONRPCError(0, "JSON Request ID != Response ID") + + self.error = response.get('error', {}) + if self.error and self.exceptions: + raise JSONRPCError(self.error.get('code', 0), self.error.get('message', '')) + + return response.get('result') + + +if __name__ == "__main__": + # basic tests: + location = "http://www.web2py.com.ar/webservices/sample/call/jsonrpc" + client = ServerProxy(location, verbose='--verbose' in sys.argv,) + print client.add(1, 2) + ADDED gluon/contrib/sms_utils.py Index: gluon/contrib/sms_utils.py ================================================================== --- /dev/null +++ gluon/contrib/sms_utils.py @@ -0,0 +1,114 @@ +SMSCODES = { + 
'Aliant':'@chat.wirefree.ca', + 'Alltel':'@message.alltel.com', + 'Ameritech':'@paging.acswireless.com', + 'AT&T':'@txt.att.net', + 'AU by KDDI':'@ezweb.ne.jp', + 'BeeLine GSM':'@sms.beemail.ru', + 'Bell Mobility Canada':'@txt.bellmobility.ca', + 'Bellsouth':'@bellsouth.cl', + 'BellSouth Mobility':'@blsdcs.net', + 'Blue Sky Frog':'@blueskyfrog.com', + 'Boost':'@myboostmobile.com', + 'Cellular South':'@csouth1.com', + 'CellularOne':'@mobile.celloneusa.com', + 'CellularOne West':'@mycellone.com', + 'Cincinnati Bell':'@gocbw.com', + 'Claro':'@clarotorpedo.com.br', + 'Comviq':'@sms.comviq.se', + 'Dutchtone/Orange-NL':'@sms.orange.nl', + 'Edge Wireless':'@sms.edgewireless.com', + 'EinsteinPCS / Airadigm Communications':'@einsteinsms.com', + 'EPlus':'@smsmail.eplus.de', + 'Fido Canada':'@fido.ca', + 'Golden Telecom':'@sms.goldentele.com', + 'Idea Cellular':'@ideacellular.net', + 'Kyivstar':'@sms.kyivstar.net', + 'LMT':'@sms.lmt.lv', + 'Manitoba Telecom Systems':'@text.mtsmobility.com', + 'Meteor':'@sms.mymeteor.ie', + 'Metro PCS':'@mymetropcs.com', + 'Metrocall Pager':'@page.metrocall.com', + 'MobileOne':'@m1.com.sg', + 'Mobilfone':'@page.mobilfone.com', + 'Mobility Bermuda':'@ml.bm', + 'Netcom':'@sms.netcom.no', + 'Nextel':'@messaging.nextel.com', + 'NPI Wireless':'@npiwireless.com', + 'O2':'@o2.co.uk', + 'O2 M-mail':'@mmail.co.uk', + 'Optus':'@optusmobile.com.au', + 'Orange':'@orange.net', + 'Oskar':'@mujoskar.cz', + 'Pagenet':'@pagenet.net', + 'PCS Rogers':'@pcs.rogers.com', + 'Personal Communication':'@pcom.ru', + 'Plus GSM Poland':'@text.plusgsm.pl', + 'Powertel':'@ptel.net', + 'Primtel':'@sms.primtel.ru', + 'PSC Wireless':'@sms.pscel.com', + 'Qualcomm':'@pager.qualcomm.com', + 'Qwest':'@qwestmp.com', + 'Safaricom':'@safaricomsms.com', + 'Satelindo GSM':'@satelindogsm.com', + 'SCS-900':'@scs-900.ru', + 'Simple Freedom':'@text.simplefreedom.net', + 'Skytel - Alphanumeric':'@skytel.com', + 'Smart Telecom':'@mysmart.mymobile.ph', + 'Southern 
Linc':'@page.southernlinc.com', + 'Sprint PCS':'@messaging.sprintpcs.com', + 'Sprint PCS - Short Mail':'@sprintpcs.com', + 'SunCom':'@tms.suncom.com', + 'SureWest Communications':'@mobile.surewest.com', + 'SwissCom Mobile':'@bluewin.ch', + 'T-Mobile Germany':'@T-D1-SMS.de', + 'T-Mobile Netherlands':'@gin.nl', + 'T-Mobile UK':'@t-mobile.uk.net', + 'T-Mobile USA (tmail)':'@tmail.com', + 'T-Mobile USA (tmomail)':'@tmomail.net', + 'Tele2 Latvia':'@sms.tele2.lv', + 'Telefonica Movistar':'@movistar.net', + 'Telenor':'@mobilpost.no', + 'Telia Denmark':'@gsm1800.telia.dk', + 'Telus Mobility':'@msg.telus.com', + 'The Phone House':'@sms.phonehouse.de', + 'TIM':'@timnet.com', + 'UMC':'@sms.umc.com.ua', + 'Unicel':'@utext.com', + 'US Cellular':'@email.uscc.net', + 'Verizon Wireless (vtext)':'@vtext.com', + 'Verizon Wireless (airtouchpaging)':'@airtouchpaging.com', + 'Verizon Wireless (myairmail)':'@myairmail.com', + 'Vessotel':'@pager.irkutsk.ru', + 'Virgin Mobile Canada':'@vmobile.ca', + 'Virgin Mobile USA':'@vmobl.com', + 'Vodafone Italy':'@sms.vodafone.it', + 'Vodafone Japan (n)':'@n.vodafone.ne.jp', + 'Vodafone Japan (d)':'@d.vodafone.ne.jp', + 'Vodafone Japan (r)':'@r.vodafone.ne.jp', + 'Vodafone Japan (k)':'@k.vodafone.ne.jp', + 'Vodafone Japan (t)':'@t.vodafone.ne.jp', + 'Vodafone Japan (q)':'@q.vodafone.ne.jp', + 'Vodafone Japan (s)':'@s.vodafone.ne.jp', + 'Vodafone Japan (h)':'@h.vodafone.ne.jp', + 'Vodafone Japan (c)':'@c.vodafone.ne.jp', + 'Vodafone Spain':'@vodafone.es', + 'Vodafone UK':'@vodafone.net', + 'Weblink Wireless':'@airmessage.net', + 'WellCom':'@sms.welcome2well.com', + 'WyndTell':'@wyndtell.com', + } + +def sms_email(number,provider): + """ + >>> print sms_email('1 (312) 375-6536','T-Mobile USA (tmail)') + print 13123756536@tmail.com + """ + import re + if number[0]=='+1': number=number[1:] + elif number[0]=='+': number=number[3:] + elif number[:2]=='00': number=number[3:] + number=re.sub('[^\d]','',number) + return number+SMSCODES[provider] + + ADDED 
gluon/contrib/spreadsheet.py Index: gluon/contrib/spreadsheet.py ================================================================== --- /dev/null +++ gluon/contrib/spreadsheet.py @@ -0,0 +1,264 @@ +""" +Developed by Massimo Di Pierro, optional component of web2py, GPL2 license. +""" +import re +import pickle +import copy + + +def quote(text): + return str(text).replace('\\', '\\\\').replace("'", "\\'") + + +class Node: + """ + Example:: + + # controller + from gluon.contrib.spreadsheet import Sheet + + def callback(): + return cache.ram('sheet1', lambda: None, None).process(request) + + def index(): + sheet = cache.ram('sheet1', + lambda: Sheet(10, 10, URL(r=request, f='callback')), 0) + #sheet.cell('r0c3', value='=r0c0+r0c1+r0c2', readonly=True) + return dict(sheet=sheet) + + # view + {{extend 'layout.html'}} + {{=sheet}} + + or insert invidivual cells via + + {{=sheet.nodes['r0c0']}} + + """ + + def __init__(self, name, value, url='.', readonly=False, active=True, + onchange=None): + self.url = url + self.name = name + self.value = str(value) + self.computed_value = '' + self.incoming = {} + self.outcoming = {} + self.readonly = readonly + self.active = active + self.onchange = onchange + self.size = 4 + self.locked = False + + def xml(self): + return """<input name="%s" id="%s" value="%s" size="%s" + onkeyup="ajax('%s/keyup',['%s'], ':eval');" + onfocus="ajax('%s/focus',['%s'], ':eval');" + onblur="ajax('%s/blur',['%s'], ':eval');" %s/> + """ % (self.name, self.name, self.computed_value, self.size, + self.url, self.name, self.url, self.name, self.url, self.name, + (self.readonly and 'readonly ') or '') + + def __repr__(self): + return '%s:%s' % (self.name, self.computed_value) + + +class Sheet: + + regex=re.compile('(?<!\w)[a-zA-Z_]\w*') + + re_strings = re.compile(r'(?P<name>' + + r"[uU]?[rR]?'''([^']+|'{1,2}(?!'))*'''|" + + r"'([^'\\]|\\.)*'|" + + r'"""([^"]|"{1,2}(?!"))*"""|' + + r'"([^"\\]|\\.)*")', re.DOTALL) + + def dumps(self): + dump = pickle.dumps(self) 
+ return dump + + @staticmethod + def loads(data): + sheet = pickle.loads(data) + return sheet + + def process(self, request): + """ + call this in action that creates table, it will handle ajax callbacks + """ + cell = request.vars.keys()[0] + if request.args(0) == 'focus': + return "jQuery('#%s').val('%s');" % (cell, quote(self[cell].value)) + value = request.vars[cell] + self[cell] = value + if request.args(0) == 'blur': + return "jQuery('#%s').val('%s');" \ + % (cell, quote(self[cell].computed_value)) + elif request.args(0) == 'keyup': + jquery = '' + for other_key in self.modified: + if other_key != cell: + jquery += "jQuery('#%s').val('%s');" % \ + (other_key, quote(self[other_key].computed_value)) + return jquery + + def __init__(self, rows, cols, url='.', readonly=False, active=True, + onchange=None): + self.rows = rows + self.cols = cols + self.url = url + self.nodes = {} + self.error = 'ERROR: %(error)s' + self.allowed_keywords = ['for', 'in', 'if', 'else', 'and', 'or', 'not', + 'i', 'j', 'k', 'x', 'y', 'z', 'sum'] + self.environment = {} + [self.cell('r%sc%s'%(k/cols, k%cols), '0.0', readonly, active, onchange) + for k in xrange(rows*cols)] + exec('from math import *', {}, self.environment) + + def delete_from(self, other_list): + indices = [k for (k, node) in enumerate(other_list) if k == node] + if indices: + del other_list[indices[0]] + + def changed(self, node, changed_nodes=[]): + for other_node in node.outcoming: + if not other_node in changed_nodes: + changed_nodes.append(other_node) + self.changed(other_node, changed_nodes) + return changed_nodes + + def define(self, name, obj): + self.environment[name] = obj + + def cell(self, key, value, readonly=False, active=True, onchange=None): + """ + key is the name of the cell + value is the initial value of the cell. 
It can be a formula "=1+3" + a cell is active if it evaluates formuls + """ + key = str(key) + if not self.regex.match(key): + raise SyntaxError, "Invalid cell name: %s" % key + node = Node(key, value, self.url, readonly, active, onchange) + self.nodes[key] = node + self[key] = value + + def __setitem__(self, key, value): + key = str(key) + value = str(value) + node = self.nodes[key] + node.value = value + if value[:1] == '=' and node.active: + # clear all edges involving current node + for other_node in node.incoming: + del other_node.outcoming[node] + node.incoming.clear() + # build new edges + command = self.re_strings.sub("''", value[1:]) + node.locked = False + for match in self.regex.finditer(command): + other_key = match.group() + if other_key == key: + self.computed_value = self.error % dict(error='cycle') + self.modified={} + break + if other_key in self.nodes: + other_node = self.nodes[other_key] + other_node.outcoming[node] = True + node.incoming[other_node] = True + elif not other_key in self.allowed_keywords and \ + not other_key in self.environment: + node.locked = True + node.computed_value = \ + self.error % dict(error='invalid keyword: ' + other_key) + self.modified = {} + break + self.compute(node) + else: + try: + node.computed_value = int(node.value) + except: + try: + node.computed_value = float(node.value) + except: + node.computed_value = node.value + self.environment[key] = node.computed_value + if node.onchange: + node.onchange(node) + self.modified = self.iterate(node) + + def compute(self, node): + if node.value[:1] == '=' and not node.locked: + try: + exec('__value__=' + node.value[1:], {}, self.environment) + node.computed_value = self.environment['__value__'] + del self.environment['__value__'] + except Exception, e: + node.computed_value = self.error % dict(error=str(e)) + self.environment[node.name] = node.computed_value + if node.onchange: + node.onchange(node) + + def iterate(self, node): + output = {node.name: node.computed_value} 
+ changed_nodes = self.changed(node) + while changed_nodes: + ok=False + set_changed_nodes = set(changed_nodes) + for (k, other_node) in enumerate(changed_nodes): + #print other_node, changed_nodes + if not set(other_node.incoming.keys()).\ + intersection(set_changed_nodes): + #print 'ok' + self.compute(other_node) + output[other_node.name] = other_node.computed_value + #print other_node + del changed_nodes[k] + ok = True + break + if not ok: + return {} + return output + + def __getitem__(self, key): + return self.nodes[str(key)] + + def get_computed_values(self): + d={} + for key in self.nodes: + node = self.nodes[key] + if node.value[:1] != '=' or not node.active: + d[key] = node.computed_value + return d + + def set_computed_values(self, d): + for key in d: + if not key in self.nodes: + continue + node = self.nodes[key] + if node.value[:1] != '=' or not node.active: + node.value = d[key] + + def xml(self): + import gluon.html + (DIV, TABLE, TR, TD, TH, BR) = \ + (gluon.html.DIV, gluon.html.TABLE, gluon.html.TR, gluon.html.TD, + gluon.html.TH, gluon.html.BR) + regex = re.compile('r\d+c\d+') + return DIV(TABLE(TR(TH(), *[TH('c%s' % c) for c in range(self.cols)]), + *[TR(TH('r%s' % r), *[TD(self.nodes['r%sc%s'%(r, c)]) \ + for c in range(self.cols)]) \ + for r in range(self.rows)]), + BR(), + TABLE(*[TR(TH(key), TD(self.nodes[key])) \ + for key in self.nodes if not regex.match(key)])).xml() + +if __name__ == '__main__': + s = Sheet(0, 0) + s.cell('a', value="2") + s.cell('b', value="=sin(a)") + s.cell('c', value="=cos(a)**2+b*b") + print s['c'].computed_value + + ADDED gluon/contrib/taskbar_widget.py Index: gluon/contrib/taskbar_widget.py ================================================================== --- /dev/null +++ gluon/contrib/taskbar_widget.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# # Creates a taskbar icon for web2py +# # Author: Mark Larsen, mostly stolen from Mark Hammond's +# # 
C:\Python25\Lib\site-packages\win32\Demos\win32gui_taskbar.py +# # 11/7/08 +# dual licensed under the web2py license (LGPL) and the Python license. + +import os +import sys +import base64 +import win32con +import win32api +import win32gui + + +class TaskBarIcon: + + def __init__(self, iconPath=None): + + self.iconPath = iconPath + self.status = [] + + msg_TaskbarRestart = \ + win32api.RegisterWindowMessage('TaskbarCreated') + message_map = { + msg_TaskbarRestart: self.OnRestart, + win32con.WM_DESTROY: self.OnDestroy, + win32con.WM_COMMAND: self.OnCommand, + win32con.WM_USER + 20: self.OnTaskbarNotify, + } + + # Register the Window class. + + wc = win32gui.WNDCLASS() + hinst = wc.hInstance = win32api.GetModuleHandle(None) + wc.lpszClassName = 'web2pyTaskbar' + wc.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW + wc.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW) + wc.hbrBackground = win32con.COLOR_WINDOW + wc.lpfnWndProc = message_map # could also specify a wndproc. + classAtom = win32gui.RegisterClass(wc) + + # Create the Window. 
+ + style = win32con.WS_OVERLAPPED | win32con.WS_SYSMENU + self.hwnd = win32gui.CreateWindow( + classAtom, + 'web2pyTaskbar', + style, + 0, + 0, + win32con.CW_USEDEFAULT, + win32con.CW_USEDEFAULT, + 0, + 0, + hinst, + None, + ) + win32gui.UpdateWindow(self.hwnd) + self.SetServerStopped() + + def __createIcon(self): + + # try and use custom icon + + if self.iconPath and os.path.isfile(self.iconPath): + hicon = self.__loadFromFile(self.iconPath) + else: + try: + fp = 'tmp.ico' + icFH = file(fp, 'wb') + if self.serverState == self.EnumServerState.STOPPED: + icFH.write(base64.b64decode(self.__getIconStopped())) + elif self.serverState == self.EnumServerState.RUNNING: + icFH.write(base64.b64decode(self.__getIconRunning())) + icFH.close() + hicon = self.__loadFromFile(fp) + os.unlink(fp) + except: + print "Can't load web2py icons - using default" + hicon = win32gui.LoadIcon(0, win32con.IDI_APPLICATION) + + flags = win32gui.NIF_ICON | win32gui.NIF_MESSAGE\ + | win32gui.NIF_TIP + nid = ( + self.hwnd, + 0, + flags, + win32con.WM_USER + 20, + hicon, + 'web2py Framework', + ) + try: + win32gui.Shell_NotifyIcon(win32gui.NIM_MODIFY, nid) + except: + try: + win32gui.Shell_NotifyIcon(win32gui.NIM_ADD, nid) + except win32api.error: + + # This is common when windows is starting, and this code is hit + # before the taskbar has been created. + + print 'Failed to add the taskbar icon - is explorer running?' 
+ + # but keep running anyway - when explorer starts, we get the + + def OnRestart( + self, + hwnd, + msg, + wparam, + lparam, + ): + self._DoCreateIcons() + + def OnDestroy( + self, + hwnd, + msg, + wparam, + lparam, + ): + nid = (self.hwnd, 0) + win32gui.Shell_NotifyIcon(win32gui.NIM_DELETE, nid) + + def OnTaskbarNotify( + self, + hwnd, + msg, + wparam, + lparam, + ): + if lparam == win32con.WM_LBUTTONUP: + pass + elif lparam == win32con.WM_LBUTTONDBLCLK: + pass + elif lparam == win32con.WM_RBUTTONUP: + menu = win32gui.CreatePopupMenu() + win32gui.AppendMenu(menu, win32con.MF_STRING, 1023, + 'Toggle Display') + win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '') + if self.serverState == self.EnumServerState.STOPPED: + win32gui.AppendMenu(menu, win32con.MF_STRING, 1024, + 'Start Server') + win32gui.AppendMenu(menu, win32con.MF_STRING + | win32con.MF_GRAYED, 1025, + 'Restart Server') + win32gui.AppendMenu(menu, win32con.MF_STRING + | win32con.MF_GRAYED, 1026, + 'Stop Server') + else: + win32gui.AppendMenu(menu, win32con.MF_STRING + | win32con.MF_GRAYED, 1024, + 'Start Server') + win32gui.AppendMenu(menu, win32con.MF_STRING, 1025, + 'Restart Server') + win32gui.AppendMenu(menu, win32con.MF_STRING, 1026, + 'Stop Server') + win32gui.AppendMenu(menu, win32con.MF_SEPARATOR, 0, '') + win32gui.AppendMenu(menu, win32con.MF_STRING, 1027, + 'Quit (pid:%i)' % os.getpid()) + pos = win32gui.GetCursorPos() + + # See http://msdn.microsoft.com/library/default.asp?url=/library/en-us/winui/menus_0hdi.asp + + win32gui.SetForegroundWindow(self.hwnd) + win32gui.TrackPopupMenu( + menu, + win32con.TPM_LEFTALIGN, + pos[0], + pos[1], + 0, + self.hwnd, + None, + ) + win32api.PostMessage(self.hwnd, win32con.WM_NULL, 0, 0) + return 1 + + def OnCommand( + self, + hwnd, + msg, + wparam, + lparam, + ): + id = win32api.LOWORD(wparam) + if id == 1023: + self.status.append(self.EnumStatus.TOGGLE) + elif id == 1024: + self.status.append(self.EnumStatus.START) + elif id == 1025: + 
self.status.append(self.EnumStatus.RESTART) + elif id == 1026: + self.status.append(self.EnumStatus.STOP) + elif id == 1027: + self.status.append(self.EnumStatus.QUIT) + self.Destroy() + else: + print 'Unknown command -', id + + def Destroy(self): + win32gui.DestroyWindow(self.hwnd) + + def SetServerRunning(self): + self.serverState = self.EnumServerState.RUNNING + self.__createIcon() + + def SetServerStopped(self): + self.serverState = self.EnumServerState.STOPPED + self.__createIcon() + + def __getIconRunning(self): + return 'AAABAAEAEBAQAAAAAAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAgAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABERAgAAIAAAEAACAAAgAAABEAIiACIgAAABAgAgIAIAEAECACAgAgABEAIiACACAAAAAAAAAAAAICACIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAICAgIAACACAgICAgAAIAICAgICIiAiIAAAAAAAAAAAD//wAAhe8AAL3vAADMYwAA9a0AALWtAADMbQAA//8AAKwjAABV7QAAVe0AAFQjAABV7QAAVe0AAFQjAAD//wAA' + + def __getIconStopped(self): + return 'AAABAAEAEBAQAAEABAAoAQAAFgAAACgAAAAQAAAAIAAAAAEABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJCdIAIXMGAABe/wAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAzMzMzMzMzAwERMjMzIzAzEDMyMzMjAzMxAzIiMyAjMzMwMjMjAzIzEzECMyAjMjMxEzAiAyMyMzMzMwAzMzMzIyMyACMiIzIyMjAzAyMyMjIyAjMwIzIyMjAyIiMCIzIyAjIzMyAyMjAyMjMzIwIyAjIyIiMiIDAzMzMzMzMzB//gAAhe0AAJ3rAADMYwAA9a0AALGNAADMLQAA/n8AAKwjAABVrQAAUc0AAFQjAABF5QAAVekAABQhAAB//gAA' + + def __loadFromFile(self, iconPath): + hinst = win32api.GetModuleHandle(None) + icon_flags = win32con.LR_LOADFROMFILE | win32con.LR_DEFAULTSIZE + hicon = win32gui.LoadImage( + hinst, + iconPath, + win32con.IMAGE_ICON, + 0, + 0, + icon_flags, + ) + return hicon + + class EnumStatus: + + TOGGLE = 0 + START = 1 + STOP = 2 + RESTART = 3 + QUIT = 4 + + class EnumServerState: + + RUNNING = 0 + STOPPED = 1 + + ADDED gluon/contrib/user_agent_parser.py Index: gluon/contrib/user_agent_parser.py ================================================================== --- 
/dev/null +++ gluon/contrib/user_agent_parser.py @@ -0,0 +1,420 @@ +""" +Extract client information from http user agent +The module does not try to detect all capabilities of browser in current form (it can easily be extended though). +Aim is + * fast + * very easy to extend + * reliable enough for practical purposes + * and assist python web apps to detect clients. + +Taken from http://pypi.python.org/pypi/httpagentparser (MIT license) +Modified my Ross Peoples for web2py to better support iPhone and iPad. +""" +import sys +from storage import Storage + +class DetectorsHub(dict): + _known_types = ['os', 'dist', 'flavor', 'browser'] + + def __init__(self, *args, **kw): + dict.__init__(self, *args, **kw) + for typ in self._known_types: + self.setdefault(typ, []) + self.registerDetectors() + + def register(self, detector): + if detector.info_type not in self._known_types: + self[detector.info_type] = [detector] + self._known_types.insert(detector.order, detector.info_type) + else: + self[detector.info_type].append(detector) + + def reorderByPrefs(self, detectors, prefs): + if prefs is None: + return [] + elif prefs == []: + return detectors + else: + prefs.insert(0, '') + def key_name(d): + return d.name in prefs and prefs.index(d.name) or sys.maxint + return sorted(detectors, key=key_name) + + def __iter__(self): + return iter(self._known_types) + + def registerDetectors(self): + detectors = [v() for v in globals().values() \ + if DetectorBase in getattr(v, '__mro__', [])] + for d in detectors: + if d.can_register: + self.register(d) + + +class DetectorBase(object): + name = "" # "to perform match in DetectorsHub object" + info_type = "override me" + result_key = "override me" + order = 10 # 0 is highest + look_for = "string to look for" + skip_if_found = [] # strings if present stop processin + can_register = False + is_mobile = False + prefs = Storage() # dict(info_type = [name1, name2], ..) 
+ version_splitters = ["/", " "] + _suggested_detectors = None + + def __init__(self): + if not self.name: + self.name = self.__class__.__name__ + self.can_register = (self.__class__.__dict__.get('can_register', True)) + + def detect(self, agent, result): + if agent and self.checkWords(agent): + result[self.info_type] = Storage(name=self.name) + result[self.info_type].is_mobile = self.is_mobile + if not result.is_mobile: + result.is_mobile = result[self.info_type].is_mobile + + version = self.getVersion(agent) + if version: + result[self.info_type].version = version + + return True + return False + + def checkWords(self, agent): + for w in self.skip_if_found: + if w in agent: + return False + if self.look_for in agent: + return True + return False + + def getVersion(self, agent): + # -> version string /None + vs = self.version_splitters + return agent.split(self.look_for + vs[0])[-1].split(vs[1])[0].strip() + + +class OS(DetectorBase): + info_type = "os" + can_register = False + version_splitters = [";", " "] + + +class Dist(DetectorBase): + info_type = "dist" + can_register = False + + +class Flavor(DetectorBase): + info_type = "flavor" + can_register = False + + +class Browser(DetectorBase): + info_type = "browser" + can_register = False + + +class Macintosh(OS): + look_for = 'Macintosh' + prefs = Storage(dist=None) + def getVersion(self, agent): + pass + + +class Firefox(Browser): + look_for = "Firefox" + + +class Konqueror(Browser): + look_for = "Konqueror" + version_splitters = ["/", ";"] + + +class Opera(Browser): + look_for = "Opera" + def getVersion(self, agent): + return agent.split(self.look_for)[1][1:].split(' ')[0] + +class Netscape(Browser): + look_for = "Netscape" + +class MSIE(Browser): + look_for = "MSIE" + skip_if_found = ["Opera"] + name = "Microsoft Internet Explorer" + version_splitters = [" ", ";"] + + +class Galeon(Browser): + look_for = "Galeon" + + +class Safari(Browser): + look_for = "Safari" + + def checkWords(self, agent): + unless_list = 
["Chrome", "OmniWeb"] + if self.look_for in agent: + for word in unless_list: + if word in agent: + return False + return True + + def getVersion(self, agent): + if "Version/" in agent: + return agent.split('Version/')[-1].split(' ')[0].strip() + else: + # Mobile Safari + return agent.split('Safari ')[-1].split(' ')[0].strip() + + +class Linux(OS): + look_for = 'Linux' + prefs = Storage(browser=["Firefox"], + dist=["Ubuntu", "Android"], flavor=None) + + def getVersion(self, agent): + pass + + +class Macintosh(OS): + look_for = 'Macintosh' + prefs = Storage(dist=None, flavor=['MacOS']) + def getVersion(self, agent): + pass + + +class MacOS(Flavor): + look_for = 'Mac OS' + prefs = Storage(browser=['Firefox', 'Opera', "Microsoft Internet Explorer"]) + + def getVersion(self, agent): + version_end_chars = [';', ')'] + part = agent.split('Mac OS')[-1].strip() + for c in version_end_chars: + if c in part: + version = part.split(c)[0] + break + return version.replace('_', '.') + + +class Windows(OS): + look_for = 'Windows' + prefs = Storage(browser=["Microsoft Internet Explorer", 'Firefox'], + dict=None, flavor=None) + + def getVersion(self, agent): + v = agent.split('Windows')[-1].split(';')[0].strip() + if ')' in v: + v = v.split(')')[0] + return v + + +class Ubuntu(Dist): + look_for = 'Ubuntu' + version_splitters = ["/", " "] + prefs = Storage(browser=['Firefox']) + + +class Debian(Dist): + look_for = 'Debian' + version_splitters = ["/", " "] + prefs = Storage(browser=['Firefox']) + + +class Chrome(Browser): + look_for = "Chrome" + version_splitters = ["/", " "] + +class ChromeOS(OS): + look_for = "CrOS" + version_splitters = [" ", " "] + prefs = Storage(browser=['Chrome']) + def getVersion(self, agent): + vs = self.version_splitters + return agent.split(self.look_for+vs[0])[-1].split(vs[1])[1].strip()[:-1] + +class Android(Dist): + look_for = 'Android' + is_mobile = True + + def getVersion(self, agent): + return agent.split('Android')[-1].split(';')[0].strip() + + 
+class iPhone(Dist): + look_for = 'iPhone' + is_mobile = True + + def getVersion(self, agent): + version_end_chars = ['like', ';', ')'] + part = agent.split('CPU OS')[-1].strip() + for c in version_end_chars: + if c in part: + version = 'iOS ' + part.split(c)[0].strip() + break + return version.replace('_', '.') + +class iPad(Dist): + look_for = 'iPad' + is_mobile = True + + def getVersion(self, agent): + version_end_chars = ['like', ';', ')'] + part = agent.split('CPU OS')[-1].strip() + for c in version_end_chars: + if c in part: + version = 'iOS ' + part.split(c)[0].strip() + break + return version.replace('_', '.') + +detectorshub = DetectorsHub() + +def detect(agent): + result = Storage() + prefs = Storage() + _suggested_detectors = [] + for info_type in detectorshub: + if not _suggested_detectors: + detectors = detectorshub[info_type] + _d_prefs = prefs.get(info_type, []) + detectors = detectorshub.reorderByPrefs(detectors, _d_prefs) + if "detector" in locals(): + detector._suggested_detectors = detectors + else: + detectors = _suggested_detectors + for detector in detectors: + # print "detector name: ", detector.name + if detector.detect(agent, result): + prefs = detector.prefs + _suggested_detectors = detector._suggested_detectors + break + return result + + +class Result(Storage): + def __missing__(self, k): + return "" + +""" +THIS VERSION OF DETECT CAUSES IndexErrors. 
+ +def detect(agent): + result = Result() + _suggested_detectors = [] + for info_type in detectorshub: + detectors = _suggested_detectors or detectorshub[info_type] + for detector in detectors: + if detector.detect(agent, result): + if detector.prefs and not detector._suggested_detectors: + _suggested_detectors = detectorshub.reorderByPrefs( + detectors, detector.prefs.get(info_type)) + detector._suggested_detectors = _suggested_detectors + break + return result +""" + +def simple_detect(agent): + """ + -> (os, browser, is_mobile) # tuple of strings + """ + result = detect(agent) + os_list = [] + if 'flavor' in result: os_list.append(result['flavor']['name']) + if 'dist' in result: os_list.append(result['dist']['name']) + if 'os' in result: os_list.append(result['os']['name']) + + os = os_list and " ".join(os_list) or "Unknown OS" + os_version = os_list and ('flavor' in result and result['flavor'] and result['flavor'].get( + 'version')) or ('dist' in result and result['dist'] and result['dist'].get('version')) \ + or ('os' in result and result['os'] and result['os'].get('version')) or "" + browser = 'browser' in result and result['browser']['name'] \ + or 'Unknown Browser' + browser_version = 'browser' in result \ + and result['browser'].get('version') or "" + if browser_version: + browser = " ".join((browser, browser_version)) + if os_version: + os = " ".join((os, os_version)) + #is_mobile = ('dist' in result and result.dist.is_mobile) or ('os' in result and result.os.is_mobile) or False + return os, browser, result.is_mobile + + +if __name__ == '__main__': + import time + import unittest + + data = ( + ("Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-GB; rv:1.9.0.10) Gecko/2009042315 Firefox/3.0.10", + ('MacOS Macintosh X 10.5', 'Firefox 3.0.10'), + {'flavor': {'version': 'X 10.5', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '3.0.10', 'name': 'Firefox'}},), + ("Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_6) AppleWebKit/534.24 (KHTML, 
like Gecko) Chrome/11.0.696.3 Safari/534.24,gzip(gfe)", + ('MacOS Macintosh X 10.6.6', 'Chrome 11.0.696.3'), + {'flavor': {'version': 'X 10.6.6', 'name': 'MacOS'}, 'os': {'name': 'Macintosh'}, 'browser': {'version': '11.0.696.3', 'name': 'Chrome'}},), + ("Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.2) Gecko/20100308 Ubuntu/10.04 (lucid) Firefox/3.6 GTB7.1", + ('Ubuntu Linux 10.04', 'Firefox 3.6'), + {'dist': {'version': '10.04', 'name': 'Ubuntu'}, 'os': {'name': 'Linux'}, 'browser': {'version': '3.6', 'name': 'Firefox'}},), + ("Mozilla/5.0 (Linux; U; Android 2.2.1; fr-ch; A43 Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1", + ('Android Linux 2.2.1', 'Safari 4.0'), + {'dist': {'version': '2.2.1', 'name': 'Android'}, 'os': {'name': 'Linux'}, 'browser': {'version': '4.0', 'name': 'Safari'}},), + ("Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420+ (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3", + ('MacOS IPhone X', 'Safari 3.0'), + {'flavor': {'version': 'X', 'name': 'MacOS'}, 'dist': {'version': 'X', 'name': 'IPhone'}, 'browser': {'version': '3.0', 'name': 'Safari'}},), + ("Mozilla/5.0 (X11; CrOS i686 0.0.0) AppleWebKit/534.24 (KHTML, like Gecko) Chrome/11.0.696.27 Safari/534.24,gzip(gfe)", + ('ChromeOS 0.0.0', 'Chrome 11.0.696.27'), + {'os': {'name': 'ChromeOS', 'version': '0.0.0'}, 'browser': {'name': 'Chrome', 'version': '11.0.696.27'}},), + ("Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.1) Opera 7.02 [en]", + ('Windows NT 5.1', 'Opera 7.02'), + {'os': {'name': 'Windows', 'version': 'NT 5.1'}, 'browser': {'name': 'Opera', 'version': '7.02'}},), + ("Opera/9.80 (X11; Linux i686; U; en) Presto/2.9.168 Version/11.50", + ("Linux", "Opera 9.80"), + {"os": {"name": "Linux"}, "browser": {"name": "Opera", "version": "9.80"}},), + ("Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.5) Gecko/20060127 Netscape/8.1", + ("Windows NT 5.1", "Netscape 8.1"), + {'os': {'name': 'Windows', 'version': 
'NT 5.1'}, 'browser': {'name': 'Netscape', 'version': '8.1'}},), + ) + + class TestHAP(unittest.TestCase): + def setUp(self): + self.harass_repeat = 1000 + self.data = data + + def test_simple_detect(self): + for agent, simple_res, res in data: + self.assertEqual(simple_detect(agent), simple_res) + + def test_detect(self): + for agent, simple_res, res in data: + self.assertEqual(detect(agent), res) + + def test_harass(self): + then = time.time() + for agent, simple_res, res in data * self.harass_repeat: + detect(agent) + time_taken = time.time() - then + no_of_tests = len(self.data) * self.harass_repeat + print "\nTime taken for %s detecttions: %s" \ + % (no_of_tests, time_taken) + print "Time taken for single detecttion: ", \ + time_taken / (len(self.data) * self.harass_repeat) + + unittest.main() + + +class mobilize(object): + + def __init__(self, func): + self.func = func + + def __call__(self): + from gluon import current + user_agent = current.request.user_agent() + if user_agent.is_mobile: + items = current.response.view.split('.') + items.insert(-1,'mobile') + current.response.view = '.'.join(items) + return self.func() ADDED gluon/custom_import.py Index: gluon/custom_import.py ================================================================== --- /dev/null +++ gluon/custom_import.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import __builtin__ +import os +import re +import sys +import threading + +# Install the new import function: +def custom_import_install(web2py_path): + global _web2py_importer + global _web2py_path + if _web2py_importer: + return # Already installed + _web2py_path = web2py_path + _web2py_importer = _Web2pyImporter(web2py_path) + __builtin__.__import__ = _web2py_importer + +def is_tracking_changes(): + """ + @return: True: neo_importer is tracking changes made to Python source + files. False: neo_import does not reload Python modules. 
+ """ + + global _is_tracking_changes + return _is_tracking_changes + +def track_changes(track=True): + """ + Tell neo_importer to start/stop tracking changes made to Python modules. + @param track: True: Start tracking changes. False: Stop tracking changes. + """ + + global _is_tracking_changes + global _web2py_importer + global _web2py_date_tracker_importer + assert track is True or track is False, "Boolean expected." + if track == _is_tracking_changes: + return + if track: + if not _web2py_date_tracker_importer: + _web2py_date_tracker_importer = \ + _Web2pyDateTrackerImporter(_web2py_path) + __builtin__.__import__ = _web2py_date_tracker_importer + else: + __builtin__.__import__ = _web2py_importer + _is_tracking_changes = track + +_STANDARD_PYTHON_IMPORTER = __builtin__.__import__ # Keep standard importer +_web2py_importer = None # The standard web2py importer +_web2py_date_tracker_importer = None # The web2py importer with date tracking +_web2py_path = None # Absolute path of the web2py directory + +_is_tracking_changes = False # The tracking mode + +class _BaseImporter(object): + """ + The base importer. Dispatch the import the call to the standard Python + importer. + """ + + def begin(self): + """ + Many imports can be made for a single import statement. This method + help the management of this aspect. + """ + + def __call__(self, name, globals=None, locals=None, + fromlist=None, level=-1): + """ + The import method itself. + """ + return _STANDARD_PYTHON_IMPORTER(name, + globals, + locals, + fromlist, + level) + + def end(self): + """ + Needed for clean up. + """ + + +class _DateTrackerImporter(_BaseImporter): + """ + An importer tracking the date of the module files and reloading them when + they have changed. 
+ """ + + _PACKAGE_PATH_SUFFIX = os.path.sep+"__init__.py" + + def __init__(self): + super(_DateTrackerImporter, self).__init__() + self._import_dates = {} # Import dates of the files of the modules + # Avoid reloading cause by file modifications of reload: + self._tl = threading.local() + self._tl._modules_loaded = None + + def begin(self): + self._tl._modules_loaded = set() + + def __call__(self, name, globals=None, locals=None, + fromlist=None, level=-1): + """ + The import method itself. + """ + + globals = globals or {} + locals = locals or {} + fromlist = fromlist or [] + + call_begin_end = self._tl._modules_loaded is None + if call_begin_end: + self.begin() + + try: + self._tl.globals = globals + self._tl.locals = locals + self._tl.level = level + + # Check the date and reload if needed: + self._update_dates(name, fromlist) + + # Try to load the module and update the dates if it works: + result = super(_DateTrackerImporter, self) \ + .__call__(name, globals, locals, fromlist, level) + # Module maybe loaded for the 1st time so we need to set the date + self._update_dates(name, fromlist) + return result + except Exception, e: + raise e # Don't hide something that went wrong + finally: + if call_begin_end: + self.end() + + def _update_dates(self, name, fromlist): + """ + Update all the dates associated to the statement import. A single + import statement may import many modules. + """ + + self._reload_check(name) + if fromlist: + for fromlist_name in fromlist: + self._reload_check("%s.%s" % (name, fromlist_name)) + + def _reload_check(self, name): + """ + Update the date associated to the module and reload the module if + the file has changed. + """ + + module = sys.modules.get(name) + file = self._get_module_file(module) + if file: + date = self._import_dates.get(file) + new_date = None + reload_mod = False + mod_to_pack = False # Module turning into a package? 
(special case) + try: + new_date = os.path.getmtime(file) + except: + self._import_dates.pop(file, None) # Clean up + # Handle module changing in package and + #package changing in module: + if file.endswith(".py"): + # Get path without file ext: + file = os.path.splitext(file)[0] + reload_mod = os.path.isdir(file) \ + and os.path.isfile(file+self._PACKAGE_PATH_SUFFIX) + mod_to_pack = reload_mod + else: # Package turning into module? + file += ".py" + reload_mod = os.path.isfile(file) + if reload_mod: + new_date = os.path.getmtime(file) # Refresh file date + if reload_mod or not date or new_date > date: + self._import_dates[file] = new_date + if reload_mod or (date and new_date > date): + if module not in self._tl._modules_loaded: + if mod_to_pack: + # Module turning into a package: + mod_name = module.__name__ + del sys.modules[mod_name] # Delete the module + # Reload the module: + super(_DateTrackerImporter, self).__call__ \ + (mod_name, self._tl.globals, self._tl.locals, [], + self._tl.level) + else: + reload(module) + self._tl._modules_loaded.add(module) + + def end(self): + self._tl._modules_loaded = None + + @classmethod + def _get_module_file(cls, module): + """ + Get the absolute path file associated to the module or None. + """ + + file = getattr(module, "__file__", None) + if file: + # Make path absolute if not: + #file = os.path.join(cls.web2py_path, file) + + file = os.path.splitext(file)[0]+".py" # Change .pyc for .py + if file.endswith(cls._PACKAGE_PATH_SUFFIX): + file = os.path.dirname(file) # Track dir for packages + return file + +class _Web2pyImporter(_BaseImporter): + """ + The standard web2py importer. Like the standard Python importer but it + tries to transform import statements as something like + "import applications.app_name.modules.x". If the import failed, fall back + on _BaseImporter. 
+ """ + + _RE_ESCAPED_PATH_SEP = re.escape(os.path.sep) # os.path.sep escaped for re + + def __init__(self, web2py_path): + """ + @param web2py_path: The absolute path of the web2py installation. + """ + + global DEBUG + super(_Web2pyImporter, self).__init__() + self.web2py_path = web2py_path + self.__web2py_path_os_path_sep = self.web2py_path+os.path.sep + self.__web2py_path_os_path_sep_len = len(self.__web2py_path_os_path_sep) + self.__RE_APP_DIR = re.compile( + self._RE_ESCAPED_PATH_SEP.join( \ + ( \ + #"^" + re.escape(web2py_path), # Not working with Python 2.5 + "^(" + "applications", + "[^", + "]+)", + "", + ) )) + + def _matchAppDir(self, file_path): + """ + Does the file in a directory inside the "applications" directory? + """ + + if file_path.startswith(self.__web2py_path_os_path_sep): + file_path = file_path[self.__web2py_path_os_path_sep_len:] + return self.__RE_APP_DIR.match(file_path) + return False + + def __call__(self, name, globals=None, locals=None, + fromlist=None, level=-1): + """ + The import method itself. + """ + + globals = globals or {} + locals = locals or {} + fromlist = fromlist or [] + + self.begin() + #try: + # if not relative and not from applications: + if not name.startswith(".") and level <= 0 \ + and not name.startswith("applications.") \ + and isinstance(globals, dict): + # Get the name of the file do the import + caller_file_name = os.path.join(self.web2py_path, \ + globals.get("__file__", "")) + # Is the path in an application directory? + match_app_dir = self._matchAppDir(caller_file_name) + if match_app_dir: + try: + # Get the prefix to add for the import + # (like applications.app_name.modules): + modules_prefix = \ + ".".join((match_app_dir.group(1). \ + replace(os.path.sep, "."), "modules")) + if not fromlist: + # import like "import x" or "import x.y" + return self.__import__dot(modules_prefix, name, + globals, locals, fromlist, level) + else: + # import like "from x import a, b, ..." 
+ return super(_Web2pyImporter, self) \ + .__call__(modules_prefix+"."+name, + globals, locals, fromlist, level) + except ImportError: + pass + return super(_Web2pyImporter, self).__call__(name, globals, locals, + fromlist, level) + #except Exception, e: + # raise e # Don't hide something that went wrong + #finally: + self.end() + + def __import__dot(self, prefix, name, globals, locals, fromlist, + level): + """ + Here we will import x.y.z as many imports like: + from applications.app_name.modules import x + from applications.app_name.modules.x import y + from applications.app_name.modules.x.y import z. + x will be the module returned. + """ + + result = None + for name in name.split("."): + new_mod = super(_Web2pyImporter, self).__call__(prefix, globals, + locals, [name], level) + try: + result = result or new_mod.__dict__[name] + except KeyError: + raise ImportError() + prefix += "." + name + return result + +class _Web2pyDateTrackerImporter(_Web2pyImporter, _DateTrackerImporter): + """ + Like _Web2pyImporter but using a _DateTrackerImporter. 
+ """ + + + ADDED gluon/custom_import.pyc Index: gluon/custom_import.pyc ================================================================== --- /dev/null +++ gluon/custom_import.pyc cannot compute difference between binary files ADDED gluon/dal.py Index: gluon/dal.py ================================================================== --- /dev/null +++ gluon/dal.py @@ -0,0 +1,6316 @@ +#!/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Thanks to + * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support + * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support + * Denes + * Chris Clark + * clach05 + * Denes Lengyel + * and many others who have contributed to current and previous versions + +This file contains the DAL support for many relational databases, +including: +- SQLite +- MySQL +- Postgres +- Oracle +- MS SQL +- DB2 +- Interbase +- Ingres +- SapDB (experimental) +- Cubrid (experimental) +- CouchDB (experimental) +- MongoDB (in progress) +- Google:nosql +- Google:sql + +Example of usage: + +>>> # from dal import DAL, Field + +### create DAL connection (and create DB if not exists) +>>> db=DAL(('mysql://a:b@locahost/x','sqlite://storage.sqlite'),folder=None) + +### define a table 'person' (create/aster as necessary) +>>> person = db.define_table('person',Field('name','string')) + +### insert a record +>>> id = person.insert(name='James') + +### retrieve it by id +>>> james = person(id) + +### retrieve it by name +>>> james = person(name='James') + +### retrieve it by arbitrary query +>>> query = (person.name=='James')&(person.name.startswith('J')) +>>> james = db(query).select(person.ALL)[0] + +### update one record +>>> james.update_record(name='Jim') + +### update multiple records by query +>>> db(person.name.like('J%')).update(name='James') +1 + +### delete records by query +>>> 
db(person.name.lower()=='jim').delete() +0 + +### retrieve multiple records (rows) +>>> people = db(person).select(orderby=person.name,groupby=person.name,limitby=(0,100)) + +### further filter them +>>> james = people.find(lambda row: row.name=='James').first() +>>> print james.id, james.name +1 James + +### check aggrgates +>>> counter = person.id.count() +>>> print db(person).select(counter).first()(counter) +1 + +### delete one record +>>> james.delete_record() +1 + +### delete (drop) entire database table +>>> person.drop() + +Supported field types: +id string text boolean integer double decimal password upload blob time date datetime, + +Supported DAL URI strings: +'sqlite://test.db' +'sqlite:memory' +'jdbc:sqlite://test.db' +'mysql://root:none@localhost/test' +'postgres://mdipierro:none@localhost/test' +'jdbc:postgres://mdipierro:none@localhost/test' +'mssql://web2py:none@A64X2/web2py_test' +'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings +'oracle://username:password@database' +'firebird://user:password@server:3050/database' +'db2://DSN=dsn;UID=user;PWD=pass' +'firebird://username:password@hostname/database' +'firebird_embedded://username:password@c://path' +'informix://user:password@server:3050/database' +'informixu://user:password@server:3050/database' # unicode informix +'google:datastore' # for google app engine datastore +'google:sql' # for google app engine with sql (mysql compatible) +'teradata://DSN=dsn;UID=user;PWD=pass' # experimental + +For more info: +help(DAL) +help(Field) +""" + +################################################################################### +# this file orly exposes DAL and Field +################################################################################### + +__all__ = ['DAL', 'Field'] + +MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length +DEFAULTLENGTH = {'string':512, + 'password':512, + 'upload':512, + 'text':2**15, + 'blob':2**31} + +import re +import sys +import locale 
+import os +import types +import cPickle +import datetime +import threading +import time +import cStringIO +import csv +import copy +import socket +import logging +import copy_reg +import base64 +import shutil +import marshal +import decimal +import struct +import urllib +import hashlib +import uuid +import glob + +CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, + types.MethodType, types.BuiltinMethodType) + + +################################################################################### +# following checks allows running of dal without web2py as a standalone module +################################################################################### +try: + from utils import web2py_uuid +except ImportError: + import uuid + def web2py_uuid(): return str(uuid.uuid4()) + +try: + import portalocker + have_portalocker = True +except ImportError: + have_portalocker = False + +try: + import serializers + have_serializers = True +except ImportError: + have_serializers = False + +try: + import validators + have_validators = True +except ImportError: + have_validators = False + +logger = logging.getLogger("web2py.dal") +DEFAULT = lambda:0 + +sql_locker = threading.RLock() +thread = threading.local() + +# internal representation of tables with field +# <table>.<field>, tables and fields may only be [a-zA-Z0-0_] + +regex_dbname = re.compile('^(\w+)(\:\w+)*') +table_field = re.compile('^([\w_]+)\.([\w_]+)$') +regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') +regex_cleanup_fn = re.compile('[\'"\s;]+') +string_unpack=re.compile('(?<!\|)\|(?!\|)') +regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') + + + +# list of drivers will be built on the fly +# and lists only what is available +drivers = [] + +try: + from new import 
classobj + from google.appengine.ext import db as gae + from google.appengine.api import namespace_manager, rdbms + from google.appengine.api.datastore_types import Key ### needed for belongs on ID + from google.appengine.ext.db.polymodel import PolyModel + drivers.append('google') +except ImportError: + pass + +if not 'google' in drivers: + + try: + from pysqlite2 import dbapi2 as sqlite3 + drivers.append('pysqlite2') + except ImportError: + try: + from sqlite3 import dbapi2 as sqlite3 + drivers.append('SQLite3') + except ImportError: + logger.debug('no sqlite3 or pysqlite2.dbapi2 driver') + + try: + import contrib.pymysql as pymysql + drivers.append('pymysql') + except ImportError: + logger.debug('no pymysql driver') + + try: + import psycopg2 + from psycopg2.extensions import adapt as psycopg2_adapt + drivers.append('PostgreSQL') + except ImportError: + logger.debug('no psycopg2 driver') + + try: + import cx_Oracle + drivers.append('Oracle') + except ImportError: + logger.debug('no cx_Oracle driver') + + try: + import pyodbc + drivers.append('MSSQL/DB2') + except ImportError: + logger.debug('no MSSQL/DB2 driver') + + try: + import kinterbasdb + drivers.append('Interbase') + except ImportError: + logger.debug('no kinterbasdb driver') + + try: + import firebirdsql + drivers.append('Firebird') + except ImportError: + logger.debug('no Firebird driver') + + try: + import informixdb + drivers.append('Informix') + logger.warning('Informix support is experimental') + except ImportError: + logger.debug('no informixdb driver') + + try: + import sapdb + drivers.append('SAPDB') + logger.warning('SAPDB support is experimental') + except ImportError: + logger.debug('no sapdb driver') + + try: + import cubriddb + drivers.append('Cubrid') + logger.warning('Cubrid support is experimental') + except ImportError: + logger.debug('no cubriddb driver') + + try: + from com.ziclix.python.sql import zxJDBC + import java.sql + # Try sqlite jdbc driver from 
http://www.zentus.com/sqlitejdbc/ + from org.sqlite import JDBC # required by java.sql; ensure we have it + drivers.append('zxJDBC') + logger.warning('zxJDBC support is experimental') + is_jdbc = True + except ImportError: + logger.debug('no zxJDBC driver') + is_jdbc = False + + try: + import ingresdbi + drivers.append('Ingres') + except ImportError: + logger.debug('no Ingres driver') + # NOTE could try JDBC....... + + try: + import couchdb + drivers.append('CouchDB') + except ImportError: + logger.debug('no couchdb driver') + + try: + import pymongo + drivers.append('mongoDB') + except: + logger.debug('no mongoDB driver') + +def OR(a,b): + return a|b + +def AND(a,b): + return a&b + +if 'google' in drivers: + + is_jdbc = False + + class GAEDecimalProperty(gae.Property): + """ + GAE decimal implementation + """ + data_type = decimal.Decimal + + def __init__(self, precision, scale, **kwargs): + super(GAEDecimalProperty, self).__init__(self, **kwargs) + d = '1.' + for x in range(scale): + d += '0' + self.round = decimal.Decimal(d) + + def get_value_for_datastore(self, model_instance): + value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance) + if value: + return str(value) + else: + return None + + def make_value_from_datastore(self, value): + if value: + return decimal.Decimal(value).quantize(self.round) + else: + return None + + def validate(self, value): + value = super(GAEDecimalProperty, self).validate(value) + if value is None or isinstance(value, decimal.Decimal): + return value + elif isinstance(value, basestring): + return decimal.Decimal(value) + raise gae.BadValueError("Property %s must be a Decimal or string." 
% self.name) + +################################################################################### +# class that handles connection pooling (all adapters derived form this one) +################################################################################### + +class ConnectionPool(object): + + pools = {} + check_active_connection = True + + @staticmethod + def set_folder(folder): + thread.folder = folder + + # ## this allows gluon to commit/rollback all dbs in this thread + + @staticmethod + def close_all_instances(action): + """ to close cleanly databases in a multithreaded environment """ + if not hasattr(thread,'instances'): + return + while thread.instances: + instance = thread.instances.pop() + getattr(instance,action)() + # ## if you want pools, recycle this connection + really = True + if instance.pool_size: + sql_locker.acquire() + pool = ConnectionPool.pools[instance.uri] + if len(pool) < instance.pool_size: + pool.append(instance.connection) + really = False + sql_locker.release() + if really: + getattr(instance,'close')() + return + + def find_or_make_work_folder(self): + """ this actually does not make the folder. 
it has to be there """ + if hasattr(thread,'folder'): + self.folder = thread.folder + else: + self.folder = thread.folder = '' + + # Creating the folder if it does not exist + if False and self.folder and not os.path.exists(self.folder): + os.mkdir(self.folder) + + def pool_connection(self, f, cursor=True): + """ + this function defines: self.connection and self.cursor (iff cursor is True) + if self.pool_size>0 it will try pull the connection from the pool + if the connection is not active (closed by db server) it will loop + if not self.pool_size or no active connections in pool makes a new one + """ + if not self.pool_size: + self.connection = f() + self.cursor = cursor and self.connection.cursor() + else: + uri = self.uri + while True: + sql_locker.acquire() + if not uri in ConnectionPool.pools: + ConnectionPool.pools[uri] = [] + if ConnectionPool.pools[uri]: + self.connection = ConnectionPool.pools[uri].pop() + sql_locker.release() + self.cursor = cursor and self.connection.cursor() + try: + if self.cursor and self.check_active_connection: + self.execute('SELECT 1;') + break + except: + pass + else: + sql_locker.release() + self.connection = f() + self.cursor = cursor and self.connection.cursor() + break + if not hasattr(thread,'instances'): + thread.instances = [] + thread.instances.append(self) + + +################################################################################### +# this is a generic adapter that does nothing; all others are derived form this one +################################################################################### + +class BaseAdapter(ConnectionPool): + + driver = None + maxcharlength = MAXCHARLENGTH + commit_on_alter_table = False + support_distributed_transaction = False + uploads_in_blob = False + types = { + 'boolean': 'CHAR(1)', + 'string': 'CHAR(%(length)s)', + 'text': 'TEXT', + 'password': 'CHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 'CHAR(%(length)s)', + 'integer': 'INTEGER', + 'double': 'DOUBLE', + 'decimal': 
'DOUBLE', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + } + + def adapt(self,obj): + return "'%s'" % obj.replace("'", "''") + + def integrity_error(self): + return self.driver.IntegrityError + + def operational_error(self): + return self.driver.OperationalError + + def file_exists(self, filename): + """ + to be used ONLY for files that on GAE may not be on filesystem + """ + return os.path.exists(filename) + + def file_open(self, filename, mode='rb', lock=True): + """ + to be used ONLY for files that on GAE may not be on filesystem + """ + fileobj = open(filename,mode) + if have_portalocker and lock: + if mode in ('r','rb'): + portalocker.lock(fileobj,portalocker.LOCK_SH) + elif mode in ('w','wb','a'): + portalocker.lock(fileobj,portalocker.LOCK_EX) + else: + fileobj.close() + raise RuntimeError, "Unsupported file_open mode" + return fileobj + + def file_close(self, fileobj, unlock=True): + """ + to be used ONLY for files that on GAE may not be on filesystem + """ + if fileobj: + if have_portalocker and unlock: + portalocker.unlock(fileobj) + fileobj.close() + + def file_delete(self, filename): + os.unlink(filename) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "None" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + class Dummy(object): + lastrowid = 1 + def __getattr__(self, value): + return lambda *a, **b: [] + self.connection = Dummy() + self.cursor = Dummy() + + def sequence_name(self,tablename): + return '%s_sequence' % tablename + + def trigger_name(self,tablename): + return '%s_sequence' % tablename + + + def create_table(self, table, migrate=True, 
fake_migrate=False, polymodel=None): + fields = [] + sql_fields = {} + sql_fields_aux = {} + TFK = {} + tablename = table._tablename + sortable = 0 + for field in table: + sortable += 1 + k = field.name + if isinstance(field.type,SQLCustomType): + ftype = field.type.native or field.type.type + elif field.type.startswith('reference'): + referenced = field.type[10:].strip() + constraint_name = self.constraint_name(tablename, field.name) + if hasattr(table,'_primarykey'): + rtablename,rfieldname = referenced.split('.') + rtable = table._db[rtablename] + rfield = rtable[rfieldname] + # must be PK reference or unique + if rfieldname in rtable._primarykey or rfield.unique: + ftype = self.types[rfield.type[:9]] % dict(length=rfield.length) + # multicolumn primary key reference? + if not rfield.unique and len(rtable._primarykey)>1 : + # then it has to be a table level FK + if rtablename not in TFK: + TFK[rtablename] = {} + TFK[rtablename][rfieldname] = field.name + else: + ftype = ftype + \ + self.types['reference FK'] %dict(\ + constraint_name=constraint_name, + table_name=tablename, + field_name=field.name, + foreign_key='%s (%s)'%(rtablename, rfieldname), + on_delete_action=field.ondelete) + else: + # make a guess here for circular references + id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id' + ftype = self.types[field.type[:9]]\ + % dict(table_name=tablename, + field_name=field.name, + constraint_name=constraint_name, + foreign_key=referenced + ('(%s)' % id_fieldname), + on_delete_action=field.ondelete) + elif field.type.startswith('list:reference'): + ftype = self.types[field.type[:14]] + elif field.type.startswith('decimal'): + precision, scale = map(int,field.type[8:-1].split(',')) + ftype = self.types[field.type[:7]] % \ + dict(precision=precision,scale=scale) + elif not field.type in self.types: + raise SyntaxError, 'Field: unknown field type: %s for %s' % \ + (field.type, field.name) + else: + ftype = self.types[field.type]\ + % 
dict(length=field.length) + if not field.type.startswith('id') and not field.type.startswith('reference'): + if field.notnull: + ftype += ' NOT NULL' + else: + ftype += self.ALLOW_NULL() + if field.unique: + ftype += ' UNIQUE' + + # add to list of fields + sql_fields[field.name] = dict(sortable=sortable, + type=str(field.type), + sql=ftype) + + if isinstance(field.default,(str,int,float)): + # caveat: sql_fields and sql_fields_aux differ for default values + # sql_fields is used to trigger migrations and sql_fields_aux + # are used for create table + # the reason is that we do not want to trigger a migration simply + # because a default value changes + not_null = self.NOT_NULL(field.default,field.type) + ftype = ftype.replace('NOT NULL',not_null) + sql_fields_aux[field.name] = dict(sql=ftype) + + fields.append('%s %s' % (field.name, ftype)) + other = ';' + + # backend-specific extensions to fields + if self.dbengine == 'mysql': + if not hasattr(table, "_primarykey"): + fields.append('PRIMARY KEY(%s)' % table._id.name) + other = ' ENGINE=InnoDB CHARACTER SET utf8;' + + fields = ',\n '.join(fields) + for rtablename in TFK: + rfields = TFK[rtablename] + pkeys = table._db[rtablename]._primarykey + fkeys = [ rfields[k] for k in pkeys ] + fields = fields + ',\n ' + \ + self.types['reference TFK'] %\ + dict(table_name=tablename, + field_name=', '.join(fkeys), + foreign_table=rtablename, + foreign_key=', '.join(pkeys), + on_delete_action=field.ondelete) + + if hasattr(table,'_primarykey'): + query = '''CREATE TABLE %s(\n %s,\n %s) %s''' % \ + (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)),other) + else: + query = '''CREATE TABLE %s(\n %s\n)%s''' % \ + (tablename, fields, other) + + if self.uri.startswith('sqlite:///'): + path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' + dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding) + else: + dbpath = self.folder + + if not migrate: + return query + 
elif self.uri.startswith('sqlite:memory'): + table._dbt = None + elif isinstance(migrate, str): + table._dbt = os.path.join(dbpath, migrate) + else: + table._dbt = os.path.join(dbpath, '%s_%s.table' \ + % (table._db._uri_hash, tablename)) + if table._dbt: + table._loggername = os.path.join(dbpath, 'sql.log') + logfile = self.file_open(table._loggername, 'a') + else: + logfile = None + if not table._dbt or not self.file_exists(table._dbt): + if table._dbt: + logfile.write('timestamp: %s\n' + % datetime.datetime.today().isoformat()) + logfile.write(query + '\n') + if not fake_migrate: + self.create_sequence_and_triggers(query,table) + table._db.commit() + if table._dbt: + tfile = self.file_open(table._dbt, 'w') + cPickle.dump(sql_fields, tfile) + self.file_close(tfile) + if fake_migrate: + logfile.write('faked!\n') + else: + logfile.write('success!\n') + else: + tfile = self.file_open(table._dbt, 'r') + try: + sql_fields_old = cPickle.load(tfile) + except EOFError: + self.file_close(tfile) + self.file_close(logfile) + raise RuntimeError, 'File %s appears corrupted' % table._dbt + self.file_close(tfile) + if sql_fields != sql_fields_old: + self.migrate_table(table, + sql_fields, sql_fields_old, + sql_fields_aux, logfile, + fake_migrate=fake_migrate) + self.file_close(logfile) + return query + + def migrate_table( + self, + table, + sql_fields, + sql_fields_old, + sql_fields_aux, + logfile, + fake_migrate=False, + ): + tablename = table._tablename + def fix(item): + k,v=item + if not isinstance(v,dict): + v=dict(type='unkown',sql=v) + return k.lower(),v + ### make sure all field names are lower case to avoid conflicts + sql_fields = dict(map(fix,sql_fields.items())) + sql_fields_old = dict(map(fix,sql_fields_old.items())) + sql_fields_aux = dict(map(fix,sql_fields_aux.items())) + + keys = sql_fields.keys() + for key in sql_fields_old: + if not key in keys: + keys.append(key) + if self.dbengine == 'mssql': + new_add = '; ALTER TABLE %s ADD ' % tablename + else: + 
new_add = ', ADD ' + + metadata_change = False + sql_fields_current = copy.copy(sql_fields_old) + for key in keys: + query = None + if not key in sql_fields_old: + sql_fields_current[key] = sql_fields[key] + query = ['ALTER TABLE %s ADD %s %s;' % \ + (tablename, key, + sql_fields_aux[key]['sql'].replace(', ', new_add))] + metadata_change = True + elif self.dbengine == 'sqlite': + if key in sql_fields: + sql_fields_current[key] = sql_fields[key] + metadata_change = True + elif not key in sql_fields: + del sql_fields_current[key] + if not self.dbengine in ('firebird',): + query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)] + else: + query = ['ALTER TABLE %s DROP %s;' % (tablename, key)] + metadata_change = True + elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \ + and not isinstance(table[key].type, SQLCustomType) \ + and not (table[key].type.startswith('reference') and \ + sql_fields[key]['sql'].startswith('INT,') and \ + sql_fields_old[key]['sql'].startswith('INT NOT NULL,')): + sql_fields_current[key] = sql_fields[key] + t = tablename + tt = sql_fields_aux[key]['sql'].replace(', ', new_add) + if not self.dbengine in ('firebird',): + query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), + 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), + 'ALTER TABLE %s DROP COLUMN %s;' % (t, key), + 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), + 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), + 'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)] + else: + query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt), + 'UPDATE %s SET %s__tmp=%s;' % (t, key, key), + 'ALTER TABLE %s DROP %s;' % (t, key), + 'ALTER TABLE %s ADD %s %s;' % (t, key, tt), + 'UPDATE %s SET %s=%s__tmp;' % (t, key, key), + 'ALTER TABLE %s DROP %s__tmp;' % (t, key)] + metadata_change = True + elif sql_fields[key]['type'] != sql_fields_old[key]['type']: + sql_fields_current[key] = sql_fields[key] + metadata_change = True + + if query: + logfile.write('timestamp: %s\n' + % 
datetime.datetime.today().isoformat()) + table._db['_lastsql'] = '\n'.join(query) + for sub_query in query: + logfile.write(sub_query + '\n') + if not fake_migrate: + self.execute(sub_query) + # caveat. mysql, oracle and firebird do not allow multiple alter table + # in one transaction so we must commit partial transactions and + # update table._dbt after alter table. + if table._db._adapter.commit_on_alter_table: + table._db.commit() + tfile = self.file_open(table._dbt, 'w') + cPickle.dump(sql_fields_current, tfile) + self.file_close(tfile) + logfile.write('success!\n') + else: + logfile.write('faked!\n') + elif metadata_change: + tfile = self.file_open(table._dbt, 'w') + cPickle.dump(sql_fields_current, tfile) + self.file_close(tfile) + + if metadata_change and \ + not (query and self.dbengine in ('mysql','oracle','firebird')): + table._db.commit() + tfile = self.file_open(table._dbt, 'w') + cPickle.dump(sql_fields_current, tfile) + self.file_close(tfile) + + def LOWER(self,first): + return 'LOWER(%s)' % self.expand(first) + + def UPPER(self,first): + return 'UPPER(%s)' % self.expand(first) + + def EXTRACT(self,first,what): + return "EXTRACT(%s FROM %s)" % (what, self.expand(first)) + + def AGGREGATE(self,first,what): + return "%s(%s)" % (what,self.expand(first)) + + def JOIN(self): + return 'JOIN' + + def LEFT_JOIN(self): + return 'LEFT JOIN' + + def RANDOM(self): + return 'Random()' + + def NOT_NULL(self,default,field_type): + return 'NOT NULL DEFAULT %s' % self.represent(default,field_type) + + def COALESCE(self,first,second): + expressions = [self.expand(first)]+[self.expand(e) for e in second] + return 'COALESCE(%s)' % ','.join(expressions) + + def COALESCE_ZERO(self,first): + return 'COALESCE(%s,0)' % self.expand(first) + + def RAW(self,first): + return first + + def ALLOW_NULL(self): + return '' + + def SUBSTRING(self,field,parameters): + return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) + + def PRIMARY_KEY(self,key): + return 
'PRIMARY KEY(%s)' % key + + def _drop(self,table,mode): + return ['DROP TABLE %s;' % table] + + def drop(self, table, mode=''): + if table._dbt: + logfile = self.file_open(table._loggername, 'a') + queries = self._drop(table, mode) + for query in queries: + if table._dbt: + logfile.write(query + '\n') + self.execute(query) + table._db.commit() + del table._db[table._tablename] + del table._db.tables[table._db.tables.index(table._tablename)] + table._db._update_referenced_by(table._tablename) + if table._dbt: + self.file_delete(table._dbt) + logfile.write('success!\n') + + def _insert(self,table,fields): + keys = ','.join(f.name for f,v in fields) + values = ','.join(self.expand(v,f.type) for f,v in fields) + return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) + + def insert(self,table,fields): + query = self._insert(table,fields) + try: + self.execute(query) + except Exception, e: + if isinstance(e,self.integrity_error_class()): + return None + raise e + if hasattr(table,'_primarykey'): + return dict([(k[0].name, k[1]) for k in fields \ + if k[0].name in table._primarykey]) + id = self.lastrowid(table) + if not isinstance(id,int): + return id + rid = Reference(id) + (rid._table, rid._record) = (table, None) + return rid + + def bulk_insert(self,table,items): + return [self.insert(table,item) for item in items] + + def NOT(self,first): + return '(NOT %s)' % self.expand(first) + + def AND(self,first,second): + return '(%s AND %s)' % (self.expand(first),self.expand(second)) + + def OR(self,first,second): + return '(%s OR %s)' % (self.expand(first),self.expand(second)) + + def BELONGS(self,first,second): + if isinstance(second,str): + return '(%s IN (%s))' % (self.expand(first),second[:-1]) + elif second==[] or second==(): + return '(1=0)' + items =','.join(self.expand(item,first.type) for item in second) + return '(%s IN (%s))' % (self.expand(first),items) + + def LIKE(self,first,second): + return '(%s LIKE %s)' % 
(self.expand(first),self.expand(second,'string')) + + def STARTSWITH(self,first,second): + return '(%s LIKE %s)' % (self.expand(first),self.expand(second+'%','string')) + + def ENDSWITH(self,first,second): + return '(%s LIKE %s)' % (self.expand(first),self.expand('%'+second,'string')) + + def CONTAINS(self,first,second): + if first.type in ('string','text'): + key = '%'+str(second).replace('%','%%')+'%' + elif first.type.startswith('list:'): + key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' + return '(%s LIKE %s)' % (self.expand(first),self.expand(key,'string')) + + def EQ(self,first,second=None): + if second is None: + return '(%s IS NULL)' % self.expand(first) + return '(%s = %s)' % (self.expand(first),self.expand(second,first.type)) + + def NE(self,first,second=None): + if second is None: + return '(%s IS NOT NULL)' % self.expand(first) + return '(%s <> %s)' % (self.expand(first),self.expand(second,first.type)) + + def LT(self,first,second=None): + return '(%s < %s)' % (self.expand(first),self.expand(second,first.type)) + + def LE(self,first,second=None): + return '(%s <= %s)' % (self.expand(first),self.expand(second,first.type)) + + def GT(self,first,second=None): + return '(%s > %s)' % (self.expand(first),self.expand(second,first.type)) + + def GE(self,first,second=None): + return '(%s >= %s)' % (self.expand(first),self.expand(second,first.type)) + + def ADD(self,first,second): + return '(%s + %s)' % (self.expand(first),self.expand(second,first.type)) + + def SUB(self,first,second): + return '(%s - %s)' % (self.expand(first),self.expand(second,first.type)) + + def MUL(self,first,second): + return '(%s * %s)' % (self.expand(first),self.expand(second,first.type)) + + def DIV(self,first,second): + return '(%s / %s)' % (self.expand(first),self.expand(second,first.type)) + + def MOD(self,first,second): + return '(%s %% %s)' % (self.expand(first),self.expand(second,first.type)) + + def AS(self,first,second): + return '%s AS %s' % 
(self.expand(first),second) + + def ON(self,first,second): + return '%s ON %s' % (self.expand(first),self.expand(second)) + + def INVERT(self,first): + return '%s DESC' % self.expand(first) + + def COMMA(self,first,second): + return '%s, %s' % (self.expand(first),self.expand(second)) + + def expand(self,expression,field_type=None): + if isinstance(expression,Field): + return str(expression) + elif isinstance(expression, (Expression, Query)): + if not expression.second is None: + return expression.op(expression.first, expression.second) + elif not expression.first is None: + return expression.op(expression.first) + elif not isinstance(expression.op,str): + return expression.op() + else: + return '(%s)' % expression.op + elif field_type: + return self.represent(expression,field_type) + elif isinstance(expression,(list,tuple)): + return ','.join([self.represent(item,field_type) for item in expression]) + else: + return str(expression) + + def alias(self,table,alias): + """ + given a table object, makes a new table object + with alias name. 
+ """ + other = copy.copy(table) + other['_ot'] = other._tablename + other['ALL'] = SQLALL(other) + other['_tablename'] = alias + for fieldname in other.fields: + other[fieldname] = copy.copy(other[fieldname]) + other[fieldname]._tablename = alias + other[fieldname].tablename = alias + other[fieldname].table = other + table._db[alias] = other + return other + + def _truncate(self,table,mode = ''): + tablename = table._tablename + return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')] + + def truncate(self,table,mode= ' '): + # Prepare functions "write_to_logfile" and "close_logfile" + if table._dbt: + logfile = self.file_open(table._loggername, 'a') + else: + class Logfile(object): + def write(self, value): + pass + def close(self): + pass + logfile = Logfile() + + try: + queries = table._db._adapter._truncate(table, mode) + for query in queries: + logfile.write(query + '\n') + self.execute(query) + table._db.commit() + logfile.write('success!\n') + finally: + logfile.close() + + def _update(self,tablename,query,fields): + query = self.filter_tenant(query,[tablename]) + if query: + sql_w = ' WHERE ' + self.expand(query) + else: + sql_w = '' + sql_v = ','.join(['%s=%s' % (field.name, self.expand(value,field.type)) for (field,value) in fields]) + return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w) + + def update(self,tablename,query,fields): + sql = self._update(tablename,query,fields) + self.execute(sql) + try: + return self.cursor.rowcount + except: + return None + + def _delete(self,tablename, query): + query = self.filter_tenant(query,[tablename]) + if query: + sql_w = ' WHERE ' + self.expand(query) + else: + sql_w = '' + return 'DELETE FROM %s%s;' % (tablename, sql_w) + + def delete(self,tablename,query): + sql = self._delete(tablename,query) + ### special code to handle CASCADE in SQLite + db = self.db + table = db[tablename] + if self.dbengine=='sqlite' and table._referenced_by: + deleted = [x[table._id.name] for x in db(query).select(table._id)] + 
### end special code to handle CASCADE in SQLite + self.execute(sql) + try: + counter = self.cursor.rowcount + except: + counter = None + ### special code to handle CASCADE in SQLite + if self.dbengine=='sqlite' and counter: + for tablename,fieldname in table._referenced_by: + f = db[tablename][fieldname] + if f.type=='reference '+table._tablename and f.ondelete=='CASCADE': + db(db[tablename][fieldname].belongs(deleted)).delete() + ### end special code to handle CASCADE in SQLite + return counter + + def get_table(self,query): + tablenames = self.tables(query) + if len(tablenames)==1: + return tablenames[0] + elif len(tablenames)<1: + raise RuntimeError, "No table selected" + else: + raise RuntimeError, "Too many tables selected" + + def _select(self, query, fields, attributes): + for key in set(attributes.keys())-set(('orderby','groupby','limitby', + 'required','cache','left', + 'distinct','having', 'join')): + raise SyntaxError, 'invalid select attribute: %s' % key + # ## if not fields specified take them all from the requested tables + new_fields = [] + for item in fields: + if isinstance(item,SQLALL): + new_fields += item.table + else: + new_fields.append(item) + fields = new_fields + tablenames = self.tables(query) + query = self.filter_tenant(query,tablenames) + if not fields: + for table in tablenames: + for field in self.db[table]: + fields.append(field) + else: + for field in fields: + if isinstance(field,basestring) and table_field.match(field): + tn,fn = field.split('.') + field = self.db[tn][fn] + for tablename in self.tables(field): + if not tablename in tablenames: + tablenames.append(tablename) + if len(tablenames) < 1: + raise SyntaxError, 'Set: no tables selected' + sql_f = ', '.join(map(self.expand,fields)) + self._colnames = [c.strip() for c in sql_f.split(', ')] + if query: + sql_w = ' WHERE ' + self.expand(query) + else: + sql_w = '' + sql_o = '' + sql_s = '' + left = attributes.get('left', False) + inner_join = attributes.get('join', False) + 
distinct = attributes.get('distinct', False) + groupby = attributes.get('groupby', False) + orderby = attributes.get('orderby', False) + having = attributes.get('having', False) + limitby = attributes.get('limitby', False) + if distinct is True: + sql_s += 'DISTINCT' + elif distinct: + sql_s += 'DISTINCT ON (%s)' % distinct + if inner_join: + icommand = self.JOIN() + if not isinstance(inner_join, (tuple, list)): + inner_join = [inner_join] + ijoint = [t._tablename for t in inner_join if not isinstance(t,Expression)] + ijoinon = [t for t in inner_join if isinstance(t, Expression)] + ijoinont = [t.first._tablename for t in ijoinon] + iexcluded = [t for t in tablenames if not t in ijoint + ijoinont] + if left: + join = attributes['left'] + command = self.LEFT_JOIN() + if not isinstance(join, (tuple, list)): + join = [join] + joint = [t._tablename for t in join if not isinstance(t,Expression)] + joinon = [t for t in join if isinstance(t, Expression)] + #patch join+left patch (solves problem with ordering in left joins) + tables_to_merge={} + [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon] + joinont = [t.first._tablename for t in joinon] + [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge] + important_tablenames = joint + joinont + tables_to_merge.keys() + excluded = [t for t in tablenames if not t in important_tablenames ] + def alias(t): + return str(self.db[t]) + if inner_join and not left: + sql_t = ', '.join(alias(t) for t in iexcluded) + for t in ijoinon: + sql_t += ' %s %s' % (icommand, str(t)) + elif not inner_join and left: + sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()]) + if joint: + sql_t += ' %s %s' % (command, ','.join([t for t in joint])) + for t in joinon: + sql_t += ' %s %s' % (command, str(t)) + elif inner_join and left: + sql_t = ','.join([alias(t) for t in excluded + \ + tables_to_merge.keys() if t in iexcluded ]) + for t in ijoinon: + sql_t += ' %s %s' % (icommand, str(t)) + if joint: 
+ sql_t += ' %s %s' % (command, ','.join([t for t in joint])) + for t in joinon: + sql_t += ' %s %s' % (command, str(t)) + else: + sql_t = ', '.join(alias(t) for t in tablenames) + if groupby: + if isinstance(groupby, (list, tuple)): + groupby = xorify(groupby) + sql_o += ' GROUP BY %s' % self.expand(groupby) + if having: + sql_o += ' HAVING %s' % attributes['having'] + if orderby: + if isinstance(orderby, (list, tuple)): + orderby = xorify(orderby) + if str(orderby) == '<random>': + sql_o += ' ORDER BY %s' % self.RANDOM() + else: + sql_o += ' ORDER BY %s' % self.expand(orderby) + if limitby: + if not orderby and tablenames: + sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in ((hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])]) + # oracle does not support limitby + return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby) + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def select(self,query,fields,attributes): + """ + Always returns a Rows object, even if it may be empty + """ + def response(sql): + self.execute(sql) + return self.cursor.fetchall() + sql = self._select(query,fields,attributes) + if attributes.get('cache', None): + (cache_model, time_expire) = attributes['cache'] + del attributes['cache'] + key = self.uri + '/' + sql + key = (key<=200) and key or hashlib.md5(key).hexdigest() + rows = cache_model(key, lambda: response(sql), time_expire) + else: + rows = response(sql) + if isinstance(rows,tuple): + rows = list(rows) + limitby = attributes.get('limitby',None) or (0,) + rows = self.rowslice(rows,limitby[0],None) + return self.parse(rows,self._colnames) + + def _count(self,query,distinct=None): + tablenames = self.tables(query) + query = self.filter_tenant(query,tablenames) + if 
query: + sql_w = ' WHERE ' + self.expand(query) + else: + sql_w = '' + sql_t = ','.join(tablenames) + if distinct: + if isinstance(distinct,(list,tuple)): + distinct = xorify(distinct) + sql_d = self.expand(distinct) + return 'SELECT count(DISTINCT %s) FROM %s%s;' % (sql_d, sql_t, sql_w) + return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w) + + def count(self,query,distinct=None): + self.execute(self._count(query,distinct)) + return self.cursor.fetchone()[0] + + + def tables(self,query): + tables = set() + if isinstance(query, Field): + tables.add(query.tablename) + elif isinstance(query, (Expression, Query)): + if not query.first is None: + tables = tables.union(self.tables(query.first)) + if not query.second is None: + tables = tables.union(self.tables(query.second)) + return list(tables) + + def commit(self): + return self.connection.commit() + + def rollback(self): + return self.connection.rollback() + + def close(self): + return self.connection.close() + + def distributed_transaction_begin(self,key): + return + + def prepare(self,key): + self.connection.prepare() + + def commit_prepared(self,key): + self.connection.commit() + + def rollback_prepared(self,key): + self.connection.rollback() + + def concat_add(self,table): + return ', ADD ' + + def constraint_name(self, table, fieldname): + return '%s_%s__constraint' % (table,fieldname) + + def create_sequence_and_triggers(self, query, table, **args): + self.execute(query) + + def log_execute(self,*a,**b): + self.db._lastsql = a[0] + t0 = time.time() + ret = self.cursor.execute(*a,**b) + self.db._timings.append((a[0],time.time()-t0)) + return ret + + def execute(self,*a,**b): + return self.log_execute(*a, **b) + + def represent(self, obj, fieldtype): + if isinstance(obj,CALLABLETYPES): + obj = obj() + if isinstance(fieldtype, SQLCustomType): + return fieldtype.encoder(obj) + if isinstance(obj, (Expression, Field)): + return str(obj) + if fieldtype.startswith('list:'): + if not obj: + obj = [] + if not 
isinstance(obj, (list, tuple)): + obj = [obj] + if isinstance(obj, (list, tuple)): + obj = bar_encode(obj) + if obj is None: + return 'NULL' + if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']: + return 'NULL' + r = self.represent_exceptions(obj,fieldtype) + if not r is None: + return r + if fieldtype == 'boolean': + if obj and not str(obj)[:1].upper() in ['F', '0']: + return "'T'" + else: + return "'F'" + if fieldtype == 'id' or fieldtype == 'integer': + return str(int(obj)) + if fieldtype.startswith('decimal'): + return str(obj) + elif fieldtype.startswith('reference'): # reference + if fieldtype.find('.')>0: + return repr(obj) + elif isinstance(obj, (Row, Reference)): + return str(obj['id']) + return str(int(obj)) + elif fieldtype == 'double': + return repr(float(obj)) + if isinstance(obj, unicode): + obj = obj.encode(self.db_codec) + if fieldtype == 'blob': + obj = base64.b64encode(str(obj)) + elif fieldtype == 'date': + if isinstance(obj, (datetime.date, datetime.datetime)): + obj = obj.isoformat()[:10] + else: + obj = str(obj) + elif fieldtype == 'datetime': + if isinstance(obj, datetime.datetime): + obj = obj.isoformat()[:19].replace('T',' ') + elif isinstance(obj, datetime.date): + obj = obj.isoformat()[:10]+' 00:00:00' + else: + obj = str(obj) + elif fieldtype == 'time': + if isinstance(obj, datetime.time): + obj = obj.isoformat()[:10] + else: + obj = str(obj) + if not isinstance(obj,str): + obj = str(obj) + try: + obj.decode(self.db_codec) + except: + obj = obj.decode('latin1').encode(self.db_codec) + return self.adapt(obj) + + def represent_exceptions(self, obj, fieldtype): + return None + + def lastrowid(self,table): + return None + + def integrity_error_class(self): + return type(None) + + def rowslice(self,rows,minimum=0,maximum=None): + """ by default this function does nothing, overload when db does not do slicing """ + return rows + + def parse(self, rows, colnames, blob_decode=True): + db = self.db + virtualtables = [] + new_rows = [] 
+ for (i,row) in enumerate(rows): + new_row = Row() + for j,colname in enumerate(colnames): + value = row[j] + if not table_field.match(colnames[j]): + if not '_extra' in new_row: + new_row['_extra'] = Row() + new_row['_extra'][colnames[j]] = value + select_as_parser = re.compile("\s+AS\s+(\S+)") + new_column_name = select_as_parser.search(colnames[j]) + if not new_column_name is None: + column_name = new_column_name.groups(0) + setattr(new_row,column_name[0],value) + continue + (tablename, fieldname) = colname.split('.') + table = db[tablename] + field = table[fieldname] + field_type = field.type + if field.type != 'blob' and isinstance(value, str): + try: + value = value.decode(db._db_codec) + except Exception: + pass + if isinstance(value, unicode): + value = value.encode('utf-8') + if not tablename in new_row: + colset = new_row[tablename] = Row() + if tablename not in virtualtables: + virtualtables.append(tablename) + else: + colset = new_row[tablename] + + if isinstance(field_type, SQLCustomType): + colset[fieldname] = field_type.decoder(value) + # field_type = field_type.type + elif not isinstance(field_type, str) or value is None: + colset[fieldname] = value + elif isinstance(field_type, str) and \ + field_type.startswith('reference'): + referee = field_type[10:].strip() + if not '.' 
in referee: + colset[fieldname] = rid = Reference(value) + (rid._table, rid._record) = (db[referee], None) + else: ### reference not by id + colset[fieldname] = value + elif field_type == 'boolean': + if value == True or str(value)[:1].lower() == 't': + colset[fieldname] = True + else: + colset[fieldname] = False + elif field_type == 'date' \ + and (not isinstance(value, datetime.date)\ + or isinstance(value, datetime.datetime)): + (y, m, d) = map(int, str(value)[:10].strip().split('-')) + colset[fieldname] = datetime.date(y, m, d) + elif field_type == 'time' \ + and not isinstance(value, datetime.time): + time_items = map(int,str(value)[:8].strip().split(':')[:3]) + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + colset[fieldname] = datetime.time(h, mi, s) + elif field_type == 'datetime'\ + and not isinstance(value, datetime.datetime): + (y, m, d) = map(int,str(value)[:10].strip().split('-')) + time_items = map(int,str(value)[11:19].strip().split(':')[:3]) + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + colset[fieldname] = datetime.datetime(y, m, d, h, mi, s) + elif field_type == 'blob' and blob_decode: + colset[fieldname] = base64.b64decode(str(value)) + elif field_type.startswith('decimal'): + decimals = int(field_type[8:-1].split(',')[-1]) + if self.dbengine == 'sqlite': + value = ('%.' 
+ str(decimals) + 'f') % value + if not isinstance(value, decimal.Decimal): + value = decimal.Decimal(str(value)) + colset[fieldname] = value + elif field_type.startswith('list:integer'): + if not self.dbengine=='google:datastore': + colset[fieldname] = bar_decode_integer(value) + else: + colset[fieldname] = value + elif field_type.startswith('list:reference'): + if not self.dbengine=='google:datastore': + colset[fieldname] = bar_decode_integer(value) + else: + colset[fieldname] = value + elif field_type.startswith('list:string'): + if not self.dbengine=='google:datastore': + colset[fieldname] = bar_decode_string(value) + else: + colset[fieldname] = value + else: + colset[fieldname] = value + if field_type == 'id': + id = colset[field.name] + colset.update_record = lambda _ = (colset, table, id), **a: update_record(_, a) + colset.delete_record = lambda t = table, i = id: t._db(t._id==i).delete() + for (referee_table, referee_name) in \ + table._referenced_by: + s = db[referee_table][referee_name] + referee_link = db._referee_name and \ + db._referee_name % dict(table=referee_table,field=referee_name) + if referee_link and not referee_link in colset: + colset[referee_link] = Set(db, s == id) + colset['id'] = id + new_rows.append(new_row) + + rowsobj = Rows(db, new_rows, colnames, rawrows=rows) + + for tablename in virtualtables: + ### new style virtual fields + table = db[tablename] + fields_virtual = [(f,v) for (f,v) in table.items() if isinstance(v,FieldVirtual)] + fields_lazy = [(f,v) for (f,v) in table.items() if isinstance(v,FieldLazy)] + if fields_virtual or fields_lazy: + for row in rowsobj.records: + box = row[tablename] + for f,v in fields_virtual: + box[f] = v.f(row) + for f,v in fields_lazy: + box[f] = (v.handler or VirtualCommand)(v.f,row) + + ### old style virtual fields + for item in table.virtualfields: + try: + rowsobj = rowsobj.setvirtualfields(**{tablename:item}) + except KeyError: + # to avoid breaking virtualfields when partial select + pass + 
return rowsobj + + def filter_tenant(self,query,tablenames): + fieldname = self.db._request_tenant + for tablename in tablenames: + table = self.db[tablename] + if fieldname in table: + default = table[fieldname].default + if not default is None: + newquery = table[fieldname]==default + if query is None: + query = newquery + else: + query = query&newquery + return query + +################################################################################### +# List of all the available adapters, they all extend BaseAdapter +################################################################################### + +class SQLiteAdapter(BaseAdapter): + + driver = globals().get('sqlite3',None) + + def EXTRACT(self,field,what): + return "web2py_extract('%s',%s)" % (what,self.expand(field)) + + @staticmethod + def web2py_extract(lookup, s): + table = { + 'year': (0, 4), + 'month': (5, 7), + 'day': (8, 10), + 'hour': (11, 13), + 'minute': (14, 16), + 'second': (17, 19), + } + try: + (i, j) = table[lookup] + return int(s[i:j]) + except: + return None + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "sqlite" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' + if uri.startswith('sqlite:memory'): + dbpath = ':memory:' + else: + dbpath = uri.split('://')[1] + if dbpath[0] != '/': + dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath) + if not 'check_same_thread' in driver_args: + driver_args['check_same_thread'] = False + if not 'detect_types' in driver_args: + driver_args['detect_types'] = self.driver.PARSE_DECLTYPES + def connect(dbpath=dbpath, driver_args=driver_args): + return self.driver.Connection(dbpath, **driver_args) + self.pool_connection(connect) + 
self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) + + def _truncate(self,table,mode = ''): + tablename = table._tablename + return ['DELETE FROM %s;' % tablename, + "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename] + + def lastrowid(self,table): + return self.cursor.lastrowid + + +class JDBCSQLiteAdapter(SQLiteAdapter): + + driver = globals().get('zxJDBC',None) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "sqlite" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8' + if uri.startswith('sqlite:memory'): + dbpath = ':memory:' + else: + dbpath = uri.split('://')[1] + if dbpath[0] != '/': + dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'),dbpath) + def connect(dbpath=dbpath,driver_args=driver_args): + return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:'+dbpath),**driver_args) + # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs + # self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract) + + def execute(self,a): + return self.log_execute(a) + + +class MySQLAdapter(BaseAdapter): + + driver = globals().get('pymysql',None) + maxcharlength = 255 + commit_on_alter_table = True + support_distributed_transaction = True + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'LONGTEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'LONGBLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'DATETIME', + 'id': 'INT AUTO_INCREMENT NOT NULL', + 'reference': 'INT, INDEX %(field_name)s__idx 
(%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'LONGTEXT', + 'list:string': 'LONGTEXT', + 'list:reference': 'LONGTEXT', + } + + def RANDOM(self): + return 'RAND()' + + def SUBSTRING(self,field,parameters): + return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) + + def _drop(self,table,mode): + # breaks db integrity but without this mysql does not drop table + return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,'SET FOREIGN_KEY_CHECKS=1;'] + + def distributed_transaction_begin(self,key): + self.execute('XA START;') + + def prepare(self,key): + self.execute("XA END;") + self.execute("XA PREPARE;") + + def commit_prepared(self,ley): + self.execute("XA COMMIT;") + + def rollback_prepared(self,key): + self.execute("XA ROLLBACK;") + + def concat_add(self,table): + return '; ALTER TABLE %s ADD ' % table + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "mysql" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) + if not m: + raise SyntaxError, \ + "Invalid URI string in DAL: %s" % self.uri + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + port = int(m.group('port') or '3306') + charset = m.group('charset') or 'utf8' + driver_args.update(dict(db=db, + user=credential_decoder(user), 
+ passwd=credential_decoder(password), + host=host, + port=port, + charset=charset)) + def connect(driver_args=driver_args): + return self.driver.connect(**driver_args) + self.pool_connection(connect) + self.execute('SET FOREIGN_KEY_CHECKS=1;') + self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") + + def lastrowid(self,table): + self.execute('select last_insert_id();') + return int(self.cursor.fetchone()[0]) + +class PostgreSQLAdapter(BaseAdapter): + + driver = globals().get('psycopg2',None) + + support_distributed_transaction = True + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'TEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BYTEA', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'SERIAL PRIMARY KEY', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + } + + def adapt(self,obj): + return psycopg2_adapt(obj).getquoted() + + def sequence_name(self,table): + return '%s_id_Seq' % table + + def RANDOM(self): + return 'RANDOM()' + + def distributed_transaction_begin(self,key): + return + + def prepare(self,key): + self.execute("PREPARE TRANSACTION '%s';" % key) + + def commit_prepared(self,key): + self.execute("COMMIT PREPARED '%s';" % key) + + def rollback_prepared(self,key): + self.execute("ROLLBACK PREPARED '%s';" % key) + + def create_sequence_and_triggers(self, query, table, **args): + # following lines should only be executed if table._sequence_name does not exist + # self.execute('CREATE SEQUENCE %s;' % table._sequence_name) + # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ + # % (table._tablename, table._fieldname, table._sequence_name)) + self.execute(query) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec 
='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "postgres" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) + if not m: + raise SyntaxError, "Invalid URI string in DAL" + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + port = m.group('port') or '5432' + sslmode = m.group('sslmode') + if sslmode: + msg = ("dbname='%s' user='%s' host='%s'" + "port=%s password='%s' sslmode='%s'") \ + % (db, user, host, port, password, sslmode) + else: + msg = ("dbname='%s' user='%s' host='%s'" + "port=%s password='%s'") \ + % (db, user, host, port, password) + def connect(msg=msg,driver_args=driver_args): + return self.driver.connect(msg,**driver_args) + self.pool_connection(connect) + self.connection.set_client_encoding('UTF8') + self.execute("SET standard_conforming_strings=on;") + + def lastrowid(self,table): + self.execute("select currval('%s')" % table._sequence_name) + return int(self.cursor.fetchone()[0]) + + def LIKE(self,first,second): + return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string')) + + def STARTSWITH(self,first,second): + return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string')) + + def ENDSWITH(self,first,second): + return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string')) + + def CONTAINS(self,first,second): + if first.type in ('string','text'): + key = 
'%'+str(second).replace('%','%%')+'%' + elif first.type.startswith('list:'): + key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%' + return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string')) + +class JDBCPostgreSQLAdapter(PostgreSQLAdapter): + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "postgres" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri) + if not m: + raise SyntaxError, "Invalid URI string in DAL" + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + port = m.group('port') or '5432' + msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) + def connect(msg=msg,driver_args=driver_args): + return self.driver.connect(*msg,**driver_args) + self.pool_connection(connect) + self.connection.set_client_encoding('UTF8') + self.execute('BEGIN;') + self.execute("SET CLIENT_ENCODING TO 'UNICODE';") + + +class OracleAdapter(BaseAdapter): + + driver = globals().get('cx_Oracle',None) + + commit_on_alter_table = False + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR2(%(length)s)', + 'text': 'CLOB', + 'password': 'VARCHAR2(%(length)s)', + 'blob': 'CLOB', + 'upload': 'VARCHAR2(%(length)s)', + 'integer': 'INT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'CHAR(8)', + 'datetime': 'DATE', + 'id': 'NUMBER PRIMARY KEY', + 'reference': 
'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + } + + def sequence_name(self,tablename): + return '%s_sequence' % tablename + + def trigger_name(self,tablename): + return '%s_trigger' % tablename + + def LEFT_JOIN(self): + return 'LEFT OUTER JOIN' + + def RANDOM(self): + return 'dbms_random.value' + + def NOT_NULL(self,default,field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) + + def _drop(self,table,mode): + sequence_name = table._sequence_name + return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name] + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + if len(sql_w) > 1: + sql_w_row = sql_w + ' AND w_row > %i' % lmin + else: + sql_w_row = 'WHERE w_row > %i' % lmin + return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def constraint_name(self, tablename, fieldname): + constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname) + if len(constraint_name)>30: + constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7]) + return constraint_name + + def represent_exceptions(self, obj, fieldtype): + if fieldtype == 'blob': + obj = base64.b64encode(str(obj)) + return ":CLOB('%s')" % obj + elif fieldtype == 'date': + if isinstance(obj, (datetime.date, datetime.datetime)): + obj = obj.isoformat()[:10] + else: + obj = str(obj) + return "to_date('%s','yyyy-mm-dd')" % obj + elif fieldtype == 'datetime': + if isinstance(obj, datetime.datetime): + obj = obj.isoformat()[:19].replace('T',' ') + elif isinstance(obj, datetime.date): + obj = 
obj.isoformat()[:10]+' 00:00:00' + else: + obj = str(obj) + return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj + return None + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "oracle" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + if not 'threaded' in driver_args: + driver_args['threaded']=True + def connect(uri=uri,driver_args=driver_args): + return self.driver.connect(uri,**driver_args) + self.pool_connection(connect) + self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") + self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';") + oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))") + + def execute(self, command): + args = [] + i = 1 + while True: + m = self.oracle_fix.match(command) + if not m: + break + command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] + args.append(m.group('clob')[6:-2].replace("''", "'")) + i += 1 + if command[-1:]==';': + command = command[:-1] + return self.log_execute(command, args) + + def create_sequence_and_triggers(self, query, table, **args): + tablename = table._tablename + sequence_name = table._sequence_name + trigger_name = table._trigger_name + self.execute(query) + self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name) + self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name)) + + def lastrowid(self,table): + sequence_name = table._sequence_name + self.execute('SELECT %s.currval FROM dual;' % sequence_name) + return int(self.cursor.fetchone()[0]) + + +class MSSQLAdapter(BaseAdapter): + + driver = globals().get('pyodbc',None) + + types = { + 
'boolean': 'BIT', + 'string': 'VARCHAR(%(length)s)', + 'text': 'TEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'IMAGE', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATETIME', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'INT IDENTITY PRIMARY KEY', + 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + } + + def EXTRACT(self,field,what): + return "DATEPART(%s,%s)" % (what, self.expand(field)) + + def LEFT_JOIN(self): + return 'LEFT OUTER JOIN' + + def RANDOM(self): + return 'NEWID()' + + def ALLOW_NULL(self): + return ' NULL' + + def SUBSTRING(self,field,parameters): + return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) + + def PRIMARY_KEY(self,key): + return 'PRIMARY KEY CLUSTERED (%s)' % key + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + sql_s += ' TOP %i' % lmax + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def represent_exceptions(self, obj, fieldtype): + if fieldtype == 'boolean': + if obj and not str(obj)[0].upper() == 'F': + return '1' + else: + return '0' + return None + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}, fake_connect=False): + self.db = db + self.dbengine = "mssql" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec 
= db_codec + self.find_or_make_work_folder() + # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 + uri = uri.split('://')[1] + if '@' not in uri: + try: + m = re.compile('^(?P<dsn>.+)$').match(uri) + if not m: + raise SyntaxError, \ + 'Parsing uri string(%s) has no result' % self.uri + dsn = m.group('dsn') + if not dsn: + raise SyntaxError, 'DSN required' + except SyntaxError, e: + logger.error('NdGpatch error') + raise e + cnxn = 'DSN=%s' % dsn + else: + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri) + if not m: + raise SyntaxError, \ + "Invalid URI string in DAL: %s" % uri + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + port = m.group('port') or '1433' + # Parse the optional url name-value arg pairs after the '?' + # (in the form of arg1=value1&arg2=value2&...) 
+ # Default values (drivers like FreeTDS insist on uppercase parameter keys) + argsdict = { 'DRIVER':'{SQL Server}' } + urlargs = m.group('urlargs') or '' + argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)') + for argmatch in argpattern.finditer(urlargs): + argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') + urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()]) + cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ + % (host, port, db, user, password, urlargs) + def connect(cnxn=cnxn,driver_args=driver_args): + return self.driver.connect(cnxn,**driver_args) + if not fake_connect: + self.pool_connection(connect) + + def lastrowid(self,table): + #self.execute('SELECT @@IDENTITY;') + self.execute('SELECT SCOPE_IDENTITY();') + return int(self.cursor.fetchone()[0]) + + def integrity_error_class(self): + return pyodbc.IntegrityError + + def rowslice(self,rows,minimum=0,maximum=None): + if maximum is None: + return rows[minimum:] + return rows[minimum:maximum] + + +class MSSQL2Adapter(MSSQLAdapter): + types = { + 'boolean': 'CHAR(1)', + 'string': 'NVARCHAR(%(length)s)', + 'text': 'NTEXT', + 'password': 'NVARCHAR(%(length)s)', + 'blob': 'IMAGE', + 'upload': 'NVARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATETIME', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'INT IDENTITY PRIMARY KEY', + 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + 'list:integer': 'NTEXT', + 'list:string': 'NTEXT', + 'list:reference': 'NTEXT', + } + + def 
represent(self, obj, fieldtype): + value = BaseAdapter.represent(self, obj, fieldtype) + if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'": + value = 'N'+value + return value + + def execute(self,a): + return self.log_execute(a.decode('utf8')) + + +class FireBirdAdapter(BaseAdapter): + + driver = globals().get('pyodbc',None) + + commit_on_alter_table = False + support_distributed_transaction = True + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'BLOB SUB_TYPE 1', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB SUB_TYPE 0', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'double': 'DOUBLE PRECISION', + 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER PRIMARY KEY', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'BLOB SUB_TYPE 1', + 'list:string': 'BLOB SUB_TYPE 1', + 'list:reference': 'BLOB SUB_TYPE 1', + } + + def sequence_name(self,tablename): + return 'genid_%s' % tablename + + def trigger_name(self,tablename): + return 'trg_id_%s' % tablename + + def RANDOM(self): + return 'RAND()' + + def NOT_NULL(self,default,field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) + + def SUBSTRING(self,field,parameters): + return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1]) + + def _drop(self,table,mode): + sequence_name = table._sequence_name + return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name] + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def _truncate(self,table,mode = ''): + return ['DELETE FROM %s;' % table._tablename, + 'SET GENERATOR %s TO 0;' % table._sequence_name] + + def 
__init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "firebird" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri) + if not m: + raise SyntaxError, "Invalid URI string in DAL: %s" % uri + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + port = int(m.group('port') or 3050) + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + charset = m.group('charset') or 'UTF8' + driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db), + user = credential_decoder(user), + password = credential_decoder(password), + charset = charset)) + if adapter_args.has_key('driver_name'): + if adapter_args['driver_name'] == 'kinterbasdb': + self.driver = kinterbasdb + elif adapter_args['driver_name'] == 'firebirdsql': + self.driver = firebirdsql + else: + self.driver = kinterbasdb + def connect(driver_args=driver_args): + return self.driver.connect(**driver_args) + self.pool_connection(connect) + + def create_sequence_and_triggers(self, query, table, **args): + tablename = table._tablename + sequence_name = table._sequence_name + trigger_name = table._trigger_name + self.execute(query) + self.execute('create generator %s;' % sequence_name) + self.execute('set generator %s to 0;' % sequence_name) + self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, 
sequence_name)) + + def lastrowid(self,table): + sequence_name = table._sequence_name + self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) + return int(self.cursor.fetchone()[0]) + + +class FireBirdEmbeddedAdapter(FireBirdAdapter): + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "firebird" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri) + if not m: + raise SyntaxError, \ + "Invalid URI string in DAL: %s" % self.uri + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + pathdb = m.group('path') + if not pathdb: + raise SyntaxError, 'Path required' + charset = m.group('charset') + if not charset: + charset = 'UTF8' + host = '' + driver_args.update(dict(host=host, + database=pathdb, + user=credential_decoder(user), + password=credential_decoder(password), + charset=charset)) + #def connect(driver_args=driver_args): + # return kinterbasdb.connect(**driver_args) + if adapter_args.has_key('driver_name'): + if adapter_args['driver_name'] == 'kinterbasdb': + self.driver = kinterbasdb + elif adapter_args['driver_name'] == 'firebirdsql': + self.driver = firebirdsql + else: + self.driver = kinterbasdb + def connect(driver_args=driver_args): + return self.driver.connect(**driver_args) + self.pool_connection(connect) + + +class InformixAdapter(BaseAdapter): + + driver = globals().get('informixdb',None) + + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'BLOB SUB_TYPE 1', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB SUB_TYPE 0', + 
'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'SERIAL', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', + 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', + 'list:integer': 'BLOB SUB_TYPE 1', + 'list:string': 'BLOB SUB_TYPE 1', + 'list:reference': 'BLOB SUB_TYPE 1', + } + + def RANDOM(self): + return 'Random()' + + def NOT_NULL(self,default,field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + fetch_amt = lmax - lmin + dbms_version = int(self.connection.dbms_version.split('.')[0]) + if lmin and (dbms_version >= 10): + # Requires Informix 10.0+ + sql_s += ' SKIP %d' % (lmin, ) + if fetch_amt and (dbms_version >= 9): + # Requires Informix 9.0+ + sql_s += ' FIRST %d' % (fetch_amt, ) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def represent_exceptions(self, obj, fieldtype): + if fieldtype == 'date': + if isinstance(obj, (datetime.date, datetime.datetime)): + obj = obj.isoformat()[:10] + else: + obj = str(obj) + return "to_date('%s','yyyy-mm-dd')" % obj + elif fieldtype == 'datetime': + if isinstance(obj, datetime.datetime): + obj = obj.isoformat()[:19].replace('T',' ') + elif isinstance(obj, datetime.date): + obj = obj.isoformat()[:10]+' 00:00:00' + else: + obj = str(obj) + return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj + return None + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + 
adapter_args={}): + self.db = db + self.dbengine = "informix" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri) + if not m: + raise SyntaxError, \ + "Invalid URI string in DAL: %s" % self.uri + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + user = credential_decoder(user) + password = credential_decoder(password) + dsn = '%s@%s' % (db,host) + driver_args.update(dict(user=user,password=password,autocommit=True)) + def connect(dsn=dsn,driver_args=driver_args): + return self.driver.connect(dsn,**driver_args) + self.pool_connection(connect) + + def execute(self,command): + if command[-1:]==';': + command = command[:-1] + return self.log_execute(command) + + def lastrowid(self,table): + return self.cursor.sqlerrd[1] + + def integrity_error_class(self): + return informixdb.IntegrityError + + +class DB2Adapter(BaseAdapter): + + driver = globals().get('pyodbc',None) + + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'CLOB', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY 
(%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + } + + def LEFT_JOIN(self): + return 'LEFT OUTER JOIN' + + def RANDOM(self): + return 'RAND()' + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def represent_exceptions(self, obj, fieldtype): + if fieldtype == 'blob': + obj = base64.b64encode(str(obj)) + return "BLOB('%s')" % obj + elif fieldtype == 'datetime': + if isinstance(obj, datetime.datetime): + obj = obj.isoformat()[:19].replace('T','-').replace(':','.') + elif isinstance(obj, datetime.date): + obj = obj.isoformat()[:10]+'-00.00.00' + return "'%s'" % obj + return None + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "db2" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + cnxn = uri.split('://', 1)[1] + def connect(cnxn=cnxn,driver_args=driver_args): + return self.driver.connect(cnxn,**driver_args) + self.pool_connection(connect) + + def execute(self,command): + if command[-1:]==';': + command = command[:-1] + return self.log_execute(command) + + def lastrowid(self,table): + self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) + return int(self.cursor.fetchone()[0]) + + def rowslice(self,rows,minimum=0,maximum=None): + if maximum is None: + return rows[minimum:] + return rows[minimum:maximum] + + +class TeradataAdapter(DB2Adapter): + + driver = globals().get('pyodbc',None) + + types = { 
+ 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'CLOB', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + } + + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "teradata" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + cnxn = uri.split('://', 1)[1] + def connect(cnxn=cnxn,driver_args=driver_args): + return self.driver.connect(cnxn,**driver_args) + self.pool_connection(connect) + + +INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name + # (ANSI-SQL wants this form of name + # to be a delimited identifier) + +class IngresAdapter(BaseAdapter): + + driver = globals().get('ingresdbi',None) + + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'CLOB', + 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? + 'integer': 'INTEGER4', # or int8... 
+ 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'ANSIDATE', + 'time': 'TIME WITHOUT TIME ZONE', + 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', + 'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME, + 'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + } + + def LEFT_JOIN(self): + return 'LEFT OUTER JOIN' + + def RANDOM(self): + return 'RANDOM()' + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + fetch_amt = lmax - lmin + if fetch_amt: + sql_s += ' FIRST %d ' % (fetch_amt, ) + if lmin: + # Requires Ingres 9.2+ + sql_o += ' OFFSET %d' % (lmin, ) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "ingres" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + connstr = self._uri.split(':', 1)[1] + # Simple URI processing + connstr = connstr.lstrip() + while connstr.startswith('/'): + connstr = connstr[1:] + database_name=connstr # Assume only (local) dbname is passed in + vnode = '(local)' + servertype = 'ingres' + trace = (0, None) # No tracing + driver_args.update(dict(database=database_name, + vnode=vnode, + servertype=servertype, + trace=trace)) + def connect(driver_args=driver_args): + return 
self.driver.connect(**driver_args) + self.pool_connection(connect) + + def create_sequence_and_triggers(self, query, table, **args): + # post create table auto inc code (if needed) + # modify table to btree for performance.... + # Older Ingres releases could use rule/trigger like Oracle above. + if hasattr(table,'_primarykey'): + modify_tbl_sql = 'modify %s to btree unique on %s' % \ + (table._tablename, + ', '.join(["'%s'" % x for x in table.primarykey])) + self.execute(modify_tbl_sql) + else: + tmp_seqname='%s_iisq' % table._tablename + query=query.replace(INGRES_SEQNAME, tmp_seqname) + self.execute('create sequence %s' % tmp_seqname) + self.execute(query) + self.execute('modify %s to btree unique on %s' % (table._tablename, 'id')) + + + def lastrowid(self,table): + tmp_seqname='%s_iisq' % table + self.execute('select current value for %s' % tmp_seqname) + return int(self.cursor.fetchone()[0]) # don't really need int type cast here... + + def integrity_error_class(self): + return ingresdbi.IntegrityError + + +class IngresUnicodeAdapter(IngresAdapter): + types = { + 'boolean': 'CHAR(1)', + 'string': 'NVARCHAR(%(length)s)', + 'text': 'NCLOB', + 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? + 'integer': 'INTEGER4', # or int8... 
+ 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'ANSIDATE', + 'time': 'TIME WITHOUT TIME ZONE', + 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', + 'id': 'integer4 not null unique with default next value for %s'% INGRES_SEQNAME, + 'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO + 'list:integer': 'NCLOB', + 'list:string': 'NCLOB', + 'list:reference': 'NCLOB', + } + +class SAPDBAdapter(BaseAdapter): + + driver = globals().get('sapdb',None) + support_distributed_transaction = False + types = { + 'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'LONG', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'LONG', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'double': 'FLOAT', + 'decimal': 'FIXED(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INT PRIMARY KEY', + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'LONG', + 'list:string': 'LONG', + 'list:reference': 'LONG', + } + + def sequence_name(self,table): + return '%s_id_Seq' % table + + def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): + if limitby: + (lmin, lmax) = limitby + if len(sql_w) > 1: + sql_w_row = sql_w + ' AND w_row > %i' % lmin + else: + sql_w_row = 'WHERE w_row > %i' % lmin + return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, 
sql_o) + + def create_sequence_and_triggers(self, query, table, **args): + # following lines should only be executed if table._sequence_name does not exist + self.execute('CREATE SEQUENCE %s;' % table._sequence_name) + self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ + % (table._tablename, table._id.name, table._sequence_name)) + self.execute(query) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "sapdb" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + self.find_or_make_work_folder() + uri = uri.split('://')[1] + m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri) + if not m: + raise SyntaxError, "Invalid URI string in DAL" + user = credential_decoder(m.group('user')) + if not user: + raise SyntaxError, 'User required' + password = credential_decoder(m.group('password')) + if not password: + password = '' + host = m.group('host') + if not host: + raise SyntaxError, 'Host name required' + db = m.group('db') + if not db: + raise SyntaxError, 'Database name required' + def connect(user=user,password=password,database=db, + host=host,driver_args=driver_args): + return self.driver.Connection(user,password,database, + host,**driver_args) + self.pool_connection(connect) + + def lastrowid(self,table): + self.execute("select %s.NEXTVAL from dual" % table._sequence_name) + return int(self.cursor.fetchone()[0]) + +class CubridAdapter(MySQLAdapter): + + driver = globals().get('cubriddb',None) + + def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.dbengine = "cubrid" + self.uri = uri + self.pool_size = pool_size + self.folder = folder + self.db_codec = db_codec + 
class DatabaseStoredFile:
    """File-like object that stores web2py's .table migration metadata
    in a `web2py_filesystem` database table instead of on disk (used
    where the filesystem is not writable, e.g. Google Cloud SQL).
    Only MySQL-family engines are supported."""

    # Set to True (once per process) after the backing table is ensured.
    web2py_filesystem = False

    def escape(self, obj):
        # BUG FIX: the original called self.db._adapter.esacpe(obj) (typo),
        # while close()/exists() called a non-existent self.adapt().
        # NOTE(review): assumes the adapter exposes escape(); confirm
        # against the BaseAdapter API before relying on it.
        return self.db._adapter.escape(obj)

    def __init__(self, db, filename, mode):
        if db._adapter.dbengine != 'mysql':
            raise RuntimeError("only MySQL can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            self.db.executesql("CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(512), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;")
            DatabaseStoredFile.web2py_filesystem = True
        self.p = 0          # current read position within self.data
        self.data = ''
        if mode in ('r', 'rw', 'a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif os.path.exists(filename):
                # first run after switching storage: fall back to the real file
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r', 'rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        """Read up to `bytes` characters from the current position."""
        data = self.data[self.p:self.p + bytes]
        self.p += len(data)
        return data

    def readline(self):
        """Read one line, including its trailing newline when present."""
        i = self.data.find('\n', self.p) + 1
        if i > 0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self, data):
        """Append to the in-memory buffer (flushed by close())."""
        self.data += data

    def close(self):
        """Write the buffered content back to the database and commit."""
        # BUG FIX: original called self.adapt(...), which is not defined
        # on this class; use the escape() helper instead.
        self.db.executesql("DELETE FROM web2py_filesystem WHERE path=%s" \
            % self.escape(self.filename))
        query = "INSERT INTO web2py_filesystem(path,content) VALUES (%s,%s)" \
            % (self.escape(self.filename), self.escape(self.data))
        self.db.executesql(query)
        self.db.commit()

    @staticmethod
    def exists(db, filename):
        """True if the file exists either on disk or in the database."""
        if os.path.exists(filename):
            return True
        # BUG FIX: the original referenced `self` inside a @staticmethod
        # (NameError at runtime); use the passed-in db handle instead.
        query = "SELECT path FROM web2py_filesystem WHERE path=%s" \
            % db._adapter.escape(filename)
        if db.executesql(query):
            return True
        return False


class UseDatabaseStoredFile:
    """Mixin redirecting an adapter's .table file I/O to DatabaseStoredFile."""

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj, unlock=True):
        fileobj.close()

    def file_delete(self, filename):
        # NOTE(review): path is interpolated into SQL unescaped, as in the
        # original; filenames originate from web2py itself, not users.
        query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
        self.db.executesql(query)
        self.db.commit()
SyntaxError, "Invalid URI string in SQLDB: %s" % self._uri + instance = credential_decoder(m.group('instance')) + db = credential_decoder(m.group('db')) + driver_args['instance'] = instance + createdb = adapter_args.get('createdb',True) + if not createdb: + driver_args['database'] = db + def connect(driver_args=driver_args): + return rdbms.connect(**driver_args) + self.pool_connection(connect) + if createdb: + # self.execute('DROP DATABASE %s' % db) + self.execute('CREATE DATABASE IF NOT EXISTS %s' % db) + self.execute('USE %s' % db) + self.execute("SET FOREIGN_KEY_CHECKS=1;") + self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") + +class NoSQLAdapter(BaseAdapter): + + @staticmethod + def to_unicode(obj): + if isinstance(obj, str): + return obj.decode('utf8') + elif not isinstance(obj, unicode): + return unicode(obj) + return obj + + def represent(self, obj, fieldtype): + if isinstance(obj,CALLABLETYPES): + obj = obj() + if isinstance(fieldtype, SQLCustomType): + return fieldtype.encoder(obj) + if isinstance(obj, (Expression, Field)): + raise SyntaxError, "non supported on GAE" + if self.dbengine=='google:datastore' in globals(): + if isinstance(fieldtype, gae.Property): + return obj + if fieldtype.startswith('list:'): + if not obj: + obj = [] + if not isinstance(obj, (list, tuple)): + obj = [obj] + if obj == '' and not fieldtype[:2] in ['st','te','pa','up']: + return None + if not obj is None: + if isinstance(obj, list) and not fieldtype.startswith('list'): + obj = [self.represent(o, fieldtype) for o in obj] + elif fieldtype in ('integer','id'): + obj = long(obj) + elif fieldtype == 'double': + obj = float(obj) + elif fieldtype.startswith('reference'): + if isinstance(obj, (Row, Reference)): + obj = obj['id'] + obj = long(obj) + elif fieldtype == 'boolean': + if obj and not str(obj)[0].upper() == 'F': + obj = True + else: + obj = False + elif fieldtype == 'date': + if not isinstance(obj, datetime.date): + (y, m, d) = map(int,str(obj).strip().split('-')) + obj = 
datetime.date(y, m, d) + elif isinstance(obj,datetime.datetime): + (y, m, d) = (obj.year, obj.month, obj.day) + obj = datetime.date(y, m, d) + elif fieldtype == 'time': + if not isinstance(obj, datetime.time): + time_items = map(int,str(obj).strip().split(':')[:3]) + if len(time_items) == 3: + (h, mi, s) = time_items + else: + (h, mi, s) = time_items + [0] + obj = datetime.time(h, mi, s) + elif fieldtype == 'datetime': + if not isinstance(obj, datetime.datetime): + (y, m, d) = map(int,str(obj)[:10].strip().split('-')) + time_items = map(int,str(obj)[11:].strip().split(':')[:3]) + while len(time_items)<3: + time_items.append(0) + (h, mi, s) = time_items + obj = datetime.datetime(y, m, d, h, mi, s) + elif fieldtype == 'blob': + pass + elif fieldtype.startswith('list:string'): + return map(self.to_unicode,obj) + elif fieldtype.startswith('list:'): + return map(int,obj) + else: + obj = self.to_unicode(obj) + return obj + + def _insert(self,table,fields): + return 'insert %s in %s' % (fields, table) + + def _count(self,query,distinct=None): + return 'count %s' % repr(query) + + def _select(self,query,fields,attributes): + return 'select %s where %s' % (repr(fields), repr(query)) + + def _delete(self,tablename, query): + return 'delete %s where %s' % (repr(tablename),repr(query)) + + def _update(self,tablename,query,fields): + return 'update %s (%s) where %s' % (repr(tablename), + repr(fields),repr(query)) + + def commit(self): + """ + remember: no transactions on many NoSQL + """ + pass + + def rollback(self): + """ + remember: no transactions on many NoSQL + """ + pass + + def close(self): + """ + remember: no transactions on many NoSQL + """ + pass + + + # these functions should never be called! 
+ def OR(self,first,second): raise SyntaxError, "Not supported" + def AND(self,first,second): raise SyntaxError, "Not supported" + def AS(self,first,second): raise SyntaxError, "Not supported" + def ON(self,first,second): raise SyntaxError, "Not supported" + def STARTSWITH(self,first,second=None): raise SyntaxError, "Not supported" + def ENDSWITH(self,first,second=None): raise SyntaxError, "Not supported" + def ADD(self,first,second): raise SyntaxError, "Not supported" + def SUB(self,first,second): raise SyntaxError, "Not supported" + def MUL(self,first,second): raise SyntaxError, "Not supported" + def DIV(self,first,second): raise SyntaxError, "Not supported" + def LOWER(self,first): raise SyntaxError, "Not supported" + def UPPER(self,first): raise SyntaxError, "Not supported" + def EXTRACT(self,first,what): raise SyntaxError, "Not supported" + def AGGREGATE(self,first,what): raise SyntaxError, "Not supported" + def LEFT_JOIN(self): raise SyntaxError, "Not supported" + def RANDOM(self): raise SyntaxError, "Not supported" + def SUBSTRING(self,field,parameters): raise SyntaxError, "Not supported" + def PRIMARY_KEY(self,key): raise SyntaxError, "Not supported" + def LIKE(self,first,second): raise SyntaxError, "Not supported" + def drop(self,table,mode): raise SyntaxError, "Not supported" + def alias(self,table,alias): raise SyntaxError, "Not supported" + def migrate_table(self,*a,**b): raise SyntaxError, "Not supported" + def distributed_transaction_begin(self,key): raise SyntaxError, "Not supported" + def prepare(self,key): raise SyntaxError, "Not supported" + def commit_prepared(self,key): raise SyntaxError, "Not supported" + def rollback_prepared(self,key): raise SyntaxError, "Not supported" + def concat_add(self,table): raise SyntaxError, "Not supported" + def constraint_name(self, table, fieldname): raise SyntaxError, "Not supported" + def create_sequence_and_triggers(self, query, table, **args): pass + def log_execute(self,*a,**b): raise SyntaxError, "Not 
class GAEF(object):
    """One Google-datastore filter: field name, operator and value,
    plus a Python callable that applies the same test in memory."""

    def __init__(self, name, op, value, apply):
        # The datastore addresses the primary key as '__key__'.
        if name == 'id':
            self.name = '__key__'
        else:
            self.name = name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op,
                                  repr(self.value), type(self.value))
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None): + myfields = {} + for k in table.fields: + if isinstance(polymodel,Table) and k in polymodel.fields(): + continue + field = table[k] + attr = {} + if isinstance(field.type, SQLCustomType): + ftype = self.types[field.type.native or field.type.type](**attr) + elif isinstance(field.type, gae.Property): + ftype = field.type + elif field.type.startswith('id'): + continue + elif field.type.startswith('decimal'): + precision, scale = field.type[7:].strip('()').split(',') + precision = int(precision) + scale = int(scale) + ftype = GAEDecimalProperty(precision, scale, **attr) + elif field.type.startswith('reference'): + if field.notnull: + attr = dict(required=True) + referenced = field.type[10:].strip() + ftype = self.types[field.type[:9]](table._db[referenced]) + elif field.type.startswith('list:reference'): + if field.notnull: + attr = dict(required=True) + referenced = field.type[15:].strip() + ftype = self.types[field.type[:14]](**attr) + elif field.type.startswith('list:'): + ftype = self.types[field.type](**attr) + elif not field.type in self.types\ + or not self.types[field.type]: + raise SyntaxError, 'Field: unknown field type: %s' % field.type + else: + ftype = self.types[field.type](**attr) + myfields[field.name] = ftype + if not polymodel: + table._tableobj = classobj(table._tablename, (gae.Model, ), myfields) + elif polymodel==True: + table._tableobj = classobj(table._tablename, (PolyModel, ), myfields) + elif isinstance(polymodel,Table): + table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) + else: + raise SyntaxError, "polymodel must be None, True, a table or a tablename" + return None + + def expand(self,expression,field_type=None): + if isinstance(expression,Field): + if expression.type in ('text','blob'): + raise SyntaxError, 'AppEngine does not index by: %s' % expression.type + return expression.name + elif isinstance(expression, (Expression, Query)): 
+ if not expression.second is None: + return expression.op(expression.first, expression.second) + elif not expression.first is None: + return expression.op(expression.first) + else: + return expression.op() + elif field_type: + return self.represent(expression,field_type) + elif isinstance(expression,(list,tuple)): + return ','.join([self.represent(item,field_type) for item in expression]) + else: + return str(expression) + + ### TODO from gql.py Expression + def AND(self,first,second): + a = self.expand(first) + b = self.expand(second) + if b[0].name=='__key__' and a[0].name!='__key__': + return b+a + return a+b + + def EQ(self,first,second=None): + if isinstance(second, Key): + return [GAEF(first.name,'=',second,lambda a,b:a==b)] + return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)] + + def NE(self,first,second=None): + if first.type != 'id': + return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] + else: + second = Key.from_path(first._tablename, long(second)) + return [GAEF(first.name,'!=',second,lambda a,b:a!=b)] + + def LT(self,first,second=None): + if first.type != 'id': + return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] + else: + second = Key.from_path(first._tablename, long(second)) + return [GAEF(first.name,'<',second,lambda a,b:a<b)] + + def LE(self,first,second=None): + if first.type != 'id': + return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] + else: + second = Key.from_path(first._tablename, long(second)) + return [GAEF(first.name,'<=',second,lambda a,b:a<=b)] + + def GT(self,first,second=None): + if first.type != 'id' or second==0 or second == '0': + return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)] + else: + second = Key.from_path(first._tablename, long(second)) + return [GAEF(first.name,'>',second,lambda a,b:a>b)] + + def GE(self,first,second=None): + if first.type != 'id': + return 
[GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] + else: + second = Key.from_path(first._tablename, long(second)) + return [GAEF(first.name,'>=',second,lambda a,b:a>=b)] + + def INVERT(self,first): + return '-%s' % first.name + + def COMMA(self,first,second): + return '%s, %s' % (self.expand(first),self.expand(second)) + + def BELONGS(self,first,second=None): + if not isinstance(second,(list, tuple)): + raise SyntaxError, "Not supported" + if first.type != 'id': + return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] + else: + second = [Key.from_path(first._tablename, i) for i in second] + return [GAEF(first.name,'in',second,lambda a,b:a in b)] + + def CONTAINS(self,first,second): + if not first.type.startswith('list:'): + raise SyntaxError, "Not supported" + return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:a in b)] + + def NOT(self,first): + nops = { self.EQ: self.NE, + self.NE: self.EQ, + self.LT: self.GE, + self.GT: self.LE, + self.LE: self.GT, + self.GE: self.LT} + if not isinstance(first,Query): + raise SyntaxError, "Not suported" + nop = nops.get(first.op,None) + if not nop: + raise SyntaxError, "Not suported %s" % first.op.__name__ + first.op = nop + return self.expand(first) + + def truncate(self,table,mode): + self.db(table._id > 0).delete() + + def select_raw(self,query,fields=None,attributes=None): + fields = fields or [] + attributes = attributes or {} + new_fields = [] + for item in fields: + if isinstance(item,SQLALL): + new_fields += item.table + else: + new_fields.append(item) + fields = new_fields + if query: + tablename = self.get_table(query) + elif fields: + tablename = fields[0].tablename + query = fields[0].table._id>0 + else: + raise SyntaxError, "Unable to determine a tablename" + query = self.filter_tenant(query,[tablename]) + tableobj = self.db[tablename]._tableobj + items = tableobj.all() + filters = self.expand(query) + for filter in filters: + if 
filter.name=='__key__' and filter.op=='>' and filter.value==0: + continue + elif filter.name=='__key__' and filter.op=='=': + if filter.value==0: + items = [] + elif isinstance(filter.value, Key): + item = tableobj.get(filter.value) + items = (item and [item]) or [] + else: + item = tableobj.get_by_id(filter.value) + items = (item and [item]) or [] + elif isinstance(items,list): # i.e. there is a single record! + items = [i for i in items if filter.apply(getattr(item,filter.name), + filter.value)] + else: + if filter.name=='__key__': items.order('__key__') + items = items.filter('%s %s' % (filter.name,filter.op),filter.value) + if not isinstance(items,list): + if attributes.get('left', None): + raise SyntaxError, 'Set: no left join in appengine' + if attributes.get('groupby', None): + raise SyntaxError, 'Set: no groupby in appengine' + orderby = attributes.get('orderby', False) + if orderby: + ### THIS REALLY NEEDS IMPROVEMENT !!! + if isinstance(orderby, (list, tuple)): + orderby = xorify(orderby) + if isinstance(orderby,Expression): + orderby = self.expand(orderby) + orders = orderby.split(', ') + for order in orders: + order={'-id':'-__key__','id':'__key__'}.get(order,order) + items = items.order(order) + if attributes.get('limitby', None): + (lmin, lmax) = attributes['limitby'] + (limit, offset) = (lmax - lmin, lmin) + items = items.fetch(limit, offset=offset) + fields = self.db[tablename].fields + return (items, tablename, fields) + + def select(self,query,fields,attributes): + (items, tablename, fields) = self.select_raw(query,fields,attributes) + # self.db['_lastsql'] = self._select(query,fields,attributes) + rows = [ + [t=='id' and int(item.key().id()) or getattr(item, t) for t in fields] + for item in items] + colnames = ['%s.%s' % (tablename, t) for t in fields] + return self.parse(rows, colnames, False) + + + def count(self,query,distinct=None): + if distinct: + raise RuntimeError, "COUNT DISTINCT not supported" + (items, tablename, fields) = 
def uuid2int(uuidv):
    """Map a UUID string to its 128-bit integer value."""
    return uuid.UUID(uuidv).int


def int2uuid(n):
    """Inverse of uuid2int: canonical UUID string for a 128-bit integer."""
    return str(uuid.UUID(int=n))
'double': float, + 'date': datetime.date, + 'time': datetime.time, + 'datetime': datetime.datetime, + 'id': long, + 'reference': long, + 'list:string': list, + 'list:integer': list, + 'list:reference': list, + } + + def file_exists(self, filename): pass + def file_open(self, filename, mode='rb', lock=True): pass + def file_close(self, fileobj, unlock=True): pass + + def expand(self,expression,field_type=None): + if isinstance(expression,Field): + if expression.type=='id': + return "%s._id" % expression.tablename + return BaseAdapter.expand(self,expression,field_type) + + def AND(self,first,second): + return '(%s && %s)' % (self.expand(first),self.expand(second)) + + def OR(self,first,second): + return '(%s || %s)' % (self.expand(first),self.expand(second)) + + def EQ(self,first,second): + if second is None: + return '(%s == null)' % self.expand(first) + return '(%s == %s)' % (self.expand(first),self.expand(second,first.type)) + + def NE(self,first,second): + if second is None: + return '(%s != null)' % self.expand(first) + return '(%s != %s)' % (self.expand(first),self.expand(second,first.type)) + + def COMMA(self,first,second): + return '%s + %s' % (self.expand(first),self.expand(second)) + + def represent(self, obj, fieldtype): + value = NoSQLAdapter.represent(self, obj, fieldtype) + if fieldtype=='id': + return repr(str(int(value))) + return repr(not isinstance(value,unicode) and value or value.encode('utf8')) + + def __init__(self,db,uri='couchdb://127.0.0.1:5984', + pool_size=0,folder=None,db_codec ='UTF-8', + credential_decoder=lambda x:x, driver_args={}, + adapter_args={}): + self.db = db + self.uri = uri + self.dbengine = 'couchdb' + self.folder = folder + db['_lastsql'] = '' + self.db_codec = 'UTF-8' + self.pool_size = pool_size + + url='http://'+uri[10:] + def connect(url=url,driver_args=driver_args): + return couchdb.Server(url,**driver_args) + self.pool_connection(connect,cursor=False) + + def create_table(self, table, migrate=True, fake_migrate=False, 
polymodel=None): + if migrate: + try: + self.connection.create(table._tablename) + except: + pass + + def insert(self,table,fields): + id = uuid2int(web2py_uuid()) + ctable = self.connection[table._tablename] + values = dict((k.name,NoSQLAdapter.represent(self,v,k.type)) for k,v in fields) + values['_id'] = str(id) + ctable.save(values) + return id + + def _select(self,query,fields,attributes): + if not isinstance(query,Query): + raise SyntaxError, "Not Supported" + for key in set(attributes.keys())-set(('orderby','groupby','limitby', + 'required','cache','left', + 'distinct','having')): + raise SyntaxError, 'invalid select attribute: %s' % key + new_fields=[] + for item in fields: + if isinstance(item,SQLALL): + new_fields += item.table + else: + new_fields.append(item) + def uid(fd): + return fd=='id' and '_id' or fd + def get(row,fd): + return fd=='id' and int(row['_id']) or row.get(fd,None) + fields = new_fields + tablename = self.get_table(query) + fieldnames = [f.name for f in (fields or self.db[tablename])] + colnames = ['%s.%s' % (tablename,k) for k in fieldnames] + fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) + fn="function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);}" %\ + dict(t=tablename, + query=self.expand(query), + order='%s._id' % tablename, + fields=fields) + return fn, colnames + + def select(self,query,fields,attributes): + if not isinstance(query,Query): + raise SyntaxError, "Not Supported" + fn, colnames = self._select(query,fields,attributes) + tablename = colnames[0].split('.')[0] + ctable = self.connection[tablename] + rows = [cols['value'] for cols in ctable.query(fn)] + return self.parse(rows, colnames, False) + + def delete(self,tablename,query): + if not isinstance(query,Query): + raise SyntaxError, "Not Supported" + if query.first.type=='id' and query.op==self.EQ: + id = query.second + tablename = query.first.tablename + assert(tablename == query.first.tablename) + ctable = self.connection[tablename] + 
def cleanup(text):
    """
    validates that the given text is clean: only contains [0-9a-zA-Z_]
    """
    # Reject any character outside the identifier-safe set; the empty
    # string passes through untouched.
    if re.search('[^0-9a-zA-Z_]', text):
        raise SyntaxError(
            'only [0-9a-zA-Z_] allowed in table and field names, received %s'
            % text)
    return text
class MongoDBAdapter(NoSQLAdapter):
    """
    Experimental DAL adapter for MongoDB.

    Only ``insert`` is implemented; select/update/delete/count raise
    RuntimeError for now.
    """
    # uploads are stored inside the record itself (no filesystem assumed)
    uploads_in_blob = True
    types = {
        'boolean': bool,
        'string': str,
        'text': str,
        'password': str,
        'blob': str,
        'upload': str,
        'integer': long,
        'double': float,
        'date': datetime.date,
        'time': datetime.time,
        'datetime': datetime.datetime,
        'id': long,
        'reference': long,
        'list:string': list,
        'list:integer': list,
        'list:reference': list,
        }

    def __init__(self, db, uri='mongodb://127.0.0.1:27017/db',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x: x, driver_args={},
                 adapter_args={}):
        # NOTE: the old default URI pointed at port 5984, which is
        # CouchDB's port (copy/paste from CouchDBAdapter); MongoDB
        # listens on 27017 by default
        self.db = db
        self.uri = uri
        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        # honour the caller's codec; it was hard-coded to 'UTF-8',
        # silently ignoring the db_codec argument
        self.db_codec = db_codec
        self.pool_size = pool_size

        m = re.compile('^(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(self.uri[10:])
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        host = m.group('host')
        if not host:
            raise SyntaxError('mongodb: host name required')
        dbname = m.group('db')
        if not dbname:
            raise SyntaxError('mongodb: db name required')
        # pymongo expects an integer port; the regex group is a string
        port = int(m.group('port') or 27017)
        # build a fresh dict instead of mutating driver_args in place:
        # the default {} is shared across every instance of the adapter
        driver_args = dict(driver_args, host=host, port=port)
        def connect(dbname=dbname, driver_args=driver_args):
            return pymongo.Connection(**driver_args)[dbname]
        self.pool_connection(connect, cursor=False)

    def insert(self, table, fields):
        ctable = self.connection[table._tablename]
        # key the document by field *name* (k is a Field object); the old
        # code did table[k].type which indexes the Table with a Field and
        # fails -- the CouchDB adapter's update() uses key.name the same way
        values = dict((k.name, self.represent(v, k.type)) for k, v in fields)
        # capture the new document id; the old code returned uuid2int(id)
        # where `id` was the *builtin function*, never the inserted id
        # TODO(review): confirm uuid2int accepts the driver-generated id
        new_id = ctable.insert(values)
        return uuid2int(new_id)

    def count(self, query):
        raise RuntimeError("Not implemented")

    def select(self, query, fields, attributes):
        raise RuntimeError("Not implemented")

    def delete(self, tablename, query):
        raise RuntimeError("Not implemented")

    def update(self, tablename, query, fields):
        raise RuntimeError("Not implemented")


########################################################################
# end of adapters
########################################################################

ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
}
def sqlhtml_validators(field):
    """
    Return the default list of web2py validators for ``field``,
    derived from its declared type, length, notnull and unique flags.

    Reference and list:reference fields additionally receive a default
    ``represent`` function based on the referenced table's ``_format``.
    """
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        field_type = field_type.type
    elif not isinstance(field_type, str):
        return []
    requires = []

    def format_record(ref_table, record_id):
        # render a referenced record through the referenced table's _format
        row = ref_table(record_id)
        if not row:
            return record_id
        if hasattr(ref_table, '_format') and isinstance(ref_table._format, str):
            return ref_table._format % row
        if hasattr(ref_table, '_format') and callable(ref_table._format):
            return ref_table._format(row)
        return record_id

    if field_type in ('string', 'text', 'password'):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'double':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type == 'integer':
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif field.db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in field.db.tables:
        referenced = field.db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=format_record):
            return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db, referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(field.db, field)
            if field.tablename == field_type[10:]:
                # self-reference must be allowed to be empty
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif field.db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in field.db.tables:
        referenced = field.db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=format_record):
            if not ids:
                return None
            refs = r._db(r._id.belongs(ids)).select(r._id)
            return (refs and ', '.join(str(f(r, ref.id)) for ref in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(field.db, referenced._id,
                                           referenced._format, multiple=True)
        else:
            requires = validators.IS_IN_DB(field.db, referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(field.db, field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values, row=None):
            return ', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0, validators.IS_NOT_IN_DB(field.db, field))
    # two-letter prefixes of types that may legitimately be empty:
    # integer, double, date/datetime, time, decimal, boolean (presumably)
    skip_prefixes = ('in', 'do', 'da', 'ti', 'de', 'bo')
    if field.notnull and not field_type[:2] in skip_prefixes:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in skip_prefixes and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    # escape '|' so values can be joined with '|' unambiguously
    return str(item).replace('|', '||')

def bar_encode(items):
    """Serialize a list as a '|'-delimited string: ['a','b'] -> '|a|b|'."""
    return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())

def bar_decode_integer(value):
    """Inverse of bar_encode for integer lists."""
    return [int(chunk) for chunk in value.split('|') if chunk.strip()]

def bar_decode_string(value):
    """Inverse of bar_encode for string lists (honours escaped '||')."""
    return [chunk.replace('||', '|')
            for chunk in string_unpack.split(value[1:-1]) if chunk.strip()]


class Row(dict):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __getitem__(self, key):
        key = str(key)
        match = table_field.match(key)
        extra = self.get('_extra', {})
        # computed/extra columns win over regular ones
        if key in extra:
            return extra[key]
        if match:
            # 'table.field' style access
            try:
                return dict.__getitem__(self, match.group(1))[match.group(2)]
            except (KeyError, TypeError):
                key = match.group(2)
        return dict.__getitem__(self, key)

    def __call__(self, key):
        return self.__getitem__(key)

    def __setitem__(self, key, value):
        dict.__setitem__(self, str(key), value)

    def __getattr__(self, key):
        return self[key]

    def __setattr__(self, key, value):
        self[key] = value

    def __repr__(self):
        return '<Row ' + dict.__repr__(self) + '>'

    def __int__(self):
        return dict.__getitem__(self, 'id')

    def __eq__(self, other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self, other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False):
        """Return a plain-dict copy, keeping only serializable values."""
        SERIALIZABLE_TYPES = (str, unicode, int, long, float, bool, list)
        result = dict(self)
        for key in copy.copy(result.keys()):
            value = result[key]
            if value is None:
                continue
            elif isinstance(value, Row):
                result[key] = value.as_dict()
            elif isinstance(value, Reference):
                result[key] = int(value)
            elif isinstance(value, decimal.Decimal):
                result[key] = float(value)
            elif isinstance(value, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # ISO format, 'T' separator replaced, seconds precision
                    result[key] = value.isoformat().replace('T', ' ')[:19]
            elif not isinstance(value, SERIALIZABLE_TYPES):
                del result[key]
        return result


def Row_unpickler(data):
    return Row(cPickle.loads(data))
def Row_pickler(data):
    # pickle a Row through its plain-dict representation
    return Row_unpickler, (cPickle.dumps(data.as_dict(datetime_to_str=False)),)

# register custom (un)pickling so Row round-trips as a plain dict
copy_reg.pickle(Row, Row_pickler, Row_unpickler)


################################################################################
# Everything below should be independent of the specifics of the
# database and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list (of table names) that can also be called, returning a copy."""

    def __call__(self):
        return copy.copy(self)
def smart_query(fields, text):
    """
    Build a Query from a free-text search expression such as
    "name starts with 'J' and age > 10", resolving field names
    against the given fields (a Field, Table, or list of them).

    Raises RuntimeError on unknown fields, malformed syntax or
    unsupported operators; returns None for empty input.
    """
    if not isinstance(fields, (list, tuple)):
        fields = [fields]
    new_fields = []
    for field in fields:
        if isinstance(field, Field):
            new_fields.append(field)
        elif isinstance(field, Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    # map both 'name' and 'table.name' to the field; iterate the
    # *flattened* list -- the original iterated `fields`, so passing a
    # Table crashed here (Table has no .name) and the flattening was lost
    field_map = {}
    for field in new_fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # pull quoted constants out so their content is never parsed
    re_constants = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')')
    constants = {}
    i = 0
    while True:
        m = re_constants.search(text)
        if not m: break
        text = text[:m.start()] + ('#%i' % i) + text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i += 1
    text = re.sub('\s+', ' ', text).lower()
    # normalize symbolic operators in ONE regex pass, longest match first.
    # The original chain of str.replace calls handled '<' before '<=',
    # turning "x<=3" into "x <  == 3" and breaking every two-character
    # relational operator.
    symbol_map = {'&': 'and', '|': 'or', '~': 'not', '<>': '!=',
                  '=<': '<=', '=>': '>=', '=': '=='}
    text = re.sub('(<=|>=|=<|=>|<>|==|!=|[<>=&|~])',
                  lambda m: ' %s ' % symbol_map.get(m.group(1), m.group(1)),
                  text)
    for a, b in [(' less or equal than ', '<='),
                 (' greater or equal than ', '>='),
                 (' equal or less than ', '<='),
                 (' equal or greater than ', '>='),
                 (' less or equal ', '<='),
                 (' greater or equal ', '>='),
                 (' equal or less ', '<='),
                 (' equal or greater ', '>='),
                 (' not equal to ', '!='),
                 (' not equal ', '!='),
                 (' equal to ', '=='),
                 (' equal ', '=='),
                 (' equals ', '=='),  # was mapped to '!=' -- typo
                 (' less than ', '<'),
                 (' greater than ', '>'),
                 (' starts with ', 'startswith'),
                 (' ends with ', 'endswith'),
                 (' is ', '==')]:
        # handle the "is <phrase>" variant first (e.g. "is less than")
        text = text.replace(' is' + a, ' %s ' % b)
        text = text.replace(a, ' %s ' % b)
    text = re.sub('\s+', ' ', text).lower()
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and', 'or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
            # equality is deliberately interpreted as a LIKE match
            # (the dead `field == value` branch that followed is removed)
            if op == '==': op = 'like'
            if op == '<': new_query = field < value
            elif op == '>': new_query = field > value
            elif op == '<=': new_query = field <= value
            elif op == '>=': new_query = field >= value
            # '!=' branch was missing although '<>' and 'not equal'
            # both normalize to it, so they always raised
            elif op == '!=': new_query = field != value
            elif op == 'contains': new_query = field.contains(value)
            elif op == 'like': new_query = field.like(value)
            elif op == 'startswith': new_query = field.startswith(value)
            elif op == 'endswith': new_query = field.endswith(value)
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query


class DAL(dict):

    """
    an instance of this class represents a database connection

    Example::

        db = DAL('sqlite://test.db')
        db.define_table('tablename', Field('fieldname1'),
                        Field('fieldname2'))
    """

    @staticmethod
    def set_folder(folder):
        """
        # ## this allows gluon to set a folder for this thread
        # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
        """
        BaseAdapter.set_folder(folder)
threading.currentThread()) + keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] + instances = enumerate(instances) + for (i, db) in instances: + if not db._adapter.support_distributed_transaction(): + raise SyntaxError, \ + 'distributed transaction not suported by %s' % db._dbname + for (i, db) in instances: + db._adapter.distributed_transaction_begin(keys[i]) + + @staticmethod + def distributed_transaction_commit(*instances): + if not instances: + return + instances = enumerate(instances) + thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) + keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] + for (i, db) in instances: + if not db._adapter.support_distributed_transaction(): + raise SyntaxError, \ + 'distributed transaction not suported by %s' % db._dbanme + try: + for (i, db) in instances: + db._adapter.prepare(keys[i]) + except: + for (i, db) in instances: + db._adapter.rollback_prepared(keys[i]) + raise RuntimeError, 'failure to commit distributed transaction' + else: + for (i, db) in instances: + db._adapter.commit_prepared(keys[i]) + return + + + def __init__(self, uri='sqlite://dummy.db', + pool_size=0, folder=None, + db_codec='UTF-8', check_reserved=None, + migrate=True, fake_migrate=False, + migrate_enabled=True, fake_migrate_all=False, + decode_credentials=False, driver_args=None, + adapter_args=None, attempts=5, auto_import=False): + """ + Creates a new Database Abstraction Layer instance. + + Keyword arguments: + + :uri: string that contains information for connecting to a database. + (default: 'sqlite://dummy.db') + :pool_size: How many open connections to make to the database object. + :folder: <please update me> + :db_codec: string encoding of the database (default: 'UTF-8') + :check_reserved: list of adapters to check tablenames and column names + against sql reserved keywords. (Default None) + + * 'common' List of sql keywords that are common to all database types + such as "SELECT, INSERT". 
(recommended) + * 'all' Checks against all known SQL keywords. (not recommended) + <adaptername> Checks against the specific adapters list of keywords + (recommended) + * '<adaptername>_nonreserved' Checks against the specific adapters + list of nonreserved keywords. (if available) + :migrate (defaults to True) sets default migrate behavior for all tables + :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables + :migrate_enabled (defaults to True). If set to False disables ALL migrations + :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables + :attempts (defaults to 5). Number of times to attempt connecting + """ + if not decode_credentials: + credential_decoder = lambda cred: cred + else: + credential_decoder = lambda cred: urllib.unquote(cred) + if folder: + self.set_folder(folder) + self._uri = uri + self._pool_size = pool_size + self._db_codec = db_codec + self._lastsql = '' + self._timings = [] + self._pending_references = {} + self._request_tenant = 'request_tenant' + self._common_fields = [] + self._referee_name = '%(table)s' + if not str(attempts).isdigit() or attempts < 0: + attempts = 5 + if uri: + uris = isinstance(uri,(list,tuple)) and uri or [uri] + error = '' + connected = False + for k in range(attempts): + for uri in uris: + try: + if is_jdbc and not uri.startswith('jdbc:'): + uri = 'jdbc:'+uri + self._dbname = regex_dbname.match(uri).group() + if not self._dbname in ADAPTERS: + raise SyntaxError, "Error in URI '%s' or database not supported" % self._dbname + # notice that driver args or {} else driver_args + # defaults to {} global, not correct + args = (self,uri,pool_size,folder, + db_codec, credential_decoder, + driver_args or {}, adapter_args or {}) + self._adapter = ADAPTERS[self._dbname](*args) + connected = True + break + except SyntaxError: + raise + except Exception, error: + sys.stderr.write('DEBUG_c: Exception %r' % ((Exception, error,),)) + if connected: + break + else: + 
time.sleep(1) + if not connected: + raise RuntimeError, "Failure to connect, tried %d times:\n%s" % (attempts, error) + else: + args = (self,'None',0,folder,db_codec) + self._adapter = BaseAdapter(*args) + migrate = fake_migrate = False + adapter = self._adapter + self._uri_hash = hashlib.md5(adapter.uri).hexdigest() + self.tables = SQLCallableList() + self.check_reserved = check_reserved + if self.check_reserved: + from reserved_sql_keywords import ADAPTERS as RSK + self.RSK = RSK + self._migrate = migrate + self._fake_migrate = fake_migrate + self._migrate_enabled = migrate_enabled + self._fake_migrate_all = fake_migrate_all + if auto_import: + self.import_table_definitions(adapter.folder) + + def import_table_definitions(self,path,migrate=False,fake_migrate=False): + pattern = os.path.join(path,self._uri_hash+'_*.table') + for filename in glob.glob(pattern): + tfile = self._adapter.file_open(filename, 'r') + try: + sql_fields = cPickle.load(tfile) + name = filename[len(pattern)-7:-6] + mf = [(value['sortable'],Field(key,type=value['type'])) \ + for key, value in sql_fields.items()] + mf.sort(lambda a,b: cmp(a[0],b[0])) + self.define_table(name,*[item[1] for item in mf], + **dict(migrate=migrate,fake_migrate=fake_migrate)) + finally: + self._adapter.file_close(tfile) + + def check_reserved_keyword(self, name): + """ + Validates ``name`` against SQL keywords + Uses self.check_reserve which is a list of + operators to use. 
+ self.check_reserved + ['common', 'postgres', 'mysql'] + self.check_reserved + ['all'] + """ + for backend in self.check_reserved: + if name.upper() in self.RSK[backend]: + raise SyntaxError, 'invalid table/column name "%s" is a "%s" reserved SQL keyword' % (name, backend.upper()) + + def __contains__(self, tablename): + if self.has_key(tablename): + return True + else: + return False + + def parse_as_rest(self,patterns,args,vars,query=None,nested_select=True): + """ + EXAMPLE: + +db.define_table('person',Field('name'),Field('info')) +db.define_table('pet',Field('person',db.person),Field('name'),Field('info')) + +@request.restful() +def index(): + def GET(*kargs,**kvars): + patterns = [ + "/persons[person]", + "/{person.name.startswith}", + "/{person.name}/:field", + "/{person.name}/pets[pet.person]", + "/{person.name}/pet[pet.person]/{pet.name}", + "/{person.name}/pet[pet.person]/{pet.name}/:field" + ] + parser = db.parse_as_rest(patterns,kargs,kvars) + if parser.status == 200: + return dict(content=parser.response) + else: + raise HTTP(parser.status,parser.error) + def POST(table_name,**kvars): + if table_name == 'person': + return db.person.validate_and_insert(**kvars) + elif table_name == 'pet': + return db.pet.validate_and_insert(**kvars) + else: + raise HTTP(400) + return locals() + """ + + db = self + re1 = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') + re2 = re.compile('^.+\[.+\]$') + + def auto_table(table,base='',depth=0): + patterns = [] + for field in db[table].fields: + if base: + tag = '%s/%s' % (base,field.replace('_','-')) + else: + tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) + f = db[table][field] + if not f.readable: continue + if f.type=='id' or 'slug' in field or f.type.startswith('reference'): + tag += '/{%s.%s}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + elif f.type.startswith('boolean'): + tag += '/{%s.%s}' % 
(table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + elif f.type.startswith('double') or f.type.startswith('integer'): + tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + elif f.type.startswith('list:'): + tag += '/{%s.%s.contains}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + elif f.type in ('date','datetime'): + tag+= '/{%s.%s.year}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + tag+='/{%s.%s.month}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + tag+='/{%s.%s.day}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + if f.type in ('datetime','time'): + tag+= '/{%s.%s.hour}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + tag+='/{%s.%s.minute}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + tag+='/{%s.%s.second}' % (table,field) + patterns.append(tag) + patterns.append(tag+'/:field') + if depth>0: + for rtable,rfield in db[table]._referenced_by: + tag+='/%s[%s.%s]' % (rtable,rtable,rfield) + patterns.append(tag) + patterns += auto_table(rtable,base=tag,depth=depth-1) + return patterns + + if patterns=='auto': + patterns=[] + for table in db.tables: + if not table.startswith('auth_'): + patterns += auto_table(table,base='',depth=1) + else: + i = 0 + while i<len(patterns): + pattern = patterns[i] + tokens = pattern.split('/') + if tokens[-1].startswith(':auto') and re2.match(tokens[-1]): + new_patterns = auto_table(tokens[-1][tokens[-1].find('[')+1:-1],'/'.join(tokens[:-1])) + patterns = patterns[:i]+new_patterns+patterns[i+1:] + i += len(new_patterns) + else: + i += 1 + if '/'.join(args) == 'patterns': + return Row({'status':200,'pattern':'list', + 'error':None,'response':patterns}) + for pattern in patterns: + otable=table=None + dbset=db(query) + i=0 + tags = pattern[1:].split('/') + # print pattern + if 
len(tags)!=len(args): + continue + for tag in tags: + # print i, tag, args[i] + if re1.match(tag): + # print 're1:'+tag + tokens = tag[1:-1].split('.') + table, field = tokens[0], tokens[1] + if not otable or table == otable: + if len(tokens)==2 or tokens[2]=='eq': + query = db[table][field]==args[i] + elif tokens[2]=='ne': + query = db[table][field]!=args[i] + elif tokens[2]=='lt': + query = db[table][field]<args[i] + elif tokens[2]=='gt': + query = db[table][field]>args[i] + elif tokens[2]=='ge': + query = db[table][field]>=args[i] + elif tokens[2]=='le': + query = db[table][field]<=args[i] + elif tokens[2]=='year': + query = db[table][field].year()==args[i] + elif tokens[2]=='month': + query = db[table][field].month()==args[i] + elif tokens[2]=='day': + query = db[table][field].day()==args[i] + elif tokens[2]=='hour': + query = db[table][field].hour()==args[i] + elif tokens[2]=='minute': + query = db[table][field].minutes()==args[i] + elif tokens[2]=='second': + query = db[table][field].seconds()==args[i] + elif tokens[2]=='startswith': + query = db[table][field].startswith(args[i]) + elif tokens[2]=='contains': + query = db[table][field].contains(args[i]) + else: + raise RuntimeError, "invalid pattern: %s" % pattern + if len(tokens)==4 and tokens[3]=='not': + query = ~query + elif len(tokens)>=4: + raise RuntimeError, "invalid pattern: %s" % pattern + dbset=dbset(query) + else: + raise RuntimeError, "missing relation in pattern: %s" % pattern + elif otable and re2.match(tag) and args[i]==tag[:tag.find('[')]: + # print 're2:'+tag + ref = tag[tag.find('[')+1:-1] + if '.' 
in ref: + table,field = ref.split('.') + # print table,field + if nested_select: + try: + dbset=db(db[table][field].belongs(dbset._select(db[otable]._id))) + except ValueError: + return Row({'status':400,'pattern':pattern, + 'error':'invalid path','response':None}) + else: + items = [item.id for item in dbset.select(db[otable]._id)] + dbset=db(db[table][field].belongs(items)) + else: + dbset=dbset(db[ref]) + elif tag==':field' and table: + # # print 're3:'+tag + field = args[i] + if not field in db[table]: break + try: + item = dbset.select(db[table][field],limitby=(0,1)).first() + except ValueError: + return Row({'status':400,'pattern':pattern, + 'error':'invalid path','response':None}) + if not item: + return Row({'status':404,'pattern':pattern, + 'error':'record not found','response':None}) + else: + return Row({'status':200,'response':item[field], + 'pattern':pattern}) + elif tag != args[i]: + break + otable = table + i += 1 + if i==len(tags) and table: + otable,ofield = vars.get('order','%s.%s' % (table,field)).split('.',1) + try: + if otable[:1]=='~': orderby = ~db[otable[1:]][ofield] + else: orderby = db[otable][ofield] + except KeyError: + return Row({'status':400,'error':'invalid orderby','response':None}) + fields = [field for field in db[table] if field.readable] + count = dbset.count() + try: + limits = (int(vars.get('min',0)),int(vars.get('max',1000))) + if limits[0]<0 or limits[1]<limits[0]: raise ValueError + except ValueError: + Row({'status':400,'error':'invalid limits','response':None}) + if count > limits[1]-limits[0]: + Row({'status':400,'error':'too many records','response':None}) + try: + response = dbset.select(limitby=limits,orderby=orderby,*fields) + except ValueError: + return Row({'status':400,'pattern':pattern, + 'error':'invalid path','response':None}) + return Row({'status':200,'response':response,'pattern':pattern}) + return Row({'status':400,'error':'no matching pattern','response':None}) + + + def define_table( + self, + tablename, 
+ *fields, + **args + ): + + for key in args: + if key not in [ + 'migrate', + 'primarykey', + 'fake_migrate', + 'format', + 'trigger_name', + 'sequence_name', + 'polymodel', + 'table_class']: + raise SyntaxError, 'invalid table "%s" attribute: %s' \ + % (tablename, key) + migrate = self._migrate_enabled and args.get('migrate', + self._migrate) + fake_migrate = self._fake_migrate_all or args.get('fake_migrate', + self._fake_migrate) + table_class = args.get('table_class',Table) + format = args.get('format',None) + trigger_name = args.get('trigger_name', None) + sequence_name = args.get('sequence_name', None) + primarykey=args.get('primarykey',None) + polymodel=args.get('polymodel',None) + if not isinstance(tablename,str): + raise SyntaxError, "missing table name" + tablename = cleanup(tablename) + lowertablename = tablename.lower() + + if tablename.startswith('_') or hasattr(self,lowertablename) or \ + regex_python_keywords.match(tablename): + raise SyntaxError, 'invalid table name: %s' % tablename + elif lowertablename in self.tables: + raise SyntaxError, 'table already defined: %s' % tablename + elif self.check_reserved: + self.check_reserved_keyword(tablename) + + if self._common_fields: + fields = [f for f in fields] + [f for f in self._common_fields] + + t = self[tablename] = table_class(self, tablename, *fields, + **dict(primarykey=primarykey, + trigger_name=trigger_name, + sequence_name=sequence_name)) + # db magic + if self._uri in (None,'None'): + return t + + t._create_references() + + if migrate or self._adapter.dbengine=='google:datastore': + try: + sql_locker.acquire() + self._adapter.create_table(t,migrate=migrate, + fake_migrate=fake_migrate, + polymodel=polymodel) + finally: + sql_locker.release() + else: + t._dbt = None + self.tables.append(tablename) + t._format = format + return t + + def __iter__(self): + for tablename in self.tables: + yield self[tablename] + + def __getitem__(self, key): + return dict.__getitem__(self, str(key)) + + def 
__setitem__(self, key, value): + dict.__setitem__(self, str(key), value) + + def __getattr__(self, key): + return self[key] + + def __setattr__(self, key, value): + if key[:1]!='_' and key in self: + raise SyntaxError, \ + 'Object %s exists and cannot be redefined' % key + self[key] = value + + def __repr__(self): + return '<DAL ' + dict.__repr__(self) + '>' + + def smart_query(self,fields,text): + return Set(self, smart_query(fields,text)) + + def __call__(self, query=None): + if isinstance(query,Table): + query = query._id>0 + elif isinstance(query,Field): + query = query!=None + return Set(self, query) + + def commit(self): + self._adapter.commit() + + def rollback(self): + self._adapter.rollback() + + def executesql(self, query, placeholders=None, as_dict=False): + """ + placeholders is optional and will always be None when using DAL + if using raw SQL with placeholders, placeholders may be + a sequence of values to be substituted in + or, *if supported by the DB driver*, a dictionary with keys + matching named placeholders in your SQL. + + Added 2009-12-05 "as_dict" optional argument. Will always be + None when using DAL. If using raw SQL can be set to True + and the results cursor returned by the DB driver will be + converted to a sequence of dictionaries keyed with the db + field names. Tested with SQLite but should work with any database + since the cursor.description used to get field names is part of the + Python dbi 2.0 specs. Results returned with as_dict = True are + the same as those returned when applying .to_list() to a DAL query. + + [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] + + --bmeredyk + """ + if placeholders: + self._adapter.execute(query, placeholders) + else: + self._adapter.execute(query) + if as_dict: + if not hasattr(self._adapter.cursor,'description'): + raise RuntimeError, "database does not support executesql(...,as_dict=True)" + # Non-DAL legacy db query, converts cursor results to dict. 
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        self.table = table

    def __str__(self):
        return ', '.join(str(field) for field in self.table)


class Reference(int):
    """
    An int subclass representing a record id; attribute access lazily
    fetches the referenced record (recursive selects).
    """

    def __allocate(self):
        # lazily fetch the referenced record the first time it is needed
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError("Using a recursive select but encountered a broken reference: %s %d" % (self._table, int(self)))

    def __getattr__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def get(self, key):
        return self.__getattr__(key)

    def __setattr__(self, key, value):
        # private attributes live on the Reference itself,
        # everything else goes into the referenced record
        if key.startswith('_'):
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value


def Reference_unpickler(data):
    return marshal.loads(data)

def Reference_pickler(data):
    # pickle as the bare integer id via marshal; fall back to a manual
    # marshal int encoding when marshal.dumps is unavailable
    try:
        marshal_dump = marshal.dumps(int(data))
    except AttributeError:
        marshal_dump = 'i%s' % struct.pack('<i', int(data))
    return (Reference_unpickler, (marshal_dump,))

# register custom pickling: a Reference round-trips as a plain int
copy_reg.pickle(Reference, Reference_pickler, Reference_unpickler)
+ """ + self._tablename = tablename + self._sequence_name = args.get('sequence_name',None) or \ + db and db._adapter.sequence_name(tablename) + self._trigger_name = args.get('trigger_name',None) or \ + db and db._adapter.trigger_name(tablename) + + primarykey = args.get('primarykey', None) + fieldnames,newfields=set(),[] + if primarykey: + if not isinstance(primarykey,list): + raise SyntaxError, \ + "primarykey must be a list of fields from table '%s'" \ + % tablename + self._primarykey = primarykey + elif not [f for f in fields if isinstance(f,Field) and f.type=='id']: + field = Field('id', 'id') + newfields.append(field) + fieldnames.add('id') + self._id = field + for field in fields: + if not isinstance(field, (Field, Table)): + raise SyntaxError, \ + 'define_table argument is not a Field or Table: %s' % field + elif isinstance(field, Field) and not field.name in fieldnames: + if hasattr(field, '_db'): + field = copy.copy(field) + newfields.append(field) + fieldnames.add(field.name) + if field.type=='id': + self._id = field + elif isinstance(field, Table): + table = field + for field in table: + if not field.name in fieldnames and not field.type=='id': + newfields.append(copy.copy(field)) + fieldnames.add(field.name) + else: + # let's ignore new fields with duplicated names!!! 
+ pass + fields = newfields + self._db = db + tablename = tablename + self.fields = SQLCallableList() + self.virtualfields = [] + fields = list(fields) + + if db and self._db._adapter.uploads_in_blob==True: + for field in fields: + if isinstance(field, Field) and field.type == 'upload'\ + and field.uploadfield is True: + tmp = field.uploadfield = '%s_blob' % field.name + fields.append(self._db.Field(tmp, 'blob', default='')) + + lower_fieldnames = set() + reserved = dir(Table) + ['fields'] + for field in fields: + if db and db.check_reserved: + db.check_reserved_keyword(field.name) + elif field.name in reserved: + raise SyntaxError, "field name %s not allowed" % field.name + + if field.name.lower() in lower_fieldnames: + raise SyntaxError, "duplicate field %s in table %s" \ + % (field.name, tablename) + else: + lower_fieldnames.add(field.name.lower()) + + self.fields.append(field.name) + self[field.name] = field + if field.type == 'id': + self['id'] = field + field.tablename = field._tablename = tablename + field.table = field._table = self + field.db = field._db = self._db + if self._db and not field.type in ('text','blob') and \ + self._db._adapter.maxcharlength < field.length: + field.length = self._db._adapter.maxcharlength + if field.requires == DEFAULT: + field.requires = sqlhtml_validators(field) + self.ALL = SQLALL(self) + + if hasattr(self,'_primarykey'): + for k in self._primarykey: + if k not in self.fields: + raise SyntaxError, \ + "primarykey must be a list of fields from table '%s " % tablename + else: + self[k].notnull = True + + def update(self,*args,**kwargs): + raise RuntimeError, "Syntax Not Supported" + + def _validate(self,**vars): + errors = Row() + for key,value in vars.items(): + value,error = self[key].validate(value) + if error: + errors[key] = error + return errors + + def _create_references(self): + pr = self._db._pending_references + self._referenced_by = [] + for fieldname in self.fields: + field=self[fieldname] + if 
isinstance(field.type,str) and field.type[:10] == 'reference ': + ref = field.type[10:].strip() + if not ref.split(): + raise SyntaxError, 'Table: reference to nothing: %s' %ref + refs = ref.split('.') + rtablename = refs[0] + if not rtablename in self._db: + pr[rtablename] = pr.get(rtablename,[]) + [field] + continue + rtable = self._db[rtablename] + if len(refs)==2: + rfieldname = refs[1] + if not hasattr(rtable,'_primarykey'): + raise SyntaxError,\ + 'keyed tables can only reference other keyed tables (for now)' + if rfieldname not in rtable.fields: + raise SyntaxError,\ + "invalid field '%s' for referenced table '%s' in table '%s'" \ + % (rfieldname, rtablename, self._tablename) + rtable._referenced_by.append((self._tablename, field.name)) + for referee in pr.get(self._tablename,[]): + self._referenced_by.append((referee._tablename,referee.name)) + + def _filter_fields(self, record, id=False): + return dict([(k, v) for (k, v) in record.items() if k + in self.fields and (self[k].type!='id' or id)]) + + def _build_query(self,key): + """ for keyed table only """ + query = None + for k,v in key.iteritems(): + if k in self._primarykey: + if query: + query = query & (self[k] == v) + else: + query = (self[k] == v) + else: + raise SyntaxError, \ + 'Field %s is not part of the primary key of %s' % \ + (k,self._tablename) + return query + + def __getitem__(self, key): + if not key: + return None + elif isinstance(key, dict): + """ for keyed table """ + query = self._build_query(key) + rows = self._db(query).select() + if rows: + return rows[0] + return None + elif str(key).isdigit(): + return self._db(self._id == key).select(limitby=(0,1)).first() + elif key: + return dict.__getitem__(self, str(key)) + + def __call__(self, key=DEFAULT, **kwargs): + if key!=DEFAULT: + if isinstance(key, Query): + record = self._db(key).select(limitby=(0,1)).first() + elif not str(key).isdigit(): + record = None + else: + record = self._db(self._id == key).select(limitby=(0,1)).first() + 
            if record:
                # key matched a record: any extra kwargs act as a filter
                for k,v in kwargs.items():
                    if record[k]!=v: return None
                return record
        elif kwargs:
            # no key given: AND together field==value pairs from kwargs
            query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.items()])
            return self._db(query).select(limitby=(0,1)).first()
        else:
            return None

    def __setitem__(self, key, value):
        # table[key_dict] = value_dict -> insert-or-update for keyed tables
        if isinstance(key, dict) and isinstance(value, dict):
            """ option for keyed table """
            if set(key.keys()) == set(self._primarykey):
                value = self._filter_fields(value)
                kv = {}
                kv.update(value)
                kv.update(key)
                # try insert first; on failure fall back to an update
                # of the record matching the primary-key dict
                if not self.insert(**kv):
                    query = self._build_query(key)
                    self._db(query).update(**self._filter_fields(value))
            else:
                raise SyntaxError,\
                    'key must have all fields from primary key: %s'%\
                    (self._primarykey)
        elif str(key).isdigit():
            # numeric key: 0 means "insert new", otherwise update by id
            if key == 0:
                self.insert(**self._filter_fields(value))
            elif not self._db(self._id == key)\
                .update(**self._filter_fields(value)):
                raise SyntaxError, 'No such record: %s' % key
        else:
            if isinstance(key, dict):
                raise SyntaxError,\
                    'value must be a dictionary: %s' % value
            # plain item assignment (used internally to store Field objects)
            dict.__setitem__(self, str(key), value)

    def __delitem__(self, key):
        # del table[primarykey_dict] (keyed tables) or del table[id]
        if isinstance(key, dict):
            query = self._build_query(key)
            if not self._db(query).delete():
                raise SyntaxError, 'No such record: %s' % key
        elif not str(key).isdigit() or not self._db(self._id == key).delete():
            raise SyntaxError, 'No such record: %s' % key

    def __getattr__(self, key):
        # attribute access proxies to item access (table.fieldname)
        return self[key]

    def __setattr__(self, key, value):
        # fields are write-once: prevents accidental redefinition
        if key in self:
            raise SyntaxError, 'Object exists and cannot be redefined: %s' % key
        self[key] = value

    def __iter__(self):
        # yield the Field objects in definition order
        for fieldname in self.fields:
            yield self[fieldname]

    def __repr__(self):
        return '<Table ' + dict.__repr__(self) + '>'

    def __str__(self):
        # '_ot' is set when the table is aliased via with_alias()
        if self.get('_ot', None):
            return '%s AS %s' % (self._ot, self._tablename)
        return self._tablename

    def _drop(self, mode = ''):
        # return the DROP SQL without executing it
        return self._db._adapter._drop(self, mode)

    def drop(self, mode = ''):
        return
self._db._adapter.drop(self,mode) + + def _listify(self,fields,update=False): + new_fields = [] + new_fields_names = [] + for name in fields: + if not name in self.fields: + if name != 'id': + raise SyntaxError, 'Field %s does not belong to the table' % name + else: + new_fields.append((self[name],fields[name])) + new_fields_names.append(name) + for ofield in self: + if not ofield.name in new_fields_names: + if not update and not ofield.default is None: + new_fields.append((ofield,ofield.default)) + elif update and not ofield.update is None: + new_fields.append((ofield,ofield.update)) + for ofield in self: + if not ofield.name in new_fields_names and ofield.compute: + try: + new_fields.append((ofield,ofield.compute(Row(fields)))) + except KeyError: + pass + if not update and ofield.required and not ofield.name in new_fields_names: + raise SyntaxError,'Table: missing required field: %s' % ofield.name + return new_fields + + def _insert(self, **fields): + return self._db._adapter._insert(self,self._listify(fields)) + + def insert(self, **fields): + return self._db._adapter.insert(self,self._listify(fields)) + + def validate_and_insert(self,**fields): + response = Row() + response.errors = self._validate(**fields) + if not response.errors: + response.id = self.insert(**fields) + else: + response.id = None + return response + + def update_or_insert(self, key=DEFAULT, **values): + if key==DEFAULT: + record = self(**values) + else: + record = self(key) + if record: + record.update_record(**values) + newid = None + else: + newid = self.insert(**values) + return newid + + def bulk_insert(self, items): + """ + here items is a list of dictionaries + """ + items = [self._listify(item) for item in items] + return self._db._adapter.bulk_insert(self,items) + + def _truncate(self, mode = None): + return self._db._adapter._truncate(self, mode) + + def truncate(self, mode = None): + return self._db._adapter.truncate(self, mode) + + def import_from_csv_file( + self, + csvfile, + 
id_map=None, + null='<NULL>', + unique='uuid', + *args, **kwargs + ): + """ + import records from csv file. Column headers must have same names as + table fields. field 'id' is ignored. If column names read 'table.file' + the 'table.' prefix is ignored. + 'unique' argument is a field which must be unique + (typically a uuid field) + """ + + delimiter = kwargs.get('delimiter', ',') + quotechar = kwargs.get('quotechar', '"') + quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) + + reader = csv.reader(csvfile, delimiter=delimiter, quotechar=quotechar, quoting=quoting) + colnames = None + if isinstance(id_map, dict): + if not self._tablename in id_map: + id_map[self._tablename] = {} + id_map_self = id_map[self._tablename] + + def fix(field, value, id_map): + list_reference_s='list:reference' + if value == null: + value = None + elif field.type=='blob': + value = base64.b64decode(value) + elif field.type=='double': + if not value.strip(): + value = None + else: + value = float(value) + elif field.type=='integer': + if not value.strip(): + value = None + else: + value = int(value) + elif field.type.startswith('list:string'): + value = bar_decode_string(value) + elif field.type.startswith(list_reference_s): + ref_table = field.type[len(list_reference_s):].strip() + value = [id_map[ref_table][int(v)] \ + for v in bar_decode_string(value)] + elif field.type.startswith('list:'): + value = bar_decode_integer(value) + elif id_map and field.type.startswith('reference'): + try: + value = id_map[field.type[9:].strip()][value] + except KeyError: + pass + return (field.name, value) + + def is_id(colname): + if colname in self: + return self[colname].type == 'id' + else: + return False + + for line in reader: + if not line: + break + if not colnames: + colnames = [x.split('.',1)[-1] for x in line][:len(line)] + cols, cid = [], [] + for i,colname in enumerate(colnames): + if is_id(colname): + cid = i + else: + cols.append(i) + if colname == unique: + unique_idx = i + else: + items = 
[fix(self[colnames[i]], line[i], id_map) \ + for i in cols if colnames[i] in self.fields] + # Validation. Check for duplicate of 'unique' &, + # if present, update instead of insert. + if not unique or unique not in colnames: + new_id = self.insert(**dict(items)) + else: + unique_value = line[unique_idx] + query = self._db[self][unique] == unique_value + record = self._db(query).select().first() + if record: + record.update_record(**dict(items)) + new_id = record[self._id.name] + else: + new_id = self.insert(**dict(items)) + if id_map and cid != []: + id_map_self[int(line[cid])] = new_id + + def with_alias(self, alias): + return self._db._adapter.alias(self,alias) + + def on(self, query): + return Expression(self._db,self._db._adapter.ON,self,query) + + + +class Expression(object): + + def __init__( + self, + db, + op, + first=None, + second=None, + type=None, + ): + + self.db = db + self.op = op + self.first = first + self.second = second + ### self._tablename = first._tablename ## CHECK + if not type and first and hasattr(first,'type'): + self.type = first.type + else: + self.type = type + + def sum(self): + return Expression(self.db, self.db._adapter.AGGREGATE, self, 'SUM', self.type) + + def max(self): + return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MAX', self.type) + + def min(self): + return Expression(self.db, self.db._adapter.AGGREGATE, self, 'MIN', self.type) + + def len(self): + return Expression(self.db, self.db._adapter.AGGREGATE, self, 'LENGTH', 'integer') + + def lower(self): + return Expression(self.db, self.db._adapter.LOWER, self, None, self.type) + + def upper(self): + return Expression(self.db, self.db._adapter.UPPER, self, None, self.type) + + def year(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'year', 'integer') + + def month(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'month', 'integer') + + def day(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'day', 
'integer') + + def hour(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'hour', 'integer') + + def minutes(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'minute', 'integer') + + def coalesce(self,*others): + return Expression(self.db, self.db._adapter.COALESCE, self, others, self.type) + + def coalesce_zero(self): + return Expression(self.db, self.db._adapter.COALESCE_ZERO, self, None, self.type) + + def seconds(self): + return Expression(self.db, self.db._adapter.EXTRACT, self, 'second', 'integer') + + def __getslice__(self, start, stop): + if start < 0: + pos0 = '(%s - %d)' % (self.len(), abs(start) - 1) + else: + pos0 = start + 1 + + if stop < 0: + length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0) + elif stop == sys.maxint: + length = self.len() + else: + length = '(%s - %s)' % (stop + 1, pos0) + return Expression(self.db,self.db._adapter.SUBSTRING, + self, (pos0, length), self.type) + + def __getitem__(self, i): + return self[i:i + 1] + + def __str__(self): + return self.db._adapter.expand(self,self.type) + + def __or__(self, other): # for use in sortby + return Expression(self.db,self.db._adapter.COMMA,self,other,self.type) + + def __invert__(self): + if hasattr(self,'_op') and self.op == self.db._adapter.INVERT: + return self.first + return Expression(self.db,self.db._adapter.INVERT,self,type=self.type) + + def __add__(self, other): + return Expression(self.db,self.db._adapter.ADD,self,other,self.type) + + def __sub__(self, other): + if self.type == 'integer': + result_type = 'integer' + elif self.type in ['date','time','datetime','double']: + result_type = 'double' + else: + raise SyntaxError, "subtraction operation not supported for type" + return Expression(self.db,self.db._adapter.SUB,self,other, + result_type) + def __mul__(self, other): + return Expression(self.db,self.db._adapter.MUL,self,other,self.type) + + def __div__(self, other): + return 
Expression(self.db,self.db._adapter.DIV,self,other,self.type) + + def __mod__(self, other): + return Expression(self.db,self.db._adapter.MOD,self,other,self.type) + + def __eq__(self, value): + return Query(self.db, self.db._adapter.EQ, self, value) + + def __ne__(self, value): + return Query(self.db, self.db._adapter.NE, self, value) + + def __lt__(self, value): + return Query(self.db, self.db._adapter.LT, self, value) + + def __le__(self, value): + return Query(self.db, self.db._adapter.LE, self, value) + + def __gt__(self, value): + return Query(self.db, self.db._adapter.GT, self, value) + + def __ge__(self, value): + return Query(self.db, self.db._adapter.GE, self, value) + + def like(self, value): + return Query(self.db, self.db._adapter.LIKE, self, value) + + def belongs(self, value): + return Query(self.db, self.db._adapter.BELONGS, self, value) + + def startswith(self, value): + if not self.type in ('string', 'text'): + raise SyntaxError, "startswith used with incompatible field type" + return Query(self.db, self.db._adapter.STARTSWITH, self, value) + + def endswith(self, value): + if not self.type in ('string', 'text'): + raise SyntaxError, "endswith used with incompatible field type" + return Query(self.db, self.db._adapter.ENDSWITH, self, value) + + def contains(self, value, all=False): + if isinstance(value,(list,tuple)): + subqueries = [self.contains(str(v).strip()) for v in value if str(v).strip()] + return reduce(all and AND or OR, subqueries) + if not self.type in ('string', 'text') and not self.type.startswith('list:'): + raise SyntaxError, "contains used with incompatible field type" + return Query(self.db, self.db._adapter.CONTAINS, self, value) + + def with_alias(self,alias): + return Expression(self.db,self.db._adapter.AS,self,alias,self.type) + + # for use in both Query and sortby + + +class SQLCustomType(object): + """ + allows defining of custom SQL types + + Example:: + + decimal = SQLCustomType( + type ='double', + native ='integer', + 
encoder =(lambda x: int(float(x) * 100)), + decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) ) + ) + + db.define_table( + 'example', + Field('value', type=decimal) + ) + + :param type: the web2py type (default = 'string') + :param native: the backend type + :param encoder: how to encode the value to store it in the backend + :param decoder: how to decode the value retrieved from the backend + :param validator: what validators to use ( default = None, will use the + default validator for type) + """ + + def __init__( + self, + type='string', + native=None, + encoder=None, + decoder=None, + validator=None, + _class=None, + ): + + self.type = type + self.native = native + self.encoder = encoder or (lambda x: x) + self.decoder = decoder or (lambda x: x) + self.validator = validator + self._class = _class or type + + def startswith(self, dummy=None): + return False + + def __getslice__(self, a=0, b=100): + return None + + def __getitem__(self, i): + return None + + def __str__(self): + return self._class + +class FieldVirtual(object): + def __init__(self,f): + self.f = f + +class FieldLazy(object): + def __init__(self,f,handler=None): + self.f = f + self.handler = handler + + +class Field(Expression): + + Virtual = FieldVirtual + Lazy = FieldLazy + + """ + an instance of this class represents a database field + + example:: + + a = Field(name, 'string', length=32, default=None, required=False, + requires=IS_NOT_EMPTY(), ondelete='CASCADE', + notnull=False, unique=False, + uploadfield=True, widget=None, label=None, comment=None, + uploadfield=True, # True means store on disk, + # 'a_field_name' means store in this field in db + # False means file content will be discarded. 
+ writable=True, readable=True, update=None, authorize=None, + autodelete=False, represent=None, uploadfolder=None, + uploadseparate=False # upload to separate directories by uuid_keys + # first 2 character and tablename.fieldname + # False - old behavior + # True - put uploaded file in + # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] + # directory) + + to be used as argument of DAL.define_table + + allowed field types: + string, boolean, integer, double, text, blob, + date, time, datetime, upload, password + + strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql) + fields should have a default or they will be required in SQLFORMs + the requires argument is used to validate the field input in SQLFORMs + + """ + + def __init__( + self, + fieldname, + type='string', + length=None, + default=DEFAULT, + required=False, + requires=DEFAULT, + ondelete='CASCADE', + notnull=False, + unique=False, + uploadfield=True, + widget=None, + label=DEFAULT, + comment=None, + writable=True, + readable=True, + update=None, + authorize=None, + autodelete=False, + represent=None, + uploadfolder=None, + uploadseparate=False, + compute=None, + custom_store=None, + custom_retrieve=None, + custom_delete=None, + ): + self.db = None + self.op = None + self.first = None + self.second = None + if not isinstance(fieldname,str): + raise SyntaxError, "missing field name" + if fieldname.startswith(':'): + fieldname,readable,writable=fieldname[1:],False,False + elif fieldname.startswith('.'): + fieldname,readable,writable=fieldname[1:],False,False + if '=' in fieldname: + fieldname,default = fieldname.split('=',1) + self.name = fieldname = cleanup(fieldname) + if hasattr(Table,fieldname) or fieldname[0] == '_' or \ + regex_python_keywords.match(fieldname): + raise SyntaxError, 'Field: invalid field name: %s' % fieldname + if isinstance(type, Table): + type = 'reference ' + type._tablename + self.type = type # 'string', 'integer' + self.length = (length is None) 
and DEFAULTLENGTH.get(type,512) or length + if default==DEFAULT: + self.default = update or None + else: + self.default = default + self.required = required # is this field required + self.ondelete = ondelete.upper() # this is for reference fields only + self.notnull = notnull + self.unique = unique + self.uploadfield = uploadfield + self.uploadfolder = uploadfolder + self.uploadseparate = uploadseparate + self.widget = widget + if label == DEFAULT: + self.label = ' '.join(i.capitalize() for i in fieldname.split('_')) + else: + self.label = label or '' + self.comment = comment + self.writable = writable + self.readable = readable + self.update = update + self.authorize = authorize + self.autodelete = autodelete + if not represent and type in ('list:integer','list:string'): + represent=lambda x,r=None: ', '.join(str(y) for y in x or []) + self.represent = represent + self.compute = compute + self.isattachment = True + self.custom_store = custom_store + self.custom_retrieve = custom_retrieve + self.custom_delete = custom_delete + if self.label is None: + self.label = ' '.join([x.capitalize() for x in + fieldname.split('_')]) + if requires is None: + self.requires = [] + else: + self.requires = requires + + def store(self, file, filename=None, path=None): + if self.custom_store: + return self.custom_store(file,filename,path) + if not filename: + filename = file.name + filename = os.path.basename(filename.replace('/', os.sep)\ + .replace('\\', os.sep)) + m = re.compile('\.(?P<e>\w{1,5})$').search(filename) + extension = m and m.group('e') or 'txt' + uuid_key = web2py_uuid().replace('-', '')[-16:] + encoded_filename = base64.b16encode(filename).lower() + newfilename = '%s.%s.%s.%s' % \ + (self._tablename, self.name, uuid_key, encoded_filename) + newfilename = newfilename[:200] + '.' 
+ extension + if isinstance(self.uploadfield,Field): + blob_uploadfield_name = self.uploadfield.uploadfield + keys={self.uploadfield.name: newfilename, + blob_uploadfield_name: file.read()} + self.uploadfield.table.insert(**keys) + elif self.uploadfield == True: + if path: + pass + elif self.uploadfolder: + path = self.uploadfolder + elif self.db._adapter.folder: + path = os.path.join(self.db._adapter.folder, '..', 'uploads') + else: + raise RuntimeError, "you must specify a Field(...,uploadfolder=...)" + if self.uploadseparate: + path = os.path.join(path,"%s.%s" % (self._tablename, self.name),uuid_key[:2]) + if not os.path.exists(path): + os.makedirs(path) + pathfilename = os.path.join(path, newfilename) + dest_file = open(pathfilename, 'wb') + try: + shutil.copyfileobj(file, dest_file) + finally: + dest_file.close() + return newfilename + + def retrieve(self, name, path=None): + if self.custom_retrieve: + return self.custom_retrieve(name, path) + import http + if self.authorize or isinstance(self.uploadfield, str): + row = self.db(self == name).select().first() + if not row: + raise http.HTTP(404) + if self.authorize and not self.authorize(row): + raise http.HTTP(403) + try: + m = regex_content.match(name) + if not m or not self.isattachment: + raise TypeError, 'Can\'t retrieve %s' % name + filename = base64.b16decode(m.group('name'), True) + filename = regex_cleanup_fn.sub('_', filename) + except (TypeError, AttributeError): + filename = name + if isinstance(self.uploadfield, str): # ## if file is in DB + return (filename, cStringIO.StringIO(row[self.uploadfield] or '')) + elif isinstance(self.uploadfield,Field): + blob_uploadfield_name = self.uploadfield.uploadfield + query = self.uploadfield == name + data = self.uploadfield.table(query)[blob_uploadfield_name] + return (filename, cStringIO.StringIO(data)) + else: + # ## if file is on filesystem + if path: + pass + elif self.uploadfolder: + path = self.uploadfolder + else: + path = 
os.path.join(self.db._adapter.folder, '..', 'uploads') + if self.uploadseparate: + t = m.group('table') + f = m.group('field') + u = m.group('uuidkey') + path = os.path.join(path,"%s.%s" % (t,f),u[:2]) + return (filename, open(os.path.join(path, name), 'rb')) + + def formatter(self, value): + if value is None or not self.requires: + return value + if not isinstance(self.requires, (list, tuple)): + requires = [self.requires] + elif isinstance(self.requires, tuple): + requires = list(self.requires) + else: + requires = copy.copy(self.requires) + requires.reverse() + for item in requires: + if hasattr(item, 'formatter'): + value = item.formatter(value) + return value + + def validate(self, value): + if not self.requires: + return (value, None) + requires = self.requires + if not isinstance(requires, (list, tuple)): + requires = [requires] + for validator in requires: + (value, error) = validator(value) + if error: + return (value, error) + return (value, None) + + def count(self): + return Expression(self.db, self.db._adapter.AGGREGATE, self, 'COUNT', 'integer') + + def __nonzero__(self): + return True + + def __str__(self): + try: + return '%s.%s' % (self.tablename, self.name) + except: + return '<no table>.%s' % self.name + + +def raw(s): return Expression(None,s) + +class Query(object): + + """ + a query object necessary to define a set. 
+ it can be stored or can be passed to DAL.__call__() to obtain a Set + + Example:: + + query = db.users.name=='Max' + set = db(query) + records = set.select() + + """ + + def __init__( + self, + db, + op, + first=None, + second=None, + ): + self.db = self._db = db + self.op = op + self.first = first + self.second = second + + def __str__(self): + return self.db._adapter.expand(self) + + def __and__(self, other): + return Query(self.db,self.db._adapter.AND,self,other) + + def __or__(self, other): + return Query(self.db,self.db._adapter.OR,self,other) + + def __invert__(self): + if self.op==self.db._adapter.NOT: + return self.first + return Query(self.db,self.db._adapter.NOT,self) + + +regex_quotes = re.compile("'[^']*'") + + +def xorify(orderby): + if not orderby: + return None + orderby2 = orderby[0] + for item in orderby[1:]: + orderby2 = orderby2 | item + return orderby2 + + +class Set(object): + + """ + a Set represents a set of records in the database, + the records are identified by the query=Query(...) object. 
+ normally the Set is generated by DAL.__call__(Query(...)) + + given a set, for example + set = db(db.users.name=='Max') + you can: + set.update(db.users.name='Massimo') + set.delete() # all elements in the set + set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10)) + and take subsets: + subset = set(db.users.id<5) + """ + + def __init__(self, db, query): + self.db = db + self._db = db # for backward compatibility + self.query = query + + def __call__(self, query): + if isinstance(query,Table): + query = query._id>0 + elif isinstance(query,str): + query = raw(query) + elif isinstance(query,Field): + query = query!=None + if self.query: + return Set(self.db, self.query & query) + else: + return Set(self.db, query) + + def _count(self,distinct=None): + return self.db._adapter._count(self.query,distinct) + + def _select(self, *fields, **attributes): + return self.db._adapter._select(self.query,fields,attributes) + + def _delete(self): + tablename=self.db._adapter.get_table(self.query) + return self.db._adapter._delete(tablename,self.query) + + def _update(self, **update_fields): + tablename = self.db._adapter.get_table(self.query) + fields = self.db[tablename]._listify(update_fields,update=True) + return self.db._adapter._update(tablename,self.query,fields) + + def isempty(self): + return not self.select(limitby=(0,1)) + + def count(self,distinct=None): + return self.db._adapter.count(self.query,distinct) + + def select(self, *fields, **attributes): + return self.db._adapter.select(self.query,fields,attributes) + + def delete(self): + tablename=self.db._adapter.get_table(self.query) + self.delete_uploaded_files() + return self.db._adapter.delete(tablename,self.query) + + def update(self, **update_fields): + tablename = self.db._adapter.get_table(self.query) + fields = self.db[tablename]._listify(update_fields,update=True) + if not fields: + raise SyntaxError, "No fields to update" + self.delete_uploaded_files(update_fields) + return 
self.db._adapter.update(tablename,self.query,fields) + + def validate_and_update(self, **update_fields): + tablename = self.db._adapter.get_table(self.query) + response = Row() + response.errors = self.db[tablename]._validate(**update_fields) + fields = self.db[tablename]._listify(update_fields,update=True) + if not fields: + raise SyntaxError, "No fields to update" + self.delete_uploaded_files(update_fields) + if not response.errors: + response.updated = self.db._adapter.update(tablename,self.query,fields) + else: + response.updated = None + return response + + def delete_uploaded_files(self, upload_fields=None): + table = self.db[self.db._adapter.tables(self.query)[0]] + # ## mind uploadfield==True means file is not in DB + if upload_fields: + fields = upload_fields.keys() + else: + fields = table.fields + fields = [f for f in fields if table[f].type == 'upload' + and table[f].uploadfield == True + and table[f].autodelete] + if not fields: + return + for record in self.select(*[table[f] for f in fields]): + for fieldname in fields: + field = table[fieldname] + oldname = record.get(fieldname, None) + if not oldname: + continue + if upload_fields and oldname == upload_fields[fieldname]: + continue + if field.custom_delete: + field.custom_delete(oldname) + else: + uploadfolder = field.uploadfolder + if not uploadfolder: + uploadfolder = os.path.join(self.db._adapter.folder, '..', 'uploads') + if field.uploadseparate: + items = oldname.split('.') + uploadfolder = os.path.join(uploadfolder, + "%s.%s" % (items[0], items[1]), + items[2][:2]) + oldpath = os.path.join(uploadfolder, oldname) + if os.path.exists(oldpath): + os.unlink(oldpath) + +def update_record(pack, a=None): + (colset, table, id) = pack + b = a or dict(colset) + c = dict([(k,v) for (k,v) in b.items() if k in table.fields and table[k].type!='id']) + table._db(table._id==id).update(**c) + for (k, v) in c.items(): + colset[k] = v + +class VirtualCommand(object): + def __init__(self,method,row): + 
self.method=method + #self.instance=instance + self.row=row + def __call__(self,*args,**kwargs): + return self.method(self.row,*args,**kwargs) + +def lazy_virtualfield(f): + f.__lazy__ = True + return f + +class Rows(object): + + """ + A wrapper for the return value of a select. It basically represents a table. + It has an iterator and each row is represented as a dictionary. + """ + + # ## TODO: this class still needs some work to care for ID/OID + + def __init__( + self, + db=None, + records=[], + colnames=[], + compact=True, + rawrows=None + ): + self.db = db + self.records = records + self.colnames = colnames + self.compact = compact + self.response = rawrows + + def setvirtualfields(self,**keyed_virtualfields): + """ + db.define_table('x',Field('number','integer')) + if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] + + from gluon.dal import lazy_virtualfield + + class MyVirtualFields(object): + # normal virtual field (backward compatible, discouraged) + def normal_shift(self): return self.x.number+1 + # lazy virtual field (because of @staticmethod) + @lazy_virtualfield + def lazy_shift(instance,row,delta=4): return row.x.number+delta + db.x.virtualfields.append(MyVirtualFields()) + + for row in db(db.x).select(): + print row.number, row.normal_shift, row.lazy_shift(delta=7) + """ + if not keyed_virtualfields: + return self + for row in self.records: + for (tablename,virtualfields) in keyed_virtualfields.items(): + attributes = dir(virtualfields) + if not tablename in row: + box = row[tablename] = Row() + else: + box = row[tablename] + updated = False + for attribute in attributes: + if attribute[0] != '_': + method = getattr(virtualfields,attribute) + if hasattr(method,'__lazy__'): + box[attribute]=VirtualCommand(method,row) + elif type(method)==types.MethodType: + if not updated: + virtualfields.__dict__.update(row) + updated = True + box[attribute]=method() + return self + + def __and__(self,other): + if self.colnames!=other.colnames: raise 
Exception, 'Cannot & incompatible Rows objects' + records = self.records+other.records + return Rows(self.db,records,self.colnames) + + def __or__(self,other): + if self.colnames!=other.colnames: raise Exception, 'Cannot | incompatible Rows objects' + records = self.records + records += [record for record in other.records \ + if not record in records] + return Rows(self.db,records,self.colnames) + + def __nonzero__(self): + if len(self.records): + return 1 + return 0 + + def __len__(self): + return len(self.records) + + def __getslice__(self, a, b): + return Rows(self.db,self.records[a:b],self.colnames) + + def __getitem__(self, i): + row = self.records[i] + keys = row.keys() + if self.compact and len(keys) == 1 and keys[0] != '_extra': + return row[row.keys()[0]] + return row + + def __iter__(self): + """ + iterator over records + """ + + for i in xrange(len(self)): + yield self[i] + + def __str__(self): + """ + serializes the table into a csv file + """ + + s = cStringIO.StringIO() + self.export_to_csv_file(s) + return s.getvalue() + + def first(self): + if not self.records: + return None + return self[0] + + def last(self): + if not self.records: + return None + return self[-1] + + def find(self,f): + """ + returns a new Rows object, a subset of the original object, + filtered by the function f + """ + if not self.records: + return Rows(self.db, [], self.colnames) + records = [] + for i in range(0,len(self)): + row = self[i] + if f(row): + records.append(self.records[i]) + return Rows(self.db, records, self.colnames) + + def exclude(self, f): + """ + removes elements from the calling Rows object, filtered by the function f, + and returns a new Rows object containing the removed elements + """ + if not self.records: + return Rows(self.db, [], self.colnames) + removed = [] + i=0 + while i<len(self): + row = self[i] + if f(row): + removed.append(self.records[i]) + del self.records[i] + else: + i += 1 + return Rows(self.db, removed, self.colnames) + + def sort(self, 
f, reverse=False): + """ + returns a list of sorted elements (not sorted in place) + """ + return Rows(self.db,sorted(self,key=f,reverse=reverse),self.colnames) + + def as_list(self, + compact=True, + storage_to_dict=True, + datetime_to_str=True): + """ + returns the data as a list or dictionary. + :param storage_to_dict: when True returns a dict, otherwise a list(default True) + :param datetime_to_str: convert datetime fields as strings (default True) + """ + (oc, self.compact) = (self.compact, compact) + if storage_to_dict: + items = [item.as_dict(datetime_to_str) for item in self] + else: + items = [item for item in self] + self.compact = compact + return items + + + def as_dict(self, + key='id', + compact=True, + storage_to_dict=True, + datetime_to_str=True): + """ + returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) + + :param key: the name of the field to be used as dict key, normally the id + :param compact: ? (default True) + :param storage_to_dict: when True returns a dict, otherwise a list(default True) + :param datetime_to_str: convert datetime fields as strings (default True) + """ + rows = self.as_list(compact, storage_to_dict, datetime_to_str) + if isinstance(key,str) and key.count('.')==1: + (table, field) = key.split('.') + return dict([(r[table][field],r) for r in rows]) + elif isinstance(key,str): + return dict([(r[key],r) for r in rows]) + else: + return dict([(key(r),r) for r in rows]) + + def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs): + """ + export data to csv, the first line contains the column names + + :param ofile: where the csv must be exported to + :param null: how null values must be represented (default '<NULL>') + :param delimiter: delimiter to separate values (default ',') + :param quotechar: character to use to quote string values (default '"') + :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) + :param represent: use the fields .represent value 
(default False) + :param colnames: list of column names to use (default self.colnames) + This will only work when exporting rows objects!!!! + DO NOT use this with db.export_to_csv() + """ + delimiter = kwargs.get('delimiter', ',') + quotechar = kwargs.get('quotechar', '"') + quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) + represent = kwargs.get('represent', False) + writer = csv.writer(ofile, delimiter=delimiter, + quotechar=quotechar, quoting=quoting) + colnames = kwargs.get('colnames', self.colnames) + # a proper csv starting with the column names + writer.writerow(colnames) + + def none_exception(value): + """ + returns a cleaned up value that can be used for csv export: + - unicode text is encoded as such + - None values are replaced with the given representation (default <NULL>) + """ + if value is None: + return null + elif isinstance(value, unicode): + return value.encode('utf8') + elif isinstance(value,Reference): + return int(value) + elif hasattr(value, 'isoformat'): + return value.isoformat()[:19].replace('T', ' ') + elif isinstance(value, (list,tuple)): # for type='list:..' 
+ return bar_encode(value) + return value + + for record in self: + row = [] + for col in colnames: + if not table_field.match(col): + row.append(record._extra[col]) + else: + (t, f) = col.split('.') + field = self.db[t][f] + if isinstance(record.get(t, None), (Row,dict)): + value = record[t][f] + else: + value = record[f] + if field.type=='blob' and not value is None: + value = base64.b64encode(value) + elif represent and field.represent: + value = field.represent(value) + row.append(none_exception(value)) + writer.writerow(row) + + def xml(self): + """ + serializes the table using sqlhtml.SQLTABLE (if present) + """ + + import sqlhtml + return sqlhtml.SQLTABLE(self).xml() + + def json(self, mode='object', default=None): + """ + serializes the table to a JSON list of objects + """ + mode = mode.lower() + if not mode in ['object', 'array']: + raise SyntaxError, 'Invalid JSON serialization mode: %s' % mode + + def inner_loop(record, col): + (t, f) = col.split('.') + res = None + if not table_field.match(col): + key = col + res = record._extra[col] + else: + key = f + if isinstance(record.get(t, None), Row): + res = record[t][f] + else: + res = record[f] + if mode == 'object': + return (key, res) + else: + return res + + if mode == 'object': + items = [dict([inner_loop(record, col) for col in + self.colnames]) for record in self] + else: + items = [[inner_loop(record, col) for col in self.colnames] + for record in self] + if have_serializers: + return serializers.json(items,default=default or serializers.custom_json) + else: + import simplejson + return simplejson.dumps(items) + +def Rows_unpickler(data): + return cPickle.loads(data) + +def Rows_pickler(data): + return Rows_unpickler, \ + (cPickle.dumps(data.as_list(storage_to_dict=True, + datetime_to_str=False)),) + +copy_reg.pickle(Rows, Rows_pickler, Rows_unpickler) + + +################################################################################ +# dummy function used to define some doctests 
+################################################################################ + +def test_all(): + """ + + >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\") + >>> if len(sys.argv)>1: db = DAL(sys.argv[1]) + >>> tmp = db.define_table('users',\ + Field('stringf', 'string', length=32, required=True),\ + Field('booleanf', 'boolean', default=False),\ + Field('passwordf', 'password', notnull=True),\ + Field('uploadf', 'upload'),\ + Field('blobf', 'blob'),\ + Field('integerf', 'integer', unique=True),\ + Field('doublef', 'double', unique=True,notnull=True),\ + Field('datef', 'date', default=datetime.date.today()),\ + Field('timef', 'time'),\ + Field('datetimef', 'datetime'),\ + migrate='test_user.table') + + Insert a field + + >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\ + uploadf=None, integerf=5, doublef=3.14,\ + datef=datetime.date(2001, 1, 1),\ + timef=datetime.time(12, 30, 15),\ + datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15)) + 1 + + Drop the table + + >>> db.users.drop() + + Examples of insert, select, update, delete + + >>> tmp = db.define_table('person',\ + Field('name'),\ + Field('birth','date'),\ + migrate='test_person.table') + >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22') + >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21') + + commented len(db().select(db.person.ALL)) + commented 2 + + >>> me = db(db.person.id==person_id).select()[0] # test select + >>> me.name + 'Massimo' + >>> db(db.person.name=='Massimo').update(name='massimo') # test update + 1 + >>> db(db.person.name=='Marco').select().first().delete_record() # test delete + 1 + + Update a single record + + >>> me.update_record(name=\"Max\") + >>> me.name + 'Max' + + Examples of complex search conditions + + >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) + 1 + >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select()) + 1 + >>> 
len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) + 1 + >>> me = db(db.person.id==person_id).select(db.person.name)[0] + >>> me.name + 'Max' + + Examples of search conditions using extract from date/datetime/time + + >>> len(db(db.person.birth.month()==12).select()) + 1 + >>> len(db(db.person.birth.year()>1900).select()) + 1 + + Example of usage of NULL + + >>> len(db(db.person.birth==None).select()) ### test NULL + 0 + >>> len(db(db.person.birth!=None).select()) ### test NULL + 1 + + Examples of search conditions using lower, upper, and like + + >>> len(db(db.person.name.upper()=='MAX').select()) + 1 + >>> len(db(db.person.name.like('%ax')).select()) + 1 + >>> len(db(db.person.name.upper().like('%AX')).select()) + 1 + >>> len(db(~db.person.name.upper().like('%AX')).select()) + 0 + + orderby, groupby and limitby + + >>> people = db().select(db.person.name, orderby=db.person.name) + >>> order = db.person.name|~db.person.birth + >>> people = db().select(db.person.name, orderby=order) + + >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) + + >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) + + Example of one 2 many relation + + >>> tmp = db.define_table('dog',\ + Field('name'),\ + Field('birth','date'),\ + Field('owner',db.person),\ + migrate='test_dog.table') + >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id) + 1 + + A simple JOIN + + >>> len(db(db.dog.owner==db.person.id).select()) + 1 + + >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) + 1 + + Drop tables + + >>> db.dog.drop() + >>> db.person.drop() + + Example of many 2 many relation and Set + + >>> tmp = db.define_table('author', Field('name'),\ + migrate='test_author.table') + >>> tmp = db.define_table('paper', Field('title'),\ + migrate='test_paper.table') + >>> tmp = db.define_table('authorship',\ + Field('author_id', db.author),\ + Field('paper_id', db.paper),\ + 
migrate='test_authorship.table') + >>> aid = db.author.insert(name='Massimo') + >>> pid = db.paper.insert(title='QCD') + >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid) + + Define a Set + + >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) + >>> rows = authored_papers.select(db.author.name, db.paper.title) + >>> for row in rows: print row.author.name, row.paper.title + Massimo QCD + + Example of search condition using belongs + + >>> set = (1, 2, 3) + >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL) + >>> print rows[0].title + QCD + + Example of search condition using nested select + + >>> nested_select = db()._select(db.authorship.paper_id) + >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL) + >>> print rows[0].title + QCD + + Example of expressions + + >>> mynumber = db.define_table('mynumber', Field('x', 'integer')) + >>> db(mynumber.id>0).delete() + 0 + >>> for i in range(10): tmp = mynumber.insert(x=i) + >>> db(mynumber.id>0).select(mynumber.x.sum())[0](mynumber.x.sum()) + 45 + + >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) + 5 + + Output in csv + + >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip() + author.name,paper.title\r + Massimo,QCD + + Delete all leftover tables + + >>> DAL.distributed_transaction_commit(db) + + >>> db.mynumber.drop() + >>> db.authorship.drop() + >>> db.author.drop() + >>> db.paper.drop() + """ +################################################################################ +# deprecated since the new DAL; here only for backward compatibility +################################################################################ + +SQLField = Field +SQLTable = Table +SQLXorable = Expression +SQLQuery = Query +SQLSet = Set +SQLRows = Rows +SQLStorage = Row +SQLDB = DAL +GQLDB = DAL +DAL.Field = Field # was necessary in gluon/globals.py session.connect +DAL.Table = Table # was necessary in 
gluon/globals.py session.connect + +################################################################################ +# run tests +################################################################################ + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + + + ADDED gluon/dal.pyc Index: gluon/dal.pyc ================================================================== --- /dev/null +++ gluon/dal.pyc cannot compute difference between binary files ADDED gluon/debug.py Index: gluon/debug.py ================================================================== --- /dev/null +++ gluon/debug.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Developed by Massimo Di Pierro <mdipierro@cs.depaul.edu>, +limodou <limodou@gmail.com> and srackham <srackham@gmail.com>. +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +""" + +import logging +import pdb +import Queue +import sys + +logger = logging.getLogger("web2py") + +class Pipe(Queue.Queue): + def __init__(self, name, mode='r', *args, **kwargs): + self.__name = name + Queue.Queue.__init__(self, *args, **kwargs) + + def write(self, data): + logger.debug("debug %s writting %s" % (self.__name, data)) + self.put(data) + + def flush(self): + # mark checkpoint (complete message) + logger.debug("debug %s flushing..." % self.__name) + self.put(None) + # wait until it is processed + self.join() + logger.debug("debug %s flush done" % self.__name) + + def read(self, count=None, timeout=None): + logger.debug("debug %s reading..." % (self.__name, )) + data = self.get(block=True, timeout=timeout) + # signal that we are ready + self.task_done() + logger.debug("debug %s read %s" % (self.__name, data)) + return data + + def readline(self): + logger.debug("debug %s readline..." 
% (self.__name, )) + return self.read() + + +pipe_in = Pipe('in') +pipe_out = Pipe('out') + +debugger = pdb.Pdb(completekey=None, stdin=pipe_in, stdout=pipe_out,) + +def set_trace(): + "breakpoint shortcut (like pdb)" + logger.info("DEBUG: set_trace!") + debugger.set_trace(sys._getframe().f_back) + + +def stop_trace(): + "stop waiting for the debugger (called atexit)" + # this should prevent communicate is wait forever a command result + # and the main thread has finished + logger.info("DEBUG: stop_trace!") + pipe_out.write("debug finished!") + pipe_out.write(None) + #pipe_out.flush() + +def communicate(command=None): + "send command to debbuger, wait result" + if command is not None: + logger.info("DEBUG: sending command %s" % command) + pipe_in.write(command) + #pipe_in.flush() + result = [] + while True: + data = pipe_out.read() + if data is None: + break + result.append(data) + logger.info("DEBUG: result %s" % repr(result)) + return ''.join(result) + + + + + ADDED gluon/decoder.py Index: gluon/decoder.py ================================================================== --- /dev/null +++ gluon/decoder.py @@ -0,0 +1,77 @@ +import codecs, encodings + +"""Caller will hand this library a buffer and ask it to either convert +it or auto-detect the type. + +Based on http://code.activestate.com/recipes/52257/ + +Licensed under the PSF License +""" + +# None represents a potentially variable byte. "##" in the XML spec... +autodetect_dict={ # bytepattern : ("name", + (0x00, 0x00, 0xFE, 0xFF) : ("ucs4_be"), + (0xFF, 0xFE, 0x00, 0x00) : ("ucs4_le"), + (0xFE, 0xFF, None, None) : ("utf_16_be"), + (0xFF, 0xFE, None, None) : ("utf_16_le"), + (0x00, 0x3C, 0x00, 0x3F) : ("utf_16_be"), + (0x3C, 0x00, 0x3F, 0x00) : ("utf_16_le"), + (0x3C, 0x3F, 0x78, 0x6D): ("utf_8"), + (0x4C, 0x6F, 0xA7, 0x94): ("EBCDIC") + } + +def autoDetectXMLEncoding(buffer): + """ buffer -> encoding_name + The buffer should be at least 4 bytes long. + Returns None if encoding cannot be detected. 
+ Note that encoding_name might not have an installed + decoder (e.g. EBCDIC) + """ + # a more efficient implementation would not decode the whole + # buffer at once but otherwise we'd have to decode a character at + # a time looking for the quote character...that's a pain + + encoding = "utf_8" # according to the XML spec, this is the default + # this code successively tries to refine the default + # whenever it fails to refine, it falls back to + # the last place encoding was set. + if len(buffer)>=4: + bytes = (byte1, byte2, byte3, byte4) = tuple(map(ord, buffer[0:4])) + enc_info = autodetect_dict.get(bytes, None) + if not enc_info: # try autodetection again removing potentially + # variable bytes + bytes = (byte1, byte2, None, None) + enc_info = autodetect_dict.get(bytes) + else: + enc_info = None + + if enc_info: + encoding = enc_info # we've got a guess... these are + #the new defaults + + # try to find a more precise encoding using xml declaration + secret_decoder_ring = codecs.lookup(encoding)[1] + (decoded,length) = secret_decoder_ring(buffer) + first_line = decoded.split("\n")[0] + if first_line and first_line.startswith(u"<?xml"): + encoding_pos = first_line.find(u"encoding") + if encoding_pos!=-1: + # look for double quote + quote_pos=first_line.find('"', encoding_pos) + + if quote_pos==-1: # look for single quote + quote_pos=first_line.find("'", encoding_pos) + + if quote_pos>-1: + quote_char,rest=(first_line[quote_pos], + first_line[quote_pos+1:]) + encoding=rest[:rest.find(quote_char)] + + return encoding + +def decoder(buffer): + encoding = autoDetectXMLEncoding(buffer) + return buffer.decode(encoding).encode('utf8') + + + ADDED gluon/decoder.pyc Index: gluon/decoder.pyc ================================================================== --- /dev/null +++ gluon/decoder.pyc cannot compute difference between binary files ADDED gluon/fileutils.py Index: gluon/fileutils.py ================================================================== --- /dev/null 
+++ gluon/fileutils.py @@ -0,0 +1,399 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import storage +import os +import re +import tarfile +import glob +import time +import datetime +from http import HTTP +from gzip import open as gzopen +from settings import global_settings + + +__all__ = [ + 'parse_version', + 'read_file', + 'write_file', + 'readlines_file', + 'up', + 'abspath', + 'mktree', + 'listdir', + 'recursive_unlink', + 'cleanpath', + 'tar', + 'untar', + 'tar_compiled', + 'get_session', + 'check_credentials', + 'w2p_pack', + 'w2p_unpack', + 'w2p_pack_plugin', + 'w2p_unpack_plugin', + 'fix_newlines', + 'make_fake_file_like_object', + ] + +def parse_version(version = "Version 1.99.0 (2011-09-19 08:23:26)"): + re_version = re.compile('[^\d]+ (\d+)\.(\d+)\.(\d+)\s*\((?P<datetime>.+?)\)\s*(?P<type>[a-z]+)?') + m = re_version.match(version) + a,b,c = int(m.group(1)),int(m.group(2)),int(m.group(3)), + s = m.group('type') or 'dev' + d = datetime.datetime.strptime(m.group('datetime'),'%Y-%m-%d %H:%M:%S') + return (a,b,c,d,s) + +def read_file(filename, mode='r'): + "returns content from filename, making sure to close the file explicitly on exit." + f = open(filename, mode) + try: + return f.read() + finally: + f.close() + +def write_file(filename, value, mode='w'): + "writes <value> to filename, making sure to close the file explicitly on exit." 
+ f = open(filename, mode) + try: + return f.write(value) + finally: + f.close() + +def readlines_file(filename, mode='r'): + "applies .split('\n') to the output of read_file()" + return read_file(filename, mode).split('\n') + +def abspath(*relpath, **base): + "convert relative path to absolute path based (by default) on applications_parent" + path = os.path.join(*relpath) + gluon = base.get('gluon', False) + if os.path.isabs(path): + return path + if gluon: + return os.path.join(global_settings.gluon_parent, path) + return os.path.join(global_settings.applications_parent, path) + + +def mktree(path): + head,tail =os.path.split(path) + if head: + if tail: mktree(head) + if not os.path.exists(head): + os.mkdir(head) + +def listdir( + path, + expression='^.+$', + drop=True, + add_dirs=False, + sort=True, + ): + """ + like os.listdir() but you can specify a regex pattern to filter files. + if add_dirs is True, the returned items will have the full path. + """ + if path[-1:] != os.path.sep: + path = path + os.path.sep + if drop: + n = len(path) + else: + n = 0 + regex = re.compile(expression) + items = [] + for (root, dirs, files) in os.walk(path, topdown=True): + for dir in dirs[:]: + if dir.startswith('.'): + dirs.remove(dir) + if add_dirs: + items.append(root[n:]) + for file in sorted(files): + if regex.match(file) and not file.startswith('.'): + items.append(os.path.join(root, file)[n:]) + if sort: + return sorted(items) + else: + return items + + +def recursive_unlink(f): + if os.path.isdir(f): + for s in os.listdir(f): + recursive_unlink(os.path.join(f,s)) + os.rmdir(f) + elif os.path.isfile(f): + os.unlink(f) + + +def cleanpath(path): + """ + turns any expression/path into a valid filename. replaces / with _ and + removes special characters. + """ + + items = path.split('.') + if len(items) > 1: + path = re.sub('[^\w\.]+', '_', '_'.join(items[:-1]) + '.' 
+ + ''.join(items[-1:])) + else: + path = re.sub('[^\w\.]+', '_', ''.join(items[-1:])) + return path + + +def _extractall(filename, path='.', members=None): + if not hasattr(tarfile.TarFile, 'extractall'): + from tarfile import ExtractError + + class TarFile(tarfile.TarFile): + + def extractall(self, path='.', members=None): + """Extract all members from the archive to the current working + directory and set owner, modification time and permissions on + directories afterwards. `path' specifies a different directory + to extract to. `members' is optional and must be a subset of the + list returned by getmembers(). + """ + + directories = [] + if members is None: + members = self + for tarinfo in members: + if tarinfo.isdir(): + + # Extract directory with a safe mode, so that + # all files below can be extracted as well. + + try: + os.makedirs(os.path.join(path, + tarinfo.name), 0777) + except EnvironmentError: + pass + directories.append(tarinfo) + else: + self.extract(tarinfo, path) + + # Reverse sort directories. + + directories.sort(lambda a, b: cmp(a.name, b.name)) + directories.reverse() + + # Set correct owner, mtime and filemode on directories. 
+ + for tarinfo in directories: + path = os.path.join(path, tarinfo.name) + try: + self.chown(tarinfo, path) + self.utime(tarinfo, path) + self.chmod(tarinfo, path) + except ExtractError, e: + if self.errorlevel > 1: + raise + else: + self._dbg(1, 'tarfile: %s' % e) + + + _cls = TarFile + else: + _cls = tarfile.TarFile + + tar = _cls(filename, 'r') + ret = tar.extractall(path, members) + tar.close() + return ret + +def tar(file, dir, expression='^.+$'): + """ + tars dir into file, only tars file that match expression + """ + + tar = tarfile.TarFile(file, 'w') + try: + for file in listdir(dir, expression, add_dirs=True): + tar.add(os.path.join(dir, file), file, False) + finally: + tar.close() + +def untar(file, dir): + """ + untar file into dir + """ + + _extractall(file, dir) + + +def w2p_pack(filename, path, compiled=False): + filename = abspath(filename) + path = abspath(path) + tarname = filename + '.tar' + if compiled: + tar_compiled(tarname, path, '^[\w\.\-]+$') + else: + tar(tarname, path, '^[\w\.\-]+$') + w2pfp = gzopen(filename, 'wb') + tarfp = open(tarname, 'rb') + w2pfp.write(tarfp.read()) + w2pfp.close() + tarfp.close() + os.unlink(tarname) + +def w2p_unpack(filename, path, delete_tar=True): + filename = abspath(filename) + path = abspath(path) + if filename[-4:] == '.w2p' or filename[-3:] == '.gz': + if filename[-4:] == '.w2p': + tarname = filename[:-4] + '.tar' + else: + tarname = filename[:-3] + '.tar' + fgzipped = gzopen(filename, 'rb') + tarfile = open(tarname, 'wb') + tarfile.write(fgzipped.read()) + tarfile.close() + fgzipped.close() + else: + tarname = filename + untar(tarname, path) + if delete_tar: + os.unlink(tarname) + + +def w2p_pack_plugin(filename, path, plugin_name): + """Pack the given plugin into a w2p file. 
+ Will match files at: + <path>/*/plugin_[name].* + <path>/*/plugin_[name]/* + """ + filename = abspath(filename) + path = abspath(path) + if not filename.endswith('web2py.plugin.%s.w2p' % plugin_name): + raise Exception, "Not a web2py plugin name" + plugin_tarball = tarfile.open(filename, 'w:gz') + try: + app_dir = path + while app_dir[-1]=='/': + app_dir = app_dir[:-1] + files1=glob.glob(os.path.join(app_dir,'*/plugin_%s.*' % plugin_name)) + files2=glob.glob(os.path.join(app_dir,'*/plugin_%s/*' % plugin_name)) + for file in files1+files2: + plugin_tarball.add(file, arcname=file[len(app_dir)+1:]) + finally: + plugin_tarball.close() + + +def w2p_unpack_plugin(filename, path, delete_tar=True): + filename = abspath(filename) + path = abspath(path) + if not os.path.basename(filename).startswith('web2py.plugin.'): + raise Exception, "Not a web2py plugin" + w2p_unpack(filename,path,delete_tar) + + +def tar_compiled(file, dir, expression='^.+$'): + """ + used to tar a compiled application. + the content of models, views, controllers is not stored in the tar file. 
+ """ + + tar = tarfile.TarFile(file, 'w') + for file in listdir(dir, expression, add_dirs=True): + filename = os.path.join(dir, file) + if os.path.islink(filename): + continue + if os.path.isfile(filename) and file[-4:] != '.pyc': + if file[:6] == 'models': + continue + if file[:5] == 'views': + continue + if file[:11] == 'controllers': + continue + if file[:7] == 'modules': + continue + tar.add(filename, file, False) + tar.close() + +def up(path): + return os.path.dirname(os.path.normpath(path)) + + +def get_session(request, other_application='admin'): + """ checks that user is authorized to access other_application""" + if request.application == other_application: + raise KeyError + try: + session_id = request.cookies['session_id_' + other_application].value + osession = storage.load_storage(os.path.join( + up(request.folder), other_application, 'sessions', session_id)) + except: + osession = storage.Storage() + return osession + + +def check_credentials(request, other_application='admin', expiration = 60*60): + """ checks that user is authorized to access other_application""" + if request.env.web2py_runtime_gae: + from google.appengine.api import users + if users.is_current_user_admin(): + return True + else: + login_html = '<a href="%s">Sign in with your google account</a>.' 
\ + % users.create_login_url(request.env.path_info) + raise HTTP(200, '<html><body>%s</body></html>' % login_html) + else: + dt = time.time() - expiration + s = get_session(request, other_application) + return (s.authorized and s.last_time and s.last_time > dt) + + +def fix_newlines(path): + regex = re.compile(r'''(\r +|\r| +)''') + for filename in listdir(path, '.*\.(py|html)$', drop=False): + rdata = read_file(filename, 'rb') + wdata = regex.sub('\n', rdata) + if wdata != rdata: + write_file(filename, wdata, 'wb') + +def copystream( + src, + dest, + size, + chunk_size=10 ** 5, + ): + """ + this is here because I think there is a bug in shutil.copyfileobj + """ + while size > 0: + if size < chunk_size: + data = src.read(size) + else: + data = src.read(chunk_size) + length = len(data) + if length > size: + (data, length) = (data[:size], size) + size -= length + if length == 0: + break + dest.write(data) + if length < chunk_size: + break + dest.seek(0) + return + + +def make_fake_file_like_object(): + class LogFile(object): + def write(self, value): + pass + def close(self): + pass + return LogFile() + + + ADDED gluon/fileutils.pyc Index: gluon/fileutils.pyc ================================================================== --- /dev/null +++ gluon/fileutils.pyc cannot compute difference between binary files ADDED gluon/globals.py Index: gluon/globals.py ================================================================== --- /dev/null +++ gluon/globals.py @@ -0,0 +1,549 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Contains the classes for the global used variables: + +- Request +- Response +- Session + +""" + +from storage import Storage, List +from streamer import streamer, stream_file_or_304_or_206, DEFAULT_CHUNK_SIZE +from xmlrpc import handler +from contenttype import contenttype +from 
html import xmlescape, TABLE, TR, PRE +from http import HTTP +from fileutils import up +from serializers import json, custom_json +import settings +from utils import web2py_uuid +from settings import global_settings + +import hashlib +import portalocker +import cPickle +import cStringIO +import datetime +import re +import Cookie +import os +import sys +import traceback +import threading + +regex_session_id = re.compile('^([\w\-]+/)?[\w\-\.]+$') + +__all__ = ['Request', 'Response', 'Session'] + +current = threading.local() # thread-local storage for request-scope globals + +class Request(Storage): + + """ + defines the request object and the default values of its members + + - env: environment variables, by gluon.main.wsgibase() + - cookies + - get_vars + - post_vars + - vars + - folder + - application + - function + - args + - extension + - now: datetime.datetime.today() + - restful() + """ + + def __init__(self): + self.wsgi = Storage() # hooks to environ and start_response + self.env = Storage() + self.cookies = Cookie.SimpleCookie() + self.get_vars = Storage() + self.post_vars = Storage() + self.vars = Storage() + self.folder = None + self.application = None + self.function = None + self.args = List() + self.extension = 'html' + self.now = datetime.datetime.now() + self.utcnow = datetime.datetime.utcnow() + self.is_restful = False + self.is_https = False + self.is_local = False + self.global_settings = settings.global_settings + + def compute_uuid(self): + self.uuid = '%s/%s.%s.%s' % ( + self.application, + self.client.replace(':', '_'), + self.now.strftime('%Y-%m-%d.%H-%M-%S'), + web2py_uuid()) + return self.uuid + + def user_agent(self): + from gluon.contrib import user_agent_parser + session = current.session + session._user_agent = session._user_agent or \ + user_agent_parser.detect(self.env.http_user_agent) + return session._user_agent + + def restful(self): + def wrapper(action,self=self): + def f(_action=action,_self=self,*a,**b): + self.is_restful = True 
+ method = _self.env.request_method + if len(_self.args) and '.' in _self.args[-1]: + _self.args[-1],_self.extension = _self.args[-1].rsplit('.',1) + current.response.headers['Content-Type'] = \ + contenttype(_self.extension.lower()) + if not method in ['GET','POST','DELETE','PUT']: + raise HTTP(400,"invalid method") + rest_action = _action().get(method,None) + if not rest_action: + raise HTTP(400,"method not supported") + try: + return rest_action(*_self.args,**_self.vars) + except TypeError, e: + exc_type, exc_value, exc_traceback = sys.exc_info() + if len(traceback.extract_tb(exc_traceback))==1: + raise HTTP(400,"invalid arguments") + else: + raise e + f.__doc__ = action.__doc__ + f.__name__ = action.__name__ + return f + return wrapper + + +class Response(Storage): + + """ + defines the response object and the default values of its members + response.write( ) can be used to write in the output html + """ + + def __init__(self): + self.status = 200 + self.headers = Storage() + self.headers['X-Powered-By'] = 'web2py' + self.body = cStringIO.StringIO() + self.session_id = None + self.cookies = Cookie.SimpleCookie() + self.postprocessing = [] + self.flash = '' # used by the default view layout + self.meta = Storage() # used by web2py_ajax.html + self.menu = [] # used by the default view layout + self.files = [] # used by web2py_ajax.html + self.generic_patterns = [] # patterns to allow generic views + self._vars = None + self._caller = lambda f: f() + self._view_environment = None + self._custom_commit = None + self._custom_rollback = None + + def write(self, data, escape=True): + if not escape: + self.body.write(str(data)) + else: + self.body.write(xmlescape(data)) + + def render(self, *a, **b): + from compileapp import run_view_in + if len(a) > 2: + raise SyntaxError, 'Response.render can be called with two arguments, at most' + elif len(a) == 2: + (view, self._vars) = (a[0], a[1]) + elif len(a) == 1 and isinstance(a[0], str): + (view, self._vars) = (a[0], {}) + 
elif len(a) == 1 and hasattr(a[0], 'read') and callable(a[0].read): + (view, self._vars) = (a[0], {}) + elif len(a) == 1 and isinstance(a[0], dict): + (view, self._vars) = (None, a[0]) + else: + (view, self._vars) = (None, {}) + self._vars.update(b) + self._view_environment.update(self._vars) + if view: + import cStringIO + (obody, oview) = (self.body, self.view) + (self.body, self.view) = (cStringIO.StringIO(), view) + run_view_in(self._view_environment) + page = self.body.getvalue() + self.body.close() + (self.body, self.view) = (obody, oview) + else: + run_view_in(self._view_environment) + page = self.body.getvalue() + return page + + def include_meta(self): + s = '' + for key,value in (self.meta or {}).items(): + s += '<meta name="%s" content="%s" />' % (key,xmlescape(value)) + self.write(s,escape=False) + + def include_files(self): + s = '' + for k,f in enumerate(self.files or []): + if not f in self.files[:k]: + filename = f.lower().split('?')[0] + if filename.endswith('.css'): + s += '<link href="%s" rel="stylesheet" type="text/css" />' % f + elif filename.endswith('.js'): + s += '<script src="%s" type="text/javascript"></script>' % f + self.write(s,escape=False) + + def stream( + self, + stream, + chunk_size = DEFAULT_CHUNK_SIZE, + request=None, + ): + """ + if a controller function:: + + return response.stream(file, 100) + + the file content will be streamed at 100 bytes at the time + """ + + if isinstance(stream, (str, unicode)): + stream_file_or_304_or_206(stream, + chunk_size=chunk_size, + request=request, + headers=self.headers) + + # ## the following is for backward compatibility + + if hasattr(stream, 'name'): + filename = stream.name + else: + filename = None + keys = [item.lower() for item in self.headers] + if filename and not 'content-type' in keys: + self.headers['Content-Type'] = contenttype(filename) + if filename and not 'content-length' in keys: + try: + self.headers['Content-Length'] = \ + os.path.getsize(filename) + except OSError: + pass 
+ if request and request.env.web2py_use_wsgi_file_wrapper: + wrapped = request.env.wsgi_file_wrapper(stream, chunk_size) + else: + wrapped = streamer(stream, chunk_size=chunk_size) + return wrapped + + def download(self, request, db, chunk_size = DEFAULT_CHUNK_SIZE, attachment=True): + """ + example of usage in controller:: + + def download(): + return response.download(request, db) + + downloads from http://..../download/filename + """ + + import contenttype as c + if not request.args: + raise HTTP(404) + name = request.args[-1] + items = re.compile('(?P<table>.*?)\.(?P<field>.*?)\..*')\ + .match(name) + if not items: + raise HTTP(404) + (t, f) = (items.group('table'), items.group('field')) + field = db[t][f] + try: + (filename, stream) = field.retrieve(name) + except IOError: + raise HTTP(404) + self.headers['Content-Type'] = c.contenttype(name) + if attachment: + self.headers['Content-Disposition'] = \ + "attachment; filename=%s" % filename + return self.stream(stream, chunk_size = chunk_size, request=request) + + def json(self, data, default=None): + return json(data, default = default or custom_json) + + def xmlrpc(self, request, methods): + """ + assuming:: + + def add(a, b): + return a+b + + if a controller function \"func\":: + + return response.xmlrpc(request, [add]) + + the controller will be able to handle xmlrpc requests for + the add function. 
Example:: + + import xmlrpclib + connection = xmlrpclib.ServerProxy('http://hostname/app/contr/func') + print connection.add(3, 4) + + """ + + return handler(request, self, methods) + + def toolbar(self): + from html import DIV, SCRIPT, BEAUTIFY, TAG, URL + BUTTON = TAG.button + admin = URL("admin","default","design", + args=current.request.application) + from gluon.dal import thread + dbstats = [TABLE(*[TR(PRE(row[0]),'%.2fms' % (row[1]*1000)) \ + for row in i.db._timings]) \ + for i in thread.instances] + u = web2py_uuid() + return DIV( + BUTTON('design',_onclick="document.location='%s'" % admin), + BUTTON('request',_onclick="jQuery('#request-%s').slideToggle()"%u), + DIV(BEAUTIFY(current.request),_class="hidden",_id="request-%s"%u), + BUTTON('session',_onclick="jQuery('#session-%s').slideToggle()"%u), + DIV(BEAUTIFY(current.session),_class="hidden",_id="session-%s"%u), + BUTTON('response',_onclick="jQuery('#response-%s').slideToggle()"%u), + DIV(BEAUTIFY(current.response),_class="hidden",_id="response-%s"%u), + BUTTON('db stats',_onclick="jQuery('#db-stats-%s').slideToggle()"%u), + DIV(BEAUTIFY(dbstats),_class="hidden",_id="db-stats-%s"%u), + SCRIPT("jQuery('.hidden').hide()") + ) + +class Session(Storage): + + """ + defines the session object and the default values of its members (None) + """ + + def connect( + self, + request, + response, + db=None, + tablename='web2py_session', + masterapp=None, + migrate=True, + separate = None, + check_client=False, + ): + """ + separate can be separate=lambda(session_name): session_name[-2:] + and it is used to determine a session prefix. 
+ separate can be True and it is set to session_name[-2:] + """ + if separate == True: + separate = lambda session_name: session_name[-2:] + self._unlock(response) + if not masterapp: + masterapp = request.application + response.session_id_name = 'session_id_%s' % masterapp.lower() + + if not db: + if global_settings.db_sessions is True or masterapp in global_settings.db_sessions: + return + response.session_new = False + client = request.client.replace(':', '.') + if response.session_id_name in request.cookies: + response.session_id = \ + request.cookies[response.session_id_name].value + if regex_session_id.match(response.session_id): + response.session_filename = \ + os.path.join(up(request.folder), masterapp, + 'sessions', response.session_id) + else: + response.session_id = None + if response.session_id: + try: + response.session_file = \ + open(response.session_filename, 'rb+') + try: + portalocker.lock(response.session_file, + portalocker.LOCK_EX) + response.session_locked = True + self.update(cPickle.load(response.session_file)) + response.session_file.seek(0) + oc = response.session_filename.split('/')[-1].split('-')[0] + if check_client and client!=oc: + raise Exception, "cookie attack" + finally: + pass + #This causes admin login to break. Must find out why. 
+ #self._close(response) + except: + response.session_id = None + if not response.session_id: + uuid = web2py_uuid() + response.session_id = '%s-%s' % (client, uuid) + if separate: + prefix = separate(response.session_id) + response.session_id = '%s/%s' % (prefix,response.session_id) + response.session_filename = \ + os.path.join(up(request.folder), masterapp, + 'sessions', response.session_id) + response.session_new = True + else: + if global_settings.db_sessions is not True: + global_settings.db_sessions.add(masterapp) + response.session_db = True + if response.session_file: + self._close(response) + if settings.global_settings.web2py_runtime_gae: + # in principle this could work without GAE + request.tickets_db = db + if masterapp == request.application: + table_migrate = migrate + else: + table_migrate = False + tname = tablename + '_' + masterapp + table = db.get(tname, None) + if table is None: + table = db.define_table( + tname, + db.Field('locked', 'boolean', default=False), + db.Field('client_ip', length=64), + db.Field('created_datetime', 'datetime', + default=request.now), + db.Field('modified_datetime', 'datetime'), + db.Field('unique_key', length=64), + db.Field('session_data', 'blob'), + migrate=table_migrate, + ) + try: + key = request.cookies[response.session_id_name].value + (record_id, unique_key) = key.split(':') + if record_id == '0': + raise Exception, 'record_id == 0' + rows = db(table.id == record_id).select() + if len(rows) == 0 or rows[0].unique_key != unique_key: + raise Exception, 'No record' + + # rows[0].update_record(locked=True) + + session_data = cPickle.loads(rows[0].session_data) + self.update(session_data) + except Exception: + record_id = None + unique_key = web2py_uuid() + session_data = {} + response._dbtable_and_field = \ + (response.session_id_name, table, record_id, unique_key) + response.session_id = '%s:%s' % (record_id, unique_key) + response.cookies[response.session_id_name] = response.session_id + 
response.cookies[response.session_id_name]['path'] = '/' + self.__hash = hashlib.md5(str(self)).digest() + if self.flash: + (response.flash, self.flash) = (self.flash, None) + + def is_new(self): + if self._start_timestamp: + return False + else: + self._start_timestamp = datetime.datetime.today() + return True + + def is_expired(self, seconds = 3600): + now = datetime.datetime.today() + if not self._last_timestamp or \ + self._last_timestamp + datetime.timedelta(seconds = seconds) > now: + self._last_timestamp = now + return False + else: + return True + + def secure(self): + self._secure = True + + def forget(self, response=None): + self._close(response) + self._forget = True + + def _try_store_in_db(self, request, response): + + # don't save if file-based sessions, no session id, or session being forgotten + if not response.session_db or not response.session_id or self._forget: + return + + # don't save if no change to session + __hash = self.__hash + if __hash is not None: + del self.__hash + if __hash == hashlib.md5(str(self)).digest(): + return + + (record_id_name, table, record_id, unique_key) = \ + response._dbtable_and_field + dd = dict(locked=False, client_ip=request.env.remote_addr, + modified_datetime=request.now, + session_data=cPickle.dumps(dict(self)), + unique_key=unique_key) + if record_id: + table._db(table.id == record_id).update(**dd) + else: + record_id = table.insert(**dd) + response.cookies[response.session_id_name] = '%s:%s'\ + % (record_id, unique_key) + response.cookies[response.session_id_name]['path'] = '/' + + def _try_store_on_disk(self, request, response): + + # don't save if sessions not not file-based + if response.session_db: + return + + # don't save if no change to session + __hash = self.__hash + if __hash is not None: + del self.__hash + if __hash == hashlib.md5(str(self)).digest(): + self._close(response) + return + + if not response.session_id or self._forget: + self._close(response) + return + + if response.session_new: + # 
Tests if the session sub-folder exists, if not, create it + session_folder = os.path.dirname(response.session_filename) + if not os.path.exists(session_folder): + os.mkdir(session_folder) + response.session_file = open(response.session_filename, 'wb') + portalocker.lock(response.session_file, portalocker.LOCK_EX) + response.session_locked = True + + if response.session_file: + cPickle.dump(dict(self), response.session_file) + response.session_file.truncate() + self._close(response) + + def _unlock(self, response): + if response and response.session_file and response.session_locked: + try: + portalocker.unlock(response.session_file) + response.session_locked = False + except: ### this should never happen but happens in Windows + pass + + def _close(self, response): + if response and response.session_file: + self._unlock(response) + try: + response.session_file.close() + del response.session_file + except: + pass + + ADDED gluon/globals.pyc Index: gluon/globals.pyc ================================================================== --- /dev/null +++ gluon/globals.pyc cannot compute difference between binary files ADDED gluon/highlight.py Index: gluon/highlight.py ================================================================== --- /dev/null +++ gluon/highlight.py @@ -0,0 +1,338 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import re +import cgi + +__all__ = ['highlight'] + + +class Highlighter(object): + + """ + Do syntax highlighting. 
+ """ + + def __init__( + self, + mode, + link=None, + styles=None, + ): + """ + Initialise highlighter: + mode = language (PYTHON, WEB2PY,C, CPP, HTML, HTML_PLAIN) + """ + styles = styles or {} + mode = mode.upper() + if link and link[-1] != '/': + link = link + '/' + self.link = link + self.styles = styles + self.output = [] + self.span_style = None + if mode == 'WEB2PY': + (mode, self.suppress_tokens) = ('PYTHON', []) + elif mode == 'PYTHON': + self.suppress_tokens = ['GOTOHTML'] + elif mode == 'CPP': + (mode, self.suppress_tokens) = ('C', []) + elif mode == 'C': + self.suppress_tokens = ['CPPKEYWORD'] + elif mode == 'HTML_PLAIN': + (mode, self.suppress_tokens) = ('HTML', ['GOTOPYTHON']) + elif mode == 'HTML': + self.suppress_tokens = [] + else: + raise SyntaxError, 'Unknown mode: %s' % mode + self.mode = mode + + def c_tokenizer( + self, + token, + match, + style, + ): + """ + Callback for C specific highlighting. + """ + + value = cgi.escape(match.group()) + self.change_style(token, style) + self.output.append(value) + + def python_tokenizer( + self, + token, + match, + style, + ): + """ + Callback for python specific highlighting. 
+ """ + + value = cgi.escape(match.group()) + if token == 'MULTILINESTRING': + self.change_style(token, style) + self.output.append(value) + self.strMultilineString = match.group(1) + return 'PYTHONMultilineString' + elif token == 'ENDMULTILINESTRING': + if match.group(1) == self.strMultilineString: + self.output.append(value) + self.strMultilineString = '' + return 'PYTHON' + if style and style[:5] == 'link:': + self.change_style(None, None) + (url, style) = style[5:].split(';', 1) + if url == 'None' or url == '': + self.output.append('<span style="%s">%s</span>' + % (style, value)) + else: + self.output.append('<a href="%s%s" style="%s">%s</a>' + % (url, value, style, value)) + else: + self.change_style(token, style) + self.output.append(value) + if token == 'GOTOHTML': + return 'HTML' + return None + + def html_tokenizer( + self, + token, + match, + style, + ): + """ + Callback for HTML specific highlighting. + """ + + value = cgi.escape(match.group()) + self.change_style(token, style) + self.output.append(value) + if token == 'GOTOPYTHON': + return 'PYTHON' + return None + + all_styles = { + 'C': (c_tokenizer, ( + ('COMMENT', re.compile(r'//.*\r?\n'), + 'color: green; font-style: italic'), + ('MULTILINECOMMENT', re.compile(r'/\*.*?\*/', re.DOTALL), + 'color: green; font-style: italic'), + ('PREPROCESSOR', re.compile(r'\s*#.*?[^\\]\s*\n', + re.DOTALL), 'color: magenta; font-style: italic'), + ('PUNC', re.compile(r'[-+*!&|^~/%\=<>\[\]{}(),.:]'), + 'font-weight: bold'), + ('NUMBER', + re.compile(r'0x[0-9a-fA-F]+|[+-]?\d+(\.\d+)?([eE][+-]\d+)?|\d+'), + 'color: red'), + ('KEYWORD', re.compile(r'(sizeof|int|long|short|char|void|' + + r'signed|unsigned|float|double|' + + r'goto|break|return|continue|asm|' + + r'case|default|if|else|switch|while|for|do|' + + r'struct|union|enum|typedef|' + + r'static|register|auto|volatile|extern|const)(?![a-zA-Z0-9_])'), + 'color:#185369; font-weight: bold'), + ('CPPKEYWORD', + 
re.compile(r'(class|private|protected|public|template|new|delete|' + + r'this|friend|using|inline|export|bool|throw|try|catch|' + + r'operator|typeid|virtual)(?![a-zA-Z0-9_])'), + 'color: blue; font-weight: bold'), + ('STRING', re.compile(r'r?u?\'(.*?)(?<!\\)\'|"(.*?)(?<!\\)"'), + 'color: #FF9966'), + ('IDENTIFIER', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*'), + None), + ('WHITESPACE', re.compile(r'[ \r\n]+'), 'Keep'), + )), + 'PYTHON': (python_tokenizer, ( + ('GOTOHTML', re.compile(r'\}\}'), 'color: red'), + ('PUNC', re.compile(r'[-+*!|&^~/%\=<>\[\]{}(),.:]'), + 'font-weight: bold'), + ('NUMBER', + re.compile(r'0x[0-9a-fA-F]+|[+-]?\d+(\.\d+)?([eE][+-]\d+)?|\d+' + ), 'color: red'), + ('KEYWORD', + re.compile(r'(def|class|break|continue|del|exec|finally|pass|' + + r'print|raise|return|try|except|global|assert|lambda|' + + r'yield|for|while|if|elif|else|and|in|is|not|or|import|' + + r'from|True|False)(?![a-zA-Z0-9_])'), + 'color:#185369; font-weight: bold'), + ('WEB2PY', + re.compile(r'(request|response|session|cache|redirect|local_import|HTTP|TR|XML|URL|BEAUTIFY|A|BODY|BR|B|CAT|CENTER|CODE|COL|COLGROUP|DIV|EM|EMBED|FIELDSET|LEGEND|FORM|H1|H2|H3|H4|H5|H6|IFRAME|HEAD|HR|HTML|I|IMG|INPUT|LABEL|LI|LINK|MARKMIN|MENU|META|OBJECT|OL|ON|OPTION|P|PRE|SCRIPT|SELECT|SPAN|STYLE|TABLE|THEAD|TBODY|TFOOT|TAG|TD|TEXTAREA|TH|TITLE|TT|T|UL|XHTML|IS_SLUG|IS_STRONG|IS_LOWER|IS_UPPER|IS_ALPHANUMERIC|IS_DATETIME|IS_DATETIME_IN_RANGE|IS_DATE|IS_DATE_IN_RANGE|IS_DECIMAL_IN_RANGE|IS_EMAIL|IS_EXPR|IS_FLOAT_IN_RANGE|IS_IMAGE|IS_INT_IN_RANGE|IS_IN_SET|IS_IPV4|IS_LIST_OF|IS_LENGTH|IS_MATCH|IS_EQUAL_TO|IS_EMPTY_OR|IS_NULL_OR|IS_NOT_EMPTY|IS_TIME|IS_UPLOAD_FILENAME|IS_URL|CLEANUP|CRYPT|IS_IN_DB|IS_NOT_IN_DB|DAL|Field|SQLFORM|SQLTABLE|xmlescape|embed64)(?![a-zA-Z0-9_])' + ), 'link:%(link)s;text-decoration:None;color:#FF5C1F;'), + ('MAGIC', re.compile(r'self|None'), + 'color:#185369; font-weight: bold'), + ('MULTILINESTRING', re.compile(r'r?u?(\'\'\'|""")'), + 'color: #FF9966'), + ('STRING', 
re.compile(r'r?u?\'(.*?)(?<!\\)\'|"(.*?)(?<!\\)"' + ), 'color: #FF9966'), + ('IDENTIFIER', re.compile(r'[a-zA-Z_][a-zA-Z0-9_]*'), + None), + ('COMMENT', re.compile(r'\#.*\r?\n'), + 'color: green; font-style: italic'), + ('WHITESPACE', re.compile(r'[ \r\n]+'), 'Keep'), + )), + 'PYTHONMultilineString': (python_tokenizer, + (('ENDMULTILINESTRING', + re.compile(r'.*?("""|\'\'\')', + re.DOTALL), 'color: darkred'), )), + 'HTML': (html_tokenizer, ( + ('GOTOPYTHON', re.compile(r'\{\{'), 'color: red'), + ('COMMENT', re.compile(r'<!--[^>]*-->|<!>'), + 'color: green; font-style: italic'), + ('XMLCRAP', re.compile(r'<![^>]*>'), + 'color: blue; font-style: italic'), + ('SCRIPT', re.compile(r'<script .*?</script>', re.IGNORECASE + + re.DOTALL), 'color: black'), + ('TAG', re.compile(r'</?\s*[a-zA-Z0-9]+'), + 'color: darkred; font-weight: bold'), + ('ENDTAG', re.compile(r'/?>'), + 'color: darkred; font-weight: bold'), + )), + } + + def highlight(self, data): + """ + Syntax highlight some python code. + Returns html version of code. + """ + + i = 0 + mode = self.mode + while i < len(data): + for (token, o_re, style) in Highlighter.all_styles[mode][1]: + if not token in self.suppress_tokens: + match = o_re.match(data, i) + if match: + if style: + new_mode = \ + Highlighter.all_styles[mode][0](self, + token, match, style + % dict(link=self.link)) + else: + new_mode = \ + Highlighter.all_styles[mode][0](self, + token, match, style) + if not new_mode is None: + mode = new_mode + i += max(1, len(match.group())) + break + else: + self.change_style(None, None) + self.output.append(data[i]) + i += 1 + self.change_style(None, None) + return ''.join(self.output).expandtabs(4) + + def change_style(self, token, style): + """ + Generate output to change from existing style to another style only. 
+ """ + + if token in self.styles: + style = self.styles[token] + if self.span_style != style: + if style != 'Keep': + if not self.span_style is None: + self.output.append('</span>') + if not style is None: + self.output.append('<span style="%s">' % style) + self.span_style = style + + +def highlight( + code, + language, + link='/examples/globals/vars/', + counter=1, + styles=None, + highlight_line=None, + attributes=None, + ): + styles = styles or {} + attributes = attributes or {} + if not 'CODE' in styles: + code_style = """ + font-size: 11px; + font-family: Bitstream Vera Sans Mono,monospace; + background-color: transparent; + margin: 0; + padding: 5px; + border: none; + overflow: auto; + white-space: pre !important;\n""" + else: + code_style = styles['CODE'] + if not 'LINENUMBERS' in styles: + linenumbers_style = """ + font-size: 11px; + font-family: Bitstream Vera Sans Mono,monospace; + background-color: transparent; + margin: 0; + padding: 5px; + border: none; + color: #A0A0A0;\n""" + else: + linenumbers_style = styles['LINENUMBERS'] + if not 'LINEHIGHLIGHT' in styles: + linehighlight_style = "background-color: #EBDDE2;" + else: + linehighlight_style = styles['LINEHIGHLIGHT'] + + if language and language.upper() in ['PYTHON', 'C', 'CPP', 'HTML', + 'WEB2PY']: + code = Highlighter(language, link, styles).highlight(code) + else: + code = cgi.escape(code) + lines = code.split('\n') + + if counter is None: + linenumbers = [''] * len(lines) + elif isinstance(counter, str): + linenumbers = [cgi.escape(counter)] * len(lines) + else: + linenumbers = [str(i + counter) + '.' 
for i in + xrange(len(lines))] + + if highlight_line: + if counter and not isinstance(counter, str): + lineno = highlight_line - counter + else: + lineno = highlight_line + if lineno<len(lines): + lines[lineno] = '<div style="%s">%s</div>' % (linehighlight_style, lines[lineno]) + linenumbers[lineno] = '<div style="%s">%s</div>' % (linehighlight_style, linenumbers[lineno]) + + code = '<br/>'.join(lines) + numbers = '<br/>'.join(linenumbers) + + items = attributes.items() + fa = ' '.join([key[1:].lower() for (key, value) in items if key[:1] + == '_' and value is None] + ['%s="%s"' + % (key[1:].lower(), str(value).replace('"', "'")) + for (key, value) in attributes.items() if key[:1] + == '_' and value]) + if fa: + fa = ' ' + fa + return '<table%s><tr valign="top"><td style="width:40px; text-align: right;"><pre style="%s">%s</pre></td><td><pre style="%s">%s</pre></td></tr></table>'\ + % (fa, linenumbers_style, numbers, code_style, code) + + +if __name__ == '__main__': + import sys + argfp = open(sys.argv[1]) + data = argfp.read() + argfp.close() + print '<html><body>' + highlight(data, sys.argv[2])\ + + '</body></html>' + + + ADDED gluon/highlight.pyc Index: gluon/highlight.pyc ================================================================== --- /dev/null +++ gluon/highlight.pyc cannot compute difference between binary files ADDED gluon/html.py Index: gluon/html.py ================================================================== --- /dev/null +++ gluon/html.py @@ -0,0 +1,2286 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import cgi +import os +import re +import copy +import types +import urllib +import base64 +import sanitizer +import rewrite +import itertools +import decoder +import copy_reg +import cPickle +import marshal +from HTMLParser import HTMLParser +from htmlentitydefs 
import name2codepoint +from contrib.markmin.markmin2html import render + +from storage import Storage +from highlight import highlight +from utils import web2py_uuid, hmac_hash + +import hmac +import hashlib + +regex_crlf = re.compile('\r|\n') + +join = ''.join + +__all__ = [ + 'A', + 'B', + 'BEAUTIFY', + 'BODY', + 'BR', + 'BUTTON', + 'CENTER', + 'CAT', + 'CODE', + 'COL', + 'COLGROUP', + 'DIV', + 'EM', + 'EMBED', + 'FIELDSET', + 'FORM', + 'H1', + 'H2', + 'H3', + 'H4', + 'H5', + 'H6', + 'HEAD', + 'HR', + 'HTML', + 'I', + 'IFRAME', + 'IMG', + 'INPUT', + 'LABEL', + 'LEGEND', + 'LI', + 'LINK', + 'OL', + 'UL', + 'MARKMIN', + 'MENU', + 'META', + 'OBJECT', + 'ON', + 'OPTION', + 'P', + 'PRE', + 'SCRIPT', + 'OPTGROUP', + 'SELECT', + 'SPAN', + 'STYLE', + 'TABLE', + 'TAG', + 'TD', + 'TEXTAREA', + 'TH', + 'THEAD', + 'TBODY', + 'TFOOT', + 'TITLE', + 'TR', + 'TT', + 'URL', + 'XHTML', + 'XML', + 'xmlescape', + 'embed64', + ] + + +def xmlescape(data, quote = True): + """ + returns an escaped string of the provided data + + :param data: the data to be escaped + :param quote: optional (default False) + """ + + # first try the xml function + if hasattr(data,'xml') and callable(data.xml): + return data.xml() + + # otherwise, make it a string + if not isinstance(data, (str, unicode)): + data = str(data) + elif isinstance(data, unicode): + data = data.encode('utf8', 'xmlcharrefreplace') + + # ... and do the escaping + data = cgi.escape(data, quote).replace("'","'") + return data + + +def URL( + a=None, + c=None, + f=None, + r=None, + args=None, + vars=None, + anchor='', + extension=None, + env=None, + hmac_key=None, + hash_vars=True, + salt=None, + user_signature=None, + scheme=None, + host=None, + port=None, + encode_embedded_slash=False, + ): + """ + generate a URL + + example:: + + >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], + ... vars={'p':1, 'q':2}, anchor='1')) + '/a/c/f/x/y/z?p=1&q=2#1' + + >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], + ... 
vars={'p':(1,3), 'q':2}, anchor='1')) + '/a/c/f/x/y/z?p=1&p=3&q=2#1' + + >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], + ... vars={'p':(3,1), 'q':2}, anchor='1')) + '/a/c/f/x/y/z?p=3&p=1&q=2#1' + + >>> str(URL(a='a', c='c', f='f', anchor='1+2')) + '/a/c/f#1%2B2' + + >>> str(URL(a='a', c='c', f='f', args=['x', 'y', 'z'], + ... vars={'p':(1,3), 'q':2}, anchor='1', hmac_key='key')) + '/a/c/f/x/y/z?p=1&p=3&q=2&_signature=a32530f0d0caa80964bb92aad2bedf8a4486a31f#1' + + >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'])) + '/a/c/f/w/x/y/z' + + >>> str(URL(a='a', c='c', f='f', args=['w/x', 'y/z'], encode_embedded_slash=True)) + '/a/c/f/w%2Fx/y%2Fz' + + generates a url '/a/c/f' corresponding to application a, controller c + and function f. If r=request is passed, a, c, f are set, respectively, + to r.application, r.controller, r.function. + + The more typical usage is: + + URL(r=request, f='index') that generates a url for the index function + within the present application and controller. 
+ + :param a: application (default to current if r is given) + :param c: controller (default to current if r is given) + :param f: function (default to current if r is given) + :param r: request (optional) + :param args: any arguments (optional) + :param vars: any variables (optional) + :param anchor: anchorname, without # (optional) + :param hmac_key: key to use when generating hmac signature (optional) + :param hash_vars: which of the vars to include in our hmac signature + True (default) - hash all vars, False - hash none of the vars, + iterable - hash only the included vars ['key1','key2'] + :param scheme: URI scheme (True, 'http' or 'https', etc); forces absolute URL (optional) + :param host: string to force absolute URL with host (True means http_host) + :param port: optional port number (forces absolute URL) + + :raises SyntaxError: when no application, controller or function is + available + :raises SyntaxError: when a CRLF is found in the generated url + """ + + if args in (None,[]): args = [] + vars = vars or {} + application = None + controller = None + function = None + + if not r: + if a and not c and not f: (f,a,c)=(a,c,f) + elif a and c and not f: (c,f,a)=(a,c,f) + from globals import current + if hasattr(current,'request'): + r = current.request + if r: + application = r.application + controller = r.controller + function = r.function + env = r.env + if extension is None and r.extension != 'html': + extension = r.extension + if a: + application = a + if c: + controller = c + if f: + if not isinstance(f, str): + function = f.__name__ + elif '.' 
in f: + function, extension = f.split('.', 1) + else: + function = f + + function2 = '%s.%s' % (function,extension or 'html') + + if not (application and controller and function): + raise SyntaxError, 'not enough information to build the url' + + if not isinstance(args, (list, tuple)): + args = [args] + + if args: + if encode_embedded_slash: + other = '/' + '/'.join([urllib.quote(str(x), '') for x in args]) + else: + other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) + else: + other = '' + + if other.endswith('/'): + other += '/' # add trailing slash to make last trailing empty arg explicit + + if vars.has_key('_signature'): vars.pop('_signature') + list_vars = [] + for (key, vals) in sorted(vars.items()): + if not isinstance(vals, (list, tuple)): + vals = [vals] + for val in vals: + list_vars.append((key, val)) + + if user_signature: + from globals import current + if current.session.auth: + hmac_key = current.session.auth.hmac_key + + if hmac_key: + # generate an hmac signature of the vars & args so can later + # verify the user hasn't messed with anything + + h_args = '/%s/%s/%s%s' % (application, controller, function2, other) + + # how many of the vars should we include in our hash? + if hash_vars is True: # include them all + h_vars = list_vars + elif hash_vars is False: # include none of them + h_vars = '' + else: # include just those specified + if hash_vars and not isinstance(hash_vars, (list, tuple)): + hash_vars = [hash_vars] + h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] + + # re-assembling the same way during hash authentication + message = h_args + '?' + urllib.urlencode(sorted(h_vars)) + + sig = hmac_hash(message, hmac_key, digest_alg='sha1', salt=salt) + # add the signature into vars + list_vars.append(('_signature', sig)) + + if list_vars: + other += '?%s' % urllib.urlencode(list_vars) + if anchor: + other += '#' + urllib.quote(str(anchor)) + if extension: + function += '.' 
+ extension + + if regex_crlf.search(join([application, controller, function, other])): + raise SyntaxError, 'CRLF Injection Detected' + url = rewrite.url_out(r, env, application, controller, function, + args, other, scheme, host, port) + return url + + +def verifyURL(request, hmac_key=None, hash_vars=True, salt=None, user_signature=None): + """ + Verifies that a request's args & vars have not been tampered with by the user + + :param request: web2py's request object + :param hmac_key: the key to authenticate with, must be the same one previously + used when calling URL() + :param hash_vars: which vars to include in our hashing. (Optional) + Only uses the 1st value currently + True (or undefined) means all, False none, + an iterable just the specified keys + + do not call directly. Use instead: + + URL.verify(hmac_key='...') + + the key has to match the one used to generate the URL. + + >>> r = Storage() + >>> gv = Storage(p=(1,3),q=2,_signature='a32530f0d0caa80964bb92aad2bedf8a4486a31f') + >>> r.update(dict(application='a', controller='c', function='f', extension='html')) + >>> r['args'] = ['x', 'y', 'z'] + >>> r['get_vars'] = gv + >>> verifyURL(r, 'key') + True + >>> verifyURL(r, 'kay') + False + >>> r.get_vars.p = (3, 1) + >>> verifyURL(r, 'key') + True + >>> r.get_vars.p = (3, 2) + >>> verifyURL(r, 'key') + False + + """ + + if not request.get_vars.has_key('_signature'): + return False # no signature in the request URL + + # check if user_signature requires + if user_signature: + from globals import current + if not current.session: + return False + hmac_key = current.session.auth.hmac_key + if not hmac_key: + return False + + # get our sig from request.get_vars for later comparison + original_sig = request.get_vars._signature + + # now generate a new hmac for the remaining args & vars + vars, args = request.get_vars, request.args + + # remove the signature var since it was not part of our signed message + request.get_vars.pop('_signature') + + # join all the 
args & vars into one long string + + # always include all of the args + other = args and urllib.quote('/' + '/'.join([str(x) for x in args])) or '' + h_args = '/%s/%s/%s.%s%s' % (request.application, + request.controller, + request.function, + request.extension, + other) + + # but only include those vars specified (allows more flexibility for use with + # forms or ajax) + + list_vars = [] + for (key, vals) in sorted(vars.items()): + if not isinstance(vals, (list, tuple)): + vals = [vals] + for val in vals: + list_vars.append((key, val)) + + # which of the vars are to be included? + if hash_vars is True: # include them all + h_vars = list_vars + elif hash_vars is False: # include none of them + h_vars = '' + else: # include just those specified + # wrap in a try - if the desired vars have been removed it'll fail + try: + if hash_vars and not isinstance(hash_vars, (list, tuple)): + hash_vars = [hash_vars] + h_vars = [(k, v) for (k, v) in list_vars if k in hash_vars] + except: + # user has removed one of our vars! Immediate fail + return False + # build the full message string with both args & vars + message = h_args + '?' + urllib.urlencode(sorted(h_vars)) + + # hash with the hmac_key provided + sig = hmac_hash(message, str(hmac_key), digest_alg='sha1', salt=salt) + + # put _signature back in get_vars just in case a second call to URL.verify is performed + # (otherwise it'll immediately return false) + request.get_vars['_signature'] = original_sig + + # return whether or not the signature in the request matched the one we just generated + # (I.E. 
was the message the same as the one we originally signed) + return original_sig == sig + +URL.verify = verifyURL + +ON = True + + +class XmlComponent(object): + """ + Abstract root for all Html components + """ + + # TODO: move some DIV methods to here + + def xml(self): + raise NotImplementedError + + +class XML(XmlComponent): + """ + use it to wrap a string that contains XML/HTML so that it will not be + escaped by the template + + example: + + >>> XML('<h1>Hello</h1>').xml() + '<h1>Hello</h1>' + """ + + def __init__( + self, + text, + sanitize = False, + permitted_tags = [ + 'a', + 'b', + 'blockquote', + 'br/', + 'i', + 'li', + 'ol', + 'ul', + 'p', + 'cite', + 'code', + 'pre', + 'img/', + 'h1','h2','h3','h4','h5','h6', + 'table','tr','td','div', + ], + allowed_attributes = { + 'a': ['href', 'title'], + 'img': ['src', 'alt'], + 'blockquote': ['type'], + 'td': ['colspan'], + }, + ): + """ + :param text: the XML text + :param sanitize: sanitize text using the permitted tags and allowed + attributes (default False) + :param permitted_tags: list of permitted tags (default: simple list of + tags) + :param allowed_attributes: dictionary of allowed attributed (default + for A, IMG and BlockQuote). + The key is the tag; the value is a list of allowed attributes. 
+ """ + + if sanitize: + text = sanitizer.sanitize(text, permitted_tags, + allowed_attributes) + if isinstance(text, unicode): + text = text.encode('utf8', 'xmlcharrefreplace') + elif not isinstance(text, str): + text = str(text) + self.text = text + + def xml(self): + return self.text + + def __str__(self): + return self.xml() + + def __add__(self,other): + return '%s%s' % (self,other) + + def __radd__(self,other): + return '%s%s' % (other,self) + + def __cmp__(self,other): + return cmp(str(self),str(other)) + + def __hash__(self): + return hash(str(self)) + + def __getattr__(self,name): + return getattr(str(self),name) + + def __getitem__(self,i): + return str(self)[i] + + def __getslice__(self,i,j): + return str(self)[i:j] + + def __iter__(self): + for c in str(self): yield c + + def __len__(self): + return len(str(self)) + + def flatten(self,render=None): + """ + return the text stored by the XML object rendered by the render function + """ + if render: + return render(self.text,None,{}) + return self.text + + def elements(self, *args, **kargs): + """ + to be considered experimental since the behavior of this method is questionable + another options could be TAG(self.text).elements(*args,**kargs) + """ + return [] + +### important to allow safe session.flash=T(....) +def XML_unpickle(data): + return marshal.loads(data) +def XML_pickle(data): + return XML_unpickle, (marshal.dumps(str(data)),) +copy_reg.pickle(XML, XML_pickle, XML_unpickle) + + + +class DIV(XmlComponent): + """ + HTML helper, for easy generating and manipulating a DOM structure. + Little or no validation is done. + + Behaves like a dictionary regarding updating of attributes. + Behaves like a list regarding inserting/appending components. + + example:: + + >>> DIV('hello', 'world', _style='color:red;').xml() + '<div style=\"color:red;\">helloworld</div>' + + all other HTML helpers are derived from DIV. 

    _something=\"value\" attributes are transparently translated into
    something=\"value\" HTML attributes
    """

    # name of the tag, subclasses should update this
    # tags ending with a '/' denote classes that cannot
    # contain components
    tag = 'div'

    def __init__(self, *components, **attributes):
        """
        :param *components: any components that should be nested in this element
        :param **attributes: any attributes you want to give to this element

        :raises SyntaxError: when a stand alone tag receives components
        """

        if self.tag[-1:] == '/' and components:
            raise SyntaxError, '<%s> tags cannot have components'\
                % self.tag
        # a single list/tuple argument is unpacked as the component list
        if len(components) == 1 and isinstance(components[0], (list,tuple)):
            self.components = list(components[0])
        else:
            self.components = list(components)
        self.attributes = attributes
        self._fixup()
        # converts special attributes in components attributes
        self._postprocessing()
        self.parent = None
        for c in self.components:
            self._setnode(c)

    def update(self, **kargs):
        """
        dictionary like updating of the tag attributes
        """

        for (key, value) in kargs.items():
            self[key] = value
        # returns self so calls can be chained
        return self

    def append(self, value):
        """
        list style appending of components

        >>> a=DIV()
        >>> a.append(SPAN('x'))
        >>> print a
        <div><span>x</span></div>
        """
        self._setnode(value)
        ret = self.components.append(value)
        # re-run _fixup so subclasses (UL, TR, ...) can wrap the new child
        self._fixup()
        return ret

    def insert(self, i, value):
        """
        list style inserting of components

        >>> a=DIV()
        >>> a.insert(0,SPAN('x'))
        >>> print a
        <div><span>x</span></div>
        """
        self._setnode(value)
        ret = self.components.insert(i, value)
        self._fixup()
        return ret

    def __getitem__(self, i):
        """
        gets attribute with name 'i' or component #i.
        If attribute 'i' is not found returns None

        :param i: index
            if i is a string: the name of the attribute
            otherwise references to number of the component
        """

        if isinstance(i, str):
            try:
                return self.attributes[i]
            except KeyError:
                # missing attributes read as None rather than raising
                return None
        else:
            return self.components[i]

    def __setitem__(self, i, value):
        """
        sets attribute with name 'i' or component #i.

        :param i: index
            if i is a string: the name of the attribute
            otherwise references to number of the component
        :param value: the new value
        """
        self._setnode(value)
        if isinstance(i, (str, unicode)):
            self.attributes[i] = value
        else:
            self.components[i] = value

    def __delitem__(self, i):
        """
        deletes attribute with name 'i' or component #i.

        :param i: index
            if i is a string: the name of the attribute
            otherwise references to number of the component
        """

        if isinstance(i, str):
            del self.attributes[i]
        else:
            del self.components[i]

    def __len__(self):
        """
        returns the number of included components
        """
        return len(self.components)

    def __nonzero__(self):
        """
        always return True
        """
        # without this an empty DIV would be falsy because __len__ is 0
        return True

    def _fixup(self):
        """
        Handling of provided components.

        Nothing to fixup yet. May be overridden by subclasses,
        eg for wrapping some components in another component or blocking them.
        """
        return

    def _wrap_components(self, allowed_parents,
                         wrap_parent = None,
                         wrap_lambda = None):
        """
        helper for _fixup.
        Checks if a component is in allowed_parents,
        otherwise wraps it in wrap_parent

        :param allowed_parents: (tuple) classes that the component should be an
            instance of
        :param wrap_parent: the class to wrap the component in, if needed
        :param wrap_lambda: lambda to use for wrapping, if needed
        """
        components = []
        for c in self.components:
            if isinstance(c, allowed_parents):
                pass
            elif wrap_lambda:
                c = wrap_lambda(c)
            else:
                c = wrap_parent(c)
            if isinstance(c,DIV):
                c.parent = self
            components.append(c)
        self.components = components

    def _postprocessing(self):
        """
        Handling of attributes (normally the ones not prefixed with '_').

        Nothing to postprocess yet. May be overridden by subclasses
        """
        return

    def _traverse(self, status, hideerror=False):
        """
        propagate form state (vars, request_vars, errors, latest, session,
        formname) down to child helpers, recursively traverse them, then run
        this node's own _validate()/_postprocessing().
        Returns False as soon as any node in the subtree failed validation.
        """
        newstatus = status
        for c in self.components:
            if hasattr(c, '_traverse') and callable(c._traverse):
                c.vars = self.vars
                c.request_vars = self.request_vars
                c.errors = self.errors
                c.latest = self.latest
                c.session = self.session
                c.formname = self.formname
                c['hideerror']=hideerror
                newstatus = c._traverse(status,hideerror) and newstatus

        # for input, textarea, select, option
        # deal with 'value' and 'validation'

        name = self['_name']
        if newstatus:
            newstatus = self._validate()
            self._postprocessing()
        elif 'old_value' in self.attributes:
            # validation failed somewhere: restore the previously shown value
            self['value'] = self['old_value']
            self._postprocessing()
        elif name and name in self.vars:
            self['value'] = self.vars[name]
            self._postprocessing()
        if name:
            self.latest[name] = self['value']
        return newstatus

    def _validate(self):
        """
        nothing to validate yet. May be overridden by subclasses
        """
        return True

    def _setnode(self,value):
        # record this element as the parent of any DIV-derived child
        if isinstance(value,DIV):
            value.parent = self

    def _xml(self):
        """
        helper for xml generation.
        Returns separately:
        - the component attributes
        - the generated xml of the inner components

        Component attributes start with an underscore ('_') and
        do not have a False or None value. The underscore is removed.
        A value of True is replaced with the attribute name.

        :returns: tuple: (attributes, components)
        """

        # get the attributes for this component
        # (they start with '_', others may have special meanings)
        fa = ''
        for key in sorted(self.attributes):
            value = self[key]
            if key[:1] != '_':
                continue
            name = key[1:]
            if value is True:
                # boolean attribute: render as name="name" (e.g. checked="checked")
                value = name
            elif value is False or value is None:
                continue
            fa += ' %s="%s"' % (name, xmlescape(value, True))

        # get the xml for the inner components
        co = join([xmlescape(component) for component in
                   self.components])

        return (fa, co)

    def xml(self):
        """
        generates the xml for this component.
        """

        (fa, co) = self._xml()

        if not self.tag:
            # tagless container (CAT): emit children only
            return co

        if self.tag[-1:] == '/':
            # <tag [attributes] />
            return '<%s%s />' % (self.tag[:-1], fa)

        # else: <tag [attributes]> inner components xml </tag>
        return '<%s%s>%s</%s>' % (self.tag, fa, co, self.tag)

    def __str__(self):
        """
        str(COMPONENT) returns equals COMPONENT.xml()
        """

        return self.xml()

    def flatten(self, render=None):
        """
        return the text stored by the DIV object rendered by the render function
        the render function must take text, tagname, and attributes
        render=None is equivalent to render=lambda text, tag, attr: text

        >>> markdown = lambda text,tag=None,attributes={}: \
                {None: re.sub('\s+',' ',text), \
                 'h1':'#'+text+'\\n\\n', \
                 'p':text+'\\n'}.get(tag,text)
        >>> a=TAG('<h1>Header</h1><p>this is a test</p>')
        >>> a.flatten(markdown)
        '#Header\\n\\nthis is a test\\n'
        """

        text = ''
        for c in self.components:
            if isinstance(c,XmlComponent):
                s=c.flatten(render)
            elif render:
                s=render(str(c))
            else:
                s=str(c)
            text+=s
        if render:
            text = render(text,self.tag,self.attributes)
        return text

    # regexes for the jQuery-like selector mini-language used by elements():
    # tag, #id, .class and [attr=value] fragments
    regex_tag=re.compile('^[\w\-\:]+')
    regex_id=re.compile('#([\w\-]+)')
    regex_class=re.compile('\.([\w\-]+)')
    regex_attr=re.compile('\[([\w\-\:]+)=(.*?)\]')


    def elements(self, *args, **kargs):
        """
        find all component that match the supplied attribute dictionary,
        or None if nothing could be found

        All components of the components are searched.

        >>> a = DIV(DIV(SPAN('x'),3,DIV(SPAN('y'))))
        >>> for c in a.elements('span',first_only=True): c[0]='z'
        >>> print a
        <div><div><span>z</span>3<div><span>y</span></div></div></div>
        >>> for c in a.elements('span'): c[0]='z'
        >>> print a
        <div><div><span>z</span>3<div><span>z</span></div></div></div>

        It also supports a syntax compatible with jQuery

        >>> a=TAG('<div><span><a id="1-1" u:v=$>hello</a></span><p class="this is a test">world</p></div>')
        >>> for e in a.elements('div a#1-1, p.is'): print e.flatten()
        hello
        world
        >>> for e in a.elements('#1-1'): print e.flatten()
        hello
        >>> a.elements('a[u:v=$]')[0].xml()
        '<a id="1-1" u:v="$">hello</a>'

        >>> a=FORM( INPUT(_type='text'), SELECT(range(1)), TEXTAREA() )
        >>> for c in a.elements('input, select, textarea'): c['_disabled'] = 'disabled'
        >>> a.xml()
        '<form action="" enctype="multipart/form-data" method="post"><input disabled="disabled" type="text" /><select disabled="disabled"><option value="0">0</option></select><textarea cols="40" disabled="disabled" rows="10"></textarea></form>'
        """
        # a single string argument may contain ','-separated alternatives
        if len(args)==1:
            args = [a.strip() for a in args[0].split(',')]
        if len(args)>1:
            # union of matches for each alternative
            subset = [self.elements(a,**kargs) for a in args]
            return reduce(lambda a,b:a+b,subset,[])
        elif len(args)==1:
            items = args[0].split()
            if len(items)>1:
                # descendant selector: match the first item, then search
                # the remaining selector inside each match
                subset=[a.elements(' '.join(items[1:]),**kargs) for a in self.elements(items[0])]
                return reduce(lambda a,b:a+b,subset,[])
            else:
                item=items[0]
                if '#' in item or '.' in item or '[' in item:
                    # translate tag#id.class[attr=value] into kargs and recurse
                    match_tag = self.regex_tag.search(item)
                    match_id = self.regex_id.search(item)
                    match_class = self.regex_class.search(item)
                    match_attr = self.regex_attr.finditer(item)
                    args = []
                    if match_tag: args = [match_tag.group()]
                    if match_id: kargs['_id'] = match_id.group(1)
                    if match_class: kargs['_class'] = re.compile('(?<!\w)%s(?!\w)' % \
                       match_class.group(1).replace('-','\\-').replace(':','\\:'))
                    for item in match_attr:
                        kargs['_'+item.group(1)]=item.group(2)
                    return self.elements(*args,**kargs)
        # make a copy of the components
        matches = []
        first_only = False
        if kargs.has_key("first_only"):
            first_only = kargs["first_only"]
            del kargs["first_only"]
        # check if the component has an attribute with the same
        # value as provided
        check = True
        tag = getattr(self,'tag').replace("/","")
        if args and tag not in args:
            check = False
        for (key, value) in kargs.items():
            if isinstance(value,(str,int)):
                if self[key] != str(value):
                    check = False
            elif key in self.attributes:
                # value is a compiled regex at this point
                if not value.search(str(self[key])):
                    check = False
            else:
                check = False
        if 'find' in kargs:
            # 'find' matches against the text of string children
            find = kargs['find']
            for c in self.components:
                if isinstance(find,(str,int)):
                    if isinstance(c,str) and str(find) in c:
                        check = True
                else:
                    if isinstance(c,str) and find.search(c):
                        check = True
        # if found, return the component
        if check:
            matches.append(self)
            if first_only:
                return matches
        # loop the copy
        for c in self.components:
            if isinstance(c, XmlComponent):
                kargs['first_only'] = first_only
                child_matches = c.elements( *args, **kargs )
                if first_only and len(child_matches) != 0:
                    return child_matches
                matches.extend( child_matches )
        return matches


    def element(self, *args, **kargs):
        """
        find the first component that matches the supplied attribute dictionary,
        or None if nothing could be found

        Also the components of the components are searched.
+ """ + kargs['first_only'] = True + elements = self.elements(*args, **kargs) + if not elements: + # we found nothing + return None + return elements[0] + + def siblings(self,*args,**kargs): + """ + find all sibling components that match the supplied argument list + and attribute dictionary, or None if nothing could be found + """ + sibs = [s for s in self.parent.components if not s == self] + matches = [] + first_only = False + if kargs.has_key("first_only"): + first_only = kargs["first_only"] + del kargs["first_only"] + for c in sibs: + try: + check = True + tag = getattr(c,'tag').replace("/","") + if args and tag not in args: + check = False + for (key, value) in kargs.items(): + if c[key] != value: + check = False + if check: + matches.append(c) + if first_only: break + except: + pass + return matches + + def sibling(self,*args,**kargs): + """ + find the first sibling component that match the supplied argument list + and attribute dictionary, or None if nothing could be found + """ + kargs['first_only'] = True + sibs = self.siblings(*args, **kargs) + if not sibs: + return None + return sibs[0] + +class CAT(DIV): + + tag = '' + +def TAG_unpickler(data): + return cPickle.loads(data) + +def TAG_pickler(data): + d = DIV() + d.__dict__ = data.__dict__ + marshal_dump = cPickle.dumps(d) + return (TAG_unpickler, (marshal_dump,)) + +class __TAG__(XmlComponent): + + """ + TAG factory example:: + + >>> print TAG.first(TAG.second('test'), _key = 3) + <first key=\"3\"><second>test</second></first> + + """ + + def __getitem__(self, name): + return self.__getattr__(name) + + def __getattr__(self, name): + if name[-1:] == '_': + name = name[:-1] + '/' + if isinstance(name,unicode): + name = name.encode('utf-8') + class __tag__(DIV): + tag = name + copy_reg.pickle(__tag__, TAG_pickler, TAG_unpickler) + return lambda *a, **b: __tag__(*a, **b) + + def __call__(self,html): + return web2pyHTMLParser(decoder.decoder(html)).tree + +TAG = __TAG__() + + +class HTML(DIV): + """ + There 
    are four predefined document type definitions.
    They can be specified in the 'doctype' parameter:

    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -'html5' enables HTML 5 doctype
    -any other string will be treated as user's own doctype

    'lang' parameter specifies the language of the document.
    Defaults to 'en'.

    See also :class:`DIV`
    """

    tag = 'html'

    strict = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n'
    transitional = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\n'
    frameset = '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN" "http://www.w3.org/TR/html4/frameset.dtd">\n'
    html5 = '<!DOCTYPE HTML>\n'

    def xml(self):
        lang = self['lang']
        if not lang:
            lang = 'en'
        self.attributes['_lang'] = lang
        # map the symbolic doctype names to the DTD strings above;
        # any other non-empty string is emitted verbatim as a custom doctype
        doctype = self['doctype']
        if doctype:
            if doctype == 'strict':
                doctype = self.strict
            elif doctype == 'transitional':
                doctype = self.transitional
            elif doctype == 'frameset':
                doctype = self.frameset
            elif doctype == 'html5':
                doctype = self.html5
            else:
                doctype = '%s\n' % doctype
        else:
            doctype = self.transitional
        (fa, co) = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)

class XHTML(DIV):
    """
    This is XHTML version of the HTML helper.

    There are three predefined document type definitions.
    They can be specified in the 'doctype' parameter:

    -'strict' enables strict doctype
    -'transitional' enables transitional doctype (default)
    -'frameset' enables frameset doctype
    -any other string will be treated as user's own doctype

    'lang' parameter specifies the language of the document and the xml document.
    Defaults to 'en'.

    'xmlns' parameter specifies the xml namespace.
    Defaults to 'http://www.w3.org/1999/xhtml'.

    See also :class:`DIV`
    """

    tag = 'html'

    strict = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">\n'
    transitional = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n'
    frameset = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Frameset//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-frameset.dtd">\n'
    xmlns = 'http://www.w3.org/1999/xhtml'

    def xml(self):
        xmlns = self['xmlns']
        if xmlns:
            self.attributes['_xmlns'] = xmlns
        else:
            self.attributes['_xmlns'] = self.xmlns
        lang = self['lang']
        if not lang:
            lang = 'en'
        # XHTML carries the language both as lang and xml:lang
        self.attributes['_lang'] = lang
        self.attributes['_xml:lang'] = lang
        doctype = self['doctype']
        if doctype:
            if doctype == 'strict':
                doctype = self.strict
            elif doctype == 'transitional':
                doctype = self.transitional
            elif doctype == 'frameset':
                doctype = self.frameset
            else:
                doctype = '%s\n' % doctype
        else:
            doctype = self.transitional
        (fa, co) = self._xml()
        return '%s<%s%s>%s</%s>' % (doctype, self.tag, fa, co, self.tag)


class HEAD(DIV):

    tag = 'head'

class TITLE(DIV):

    tag = 'title'


class META(DIV):

    tag = 'meta/'


class LINK(DIV):

    tag = 'link/'


class SCRIPT(DIV):

    tag = 'script'

    def xml(self):
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <script [attributes]><!--//--><![CDATA[//><!--
            # script body
            # //--><!]]></script>
            # return '<%s%s><!--//--><![CDATA[//><!--\n%s\n//--><!]]></%s>' % (self.tag, fa, co, self.tag)
            return '<%s%s><!--\n%s\n//--></%s>' % (self.tag, fa, co, self.tag)
        else:
            return DIV.xml(self)


class STYLE(DIV):

    tag = 'style'

    def xml(self):
        (fa, co) = self._xml()
        # no escaping of subcomponents
        co = '\n'.join([str(component) for component in
                        self.components])
        if co:
            # <style
[attributes]><!--/*--><![CDATA[/*><!--*/ + # style body + # /*]]>*/--></style> + return '<%s%s><!--/*--><![CDATA[/*><!--*/\n%s\n/*]]>*/--></%s>' % (self.tag, fa, co, self.tag) + else: + return DIV.xml(self) + + +class IMG(DIV): + + tag = 'img/' + + +class SPAN(DIV): + + tag = 'span' + + +class BODY(DIV): + + tag = 'body' + + +class H1(DIV): + + tag = 'h1' + + +class H2(DIV): + + tag = 'h2' + + +class H3(DIV): + + tag = 'h3' + + +class H4(DIV): + + tag = 'h4' + + +class H5(DIV): + + tag = 'h5' + + +class H6(DIV): + + tag = 'h6' + + +class P(DIV): + """ + Will replace ``\\n`` by ``<br />`` if the `cr2br` attribute is provided. + + see also :class:`DIV` + """ + + tag = 'p' + + def xml(self): + text = DIV.xml(self) + if self['cr2br']: + text = text.replace('\n', '<br />') + return text + + +class B(DIV): + + tag = 'b' + + +class BR(DIV): + + tag = 'br/' + + +class HR(DIV): + + tag = 'hr/' + + +class A(DIV): + + tag = 'a' + + def xml(self): + if self['delete']: + d = "jQuery(this).closest('%s').remove();" % self['delete'] + else: + d = '' + if self['component']: + self['_onclick']="web2py_component('%s','%s');%sreturn false;" % \ + (self['component'],self['target'] or '',d) + self['_href'] = self['_href'] or '#null' + elif self['callback']: + if d: + self['_onclick']="if(confirm(w2p_ajax_confirm_message||'Are you sure you want o delete this object?')){ajax('%s',[],'%s');%s};return false;" % (self['callback'],self['target'] or '',d) + else: + self['_onclick']="ajax('%s',[],'%s');%sreturn false;" % \ + (self['callback'],self['target'] or '',d) + self['_href'] = self['_href'] or '#null' + elif self['cid']: + self['_onclick']='web2py_component("%s","%s");return false;' % \ + (self['_href'],self['cid']) + return DIV.xml(self) + + +class BUTTON(DIV): + + tag = 'button' + + +class EM(DIV): + + tag = 'em' + + +class EMBED(DIV): + + tag = 'embed/' + + +class TT(DIV): + + tag = 'tt' + + +class PRE(DIV): + + tag = 'pre' + + +class CENTER(DIV): + + tag = 'center' + + +class 
CODE(DIV): + + """ + displays code in HTML with syntax highlighting. + + :param attributes: optional attributes: + + - language: indicates the language, otherwise PYTHON is assumed + - link: can provide a link + - styles: for styles + + Example:: + + {{=CODE(\"print 'hello world'\", language='python', link=None, + counter=1, styles={}, highlight_line=None)}} + + + supported languages are \"python\", \"html_plain\", \"c\", \"cpp\", + \"web2py\", \"html\". + The \"html\" language interprets {{ and }} tags as \"web2py\" code, + \"html_plain\" doesn't. + + if a link='/examples/global/vars/' is provided web2py keywords are linked to + the online docs. + + the counter is used for line numbering, counter can be None or a prompt + string. + """ + + def xml(self): + language = self['language'] or 'PYTHON' + link = self['link'] + counter = self.attributes.get('counter', 1) + highlight_line = self.attributes.get('highlight_line', None) + styles = self['styles'] or {} + return highlight( + join(self.components), + language=language, + link=link, + counter=counter, + styles=styles, + attributes=self.attributes, + highlight_line=highlight_line, + ) + + +class LABEL(DIV): + + tag = 'label' + + +class LI(DIV): + + tag = 'li' + + +class UL(DIV): + """ + UL Component. + + If subcomponents are not LI-components they will be wrapped in a LI + + see also :class:`DIV` + """ + + tag = 'ul' + + def _fixup(self): + self._wrap_components(LI, LI) + + +class OL(UL): + + tag = 'ol' + + +class TD(DIV): + + tag = 'td' + + +class TH(DIV): + + tag = 'th' + + +class TR(DIV): + """ + TR Component. 

    If subcomponents are not TD/TH-components they will be wrapped in a TD

    see also :class:`DIV`
    """

    tag = 'tr'

    def _fixup(self):
        self._wrap_components((TD, TH), TD)

class THEAD(DIV):

    tag = 'thead'

    def _fixup(self):
        # bare children are wrapped in table rows
        self._wrap_components(TR, TR)


class TBODY(DIV):

    tag = 'tbody'

    def _fixup(self):
        self._wrap_components(TR, TR)


class TFOOT(DIV):

    tag = 'tfoot'

    def _fixup(self):
        self._wrap_components(TR, TR)


class COL(DIV):

    tag = 'col'


class COLGROUP(DIV):

    tag = 'colgroup'


class TABLE(DIV):
    """
    TABLE Component.

    If subcomponents are not TR/TBODY/THEAD/TFOOT-components
    they will be wrapped in a TR

    see also :class:`DIV`
    """

    tag = 'table'

    def _fixup(self):
        self._wrap_components((TR, TBODY, THEAD, TFOOT, COL, COLGROUP), TR)

class I(DIV):

    tag = 'i'

class IFRAME(DIV):

    tag = 'iframe'


class INPUT(DIV):

    """
    INPUT Component

    examples::

        >>> INPUT(_type='text', _name='name', value='Max').xml()
        '<input name=\"name\" type=\"text\" value=\"Max\" />'

        >>> INPUT(_type='checkbox', _name='checkbox', value='on').xml()
        '<input checked=\"checked\" name=\"checkbox\" type=\"checkbox\" value=\"on\" />'

        >>> INPUT(_type='radio', _name='radio', _value='yes', value='yes').xml()
        '<input checked=\"checked\" name=\"radio\" type=\"radio\" value=\"yes\" />'

        >>> INPUT(_type='radio', _name='radio', _value='no', value='yes').xml()
        '<input name=\"radio\" type=\"radio\" value=\"no\" />'

    the input helper takes two special attributes value= and requires=.

    :param value: used to pass the initial value for the input field.
        value differs from _value because it works for checkboxes, radio,
        textarea and select/option too.

        - for a checkbox value should be '' or 'on'.
        - for a radio or select/option value should be the _value
            of the checked/selected item.

    :param requires: should be None, or a validator or a list of validators
        for the value of the field.
    """

    tag = 'input/'

    def _validate(self):

        # # this only changes value, not _value

        name = self['_name']
        if name is None or name == '':
            # unnamed inputs cannot receive submitted values; nothing to check
            return True
        name = str(name)

        if self['_type'] != 'checkbox':
            self['old_value'] = self['value'] or self['_value'] or ''
            value = self.request_vars.get(name, '')
            self['value'] = value
        else:
            self['old_value'] = self['value'] or False
            value = self.request_vars.get(name)
            # multiple checkboxes with the same name arrive as a list
            if isinstance(value, (tuple, list)):
                self['value'] = self['_value'] in value
            else:
                self['value'] = self['_value'] == value
        requires = self['requires']
        if requires:
            if not isinstance(requires, (list, tuple)):
                requires = [requires]
            for validator in requires:
                # each validator may transform the value; stop at first error
                (value, errors) = validator(value)
                if not errors is None:
                    self.vars[name] = value
                    self.errors[name] = errors
                    break
        if not name in self.errors:
            self.vars[name] = value
            return True
        return False

    def _postprocessing(self):
        t = self['_type']
        if not t:
            t = self['_type'] = 'text'
        t = t.lower()
        value = self['value']
        if self['_value'] is None:
            _value = None
        else:
            _value = str(self['_value'])
        if t == 'checkbox' and not '_checked' in self.attributes:
            if not _value:
                _value = self['_value'] = 'on'
            if not value:
                value = []
            elif value is True:
                value = [_value]
            elif not isinstance(value,(list,tuple)):
                # '|'-separated string encodes multiple checked values
                value = str(value).split('|')
            self['_checked'] = _value in value and 'checked' or None
        elif t == 'radio' and not '_checked' in self.attributes:
            if str(value) == str(_value):
                self['_checked'] = 'checked'
            else:
                self['_checked'] = None
        elif t == 'text' or t == 'hidden':
            # keep value and _value in sync for plain fields
            if value is None:
                self['value'] = _value
            else:
                self['_value'] = value

    def xml(self):
        name = self.attributes.get('_name', None)
        # append an inline error DIV after the input when validation failed
        # and hideerror was not requested
        if name and hasattr(self, 'errors') \
                and self.errors.get(name, None) \
                and self['hideerror'] != True:
            return DIV.xml(self) + DIV(self.errors[name], _class='error',
                errors=None, _id='%s__error' % name).xml()
        else:
            return DIV.xml(self)


class TEXTAREA(INPUT):

    """
    example::

        TEXTAREA(_name='sometext', value='blah '*100, requires=IS_NOT_EMPTY())

    'blah blah blah ...' will be the content of the textarea field.
    """

    tag = 'textarea'

    def _postprocessing(self):
        if not '_rows' in self.attributes:
            self['_rows'] = 10
        if not '_cols' in self.attributes:
            self['_cols'] = 40
        # a textarea's value lives in its body, not in a value attribute
        if not self['value'] is None:
            self.components = [self['value']]
        elif self.components:
            self['value'] = self.components[0]


class OPTION(DIV):

    tag = 'option'

    def _fixup(self):
        # default the submitted value to the displayed text
        if not '_value' in self.attributes:
            self.attributes['_value'] = str(self.components[0])


class OBJECT(DIV):

    tag = 'object'

class OPTGROUP(DIV):

    tag = 'optgroup'

    def _fixup(self):
        # wrap bare children in OPTIONs
        components = []
        for c in self.components:
            if isinstance(c, OPTION):
                components.append(c)
            else:
                components.append(OPTION(c, _value=str(c)))
        self.components = components


class SELECT(INPUT):

    """
    example::

        >>> from validators import IS_IN_SET
        >>> SELECT('yes', 'no', _name='selector', value='yes',
        ...
               requires=IS_IN_SET(['yes', 'no'])).xml()
        '<select name=\"selector\"><option selected=\"selected\" value=\"yes\">yes</option><option value=\"no\">no</option></select>'

    """

    tag = 'select'

    def _fixup(self):
        # wrap bare children in OPTIONs; OPTGROUPs are kept as-is
        components = []
        for c in self.components:
            if isinstance(c, (OPTION, OPTGROUP)):
                components.append(c)
            else:
                components.append(OPTION(c, _value=str(c)))
        self.components = components

    def _postprocessing(self):
        # flatten OPTGROUPs so every OPTION can be (de)selected below
        component_list = []
        for c in self.components:
            if isinstance(c, OPTGROUP):
                component_list.append(c.components)
            else:
                component_list.append([c])
        options = itertools.chain(*component_list)

        value = self['value']
        if not value is None:
            if not self['_multiple']:
                for c in options: # my patch
                    # NOTE(review): the 'value and' guard means a falsy value
                    # ('' or 0) never selects any option — confirm intended
                    if value and str(c['_value'])==str(value):
                        c['_selected'] = 'selected'
                    else:
                        c['_selected'] = None
            else:
                if isinstance(value,(list,tuple)):
                    values = [str(item) for item in value]
                else:
                    values = [str(value)]
                for c in options: # my patch
                    if value and str(c['_value']) in values:
                        c['_selected'] = 'selected'
                    else:
                        c['_selected'] = None


class FIELDSET(DIV):

    tag = 'fieldset'


class LEGEND(DIV):

    tag = 'legend'


class FORM(DIV):

    """
    example::

        >>> from validators import IS_NOT_EMPTY
        >>> form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
        >>> form.xml()
        '<form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"test\" type=\"text\" /></form>'

    a FORM is a container for INPUT, TEXTAREA, SELECT and other helpers

    form has one important method::

        form.accepts(request.vars, session)

    if form is accepted (and all validators pass) form.vars contains the
    accepted vars, otherwise form.errors contains the errors.
    in case of errors the form is modified to present the errors to the user.
    """

    tag = 'form'

    def __init__(self, *components, **attributes):
        DIV.__init__(self, *components, **attributes)
        self.vars = Storage()
        self.errors = Storage()
        self.latest = Storage()
        self.accepted = None # none for not submitted

    def accepts(
        self,
        request_vars,
        session=None,
        formname='default',
        keepvalues=False,
        onvalidation=None,
        hideerror=False,
        **kwargs
        ):
        """
        kwargs is not used but allows to specify the same interface for FORM and SQLFORM
        """
        if request_vars.__class__.__name__ == 'Request':
            request_vars=request_vars.post_vars
        self.errors.clear()
        self.request_vars = Storage()
        self.request_vars.update(request_vars)
        self.session = session
        self.formname = formname
        self.keepvalues = keepvalues

        # if this tag is a form and we are in accepting mode (status=True)
        # check formname and formkey

        status = True
        if self.session:
            formkey = self.session.get('_formkey[%s]' % self.formname, None)
            # check if user tampering with form and void CSRF
            if formkey != self.request_vars._formkey:
                status = False
        if self.formname != self.request_vars._formname:
            status = False
        if status and self.session:
            # check if editing a record that has been modified by the server
            if hasattr(self,'record_hash') and self.record_hash != formkey:
                status = False
                self.record_changed = True
        # run validators over the whole helper tree
        status = self._traverse(status,hideerror)
        if onvalidation:
            if isinstance(onvalidation, dict):
                onsuccess = onvalidation.get('onsuccess', None)
                onfailure = onvalidation.get('onfailure', None)
                if onsuccess and status:
                    onsuccess(self)
                if onfailure and request_vars and not status:
                    onfailure(self)
                status = len(self.errors) == 0
            elif status:
                if isinstance(onvalidation, (list, tuple)):
                    [f(self) for f in onvalidation]
                else:
                    onvalidation(self)
        if self.errors:
            status = False
        if not session is None:
            # issue a fresh one-time formkey for the next submission
            if hasattr(self,'record_hash'):
                formkey = self.record_hash
            else:
                formkey = web2py_uuid()
            self.formkey = session['_formkey[%s]' % formname] = formkey
        if status and not keepvalues:
            # clear submitted values on success unless asked to keep them
            self._traverse(False,hideerror)
        self.accepted = status
        return status

    def _postprocessing(self):
        if not '_action' in self.attributes:
            self['_action'] = ''
        if not '_method' in self.attributes:
            self['_method'] = 'post'
        if not '_enctype' in self.attributes:
            self['_enctype'] = 'multipart/form-data'

    def hidden_fields(self):
        # collect user-declared hidden fields plus the CSRF token fields
        c = []
        if 'hidden' in self.attributes:
            for (key, value) in self.attributes.get('hidden',{}).items():
                c.append(INPUT(_type='hidden', _name=key, _value=value))

        if hasattr(self, 'formkey') and self.formkey:
            c.append(INPUT(_type='hidden', _name='_formkey',
                     _value=self.formkey))
        if hasattr(self, 'formname') and self.formname:
            c.append(INPUT(_type='hidden', _name='_formname',
                     _value=self.formname))
        return DIV(c, _class="hidden")

    def xml(self):
        # serialize a copy so appending hidden fields does not mutate self
        newform = FORM(*self.components, **self.attributes)
        hidden_fields = self.hidden_fields()
        if hidden_fields.components:
            newform.append(hidden_fields)
        return DIV.xml(newform)

    def validate(self,**kwargs):
        """
        This function validates the form,
        you can use it instead of directly form.accepts.

        Usage:
        In controller

        def action():
            form=FORM(INPUT(_name=\"test\", requires=IS_NOT_EMPTY()))
            form.validate() #you can pass some args here - see below
            return dict(form=form)

        This can receive a bunch of arguments

        onsuccess = 'flash' - will show message_onsuccess in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        onfailure = 'flash' - will show message_onfailure in response.flash
                    None - will do nothing
                    can be a function (lambda form: pass)
        message_onsuccess
        message_onfailure
        next = where to redirect in case of success
        any other kwargs will be passed for form.accepts(...)
+ """ + from gluon import current, redirect + kwargs['request_vars'] = kwargs.get('request_vars',current.request.post_vars) + kwargs['session'] = kwargs.get('session',current.session) + kwargs['dbio'] = kwargs.get('dbio',False) # necessary for SQLHTML forms + + onsuccess = kwargs.get('onsuccess','flash') + onfailure = kwargs.get('onfailure','flash') + message_onsuccess = kwargs.get('message_onsuccess', + current.T("Success!")) + message_onfailure = kwargs.get('message_onfailure', + current.T("Errors in form, please check it out.")) + next = kwargs.get('next',None) + for key in ('message_onsuccess','message_onfailure','onsuccess', + 'onfailure','next'): + if key in kwargs: + del kwargs[key] + + if self.accepts(**kwargs): + if onsuccess == 'flash': + if next: + current.session.flash = message_onsuccess + else: + current.response.flash = message_onsuccess + elif callable(onsuccess): + onsuccess(self) + if next: + if self.vars.id: + next = next.replace('[id]',str(self.vars.id)) + next = next % self.vars + if not next.startswith('/'): + next = URL(next) + redirect(next) + return True + elif self.errors: + if onfailure == 'flash': + current.response.flash = message_onfailure + elif callable(onfailure): + onfailure(self) + return False + + def process(self, **kwargs): + """ + Perform the .validate() method but returns the form + + Usage in controllers: + # directly on return + def action(): + #some code here + return dict(form=FORM(...).process(...)) + + You can use it with FORM, SQLFORM or FORM based plugins + + Examples: + #response.flash messages + def action(): + form = SQLFORM(db.table).process(message_onsuccess='Sucess!') + retutn dict(form=form) + + # callback function + # callback receives True or False as first arg, and a list of args. + def my_callback(status, msg): + response.flash = "Success! "+msg if status else "Errors occured" + + # after argument can be 'flash' to response.flash messages + # or a function name to use as callback or None to do nothing. 
+ def action(): + return dict(form=SQLFORM(db.table).process(onsuccess=my_callback) + """ + kwargs['dbio'] = kwargs.get('dbio',True) # necessary for SQLHTML forms + self.validate(**kwargs) + return self + + +class BEAUTIFY(DIV): + + """ + example:: + + >>> BEAUTIFY(['a', 'b', {'hello': 'world'}]).xml() + '<div><table><tr><td><div>a</div></td></tr><tr><td><div>b</div></td></tr><tr><td><div><table><tr><td style="font-weight:bold;">hello</td><td valign="top">:</td><td><div>world</div></td></tr></table></div></td></tr></table></div>' + + turns any list, dictionary, etc into decent looking html. + Two special attributes are + :sorted: a function that takes the dict and returned sorted keys + :keyfilter: a funciton that takes a key and returns its representation + or None if the key is to be skipped. By default key[:1]=='_' is skipped. + """ + + tag = 'div' + + @staticmethod + def no_underscore(key): + if key[:1]=='_': + return None + return key + + def __init__(self, component, **attributes): + self.components = [component] + self.attributes = attributes + sorter = attributes.get('sorted',sorted) + keyfilter = attributes.get('keyfilter',BEAUTIFY.no_underscore) + components = [] + attributes = copy.copy(self.attributes) + level = attributes['level'] = attributes.get('level',6) - 1 + if '_class' in attributes: + attributes['_class'] += 'i' + if level == 0: + return + for c in self.components: + if hasattr(c,'xml') and callable(c.xml): + components.append(c) + continue + elif hasattr(c,'keys') and callable(c.keys): + rows = [] + try: + keys = (sorter and sorter(c)) or c + for key in keys: + if isinstance(key,(str,unicode)) and keyfilter: + filtered_key = keyfilter(key) + else: + filtered_key = str(key) + if filtered_key is None: + continue + value = c[key] + if type(value) == types.LambdaType: + continue + rows.append(TR(TD(filtered_key, _style='font-weight:bold;'), + TD(':',_valign='top'), + TD(BEAUTIFY(value, **attributes)))) + components.append(TABLE(*rows, 
**attributes)) + continue + except: + pass + if isinstance(c, str): + components.append(str(c)) + elif isinstance(c, unicode): + components.append(c.encode('utf8')) + elif isinstance(c, (list, tuple)): + items = [TR(TD(BEAUTIFY(item, **attributes))) + for item in c] + components.append(TABLE(*items, **attributes)) + elif isinstance(c, cgi.FieldStorage): + components.append('FieldStorage object') + else: + components.append(repr(c)) + self.components = components + + +class MENU(DIV): + """ + Used to build menus + + Optional arguments + _class: defaults to 'web2py-menu web2py-menu-vertical' + ul_class: defaults to 'web2py-menu-vertical' + li_class: defaults to 'web2py-menu-expand' + + Example: + menu = MENU([['name', False, URL(...), [submenu]], ...]) + {{=menu}} + """ + + tag = 'ul' + + def __init__(self, data, **args): + self.data = data + self.attributes = args + if not '_class' in self.attributes: + self['_class'] = 'web2py-menu web2py-menu-vertical' + if not 'ul_class' in self.attributes: + self['ul_class'] = 'web2py-menu-vertical' + if not 'li_class' in self.attributes: + self['li_class'] = 'web2py-menu-expand' + if not 'li_active' in self.attributes: + self['li_active'] = 'web2py-menu-active' + + def serialize(self, data, level=0): + if level == 0: + ul = UL(**self.attributes) + else: + ul = UL(_class=self['ul_class']) + for item in data: + (name, active, link) = item[:3] + if isinstance(link,DIV): + li = LI(link) + elif 'no_link_url' in self.attributes and self['no_link_url']==link: + li = LI(DIV(name)) + elif link: + li = LI(A(name, _href=link)) + else: + li = LI(A(name, _href='#', + _onclick='javascript:void(0);return false;')) + if len(item) > 3 and item[3]: + li['_class'] = self['li_class'] + li.append(self.serialize(item[3], level+1)) + if active or ('active_url' in self.attributes and self['active_url']==link): + if li['_class']: + li['_class'] = li['_class']+' '+self['li_active'] + else: + li['_class'] = self['li_active'] + ul.append(li) + return ul + 
+ def xml(self): + return self.serialize(self.data, 0).xml() + + +def embed64( + filename = None, + file = None, + data = None, + extension = 'image/gif', + ): + """ + helper to encode the provided (binary) data into base64. + + :param filename: if provided, opens and reads this file in 'rb' mode + :param file: if provided, reads this file + :param data: if provided, uses the provided data + """ + + if filename and os.path.exists(file): + fp = open(filename, 'rb') + data = fp.read() + fp.close() + data = base64.b64encode(data) + return 'data:%s;base64,%s' % (extension, data) + + +def test(): + """ + Example: + + >>> from validators import * + >>> print DIV(A('click me', _href=URL(a='a', c='b', f='c')), BR(), HR(), DIV(SPAN(\"World\"), _class='unknown')).xml() + <div><a href=\"/a/b/c\">click me</a><br /><hr /><div class=\"unknown\"><span>World</span></div></div> + >>> print DIV(UL(\"doc\",\"cat\",\"mouse\")).xml() + <div><ul><li>doc</li><li>cat</li><li>mouse</li></ul></div> + >>> print DIV(UL(\"doc\", LI(\"cat\", _class='feline'), 18)).xml() + <div><ul><li>doc</li><li class=\"feline\">cat</li><li>18</li></ul></div> + >>> print TABLE(['a', 'b', 'c'], TR('d', 'e', 'f'), TR(TD(1), TD(2), TD(3))).xml() + <table><tr><td>a</td><td>b</td><td>c</td></tr><tr><td>d</td><td>e</td><td>f</td></tr><tr><td>1</td><td>2</td><td>3</td></tr></table> + >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_EXPR('int(value)<10'))) + >>> print form.xml() + <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" /></form> + >>> print form.accepts({'myvar':'34'}, formname=None) + False + >>> print form.xml() + <form action="" enctype="multipart/form-data" method="post"><input name="myvar" type="text" value="34" /><div class="error" id="myvar__error">invalid expression</div></form> + >>> print form.accepts({'myvar':'4'}, formname=None, keepvalues=True) + True + >>> print form.xml() + <form action=\"\" enctype=\"multipart/form-data\" 
method=\"post\"><input name=\"myvar\" type=\"text\" value=\"4\" /></form> + >>> form=FORM(SELECT('cat', 'dog', _name='myvar')) + >>> print form.accepts({'myvar':'dog'}, formname=None, keepvalues=True) + True + >>> print form.xml() + <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><select name=\"myvar\"><option value=\"cat\">cat</option><option selected=\"selected\" value=\"dog\">dog</option></select></form> + >>> form=FORM(INPUT(_type='text', _name='myvar', requires=IS_MATCH('^\w+$', 'only alphanumeric!'))) + >>> print form.accepts({'myvar':'as df'}, formname=None) + False + >>> print form.xml() + <form action=\"\" enctype=\"multipart/form-data\" method=\"post\"><input name=\"myvar\" type=\"text\" value=\"as df\" /><div class=\"error\" id=\"myvar__error\">only alphanumeric!</div></form> + >>> session={} + >>> form=FORM(INPUT(value=\"Hello World\", _name=\"var\", requires=IS_MATCH('^\w+$'))) + >>> if form.accepts({}, session,formname=None): print 'passed' + >>> if form.accepts({'var':'test ', '_formkey': session['_formkey[None]']}, session, formname=None): print 'passed' + """ + pass + + +class web2pyHTMLParser(HTMLParser): + """ + obj = web2pyHTMLParser(text) parses and html/xml text into web2py helpers. 
+ obj.tree contains the root of the tree, and tree can be manipulated + + >>> str(web2pyHTMLParser('hello<div a="b" c=3>wor<ld<span>xxx</span>y<script/>yy</div>zzz').tree) + 'hello<div a="b" c="3">wor<ld<span>xxx</span>y<script></script>yy</div>zzz' + >>> str(web2pyHTMLParser('<div>a<span>b</div>c').tree) + '<div>a<span>b</span></div>c' + >>> tree = web2pyHTMLParser('hello<div a="b">world</div>').tree + >>> tree.element(_a='b')['_c']=5 + >>> str(tree) + 'hello<div a="b" c="5">world</div>' + """ + def __init__(self,text,closed=('input','link')): + HTMLParser.__init__(self) + self.tree = self.parent = TAG['']() + self.closed = closed + self.tags = [x for x in __all__ if isinstance(eval(x),DIV)] + self.last = None + self.feed(text) + def handle_starttag(self, tagname, attrs): + if tagname.upper() in self.tags: + tag=eval(tagname.upper()) + else: + if tagname in self.closed: tagname+='/' + tag = TAG[tagname]() + for key,value in attrs: tag['_'+key]=value + tag.parent = self.parent + self.parent.append(tag) + if not tag.tag.endswith('/'): + self.parent=tag + else: + self.last = tag.tag[:-1] + def handle_data(self,data): + try: + self.parent.append(data.encode('utf8','xmlcharref')) + except: + self.parent.append(data.decode('latin1').encode('utf8','xmlcharref')) + def handle_charref(self,name): + if name[1].lower()=='x': + self.parent.append(unichr(int(name[2:], 16)).encode('utf8')) + else: + self.parent.append(unichr(int(name[1:], 10)).encode('utf8')) + def handle_entityref(self,name): + self.parent.append(unichr(name2codepoint[name]).encode('utf8')) + def handle_endtag(self, tagname): + # this deals with unbalanced tags + if tagname==self.last: + return + while True: + try: + parent_tagname=self.parent.tag + self.parent = self.parent.parent + except: + raise RuntimeError, "unable to balance tag %s" % tagname + if parent_tagname[:len(tagname)]==tagname: break + +def markdown_serializer(text,tag=None,attr=None): + attr = attr or {} + if tag is None: return re.sub('\s+',' 
',text) + if tag=='br': return '\n\n' + if tag=='h1': return '#'+text+'\n\n' + if tag=='h2': return '#'*2+text+'\n\n' + if tag=='h3': return '#'*3+text+'\n\n' + if tag=='h4': return '#'*4+text+'\n\n' + if tag=='p': return text+'\n\n' + if tag=='b' or tag=='strong': return '**%s**' % text + if tag=='em' or tag=='i': return '*%s*' % text + if tag=='tt' or tag=='code': return '`%s`' % text + if tag=='a': return '[%s](%s)' % (text,attr.get('_href','')) + if tag=='img': return '![%s](%s)' % (attr.get('_alt',''),attr.get('_src','')) + return text + +def markmin_serializer(text,tag=None,attr=None): + attr = attr or {} + # if tag is None: return re.sub('\s+',' ',text) + if tag=='br': return '\n\n' + if tag=='h1': return '# '+text+'\n\n' + if tag=='h2': return '#'*2+' '+text+'\n\n' + if tag=='h3': return '#'*3+' '+text+'\n\n' + if tag=='h4': return '#'*4+' '+text+'\n\n' + if tag=='p': return text+'\n\n' + if tag=='li': return '\n- '+text.replace('\n',' ') + if tag=='tr': return text[3:].replace('\n',' ')+'\n' + if tag in ['table','blockquote']: return '\n-----\n'+text+'\n------\n' + if tag in ['td','th']: return ' | '+text + if tag in ['b','strong','label']: return '**%s**' % text + if tag in ['em','i']: return "''%s''" % text + if tag in ['tt']: return '``%s``' % text.strip() + if tag in ['code']: return '``\n%s``' % text + if tag=='a': return '[[%s %s]]' % (text,attr.get('_href','')) + if tag=='img': return '[[%s %s left]]' % (attr.get('_alt','no title'),attr.get('_src','')) + return text + + +class MARKMIN(XmlComponent): + """ + For documentation: http://web2py.com/examples/static/markmin.html + """ + def __init__(self, text, extra=None, allowed=None, sep='p'): + self.text = text + self.extra = extra or {} + self.allowed = allowed or {} + self.sep = sep + + def xml(self): + """ + calls the gluon.contrib.markmin render function to convert the wiki syntax + """ + return render(self.text,extra=self.extra,allowed=self.allowed,sep=self.sep) + + def __str__(self): + return 
self.xml() + + def flatten(self,render=None): + """ + return the text stored by the MARKMIN object rendered by the render function + """ + return self.text + + def elements(self, *args, **kargs): + """ + to be considered experimental since the behavior of this method is questionable + another options could be TAG(self.text).elements(*args,**kargs) + """ + return [self.text] + + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + ADDED gluon/html.pyc Index: gluon/html.pyc ================================================================== --- /dev/null +++ gluon/html.pyc cannot compute difference between binary files ADDED gluon/http.py Index: gluon/http.py ================================================================== --- /dev/null +++ gluon/http.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +__all__ = ['HTTP', 'redirect'] + +defined_status = { + 200: 'OK', + 201: 'CREATED', + 202: 'ACCEPTED', + 203: 'NON-AUTHORITATIVE INFORMATION', + 204: 'NO CONTENT', + 205: 'RESET CONTENT', + 206: 'PARTIAL CONTENT', + 301: 'MOVED PERMANENTLY', + 302: 'FOUND', + 303: 'SEE OTHER', + 304: 'NOT MODIFIED', + 305: 'USE PROXY', + 307: 'TEMPORARY REDIRECT', + 400: 'BAD REQUEST', + 401: 'UNAUTHORIZED', + 403: 'FORBIDDEN', + 404: 'NOT FOUND', + 405: 'METHOD NOT ALLOWED', + 406: 'NOT ACCEPTABLE', + 407: 'PROXY AUTHENTICATION REQUIRED', + 408: 'REQUEST TIMEOUT', + 409: 'CONFLICT', + 410: 'GONE', + 411: 'LENGTH REQUIRED', + 412: 'PRECONDITION FAILED', + 413: 'REQUEST ENTITY TOO LARGE', + 414: 'REQUEST-URI TOO LONG', + 415: 'UNSUPPORTED MEDIA TYPE', + 416: 'REQUESTED RANGE NOT SATISFIABLE', + 417: 'EXPECTATION FAILED', + 500: 'INTERNAL SERVER ERROR', + 501: 'NOT IMPLEMENTED', + 502: 'BAD GATEWAY', + 503: 'SERVICE UNAVAILABLE', + 504: 'GATEWAY TIMEOUT', + 505: 'HTTP VERSION 
NOT SUPPORTED', + } + +# If web2py is executed with python2.4 we need +# to use Exception instead of BaseException + +try: + BaseException +except NameError: + BaseException = Exception + + +class HTTP(BaseException): + + def __init__( + self, + status, + body='', + **headers + ): + self.status = status + self.body = body + self.headers = headers + + def to(self, responder): + if self.status in defined_status: + status = '%d %s' % (self.status, defined_status[self.status]) + else: + status = str(self.status) + ' ' + if not 'Content-Type' in self.headers: + self.headers['Content-Type'] = 'text/html; charset=UTF-8' + body = self.body + if status[:1] == '4': + if not body: + body = status + if isinstance(body, str): + if len(body)<512 and self.headers['Content-Type'].startswith('text/html'): + body += '<!-- %s //-->' % ('x'*512) ### trick IE + self.headers['Content-Length'] = len(body) + headers = [] + for (k, v) in self.headers.items(): + if isinstance(v, list): + for item in v: + headers.append((k, str(item))) + else: + headers.append((k, str(v))) + responder(status, headers) + if hasattr(body, '__iter__') and not isinstance(self.body, str): + return body + return [str(body)] + + @property + def message(self): + ''' + compose a message describing this exception + + "status defined_status [web2py_error]" + + message elements that are not defined are omitted + ''' + msg = '%(status)d' + if self.status in defined_status: + msg = '%(status)d %(defined_status)s' + if 'web2py_error' in self.headers: + msg += ' [%(web2py_error)s]' + return msg % dict(status=self.status, + defined_status=defined_status.get(self.status), + web2py_error=self.headers.get('web2py_error')) + + def __str__(self): + "stringify me" + return self.message + + +def redirect(location, how=303): + if not location: + return + location = location.replace('\r', '%0D').replace('\n', '%0A') + raise HTTP(how, + 'You are being redirected <a href="%s">here</a>' % location, + Location=location) + + + ADDED 
gluon/http.pyc Index: gluon/http.pyc ================================================================== --- /dev/null +++ gluon/http.pyc cannot compute difference between binary files ADDED gluon/import_all.py Index: gluon/import_all.py ================================================================== --- /dev/null +++ gluon/import_all.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +This file is not strictly required by web2py. It is used for three purposes: + +1) check that all required modules are installed properly +2) provide py2exe and py2app a list of modules to be packaged in the binary +3) (optional) preload modules in memory to speed up http responses + +""" + +import os +import sys + +base_modules = ['aifc', 'anydbm', 'array', 'asynchat', 'asyncore', 'atexit', + 'audioop', 'base64', 'BaseHTTPServer', 'Bastion', 'binascii', + 'binhex', 'bisect', 'bz2', 'calendar', 'cgi', 'CGIHTTPServer', + 'cgitb', 'chunk', 'cmath', 'cmd', 'code', 'codecs', 'codeop', + 'collections', 'colorsys', 'compileall', 'compiler', + 'compiler.ast', 'compiler.visitor', 'ConfigParser', + 'contextlib', 'Cookie', 'cookielib', 'copy', 'copy_reg', + 'cPickle', 'cProfile', 'cStringIO', 'csv', 'ctypes', + 'datetime', 'decimal', 'difflib', 'dircache', 'dis', + 'doctest', 'DocXMLRPCServer', 'dumbdbm', 'dummy_thread', + 'dummy_threading', 'email', 'email.charset', 'email.encoders', + 'email.errors', 'email.generator', 'email.header', + 'email.iterators', 'email.message', 'email.mime', + 'email.mime.audio', 'email.mime.base', 'email.mime.image', + 'email.mime.message', 'email.mime.multipart', + 'email.mime.nonmultipart', 'email.mime.text', 'email.parser', + 'email.utils', 'encodings.idna', 'errno', 'exceptions', + 'filecmp', 'fileinput', 'fnmatch', 'formatter', 'fpformat', + 'ftplib', 'functools', 'gc', 'getopt', 'getpass', 
'gettext', + 'glob', 'gzip', 'hashlib', 'heapq', 'hmac', 'hotshot', + 'hotshot.stats', 'htmlentitydefs', 'htmllib', 'HTMLParser', + 'httplib', 'imaplib', 'imghdr', 'imp', 'inspect', + 'itertools', 'keyword', 'linecache', 'locale', 'logging', + 'macpath', 'mailbox', 'mailcap', 'marshal', 'math', + 'mimetools', 'mimetypes', 'mmap', 'modulefinder', 'mutex', + 'netrc', 'new', 'nntplib', 'operator', 'optparse', 'os', + 'parser', 'pdb', 'pickle', 'pickletools', 'pkgutil', + 'platform', 'poplib', 'pprint', 'py_compile', 'pyclbr', + 'pydoc', 'Queue', 'quopri', 'random', 're', 'repr', + 'rexec', 'rfc822', 'rlcompleter', 'robotparser', 'runpy', + 'sched', 'select', 'sgmllib', 'shelve', + 'shlex', 'shutil', 'signal', 'SimpleHTTPServer', + 'SimpleXMLRPCServer', 'site', 'smtpd', 'smtplib', + 'sndhdr', 'socket', 'SocketServer', 'sqlite3', + 'stat', 'statvfs', 'string', 'StringIO', + 'stringprep', 'struct', 'subprocess', 'sunau', 'symbol', + 'tabnanny', 'tarfile', 'telnetlib', 'tempfile', 'textwrap', 'thread', 'threading', + 'time', 'timeit', 'Tix', 'Tkinter', 'token', + 'tokenize', 'trace', 'traceback', 'types', + 'unicodedata', 'unittest', 'urllib', 'urllib2', + 'urlparse', 'user', 'UserDict', 'UserList', 'UserString', + 'uu', 'uuid', 'warnings', 'wave', 'weakref', 'webbrowser', + 'whichdb', 'wsgiref', 'wsgiref.handlers', 'wsgiref.headers', + 'wsgiref.simple_server', 'wsgiref.util', 'wsgiref.validate', + 'xdrlib', 'xml.dom', 'xml.dom.minidom', 'xml.dom.pulldom', + 'xml.etree.ElementTree', 'xml.parsers.expat', 'xml.sax', + 'xml.sax.handler', 'xml.sax.saxutils', 'xml.sax.xmlreader', + 'xmlrpclib', 'zipfile', 'zipimport', 'zlib', 'mhlib', + 'MimeWriter', 'mimify', 'multifile', 'sets'] + +contributed_modules = [] +for root, dirs, files in os.walk('gluon'): + for candidate in ['.'.join( + os.path.join(root, os.path.splitext(name)[0]).split(os.sep)) + for name in files if name.endswith('.py') + and root.split(os.sep) != ['gluon', 'tests'] + ]: + contributed_modules.append(candidate) 
+ +# Python base version +python_version = sys.version[:3] + +# Modules which we want to raise an Exception if they are missing +alert_dependency = ['hashlib', 'uuid'] + +# Now we remove the blacklisted modules if we are using the stated +# python version. +# +# List of modules deprecated in Python 2.6 or 2.7 that are in the above set +py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter'] +py27_deprecated = [] # ['optparse'] but we need it for now + +if python_version >= '2.6': + base_modules += ['json', 'multiprocessing'] + base_modules = list(set(base_modules).difference(set(py26_deprecated))) + +if python_version >= '2.7': + base_modules += ['argparse'] + base_modules = list(set(base_modules).difference(set(py27_deprecated))) + +# Now iterate in the base_modules, trying to do the import +for module in base_modules + contributed_modules: + try: + __import__(module, globals(), locals(), []) + except: + # Raise an exception if the current module is a dependency + if module in alert_dependency: + msg = "Missing dependency: %(module)s\n" % locals() + msg += "Try the following command: " + msg += "easy_install-%(python_version)s -U %(module)s" % locals() + raise ImportError, msg + + + ADDED gluon/languages.py Index: gluon/languages.py ================================================================== --- /dev/null +++ gluon/languages.py @@ -0,0 +1,353 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import os +import re +import cgi +import portalocker +import logging +import marshal +import copy_reg +from fileutils import listdir +import settings +from cfs import getcfs + +__all__ = ['translator', 'findT', 'update_all_languages'] + +is_gae = settings.global_settings.web2py_runtime_gae + +# pattern to find T(blah blah blah) expressions + +PY_STRING_LITERAL_RE = 
r'(?<=[^\w]T\()(?P<name>'\ + + r"[uU]?[rR]?(?:'''(?:[^']|'{1,2}(?!'))*''')|"\ + + r"(?:'(?:[^'\\]|\\.)*')|" + r'(?:"""(?:[^"]|"{1,2}(?!"))*""")|'\ + + r'(?:"(?:[^"\\]|\\.)*"))' + +regex_translate = re.compile(PY_STRING_LITERAL_RE, re.DOTALL) + +# patter for a valid accept_language + +regex_language = \ + re.compile('^[a-zA-Z]{2}(\-[a-zA-Z]{2})?(\-[a-zA-Z]+)?$') + + +def read_dict_aux(filename): + fp = open(filename, 'r') + portalocker.lock(fp, portalocker.LOCK_SH) + lang_text = fp.read().replace('\r\n', '\n') + portalocker.unlock(fp) + fp.close() + if not lang_text.strip(): + return {} + try: + return eval(lang_text) + except: + logging.error('Syntax error in %s' % filename) + return {} + +def read_dict(filename): + return getcfs('language:%s'%filename,filename, + lambda filename=filename:read_dict_aux(filename)) + +def utf8_repr(s): + r''' # note that we use raw strings to avoid having to use double back slashes below + + utf8_repr() works same as repr() when processing ascii string + >>> utf8_repr('abc') == utf8_repr("abc") == repr('abc') == repr("abc") == "'abc'" + True + >>> utf8_repr('a"b"c') == repr('a"b"c') == '\'a"b"c\'' + True + >>> utf8_repr("a'b'c") == repr("a'b'c") == '"a\'b\'c"' + True + >>> utf8_repr('a\'b"c') == repr('a\'b"c') == utf8_repr("a'b\"c") == repr("a'b\"c") == '\'a\\\'b"c\'' + True + >>> utf8_repr('a\r\nb') == repr('a\r\nb') == "'a\\r\\nb'" # Test for \r, \n + True + + Unlike repr(), utf8_repr() remains utf8 content when processing utf8 string + >>> utf8_repr('中文字') == utf8_repr("中文字") == "'中文字'" != repr('中文字') + True + >>> utf8_repr('中"文"字') == "'中\"文\"字'" != repr('中"文"字') + True + >>> utf8_repr("中'文'字") == '"中\'文\'字"' != repr("中'文'字") + True + >>> utf8_repr('中\'文"字') == utf8_repr("中'文\"字") == '\'中\\\'文"字\'' != repr('中\'文"字') == repr("中'文\"字") + True + >>> utf8_repr('中\r\n文') == "'中\\r\\n文'" != repr('中\r\n文') # Test for \r, \n + True + ''' + if (s.find("'") >= 0) and (s.find('"') < 0): # only single quote exists + s = ''.join(['"', s, 
'"']) # s = ''.join(['"', s.replace('"','\\"'), '"']) + else: + s = ''.join(["'", s.replace("'","\\'"), "'"]) + return s.replace("\n","\\n").replace("\r","\\r") + + +def write_dict(filename, contents): + try: + fp = open(filename, 'w') + except IOError: + logging.error('Unable to write to file %s' % filename) + return + portalocker.lock(fp, portalocker.LOCK_EX) + fp.write('# coding: utf8\n{\n') + for key in sorted(contents): + fp.write('%s: %s,\n' % (utf8_repr(key), utf8_repr(contents[key]))) + fp.write('}\n') + portalocker.unlock(fp) + fp.close() + + +class lazyT(object): + + """ + never to be called explicitly, returned by translator.__call__ + """ + + m = None + s = None + T = None + + def __init__( + self, + message, + symbols = {}, + T = None, + ): + self.m = message + self.s = symbols + self.T = T + + def __repr__(self): + return "<lazyT %s>" % (repr(str(self.m)), ) + + def __str__(self): + return self.T.translate(self.m, self.s) + + def __eq__(self, other): + return self.T.translate(self.m, self.s) == other + + def __ne__(self, other): + return self.T.translate(self.m, self.s) != other + + def __add__(self, other): + return '%s%s' % (self, other) + + def __radd__(self, other): + return '%s%s' % (other, self) + + def __cmp__(self,other): + return cmp(str(self),str(other)) + + def __hash__(self): + return hash(str(self)) + + def __getattr__(self, name): + return getattr(str(self),name) + + def __getitem__(self, i): + return str(self)[i] + + def __getslice__(self, i, j): + return str(self)[i:j] + + def __iter__(self): + for c in str(self): yield c + + def __len__(self): + return len(str(self)) + + def xml(self): + return cgi.escape(str(self)) + + def encode(self, *a, **b): + return str(self).encode(*a, **b) + + def decode(self, *a, **b): + return str(self).decode(*a, **b) + + def read(self): + return str(self) + + def __mod__(self, symbols): + return self.T.translate(self.m, symbols) + + +class translator(object): + + """ + this class is instantiated by 
gluon.compileapp.build_environment + as the T object + + :: + + T.force(None) # turns off translation + T.force('fr, it') # forces web2py to translate using fr.py or it.py + + T(\"Hello World\") # translates \"Hello World\" using the selected file + + notice 1: there is no need to force since, by default, T uses + accept_language to determine a translation file. + + notice 2: en and en-en are considered different languages! + """ + + def __init__(self, request): + self.request = request + self.folder = request.folder + self.current_languages = ['en'] + self.accepted_language = None + self.language_file = None + self.http_accept_language = request.env.http_accept_language + self.requested_languages = self.force(self.http_accept_language) + self.lazy = True + self.otherTs = {} + + def get_possible_languages(self): + possible_languages = [lang for lang in self.current_languages] + file_ending = re.compile("\.py$") + for langfile in os.listdir(os.path.join(self.folder,'languages')): + if file_ending.search(langfile): + possible_languages.append(file_ending.sub('',langfile)) + return possible_languages + + def set_current_languages(self, *languages): + if len(languages) == 1 and isinstance(languages[0], (tuple, list)): + languages = languages[0] + self.current_languages = languages + self.force(self.http_accept_language) + + def force(self, *languages): + if not languages or languages[0] is None: + languages = [] + if len(languages) == 1 and isinstance(languages[0], (str, unicode)): + languages = languages[0] + if languages: + if isinstance(languages, (str, unicode)): + accept_languages = languages.split(';') + languages = [] + [languages.extend(al.split(',')) for al in accept_languages] + languages = [item.strip().lower() for item in languages \ + if regex_language.match(item.strip())] + + for language in languages: + if language in self.current_languages: + self.accepted_language = language + break + filename = os.path.join(self.folder, 'languages/', language + '.py') 
+ if os.path.exists(filename): + self.accepted_language = language + self.language_file = filename + self.t = read_dict(filename) + return languages + self.language_file = None + self.t = {} # ## no language by default + return languages + + def __call__(self, message, symbols={}, language=None): + if not language: + if self.lazy: + return lazyT(message, symbols, self) + else: + return self.translate(message, symbols) + else: + try: + otherT = self.otherTs[language] + except KeyError: + otherT = self.otherTs[language] = translator(self.request) + otherT.force(language) + return otherT(message,symbols) + + def translate(self, message, symbols): + """ + user ## to add a comment into a translation string + the comment can be useful do discriminate different possible + translations for the same string (for example different locations) + + T(' hello world ') -> ' hello world ' + T(' hello world ## token') -> 'hello world' + T('hello ## world ## token') -> 'hello ## world' + + the ## notation is ignored in multiline strings and strings that + start with ##. 
this is to allow markmin syntax to be translated + """ + #for some reason languages.py gets executed before gaehandler.py + # is able to set web2py_runtime_gae, so re-check here + is_gae = settings.global_settings.web2py_runtime_gae + if not message.startswith('#') and not '\n' in message: + tokens = message.rsplit('##', 1) + else: + # this allows markmin syntax in translations + tokens = [message] + if len(tokens) == 2: + tokens[0] = tokens[0].strip() + message = tokens[0] + '##' + tokens[1].strip() + mt = self.t.get(message, None) + if mt is None: + self.t[message] = mt = tokens[0] + if self.language_file and not is_gae: + write_dict(self.language_file, self.t) + if symbols or symbols == 0: + return mt % symbols + return mt + + +def findT(path, language='en-us'): + """ + must be run by the admin app + """ + filename = os.path.join(path, 'languages', '%s.py' % language) + sentences = read_dict(filename) + mp = os.path.join(path, 'models') + cp = os.path.join(path, 'controllers') + vp = os.path.join(path, 'views') + for file in listdir(mp, '.+\.py', 0) + listdir(cp, '.+\.py', 0)\ + + listdir(vp, '.+\.html', 0): + fp = open(file, 'r') + portalocker.lock(fp, portalocker.LOCK_SH) + data = fp.read() + portalocker.unlock(fp) + fp.close() + items = regex_translate.findall(data) + for item in items: + try: + message = eval(item) + if not message.startswith('#') and not '\n' in message: + tokens = message.rsplit('##', 1) + else: + # this allows markmin syntax in translations + tokens = [message] + if len(tokens) == 2: + message = tokens[0].strip() + '##' + tokens[1].strip() + if message and not message in sentences: + sentences[message] = message + except: + pass + write_dict(filename, sentences) + +### important to allow safe session.flash=T(....) 
+def lazyT_unpickle(data): + return marshal.loads(data) +def lazyT_pickle(data): + return lazyT_unpickle, (marshal.dumps(str(data)),) +copy_reg.pickle(lazyT, lazyT_pickle, lazyT_unpickle) + +def update_all_languages(application_path): + path = os.path.join(application_path, 'languages/') + for language in listdir(path, '^\w+(\-\w+)?\.py$'): + findT(application_path, language[:-3]) + + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + + ADDED gluon/languages.pyc Index: gluon/languages.pyc ================================================================== --- /dev/null +++ gluon/languages.pyc cannot compute difference between binary files ADDED gluon/main.py Index: gluon/main.py ================================================================== --- /dev/null +++ gluon/main.py @@ -0,0 +1,822 @@ +#!/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Contains: + +- wsgibase: the gluon wsgi application + +""" + +import gc +import cgi +import cStringIO +import Cookie +import os +import re +import copy +import sys +import time +import thread +import datetime +import signal +import socket +import tempfile +import random +import string +import platform +from fileutils import abspath, write_file, parse_version +from settings import global_settings +from admin import add_path_first, create_missing_folders, create_missing_app_folders +from globals import current + +from custom_import import custom_import_install +from contrib.simplejson import dumps + +# Remarks: +# calling script has inserted path to script directory into sys.path +# applications_parent (path to applications/, site-packages/ etc) +# defaults to that directory set sys.path to +# ("", gluon_parent/site-packages, gluon_parent, ...) 
+# +# this is wrong: +# web2py_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) +# because we do not want the path to this file which may be Library.zip +# gluon_parent is the directory containing gluon, web2py.py, logging.conf +# and the handlers. +# applications_parent (web2py_path) is the directory containing applications/ +# and routes.py +# The two are identical unless web2py_path is changed via the web2py.py -f folder option +# main.web2py_path is the same as applications_parent (for backward compatibility) + +if not hasattr(os, 'mkdir'): + global_settings.db_sessions = True +if global_settings.db_sessions is not True: + global_settings.db_sessions = set() +global_settings.gluon_parent = os.environ.get('web2py_path', os.getcwd()) +global_settings.applications_parent = global_settings.gluon_parent +web2py_path = global_settings.applications_parent # backward compatibility +global_settings.app_folders = set() +global_settings.debugging = False + +custom_import_install(web2py_path) + +create_missing_folders() + +# set up logging for subsequent imports +import logging +import logging.config +logpath = abspath("logging.conf") +if os.path.exists(logpath): + logging.config.fileConfig(abspath("logging.conf")) +else: + logging.basicConfig() +logger = logging.getLogger("web2py") + +from restricted import RestrictedError +from http import HTTP, redirect +from globals import Request, Response, Session +from compileapp import build_environment, run_models_in, \ + run_controller_in, run_view_in +from fileutils import copystream +from contenttype import contenttype +from dal import BaseAdapter +from settings import global_settings +from validators import CRYPT +from cache import Cache +from html import URL as Url +import newcron +import rewrite + +__all__ = ['wsgibase', 'save_password', 'appfactory', 'HttpServer'] + +requests = 0 # gc timer + +# Security Checks: validate URL and session_id here, +# accept_language is validated in languages + +# pattern 
# pattern used to validate the client address (also accounts for IPv6)
regex_client = re.compile('[\w\-:]+(\.[\w\-]+)*\.?')

version_info = open(abspath('VERSION', gluon=True), 'r')
web2py_version = parse_version(version_info.read().strip())
version_info.close()
global_settings.web2py_version = web2py_version

try:
    import rocket
except:
    if not global_settings.web2py_runtime_gae:
        logger.warn('unable to import Rocket')

rewrite.load()


def get_client(env):
    """
    Best-effort guess of the client address from the environment.

    Tries 'http_x_forwarded_for' first, then 'remote_addr'; falls back to
    '127.0.0.1' (running locally) when neither yields a usable address.
    """
    for header in ('http_x_forwarded_for', 'remote_addr'):
        found = regex_client.search(env.get(header, ''))
        if found:
            return found.group()
    return '127.0.0.1'


def copystream_progress(request, chunk_size=10**5):
    """
    Copy request.env.wsgi_input into request.body (a temp file), publishing
    upload progress in cache.ram under X-Progress-ID:length / :uploaded.
    """
    if not request.env.content_length:
        return cStringIO.StringIO()
    source = request.env.wsgi_input
    remaining = int(request.env.content_length)
    dest = tempfile.TemporaryFile()
    if not 'X-Progress-ID' in request.vars:
        # no progress tracking requested: plain bulk copy
        copystream(source, dest, remaining, chunk_size)
        return dest
    cache_key = 'X-Progress-ID:' + request.vars['X-Progress-ID']
    cache = Cache(request)
    cache.ram(cache_key + ':length', lambda: remaining, 0)
    cache.ram(cache_key + ':uploaded', lambda: 0, 0)
    while remaining > 0:
        if remaining < chunk_size:
            data = source.read(remaining)
            cache.ram.increment(cache_key + ':uploaded', remaining)
        else:
            data = source.read(chunk_size)
            cache.ram.increment(cache_key + ':uploaded', chunk_size)
        nbytes = len(data)
        if nbytes > remaining:
            (data, nbytes) = (data[:remaining], remaining)
        remaining -= nbytes
        if nbytes == 0:
            break
        dest.write(data)
        if nbytes < chunk_size:
            break
    dest.seek(0)
    # clear the progress entries once the upload is complete
    cache.ram(cache_key + ':length', None)
    cache.ram(cache_key + ':uploaded', None)
    return dest
def serve_controller(request, response, session):
    """
    Generate a dynamic page: run all models, run the requested controller
    function, then (when it returns a dict) render it through a view.

    Must run from the [application] folder.  A request for
    /[application]/[controller]/[function] calls [function]() in
    applications/[application]/[controller].py, rendered by
    applications/[application]/views/[controller]/[function].html.

    Always terminates by raising HTTP with the rendered page.
    """
    # build the execution environment shared by models/controller/view
    environment = build_environment(request, response, session)

    # default view; the controller may override response.view
    response.view = '%s/%s.%s' % (request.controller,
                                  request.function,
                                  request.extension)

    run_models_in(environment)
    response._view_environment = copy.copy(environment)
    page = run_controller_in(request.controller, request.function, environment)
    if isinstance(page, dict):
        # dict result: expose its keys to the view and render it
        response._vars = page
        for key in page:
            response._view_environment[key] = page[key]
        run_view_in(response._view_environment)
        page = response.body.getvalue()

    # garbage collect once every 100 requests, not on every exec
    global requests
    requests = ('requests' in globals()) and (requests + 1) % 100 or 0
    if not requests:
        gc.collect()

    raise HTTP(response.status, page, **response.headers)
def start_response_aux(status, headers, exc_info, response=None):
    """
    WSGI start_response shim so a controller can host a third-party WSGI app
    via request.wsgi.environ / request.wsgi.start_response.

    Records the status code and headers on `response` and returns a write
    callable that appends to the response body unescaped.
    """
    response.status = str(status).split(' ', 1)[0]
    response.headers = dict(headers)

    def _writer(*args, **kargs):
        return response.write(escape=False, *args, **kargs)
    return _writer


def middleware_aux(request, response, *middleware_apps):
    """
    Decorator factory behind ``@request.wsgi.middleware(mw1, mw2, ...)``:
    wraps an action (which must return a string) in WSGI middleware.
    Uses a simulated environment, so some middleware may behave oddly.
    """
    def middleware(f):
        def app(environ, start_response):
            data = f()
            start_response(response.status, response.headers.items())
            return data if isinstance(data, list) else [data]
        # apply the middleware stack inside-out
        for item in middleware_apps:
            app = item(app)

        def caller(app):
            return app(request.wsgi.environ, request.wsgi.start_response)
        # bind via defaults so the closure is fixed at decoration time
        return lambda caller=caller, app=app: caller(app)
    return middleware


def environ_aux(environ, request):
    """Shallow copy of `environ` with wsgi.input re-pointed at the parsed body."""
    wrapped = copy.copy(environ)
    wrapped['wsgi.input'] = request.body
    wrapped['wsgi.version'] = 1
    return wrapped
def parse_get_post_vars(request, environ):
    """
    Populate request.get_vars, request.post_vars and the merged request.vars.

    Query-string variables are parsed into get_vars for every request method;
    body variables are parsed into post_vars for POST/PUT(/BOTH) only.
    Repeated keys accumulate into lists.
    """
    # URL variables go in get_vars for GET, POST, PUT, DELETE, etc.
    qs_pairs = cgi.parse_qsl(request.env.query_string or '', keep_blank_values=1)
    for (key, value) in qs_pairs:
        if key in request.get_vars:
            if isinstance(request.get_vars[key], list):
                request.get_vars[key] += [value]
            else:
                request.get_vars[key] = [request.get_vars[key]] + [value]
        else:
            request.get_vars[key] = value
        request.vars[key] = request.get_vars[key]

    # body variables go in post_vars on POST, PUT, BOTH only
    request.body = copystream_progress(request)  # stores request body
    if (request.body and request.env.request_method in ('POST', 'PUT', 'BOTH')):
        form = cgi.FieldStorage(fp=request.body, environ=environ,
                                keep_blank_values=1)
        # the same detection FieldStorage itself uses for multipart POSTs
        is_multipart = form.type[:10] == 'multipart/'
        request.body.seek(0)
        isle25 = sys.version_info[1] <= 5

        def listify(a):
            return (not isinstance(a, list) and [a]) or a

        try:
            keys = sorted(form)
        except TypeError:
            keys = []
        for key in keys:
            item = form[key]
            # non-file elements are replaced by their values; files kept as-is
            if isinstance(item, list):
                if not item[0].filename:
                    value = [x.value for x in item]
                else:
                    value = [x for x in item]
            elif not item.filename:
                value = item.value
            else:
                value = item
            pvalue = listify(value)
            if key in request.vars:
                # the key also appeared in the query string: separate the
                # post-only portion (FieldStorage merges GET+POST; the merge
                # order differs for multipart bodies and on python <= 2.5)
                gvalue = listify(request.vars[key])
                if isle25:
                    value = pvalue + gvalue
                elif is_multipart:
                    pvalue = pvalue[len(gvalue):]
                else:
                    pvalue = pvalue[:-len(gvalue)]
            request.vars[key] = value
            if len(pvalue):
                request.post_vars[key] = (len(pvalue) > 1 and pvalue) or pvalue[0]
with '_' + - adds web2py path and version info + - compensates for fcgi missing path_info and query_string + - validates the path in url + + The url path must be either: + + 1. for static pages: + + - /<application>/static/<file> + + 2. for dynamic pages: + + - /<application>[/<controller>[/<function>[/<sub>]]][.<extension>] + - (sub may go several levels deep, currently 3 levels are supported: + sub1/sub2/sub3) + + The naming conventions are: + + - application, controller, function and extension may only contain + [a-zA-Z0-9_] + - file and sub may also contain '-', '=', '.' and '/' + """ + + current.__dict__.clear() + request = Request() + response = Response() + session = Session() + request.env.web2py_path = global_settings.applications_parent + request.env.web2py_version = web2py_version + request.env.update(global_settings) + static_file = False + try: + try: + try: + # ################################################## + # handle fcgi missing path_info and query_string + # select rewrite parameters + # rewrite incoming URL + # parse rewritten header variables + # parse rewritten URL + # serve file if static + # ################################################## + + if not environ.get('PATH_INFO',None) and \ + environ.get('REQUEST_URI',None): + # for fcgi, get path_info and query_string from request_uri + items = environ['REQUEST_URI'].split('?') + environ['PATH_INFO'] = items[0] + if len(items) > 1: + environ['QUERY_STRING'] = items[1] + else: + environ['QUERY_STRING'] = '' + if not environ.get('HTTP_HOST',None): + environ['HTTP_HOST'] = '%s:%s' % (environ.get('SERVER_NAME'), + environ.get('SERVER_PORT')) + + (static_file, environ) = rewrite.url_in(request, environ) + if static_file: + if request.env.get('query_string', '')[:10] == 'attachment': + response.headers['Content-Disposition'] = 'attachment' + response.stream(static_file, request=request) + + # ################################################## + # fill in request items + # 
################################################## + + http_host = request.env.http_host.split(':',1)[0] + + local_hosts = [http_host,'::1','127.0.0.1','::ffff:127.0.0.1'] + if not global_settings.web2py_runtime_gae: + local_hosts += [socket.gethostname(), + socket.gethostbyname(http_host)] + request.client = get_client(request.env) + request.folder = abspath('applications', + request.application) + os.sep + x_req_with = str(request.env.http_x_requested_with).lower() + request.ajax = x_req_with == 'xmlhttprequest' + request.cid = request.env.http_web2py_component_element + request.is_local = request.env.remote_addr in local_hosts + request.is_https = request.env.wsgi_url_scheme \ + in ['https', 'HTTPS'] or request.env.https == 'on' + + # ################################################## + # compute a request.uuid to be used for tickets and toolbar + # ################################################## + + response.uuid = request.compute_uuid() + + # ################################################## + # access the requested application + # ################################################## + + if not os.path.exists(request.folder): + if request.application == rewrite.thread.routes.default_application and request.application != 'welcome': + request.application = 'welcome' + redirect(Url(r=request)) + elif rewrite.thread.routes.error_handler: + _handler = rewrite.thread.routes.error_handler + redirect(Url(_handler['application'], + _handler['controller'], + _handler['function'], + args=request.application)) + else: + raise HTTP(404, rewrite.thread.routes.error_message \ + % 'invalid request', + web2py_error='invalid application') + request.url = Url(r=request, args=request.args, + extension=request.raw_extension) + + # ################################################## + # build missing folders + # ################################################## + + create_missing_app_folders(request) + + # ################################################## + # get the GET and 
POST data + # ################################################## + + parse_get_post_vars(request, environ) + + # ################################################## + # expose wsgi hooks for convenience + # ################################################## + + request.wsgi.environ = environ_aux(environ,request) + request.wsgi.start_response = \ + lambda status='200', headers=[], \ + exec_info=None, response=response: \ + start_response_aux(status, headers, exec_info, response) + request.wsgi.middleware = \ + lambda *a: middleware_aux(request,response,*a) + + # ################################################## + # load cookies + # ################################################## + + if request.env.http_cookie: + try: + request.cookies.load(request.env.http_cookie) + except Cookie.CookieError, e: + pass # invalid cookies + + # ################################################## + # try load session or create new session file + # ################################################## + + session.connect(request, response) + + # ################################################## + # set no-cache headers + # ################################################## + + response.headers['Content-Type'] = \ + contenttype('.'+request.extension) + response.headers['Cache-Control'] = \ + 'no-store, no-cache, must-revalidate, post-check=0, pre-check=0' + response.headers['Expires'] = \ + time.strftime('%a, %d %b %Y %H:%M:%S GMT', time.gmtime()) + response.headers['Pragma'] = 'no-cache' + + # ################################################## + # run controller + # ################################################## + + serve_controller(request, response, session) + + except HTTP, http_response: + if static_file: + return http_response.to(responder) + + if request.body: + request.body.close() + + # ################################################## + # on success, try store session in database + # ################################################## + session._try_store_in_db(request, 
response) + + # ################################################## + # on success, commit database + # ################################################## + + if response._custom_commit: + response._custom_commit() + else: + BaseAdapter.close_all_instances('commit') + + # ################################################## + # if session not in db try store session on filesystem + # this must be done after trying to commit database! + # ################################################## + + session._try_store_on_disk(request, response) + + # ################################################## + # store cookies in headers + # ################################################## + + if request.cid: + + if response.flash and not 'web2py-component-flash' in http_response.headers: + http_response.headers['web2py-component-flash'] = \ + str(response.flash).replace('\n','') + if response.js and not 'web2py-component-command' in http_response.headers: + http_response.headers['web2py-component-command'] = \ + response.js.replace('\n','') + if session._forget and \ + response.session_id_name in response.cookies: + del response.cookies[response.session_id_name] + elif session._secure: + response.cookies[response.session_id_name]['secure'] = True + if len(response.cookies)>0: + http_response.headers['Set-Cookie'] = \ + [str(cookie)[11:] for cookie in response.cookies.values()] + ticket=None + + except RestrictedError, e: + + if request.body: + request.body.close() + + # ################################################## + # on application error, rollback database + # ################################################## + + ticket = e.log(request) or 'unknown' + if response._custom_rollback: + response._custom_rollback() + else: + BaseAdapter.close_all_instances('rollback') + + http_response = \ + HTTP(500, rewrite.thread.routes.error_message_ticket % \ + dict(ticket=ticket), + web2py_error='ticket %s' % ticket) + + except: + + if request.body: + request.body.close() + + # 
################################################## + # on application error, rollback database + # ################################################## + + try: + if response._custom_rollback: + response._custom_rollback() + else: + BaseAdapter.close_all_instances('rollback') + except: + pass + e = RestrictedError('Framework', '', '', locals()) + ticket = e.log(request) or 'unrecoverable' + http_response = \ + HTTP(500, rewrite.thread.routes.error_message_ticket \ + % dict(ticket=ticket), + web2py_error='ticket %s' % ticket) + + finally: + if response and hasattr(response, 'session_file') \ + and response.session_file: + response.session_file.close() +# if global_settings.debugging: +# import gluon.debug +# gluon.debug.stop_trace() + + session._unlock(response) + http_response, new_environ = rewrite.try_rewrite_on_error( + http_response, request, environ, ticket) + if not http_response: + return wsgibase(new_environ,responder) + if global_settings.web2py_crontype == 'soft': + newcron.softcron(global_settings.applications_parent).start() + return http_response.to(responder) + + +def save_password(password, port): + """ + used by main() to save the password in the parameters_port.py file. + """ + + password_file = abspath('parameters_%i.py' % port) + if password == '<random>': + # make up a new password + chars = string.letters + string.digits + password = ''.join([random.choice(chars) for i in range(8)]) + cpassword = CRYPT()(password)[0] + print '******************* IMPORTANT!!! 
def appfactory(wsgiapp=wsgibase,
               logfilename='httpserver.log',
               profilerfilename='profiler.log'):
    """
    Build a WSGI application that logs every request (and optionally
    profiles it) before delegating to `wsgiapp`.

    .. function:: gluon.main.appfactory(
        [wsgiapp=wsgibase
        [, logfilename='httpserver.log'
        [, profilerfilename='profiler.log']]])
    """
    if profilerfilename and os.path.exists(profilerfilename):
        os.unlink(profilerfilename)
    locker = thread.allocate_lock()

    def app_with_logging(environ, responder):
        """the wrapped wsgi app: log and optionally profile each call"""
        status_headers = []

        def responder2(s, h):
            # capture status and headers for the log line, then delegate
            status_headers.append(s)
            status_headers.append(h)
            return responder(s, h)

        time_in = time.time()
        ret = [0]
        if not profilerfilename:
            ret[0] = wsgiapp(environ, responder2)
        else:
            import cProfile
            import pstats
            logger.warn('profiler is on. this makes web2py slower and serial')
            # profiling serializes requests
            locker.acquire()
            cProfile.runctx('ret[0] = wsgiapp(environ, responder2)',
                            globals(), locals(), profilerfilename + '.tmp')
            stat = pstats.Stats(profilerfilename + '.tmp')
            stat.stream = cStringIO.StringIO()
            stat.strip_dirs().sort_stats("time").print_stats(80)
            profile_out = stat.stream.getvalue()
            profile_file = open(profilerfilename, 'a')
            profile_file.write('%s\n%s\n%s\n%s\n\n' %
                               ('=' * 60, environ['PATH_INFO'],
                                '=' * 60, profile_out))
            profile_file.close()
            locker.release()
        try:
            line = '%s, %s, %s, %s, %s, %s, %f\n' % (
                environ['REMOTE_ADDR'],
                datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
                environ['REQUEST_METHOD'],
                environ['PATH_INFO'].replace(',', '%2C'),
                environ['SERVER_PROTOCOL'],
                (status_headers[0])[:3],
                time.time() - time_in,
            )
            if not logfilename:
                sys.stdout.write(line)
            elif isinstance(logfilename, str):
                write_file(logfilename, line, 'a')
            else:
                # logfilename may be an already-open file-like object
                logfilename.write(line)
        except:
            pass  # logging must never break the request
        return ret[0]

    return app_with_logging
class HttpServer(object):
    """
    The web2py web server (Rocket).
    """

    def __init__(
        self,
        ip='127.0.0.1',
        port=8000,
        password='',
        pid_filename='httpserver.pid',
        log_filename='httpserver.log',
        profiler_filename=None,
        ssl_certificate=None,
        ssl_private_key=None,
        ssl_ca_certificate=None,
        min_threads=None,
        max_threads=None,
        server_name=None,
        request_queue_size=5,
        timeout=10,
        shutdown_timeout=None,  # Rocket does not use a shutdown timeout
        path=None,
        interfaces=None  # Rocket can serve several interfaces - must be a list of socket-tuples
        ):
        """
        Configure and create the Rocket server (call start() to serve).
        """
        if interfaces:
            # validate the parameter shape early.  BUG FIX: the original
            # `raise "..."` string-raise is illegal since Python 2.6 and
            # surfaced as a bare TypeError that swallowed the message;
            # raise a real exception instead (same message preserved).
            if isinstance(interfaces, list):
                for i in interfaces:
                    if not isinstance(i, tuple):
                        raise ValueError(
                            "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")
            else:
                raise ValueError(
                    "Wrong format for rocket interfaces parameter - see http://packages.python.org/rocket/")

        if path:
            # a path was specified: change the global variables so web2py
            # runs from there instead of cwd or os.environ['web2py_path']
            global web2py_path
            path = os.path.normpath(path)
            web2py_path = path
            global_settings.applications_parent = path
            os.chdir(path)
            [add_path_first(p) for p in (path, abspath('site-packages'), "")]

        save_password(password, port)
        self.pid_filename = pid_filename
        if not server_name:
            server_name = socket.gethostname()
        logger.info('starting web server...')
        rocket.SERVER_NAME = server_name
        sock_list = [ip, port]
        if not ssl_certificate or not ssl_private_key:
            logger.info('SSL is off')
        elif not rocket.ssl:
            logger.warning('Python "ssl" module unavailable. SSL is OFF')
        elif not os.path.exists(ssl_certificate):
            logger.warning('unable to open SSL certificate. SSL is OFF')
        elif not os.path.exists(ssl_private_key):
            logger.warning('unable to open SSL private key. SSL is OFF')
        else:
            sock_list.extend([ssl_private_key, ssl_certificate])
            if ssl_ca_certificate:
                sock_list.append(ssl_ca_certificate)
            logger.info('SSL is ON')
        app_info = {'wsgi_app': appfactory(wsgibase,
                                           log_filename,
                                           profiler_filename)}

        self.server = rocket.Rocket(interfaces or tuple(sock_list),
                                    method='wsgi',
                                    app_info=app_info,
                                    min_threads=min_threads,
                                    max_threads=max_threads,
                                    queue_size=int(request_queue_size),
                                    timeout=int(timeout),
                                    handle_signals=False,
                                    )

    def start(self):
        """
        Write the pid file, install signal handlers and start the server.
        """
        try:
            signal.signal(signal.SIGTERM, lambda a, b, s=self: s.stop())
            signal.signal(signal.SIGINT, lambda a, b, s=self: s.stop())
        except:
            pass  # not every platform/thread allows installing handlers
        write_file(self.pid_filename, str(os.getpid()))
        self.server.start()

    def stop(self, stoplogging=False):
        """
        Stop cron and the web server; remove the pid file.
        """
        newcron.stopcron()
        self.server.stop(stoplogging)
        try:
            os.unlink(self.pid_filename)
        except:
            pass
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
"""

import re

# all patterns are raw string literals: the originals relied on non-raw
# escapes such as '\w' and '\s', which are deprecated escape sequences
# in ordinary string literals

# pattern to find defined tables

regex_tables = re.compile(
    r"""^[\w]+\.define_table\(\s*[\'\"](?P<name>[\w_]+)[\'\"]""",
    flags=re.M)

# pattern to find exposed functions in controller

regex_expose = re.compile(
    r'^def\s+(?P<name>(?:[a-zA-Z0-9]\w*)|(?:_[a-zA-Z0-9]\w*))\(\)\s*:',
    flags=re.M)

# pattern to find {{include '...'}} directives in templates

regex_include = re.compile(
    r'(?P<all>\{\{\s*include\s+[\'"](?P<name>[^\'"]*)[\'"]\s*\}\})')

# pattern to find {{extend '...'}} directives in templates

regex_extend = re.compile(
    r'^\s*(?P<all>\{\{\s*extend\s+[\'"](?P<name>[^\'"]+)[\'"]\s*\}\})',
    re.MULTILINE)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Created by Attila Csipa <web2py@csipa.in.rs>
Modified by Massimo Di Pierro <mdipierro@cs.depaul.edu>
"""

import sys
import os
import threading
import logging
import time
import sched
import re
import datetime
import platform
import portalocker
import fileutils
import cPickle
from settings import global_settings

logger = logging.getLogger("web2py.cron")
_cron_stopping = False  # module-wide flag polled by every cron thread


def stopcron():
    "graceful shutdown of cron"
    global _cron_stopping
    _cron_stopping = True


class extcron(threading.Thread):
    """One cron pass triggered by an external scheduler (non-daemon)."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(False)
        self.path = applications_parent
        crondance(self.path, 'external', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('external cron invocation')
            crondance(self.path, 'external', startup=False)


class hardcron(threading.Thread):
    """Daemon thread that fires a cron pass at the top of every minute."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.path = applications_parent
        crondance(self.path, 'hard', startup=True)

    def launch(self):
        if not _cron_stopping:
            logger.debug('hard cron invocation')
            crondance(self.path, 'hard', startup=False)

    def run(self):
        s = sched.scheduler(time.time, time.sleep)
        logger.info('Hard cron daemon started')
        while not _cron_stopping:
            now = time.time()
            # wake up exactly at the next minute boundary
            s.enter(60 - now % 60, 1, self.launch, ())
            s.run()
class softcron(threading.Thread):
    """One cron pass piggybacked on a normal web request (no daemon)."""

    def __init__(self, applications_parent):
        threading.Thread.__init__(self)
        self.path = applications_parent
        crondance(self.path, 'soft', startup=True)

    def run(self):
        if not _cron_stopping:
            logger.debug('soft cron invocation')
            crondance(self.path, 'soft', startup=False)


class Token(object):
    """File-based inter-process lock (cron.master) serializing cron runs."""

    def __init__(self, path):
        self.path = os.path.join(path, 'cron.master')
        if not os.path.exists(self.path):
            fileutils.write_file(self.path, '', 'wb')
        self.master = None
        self.now = time.time()

    def acquire(self, startup=False):
        """
        Return the acquisition time, or None if cron is already running.

        The lock is a pickled (start, stop) pair in cron.master: `start` is
        when the cron job started and `stop` when it completed (0 while still
        running).  If a job started less than 60 seconds ago, return None;
        if it started earlier and never stopped, warn about a stale master.
        """
        if portalocker.LOCK_EX is None:
            logger.warning('WEB2PY CRON: Disabled because no file locking')
            return None
        self.master = open(self.path, 'rb+')
        try:
            ret = None
            portalocker.lock(self.master, portalocker.LOCK_EX)
            try:
                (start, stop) = cPickle.load(self.master)
            except:
                (start, stop) = (0, 1)
            if startup or self.now - start > 59.99:
                ret = self.now
                if not stop:
                    # previous cron job ran for longer than one minute
                    logger.warning('WEB2PY CRON: Stale cron.master detected')
                logger.debug('WEB2PY CRON: Acquiring lock')
                self.master.seek(0)
                cPickle.dump((self.now, 0), self.master)
        finally:
            portalocker.unlock(self.master)
        if not ret:
            # not acquired: close immediately so no release() is needed
            self.master.close()
        return ret

    def release(self):
        """
        Record in cron.master the time the cron job completed.
        """
        if not self.master.closed:
            portalocker.lock(self.master, portalocker.LOCK_EX)
            logger.debug('WEB2PY CRON: Releasing cron lock')
            self.master.seek(0)
            (start, stop) = cPickle.load(self.master)
            if start == self.now:  # only overwrite if this is my lock
                self.master.seek(0)
                cPickle.dump((self.now, time.time()), self.master)
            portalocker.unlock(self.master)
            self.master.close()
# crontab '*' expansion per field type
_STAR_RANGES = {
    'min': '0-59',
    'hr': '0-23',
    'dom': '1-31',
    'mon': '1-12',
    'dow': '0-6',
}

_RANGE_STEP = re.compile(r'(\d+)-(\d+)/(\d+)')


def rangetolist(s, period='min'):
    """
    Expand a crontab 'lo-hi/step' (or '*/step') expression into the list of
    matching values for the given field type.

    Note: a value matches when it is divisible by `step` (not offset from
    `lo`), mirroring the historical behavior.  Anything that does not look
    like 'lo-hi/step' after star expansion yields an empty list.
    """
    if s.startswith('*'):
        expansion = _STAR_RANGES.get(period)
        if expansion:
            s = expansion + s[1:]
    found = _RANGE_STEP.match(s)
    if not found:
        return []
    low, high, step = (int(found.group(g)) for g in (1, 2, 3))
    return [i for i in range(low, high + 1) if i % step == 0]
# crontab @-aliases, expanded to the standard five time fields
# (ordered pairs: checked first-match, like the original if/elif chain)
_CRON_ALIASES = (
    ('@reboot', '-1 * * * *'),
    ('@yearly', '0 0 1 1 *'),
    ('@annually', '0 0 1 1 *'),
    ('@monthly', '0 0 1 * *'),
    ('@weekly', '0 0 * * 0'),
    ('@daily', '0 0 * * *'),
    ('@midnight', '0 0 * * *'),
    ('@hourly', '0 * * * *'),
)


def parsecronline(line):
    """
    Parse one crontab line into a task dict with keys 'min', 'hr', 'dom',
    'mon', 'dow' (lists of ints; absent or '*' fields omitted), plus
    'user' and 'cmd'.  Returns None for a malformed line.
    """
    task = {}
    for alias, expansion in _CRON_ALIASES:
        if line.startswith(alias):
            line = line.replace(alias, expansion)
            break
    params = line.strip().split(None, 6)
    if len(params) < 7:
        return None
    daysofweek = {'sun': 0, 'mon': 1, 'tue': 2, 'wed': 3,
                  'thu': 4, 'fri': 5, 'sat': 6}
    for (s, id) in zip(params[:5], ['min', 'hr', 'dom', 'mon', 'dow']):
        if not s in [None, '*']:
            task[id] = []
            vals = s.split(',')
            for val in vals:
                if val != '-1' and '-' in val and '/' not in val:
                    # 'a-b' means every value in the range: same as 'a-b/1'
                    val = '%s/1' % val
                if '/' in val:
                    task[id] += rangetolist(val, id)
                elif val.isdigit() or val == '-1':
                    task[id].append(int(val))
                elif id == 'dow' and val[:3].lower() in daysofweek:
                    # BUG FIX: daysofweek is a dict and was being *called*
                    # (daysofweek(...)), raising TypeError for names like 'mon'
                    task[id].append(daysofweek[val[:3].lower()])
    task['user'] = params[5]
    task['cmd'] = params[6]
    return task


class cronlauncher(threading.Thread):
    """Run one cron command in a subprocess, logging its outcome."""

    def __init__(self, cmd, shell=True):
        threading.Thread.__init__(self)
        if platform.system() == 'Windows':
            shell = False
        elif isinstance(cmd, list):
            cmd = ' '.join(cmd)
        self.cmd = cmd
        self.shell = shell

    def run(self):
        import subprocess
        proc = subprocess.Popen(self.cmd,
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=self.shell)
        (stdoutdata, stderrdata) = proc.communicate()
        if proc.returncode != 0:
            logger.warning(
                'WEB2PY CRON Call returned code %s:\n%s' %
                (proc.returncode, stdoutdata + stderrdata))
        else:
            logger.debug('WEB2PY CRON Call returned success:\n%s'
                         % stdoutdata)
def crondance(applications_parent, ctype='soft', startup=False):
    """
    Run one cron pass: read each application's cron/crontab, and launch
    every task whose time fields match the current minute (or, on startup,
    every @reboot task).  Serialized across processes via Token.
    """
    apppath = os.path.join(applications_parent, 'applications')
    cron_path = os.path.join(applications_parent)
    token = Token(cron_path)
    cronmaster = token.acquire(startup=startup)
    if not cronmaster:
        return
    now_s = time.localtime()
    checks = (('min', now_s.tm_min),
              ('hr', now_s.tm_hour),
              ('mon', now_s.tm_mon),
              ('dom', now_s.tm_mday),
              ('dow', (now_s.tm_wday + 1) % 7))

    apps = [x for x in os.listdir(apppath)
            if os.path.isdir(os.path.join(apppath, x))]

    for app in apps:
        if _cron_stopping:
            break
        apath = os.path.join(apppath, app)
        cronpath = os.path.join(apath, 'cron')
        crontab = os.path.join(cronpath, 'crontab')
        if not os.path.exists(crontab):
            continue
        try:
            cronlines = fileutils.readlines_file(crontab, 'rt')
            lines = [x.strip() for x in cronlines
                     if x.strip() and not x.strip().startswith('#')]
            tasks = [parsecronline(cline) for cline in lines]
        except Exception as e:
            logger.error('WEB2PY CRON: crontab read error %s' % e)
            continue

        for task in tasks:
            if _cron_stopping:
                break
            # BUG FIX: skip malformed lines (parsecronline returns None)
            # *before* touching task; the original computed `citems` first
            # and crashed with "argument of type 'NoneType' is not iterable"
            if not task:
                continue
            commands = [sys.executable]
            w2p_path = fileutils.abspath('web2py.py', gluon=True)
            if os.path.exists(w2p_path):
                commands.append(w2p_path)
            if global_settings.applications_parent != global_settings.gluon_parent:
                commands.extend(('-f', global_settings.applications_parent))
            # a field fails when it is constrained and the current value
            # is not among the allowed ones
            citems = [(k in task and not v in task[k]) for k, v in checks]
            task_min = task.get('min', [])
            if not startup and task_min == [-1]:
                continue  # @reboot tasks only run at startup
            elif task_min != [-1] and any(citems):
                # any() replaces reduce(lambda a, b: a or b, ...): same truth
                continue
            logger.info('WEB2PY CRON (%s): %s executing %s in %s at %s'
                        % (ctype, app, task.get('cmd'),
                           os.getcwd(), datetime.datetime.now()))
            action, command, models = False, task['cmd'], ''
            if command.startswith('**'):
                (action, models, command) = (True, '', command[2:])
            elif command.startswith('*'):
                (action, models, command) = (True, '-M', command[1:])
            else:
                action = False
            if action and command.endswith('.py'):
                commands.extend(('-J',              # cron job
                                 models,            # import models?
                                 '-S', app,         # app name
                                 '-a', '"<recycle>"',  # password
                                 '-R', command))    # command
                shell = True
            elif action:
                commands.extend(('-J',              # cron job
                                 models,            # import models?
                                 '-S', app + '/' + command,  # app/function
                                 '-a', '"<recycle>"'))  # password
                shell = True
            else:
                # plain external command, executed as-is
                commands = command
                shell = False
            try:
                cronlauncher(commands, shell=shell).start()
            except Exception as e:
                logger.warning(
                    'WEB2PY CRON: Execution error for %s: %s'
                    % (task.get('cmd'), e))
    token.release()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# portalocker.py - Cross-platform (posix/nt) API for flock-style file locking.
# Requires python 1.5.2 or better.

"""
Cross-platform (posix/nt) API for flock-style file locking.

Synopsis:

   import portalocker
   file = open("somefile", "r+")
   portalocker.lock(file, portalocker.LOCK_EX)
   file.seek(12)
   file.write("foo")
   file.close()

If you know what you're doing, you may choose to

   portalocker.unlock(file)

before closing the file, but why?

Methods:

   lock( file, flags )
   unlock( file )

Constants:

   LOCK_EX - exclusive lock
   LOCK_SH - shared lock
   LOCK_NB - non-blocking

I learned the win32 technique for locking files from sample code
provided by John Nielsen <nielsenjf@my-deja.com> in the documentation
that accompanies the win32 modules.

Author: Jonathan Feinberg <jdf@pobox.com>
Version: $Id: portalocker.py,v 1.3 2001/05/29 18:47:55 Administrator Exp $
"""

import os
import logging
import platform

logger = logging.getLogger("web2py")

# Probe for an available locking backend.  POSIX fcntl is tried first,
# then the win32 extensions (pywin32).  If neither import succeeds,
# os_locking stays None and lock/unlock degrade to no-ops below.
os_locking = None
try:
    import fcntl
    os_locking = 'posix'
except ImportError:
    # only a missing module should be ignored here (was a bare except,
    # which also swallowed SystemExit/KeyboardInterrupt)
    pass
try:
    import win32con
    import win32file
    import pywintypes
    os_locking = 'windows'
except ImportError:
    pass

if os_locking == 'windows':
    LOCK_EX = win32con.LOCKFILE_EXCLUSIVE_LOCK
    LOCK_SH = 0  # the default
    LOCK_NB = win32con.LOCKFILE_FAIL_IMMEDIATELY

    # is there any reason not to reuse the following structure?
    __overlapped = pywintypes.OVERLAPPED()

    def lock(file, flags):
        """Acquire a lock on an open file object (win32 backend)."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.LockFileEx(hfile, flags, 0, 0x7fff0000, __overlapped)

    def unlock(file):
        """Release the lock held on an open file object (win32 backend)."""
        hfile = win32file._get_osfhandle(file.fileno())
        win32file.UnlockFileEx(hfile, 0, 0x7fff0000, __overlapped)


elif os_locking == 'posix':
    LOCK_EX = fcntl.LOCK_EX
    LOCK_SH = fcntl.LOCK_SH
    LOCK_NB = fcntl.LOCK_NB

    def lock(file, flags):
        """Acquire a lock on an open file object (fcntl.flock backend)."""
        fcntl.flock(file.fileno(), flags)

    def unlock(file):
        """Release the lock held on an open file object."""
        fcntl.flock(file.fileno(), fcntl.LOCK_UN)


else:
    if platform.system() == 'Windows':
        logger.error('no file locking, you must install the win32 extensions from: http://sourceforge.net/projects/pywin32/files/')
    else:
        logger.debug('no file locking, this will cause problems')

    LOCK_EX = None
    LOCK_SH = None
    LOCK_NB = None

    def lock(file, flags):
        """No-op fallback when no locking backend is available."""
        pass

    def unlock(file):
        """No-op fallback when no locking backend is available."""
        pass


if __name__ == '__main__':
    from time import time, strftime, localtime
    import sys

    log = open('log.txt', 'a+')
    lock(log, LOCK_EX)

    timestamp = strftime('%m/%d/%Y %H:%M:%S\n', localtime(time()))
    log.write(timestamp)

    # single-argument function-call form: identical output on Python 2 and 3
    print('Wrote lines. Hit enter to release lock.')
    dummy = sys.stdin.readline()

    log.close()
Some key words +# that are otherwise non-reserved cannot be used as function or data type n +# ames and are in the nonreserved list. (Most of these words represent +# built-in functions or data types with special syntax. The function +# or type is still available but it cannot be redefined by the user.) +# Labeled "reserved" are those tokens that are not allowed as column or +# table names. Some reserved key words are allowable as names for +# functions or data typesself. + +# Note at the bottom of the list is a dict containing references to the +# tuples, and also if you add a list don't forget to remove its default +# set of COMMON. + +# Keywords that are adapter specific. Such as a list of "postgresql" +# or "mysql" keywords + +# These are keywords that are common to all SQL dialects, and should +# never be used as a table or column. Even if you use one of these +# the cursor will throw an OperationalError for the SQL syntax. +COMMON = set(( + 'SELECT', + 'INSERT', + 'DELETE', + 'UPDATE', + 'DROP', + 'CREATE', + 'ALTER', + + 'WHERE', + 'FROM', + 'INNER', + 'JOIN', + 'AND', + 'OR', + 'LIKE', + 'ON', + 'IN', + 'SET', + + 'BY', + 'GROUP', + 'ORDER', + 'LEFT', + 'OUTER', + + 'IF', + 'END', + 'THEN', + 'LOOP', + 'AS', + 'ELSE', + 'FOR', + + 'CASE', + 'WHEN', + 'MIN', + 'MAX', + 'DISTINCT', +)) + + +POSTGRESQL = set(( + 'FALSE', + 'TRUE', + 'ALL', + 'ANALYSE', + 'ANALYZE', + 'AND', + 'ANY', + 'ARRAY', + 'AS', + 'ASC', + 'ASYMMETRIC', + 'AUTHORIZATION', + 'BETWEEN', + 'BIGINT', + 'BINARY', + 'BIT', + 'BOOLEAN', + 'BOTH', + 'CASE', + 'CAST', + 'CHAR', + 'CHARACTER', + 'CHECK', + 'COALESCE', + 'COLLATE', + 'COLUMN', + 'CONSTRAINT', + 'CREATE', + 'CROSS', + 'CURRENT_CATALOG', + 'CURRENT_DATE', + 'CURRENT_ROLE', + 'CURRENT_SCHEMA', + 'CURRENT_TIME', + 'CURRENT_TIMESTAMP', + 'CURRENT_USER', + 'DEC', + 'DECIMAL', + 'DEFAULT', + 'DEFERRABLE', + 'DESC', + 'DISTINCT', + 'DO', + 'ELSE', + 'END', + 'EXCEPT', + 'EXISTS', + 'EXTRACT', + 'FETCH', + 'FLOAT', + 'FOR', + 'FOREIGN', + 
'FREEZE', + 'FROM', + 'FULL', + 'GRANT', + 'GREATEST', + 'GROUP', + 'HAVING', + 'ILIKE', + 'IN', + 'INITIALLY', + 'INNER', + 'INOUT', + 'INT', + 'INTEGER', + 'INTERSECT', + 'INTERVAL', + 'INTO', + 'IS', + 'ISNULL', + 'JOIN', + 'LEADING', + 'LEAST', + 'LEFT', + 'LIKE', + 'LIMIT', + 'LOCALTIME', + 'LOCALTIMESTAMP', + 'NATIONAL', + 'NATURAL', + 'NCHAR', + 'NEW', + 'NONE', + 'NOT', + 'NOTNULL', + 'NULL', + 'NULLIF', + 'NUMERIC', + 'OFF', + 'OFFSET', + 'OLD', + 'ON', + 'ONLY', + 'OR', + 'ORDER', + 'OUT', + 'OUTER', + 'OVERLAPS', + 'OVERLAY', + 'PLACING', + 'POSITION', + 'PRECISION', + 'PRIMARY', + 'REAL', + 'REFERENCES', + 'RETURNING', + 'RIGHT', + 'ROW', + 'SELECT', + 'SESSION_USER', + 'SETOF', + 'SIMILAR', + 'SMALLINT', + 'SOME', + 'SUBSTRING', + 'SYMMETRIC', + 'TABLE', + 'THEN', + 'TIME', + 'TIMESTAMP', + 'TO', + 'TRAILING', + 'TREAT', + 'TRIM', + 'UNION', + 'UNIQUE', + 'USER', + 'USING', + 'VALUES', + 'VARCHAR', + 'VARIADIC', + 'VERBOSE', + 'WHEN', + 'WHERE', + 'WITH', + 'XMLATTRIBUTES', + 'XMLCONCAT', + 'XMLELEMENT', + 'XMLFOREST', + 'XMLPARSE', + 'XMLPI', + 'XMLROOT', + 'XMLSERIALIZE', +)) + + +POSTGRESQL_NONRESERVED = set(( + 'A', + 'ABORT', + 'ABS', + 'ABSENT', + 'ABSOLUTE', + 'ACCESS', + 'ACCORDING', + 'ACTION', + 'ADA', + 'ADD', + 'ADMIN', + 'AFTER', + 'AGGREGATE', + 'ALIAS', + 'ALLOCATE', + 'ALSO', + 'ALTER', + 'ALWAYS', + 'ARE', + 'ARRAY_AGG', + 'ASENSITIVE', + 'ASSERTION', + 'ASSIGNMENT', + 'AT', + 'ATOMIC', + 'ATTRIBUTE', + 'ATTRIBUTES', + 'AVG', + 'BACKWARD', + 'BASE64', + 'BEFORE', + 'BEGIN', + 'BERNOULLI', + 'BIT_LENGTH', + 'BITVAR', + 'BLOB', + 'BOM', + 'BREADTH', + 'BY', + 'C', + 'CACHE', + 'CALL', + 'CALLED', + 'CARDINALITY', + 'CASCADE', + 'CASCADED', + 'CATALOG', + 'CATALOG_NAME', + 'CEIL', + 'CEILING', + 'CHAIN', + 'CHAR_LENGTH', + 'CHARACTER_LENGTH', + 'CHARACTER_SET_CATALOG', + 'CHARACTER_SET_NAME', + 'CHARACTER_SET_SCHEMA', + 'CHARACTERISTICS', + 'CHARACTERS', + 'CHECKED', + 'CHECKPOINT', + 'CLASS', + 'CLASS_ORIGIN', + 'CLOB', + 'CLOSE', + 
'CLUSTER', + 'COBOL', + 'COLLATION', + 'COLLATION_CATALOG', + 'COLLATION_NAME', + 'COLLATION_SCHEMA', + 'COLLECT', + 'COLUMN_NAME', + 'COLUMNS', + 'COMMAND_FUNCTION', + 'COMMAND_FUNCTION_CODE', + 'COMMENT', + 'COMMIT', + 'COMMITTED', + 'COMPLETION', + 'CONCURRENTLY', + 'CONDITION', + 'CONDITION_NUMBER', + 'CONFIGURATION', + 'CONNECT', + 'CONNECTION', + 'CONNECTION_NAME', + 'CONSTRAINT_CATALOG', + 'CONSTRAINT_NAME', + 'CONSTRAINT_SCHEMA', + 'CONSTRAINTS', + 'CONSTRUCTOR', + 'CONTAINS', + 'CONTENT', + 'CONTINUE', + 'CONVERSION', + 'CONVERT', + 'COPY', + 'CORR', + 'CORRESPONDING', + 'COST', + 'COUNT', + 'COVAR_POP', + 'COVAR_SAMP', + 'CREATEDB', + 'CREATEROLE', + 'CREATEUSER', + 'CSV', + 'CUBE', + 'CUME_DIST', + 'CURRENT', + 'CURRENT_DEFAULT_TRANSFORM_GROUP', + 'CURRENT_PATH', + 'CURRENT_TRANSFORM_GROUP_FOR_TYPE', + 'CURSOR', + 'CURSOR_NAME', + 'CYCLE', + 'DATA', + 'DATABASE', + 'DATE', + 'DATETIME_INTERVAL_CODE', + 'DATETIME_INTERVAL_PRECISION', + 'DAY', + 'DEALLOCATE', + 'DECLARE', + 'DEFAULTS', + 'DEFERRED', + 'DEFINED', + 'DEFINER', + 'DEGREE', + 'DELETE', + 'DELIMITER', + 'DELIMITERS', + 'DENSE_RANK', + 'DEPTH', + 'DEREF', + 'DERIVED', + 'DESCRIBE', + 'DESCRIPTOR', + 'DESTROY', + 'DESTRUCTOR', + 'DETERMINISTIC', + 'DIAGNOSTICS', + 'DICTIONARY', + 'DISABLE', + 'DISCARD', + 'DISCONNECT', + 'DISPATCH', + 'DOCUMENT', + 'DOMAIN', + 'DOUBLE', + 'DROP', + 'DYNAMIC', + 'DYNAMIC_FUNCTION', + 'DYNAMIC_FUNCTION_CODE', + 'EACH', + 'ELEMENT', + 'EMPTY', + 'ENABLE', + 'ENCODING', + 'ENCRYPTED', + 'END-EXEC', + 'ENUM', + 'EQUALS', + 'ESCAPE', + 'EVERY', + 'EXCEPTION', + 'EXCLUDE', + 'EXCLUDING', + 'EXCLUSIVE', + 'EXEC', + 'EXECUTE', + 'EXISTING', + 'EXP', + 'EXPLAIN', + 'EXTERNAL', + 'FAMILY', + 'FILTER', + 'FINAL', + 'FIRST', + 'FIRST_VALUE', + 'FLAG', + 'FLOOR', + 'FOLLOWING', + 'FORCE', + 'FORTRAN', + 'FORWARD', + 'FOUND', + 'FREE', + 'FUNCTION', + 'FUSION', + 'G', + 'GENERAL', + 'GENERATED', + 'GET', + 'GLOBAL', + 'GO', + 'GOTO', + 'GRANTED', + 'GROUPING', + 'HANDLER', + 
'HEADER', + 'HEX', + 'HIERARCHY', + 'HOLD', + 'HOST', + 'HOUR', +# 'ID', + 'IDENTITY', + 'IF', + 'IGNORE', + 'IMMEDIATE', + 'IMMUTABLE', + 'IMPLEMENTATION', + 'IMPLICIT', + 'INCLUDING', + 'INCREMENT', + 'INDENT', + 'INDEX', + 'INDEXES', + 'INDICATOR', + 'INFIX', + 'INHERIT', + 'INHERITS', + 'INITIALIZE', + 'INPUT', + 'INSENSITIVE', + 'INSERT', + 'INSTANCE', + 'INSTANTIABLE', + 'INSTEAD', + 'INTERSECTION', + 'INVOKER', + 'ISOLATION', + 'ITERATE', + 'K', + 'KEY', + 'KEY_MEMBER', + 'KEY_TYPE', + 'LAG', + 'LANCOMPILER', + 'LANGUAGE', + 'LARGE', + 'LAST', + 'LAST_VALUE', + 'LATERAL', + 'LC_COLLATE', + 'LC_CTYPE', + 'LEAD', + 'LENGTH', + 'LESS', + 'LEVEL', + 'LIKE_REGEX', + 'LISTEN', + 'LN', + 'LOAD', + 'LOCAL', + 'LOCATION', + 'LOCATOR', + 'LOCK', + 'LOGIN', + 'LOWER', + 'M', + 'MAP', + 'MAPPING', + 'MATCH', + 'MATCHED', + 'MAX', + 'MAX_CARDINALITY', + 'MAXVALUE', + 'MEMBER', + 'MERGE', + 'MESSAGE_LENGTH', + 'MESSAGE_OCTET_LENGTH', + 'MESSAGE_TEXT', + 'METHOD', + 'MIN', + 'MINUTE', + 'MINVALUE', + 'MOD', + 'MODE', + 'MODIFIES', + 'MODIFY', + 'MODULE', + 'MONTH', + 'MORE', + 'MOVE', + 'MULTISET', + 'MUMPS', +# 'NAME', + 'NAMES', + 'NAMESPACE', + 'NCLOB', + 'NESTING', + 'NEXT', + 'NFC', + 'NFD', + 'NFKC', + 'NFKD', + 'NIL', + 'NO', + 'NOCREATEDB', + 'NOCREATEROLE', + 'NOCREATEUSER', + 'NOINHERIT', + 'NOLOGIN', + 'NORMALIZE', + 'NORMALIZED', + 'NOSUPERUSER', + 'NOTHING', + 'NOTIFY', + 'NOWAIT', + 'NTH_VALUE', + 'NTILE', + 'NULLABLE', + 'NULLS', + 'NUMBER', + 'OBJECT', + 'OCCURRENCES_REGEX', + 'OCTET_LENGTH', + 'OCTETS', + 'OF', + 'OIDS', + 'OPEN', + 'OPERATION', + 'OPERATOR', + 'OPTION', + 'OPTIONS', + 'ORDERING', + 'ORDINALITY', + 'OTHERS', + 'OUTPUT', + 'OVER', + 'OVERRIDING', + 'OWNED', + 'OWNER', + 'P', + 'PAD', + 'PARAMETER', + 'PARAMETER_MODE', + 'PARAMETER_NAME', + 'PARAMETER_ORDINAL_POSITION', + 'PARAMETER_SPECIFIC_CATALOG', + 'PARAMETER_SPECIFIC_NAME', + 'PARAMETER_SPECIFIC_SCHEMA', + 'PARAMETERS', + 'PARSER', + 'PARTIAL', + 'PARTITION', + 'PASCAL', + 'PASSING', 
+# 'PASSWORD', + 'PATH', + 'PERCENT_RANK', + 'PERCENTILE_CONT', + 'PERCENTILE_DISC', + 'PLANS', + 'PLI', + 'POSITION_REGEX', + 'POSTFIX', + 'POWER', + 'PRECEDING', + 'PREFIX', + 'PREORDER', + 'PREPARE', + 'PREPARED', + 'PRESERVE', + 'PRIOR', + 'PRIVILEGES', + 'PROCEDURAL', + 'PROCEDURE', + 'PUBLIC', + 'QUOTE', + 'RANGE', + 'RANK', + 'READ', + 'READS', + 'REASSIGN', + 'RECHECK', + 'RECURSIVE', + 'REF', + 'REFERENCING', + 'REGR_AVGX', + 'REGR_AVGY', + 'REGR_COUNT', + 'REGR_INTERCEPT', + 'REGR_R2', + 'REGR_SLOPE', + 'REGR_SXX', + 'REGR_SXY', + 'REGR_SYY', + 'REINDEX', + 'RELATIVE', + 'RELEASE', + 'RENAME', + 'REPEATABLE', + 'REPLACE', + 'REPLICA', + 'RESET', + 'RESPECT', + 'RESTART', + 'RESTRICT', + 'RESULT', + 'RETURN', + 'RETURNED_CARDINALITY', + 'RETURNED_LENGTH', + 'RETURNED_OCTET_LENGTH', + 'RETURNED_SQLSTATE', + 'RETURNS', + 'REVOKE', +# 'ROLE', + 'ROLLBACK', + 'ROLLUP', + 'ROUTINE', + 'ROUTINE_CATALOG', + 'ROUTINE_NAME', + 'ROUTINE_SCHEMA', + 'ROW_COUNT', + 'ROW_NUMBER', + 'ROWS', + 'RULE', + 'SAVEPOINT', + 'SCALE', + 'SCHEMA', + 'SCHEMA_NAME', + 'SCOPE', + 'SCOPE_CATALOG', + 'SCOPE_NAME', + 'SCOPE_SCHEMA', + 'SCROLL', + 'SEARCH', + 'SECOND', + 'SECTION', + 'SECURITY', + 'SELF', + 'SENSITIVE', + 'SEQUENCE', + 'SERIALIZABLE', + 'SERVER', + 'SERVER_NAME', + 'SESSION', + 'SET', + 'SETS', + 'SHARE', + 'SHOW', + 'SIMPLE', + 'SIZE', + 'SOURCE', + 'SPACE', + 'SPECIFIC', + 'SPECIFIC_NAME', + 'SPECIFICTYPE', + 'SQL', + 'SQLCODE', + 'SQLERROR', + 'SQLEXCEPTION', + 'SQLSTATE', + 'SQLWARNING', + 'SQRT', + 'STABLE', + 'STANDALONE', + 'START', + 'STATE', + 'STATEMENT', + 'STATIC', + 'STATISTICS', + 'STDDEV_POP', + 'STDDEV_SAMP', + 'STDIN', + 'STDOUT', + 'STORAGE', + 'STRICT', + 'STRIP', + 'STRUCTURE', + 'STYLE', + 'SUBCLASS_ORIGIN', + 'SUBLIST', + 'SUBMULTISET', + 'SUBSTRING_REGEX', + 'SUM', + 'SUPERUSER', + 'SYSID', + 'SYSTEM', + 'SYSTEM_USER', + 'T', +# 'TABLE_NAME', + 'TABLESAMPLE', + 'TABLESPACE', + 'TEMP', + 'TEMPLATE', + 'TEMPORARY', + 'TERMINATE', + 'TEXT', + 'THAN', 
+ 'TIES', + 'TIMEZONE_HOUR', + 'TIMEZONE_MINUTE', + 'TOP_LEVEL_COUNT', + 'TRANSACTION', + 'TRANSACTION_ACTIVE', + 'TRANSACTIONS_COMMITTED', + 'TRANSACTIONS_ROLLED_BACK', + 'TRANSFORM', + 'TRANSFORMS', + 'TRANSLATE', + 'TRANSLATE_REGEX', + 'TRANSLATION', + 'TRIGGER', + 'TRIGGER_CATALOG', + 'TRIGGER_NAME', + 'TRIGGER_SCHEMA', + 'TRIM_ARRAY', + 'TRUNCATE', + 'TRUSTED', + 'TYPE', + 'UESCAPE', + 'UNBOUNDED', + 'UNCOMMITTED', + 'UNDER', + 'UNENCRYPTED', + 'UNKNOWN', + 'UNLISTEN', + 'UNNAMED', + 'UNNEST', + 'UNTIL', + 'UNTYPED', + 'UPDATE', + 'UPPER', + 'URI', + 'USAGE', + 'USER_DEFINED_TYPE_CATALOG', + 'USER_DEFINED_TYPE_CODE', + 'USER_DEFINED_TYPE_NAME', + 'USER_DEFINED_TYPE_SCHEMA', + 'VACUUM', + 'VALID', + 'VALIDATOR', + 'VALUE', + 'VAR_POP', + 'VAR_SAMP', + 'VARBINARY', + 'VARIABLE', + 'VARYING', + 'VERSION', + 'VIEW', + 'VOLATILE', + 'WHENEVER', + 'WHITESPACE', + 'WIDTH_BUCKET', + 'WINDOW', + 'WITHIN', + 'WITHOUT', + 'WORK', + 'WRAPPER', + 'WRITE', + 'XML', + 'XMLAGG', + 'XMLBINARY', + 'XMLCAST', + 'XMLCOMMENT', + 'XMLDECLARATION', + 'XMLDOCUMENT', + 'XMLEXISTS', + 'XMLITERATE', + 'XMLNAMESPACES', + 'XMLQUERY', + 'XMLSCHEMA', + 'XMLTABLE', + 'XMLTEXT', + 'XMLVALIDATE', + 'YEAR', + 'YES', + 'ZONE', +)) + +#Thanks villas +FIREBIRD = set(( + 'ABS', + 'ACTIVE', + 'ADMIN', + 'AFTER', + 'ASCENDING', + 'AUTO', + 'AUTODDL', + 'BASED', + 'BASENAME', + 'BASE_NAME', + 'BEFORE', + 'BIT_LENGTH', + 'BLOB', + 'BLOBEDIT', + 'BOOLEAN', + 'BOTH', + 'BUFFER', + 'CACHE', + 'CHAR_LENGTH', + 'CHARACTER_LENGTH', + 'CHECK_POINT_LEN', + 'CHECK_POINT_LENGTH', + 'CLOSE', + 'COMMITTED', + 'COMPILETIME', + 'COMPUTED', + 'CONDITIONAL', + 'CONNECT', + 'CONTAINING', + 'CROSS', + 'CSTRING', + 'CURRENT_CONNECTION', + 'CURRENT_ROLE', + 'CURRENT_TRANSACTION', + 'CURRENT_USER', + 'DATABASE', + 'DB_KEY', + 'DEBUG', + 'DESCENDING', + 'DISCONNECT', + 'DISPLAY', + 'DO', + 'ECHO', + 'EDIT', + 'ENTRY_POINT', + 'EVENT', + 'EXIT', + 'EXTERN', + 'FALSE', + 'FETCH', + 'FILE', + 'FILTER', + 'FREE_IT', + 
'FUNCTION', + 'GDSCODE', + 'GENERATOR', + 'GEN_ID', + 'GLOBAL', + 'GROUP_COMMIT_WAIT', + 'GROUP_COMMIT_WAIT_TIME', + 'HELP', + 'IF', + 'INACTIVE', + 'INDEX', + 'INIT', + 'INPUT_TYPE', + 'INSENSITIVE', + 'ISQL', + 'LC_MESSAGES', + 'LC_TYPE', + 'LEADING', + 'LENGTH', + 'LEV', + 'LOGFILE', + 'LOG_BUFFER_SIZE', + 'LOG_BUF_SIZE', + 'LONG', + 'LOWER', + 'MANUAL', + 'MAXIMUM', + 'MAXIMUM_SEGMENT', + 'MAX_SEGMENT', + 'MERGE', + 'MESSAGE', + 'MINIMUM', + 'MODULE_NAME', + 'NOAUTO', + 'NUM_LOG_BUFS', + 'NUM_LOG_BUFFERS', + 'OCTET_LENGTH', + 'OPEN', + 'OUTPUT_TYPE', + 'OVERFLOW', + 'PAGE', + 'PAGELENGTH', + 'PAGES', + 'PAGE_SIZE', + 'PARAMETER', +# 'PASSWORD', + 'PLAN', + 'POST_EVENT', + 'QUIT', + 'RAW_PARTITIONS', + 'RDB$DB_KEY', + 'RECORD_VERSION', + 'RECREATE', + 'RECURSIVE', + 'RELEASE', + 'RESERV', + 'RESERVING', + 'RETAIN', + 'RETURN', + 'RETURNING_VALUES', + 'RETURNS', +# 'ROLE', + 'ROW_COUNT', + 'ROWS', + 'RUNTIME', + 'SAVEPOINT', + 'SECOND', + 'SENSITIVE', + 'SHADOW', + 'SHARED', + 'SHELL', + 'SHOW', + 'SINGULAR', + 'SNAPSHOT', + 'SORT', + 'STABILITY', + 'START', + 'STARTING', + 'STARTS', + 'STATEMENT', + 'STATIC', + 'STATISTICS', + 'SUB_TYPE', + 'SUSPEND', + 'TERMINATOR', + 'TRAILING', + 'TRIGGER', + 'TRIM', + 'TRUE', + 'TYPE', + 'UNCOMMITTED', + 'UNKNOWN', + 'USING', + 'VARIABLE', + 'VERSION', + 'WAIT', + 'WEEKDAY', + 'WHILE', + 'YEARDAY', +)) +FIREBIRD_NONRESERVED = set(( + 'BACKUP', + 'BLOCK', + 'COALESCE', + 'COLLATION', + 'COMMENT', + 'DELETING', + 'DIFFERENCE', + 'IIF', + 'INSERTING', + 'LAST', + 'LEAVE', + 'LOCK', + 'NEXT', + 'NULLIF', + 'NULLS', + 'RESTART', + 'RETURNING', + 'SCALAR_ARRAY', + 'SEQUENCE', + 'STATEMENT', + 'UPDATING', + 'ABS', + 'ACCENT', + 'ACOS', + 'ALWAYS', + 'ASCII_CHAR', + 'ASCII_VAL', + 'ASIN', + 'ATAN', + 'ATAN2', + 'BACKUP', + 'BIN_AND', + 'BIN_OR', + 'BIN_SHL', + 'BIN_SHR', + 'BIN_XOR', + 'BLOCK', + 'CEIL', + 'CEILING', + 'COLLATION', + 'COMMENT', + 'COS', + 'COSH', + 'COT', + 'DATEADD', + 'DATEDIFF', + 'DECODE', + 'DIFFERENCE', + 
'EXP', + 'FLOOR', + 'GEN_UUID', + 'GENERATED', + 'HASH', + 'IIF', + 'LIST', + 'LN', + 'LOG', + 'LOG10', + 'LPAD', + 'MATCHED', + 'MATCHING', + 'MAXVALUE', + 'MILLISECOND', + 'MINVALUE', + 'MOD', + 'NEXT', + 'OVERLAY', + 'PAD', + 'PI', + 'PLACING', + 'POWER', + 'PRESERVE', + 'RAND', + 'REPLACE', + 'RESTART', + 'RETURNING', + 'REVERSE', + 'ROUND', + 'RPAD', + 'SCALAR_ARRAY', + 'SEQUENCE', + 'SIGN', + 'SIN', + 'SINH', + 'SPACE', + 'SQRT', + 'TAN', + 'TANH', + 'TEMPORARY', + 'TRUNC', + 'WEEK', +)) + +# Thanks Jonathan Lundell +MYSQL = set(( + 'ACCESSIBLE', + 'ADD', + 'ALL', + 'ALTER', + 'ANALYZE', + 'AND', + 'AS', + 'ASC', + 'ASENSITIVE', + 'BEFORE', + 'BETWEEN', + 'BIGINT', + 'BINARY', + 'BLOB', + 'BOTH', + 'BY', + 'CALL', + 'CASCADE', + 'CASE', + 'CHANGE', + 'CHAR', + 'CHARACTER', + 'CHECK', + 'COLLATE', + 'COLUMN', + 'CONDITION', + 'CONSTRAINT', + 'CONTINUE', + 'CONVERT', + 'CREATE', + 'CROSS', + 'CURRENT_DATE', + 'CURRENT_TIME', + 'CURRENT_TIMESTAMP', + 'CURRENT_USER', + 'CURSOR', + 'DATABASE', + 'DATABASES', + 'DAY_HOUR', + 'DAY_MICROSECOND', + 'DAY_MINUTE', + 'DAY_SECOND', + 'DEC', + 'DECIMAL', + 'DECLARE', + 'DEFAULT', + 'DELAYED', + 'DELETE', + 'DESC', + 'DESCRIBE', + 'DETERMINISTIC', + 'DISTINCT', + 'DISTINCTROW', + 'DIV', + 'DOUBLE', + 'DROP', + 'DUAL', + 'EACH', + 'ELSE', + 'ELSEIF', + 'ENCLOSED', + 'ESCAPED', + 'EXISTS', + 'EXIT', + 'EXPLAIN', + 'FALSE', + 'FETCH', + 'FLOAT', + 'FLOAT4', + 'FLOAT8', + 'FOR', + 'FORCE', + 'FOREIGN', + 'FROM', + 'FULLTEXT', + 'GRANT', + 'GROUP', + 'HAVING', + 'HIGH_PRIORITY', + 'HOUR_MICROSECOND', + 'HOUR_MINUTE', + 'HOUR_SECOND', + 'IF', + 'IGNORE', + 'IGNORE_SERVER_IDS', + 'IGNORE_SERVER_IDS', + 'IN', + 'INDEX', + 'INFILE', + 'INNER', + 'INOUT', + 'INSENSITIVE', + 'INSERT', + 'INT', + 'INT1', + 'INT2', + 'INT3', + 'INT4', + 'INT8', + 'INTEGER', + 'INTERVAL', + 'INTO', + 'IS', + 'ITERATE', + 'JOIN', + 'KEY', + 'KEYS', + 'KILL', + 'LEADING', + 'LEAVE', + 'LEFT', + 'LIKE', + 'LIMIT', + 'LINEAR', + 'LINES', + 'LOAD', + 
'LOCALTIME', + 'LOCALTIMESTAMP', + 'LOCK', + 'LONG', + 'LONGBLOB', + 'LONGTEXT', + 'LOOP', + 'LOW_PRIORITY', + 'MASTER_HEARTBEAT_PERIOD', + 'MASTER_HEARTBEAT_PERIOD', + 'MASTER_SSL_VERIFY_SERVER_CERT', + 'MATCH', + 'MAXVALUE', + 'MAXVALUE', + 'MEDIUMBLOB', + 'MEDIUMINT', + 'MEDIUMTEXT', + 'MIDDLEINT', + 'MINUTE_MICROSECOND', + 'MINUTE_SECOND', + 'MOD', + 'MODIFIES', + 'NATURAL', + 'NO_WRITE_TO_BINLOG', + 'NOT', + 'NULL', + 'NUMERIC', + 'ON', + 'OPTIMIZE', + 'OPTION', + 'OPTIONALLY', + 'OR', + 'ORDER', + 'OUT', + 'OUTER', + 'OUTFILE', + 'PRECISION', + 'PRIMARY', + 'PROCEDURE', + 'PURGE', + 'RANGE', + 'READ', + 'READ_WRITE', + 'READS', + 'REAL', + 'REFERENCES', + 'REGEXP', + 'RELEASE', + 'RENAME', + 'REPEAT', + 'REPLACE', + 'REQUIRE', + 'RESIGNAL', + 'RESIGNAL', + 'RESTRICT', + 'RETURN', + 'REVOKE', + 'RIGHT', + 'RLIKE', + 'SCHEMA', + 'SCHEMAS', + 'SECOND_MICROSECOND', + 'SELECT', + 'SENSITIVE', + 'SEPARATOR', + 'SET', + 'SHOW', + 'SIGNAL', + 'SIGNAL', + 'SMALLINT', + 'SPATIAL', + 'SPECIFIC', + 'SQL', + 'SQL_BIG_RESULT', + 'SQL_CALC_FOUND_ROWS', + 'SQL_SMALL_RESULT', + 'SQLEXCEPTION', + 'SQLSTATE', + 'SQLWARNING', + 'SSL', + 'STARTING', + 'STRAIGHT_JOIN', + 'TABLE', + 'TERMINATED', + 'THEN', + 'TINYBLOB', + 'TINYINT', + 'TINYTEXT', + 'TO', + 'TRAILING', + 'TRIGGER', + 'TRUE', + 'UNDO', + 'UNION', + 'UNIQUE', + 'UNLOCK', + 'UNSIGNED', + 'UPDATE', + 'USAGE', + 'USE', + 'USING', + 'UTC_DATE', + 'UTC_TIME', + 'UTC_TIMESTAMP', + 'VALUES', + 'VARBINARY', + 'VARCHAR', + 'VARCHARACTER', + 'VARYING', + 'WHEN', + 'WHERE', + 'WHILE', + 'WITH', + 'WRITE', + 'XOR', + 'YEAR_MONTH', + 'ZEROFILL', +)) + +MSSQL = set(( + 'ADD', + 'ALL', + 'ALTER', + 'AND', + 'ANY', + 'AS', + 'ASC', + 'AUTHORIZATION', + 'BACKUP', + 'BEGIN', + 'BETWEEN', + 'BREAK', + 'BROWSE', + 'BULK', + 'BY', + 'CASCADE', + 'CASE', + 'CHECK', + 'CHECKPOINT', + 'CLOSE', + 'CLUSTERED', + 'COALESCE', + 'COLLATE', + 'COLUMN', + 'COMMIT', + 'COMPUTE', + 'CONSTRAINT', + 'CONTAINS', + 'CONTAINSTABLE', + 'CONTINUE', + 
'CONVERT', + 'CREATE', + 'CROSS', + 'CURRENT', + 'CURRENT_DATE', + 'CURRENT_TIME', + 'CURRENT_TIMESTAMP', + 'CURRENT_USER', + 'CURSOR', + 'DATABASE', + 'DBCC', + 'DEALLOCATE', + 'DECLARE', + 'DEFAULT', + 'DELETE', + 'DENY', + 'DESC', + 'DISK', + 'DISTINCT', + 'DISTRIBUTED', + 'DOUBLE', + 'DROP', + 'DUMMY', + 'DUMP', + 'ELSE', + 'END', + 'ERRLVL', + 'ESCAPE', + 'EXCEPT', + 'EXEC', + 'EXECUTE', + 'EXISTS', + 'EXIT', + 'FETCH', + 'FILE', + 'FILLFACTOR', + 'FOR', + 'FOREIGN', + 'FREETEXT', + 'FREETEXTTABLE', + 'FROM', + 'FULL', + 'FUNCTION', + 'GOTO', + 'GRANT', + 'GROUP', + 'HAVING', + 'HOLDLOCK', + 'IDENTITY', + 'IDENTITY_INSERT', + 'IDENTITYCOL', + 'IF', + 'IN', + 'INDEX', + 'INNER', + 'INSERT', + 'INTERSECT', + 'INTO', + 'IS', + 'JOIN', + 'KEY', + 'KILL', + 'LEFT', + 'LIKE', + 'LINENO', + 'LOAD', + 'NATIONAL ', + 'NOCHECK', + 'NONCLUSTERED', + 'NOT', + 'NULL', + 'NULLIF', + 'OF', + 'OFF', + 'OFFSETS', + 'ON', + 'OPEN', + 'OPENDATASOURCE', + 'OPENQUERY', + 'OPENROWSET', + 'OPENXML', + 'OPTION', + 'OR', + 'ORDER', + 'OUTER', + 'OVER', + 'PERCENT', + 'PLAN', + 'PRECISION', + 'PRIMARY', + 'PRINT', + 'PROC', + 'PROCEDURE', + 'PUBLIC', + 'RAISERROR', + 'READ', + 'READTEXT', + 'RECONFIGURE', + 'REFERENCES', + 'REPLICATION', + 'RESTORE', + 'RESTRICT', + 'RETURN', + 'REVOKE', + 'RIGHT', + 'ROLLBACK', + 'ROWCOUNT', + 'ROWGUIDCOL', + 'RULE', + 'SAVE', + 'SCHEMA', + 'SELECT', + 'SESSION_USER', + 'SET', + 'SETUSER', + 'SHUTDOWN', + 'SOME', + 'STATISTICS', + 'SYSTEM_USER', + 'TABLE', + 'TEXTSIZE', + 'THEN', + 'TO', + 'TOP', + 'TRAN', + 'TRANSACTION', + 'TRIGGER', + 'TRUNCATE', + 'TSEQUAL', + 'UNION', + 'UNIQUE', + 'UPDATE', + 'UPDATETEXT', + 'USE', + 'USER', + 'VALUES', + 'VARYING', + 'VIEW', + 'WAITFOR', + 'WHEN', + 'WHERE', + 'WHILE', + 'WITH', + 'WRITETEXT', +)) + +ORACLE = set(( + 'ACCESS', + 'ADD', + 'ALL', + 'ALTER', + 'AND', + 'ANY', + 'AS', + 'ASC', + 'AUDIT', + 'BETWEEN', + 'BY', + 'CHAR', + 'CHECK', + 'CLUSTER', + 'COLUMN', + 'COMMENT', + 'COMPRESS', + 'CONNECT', + 
'CREATE', + 'CURRENT', + 'DATE', + 'DECIMAL', + 'DEFAULT', + 'DELETE', + 'DESC', + 'DISTINCT', + 'DROP', + 'ELSE', + 'EXCLUSIVE', + 'EXISTS', + 'FILE', + 'FLOAT', + 'FOR', + 'FROM', + 'GRANT', + 'GROUP', + 'HAVING', + 'IDENTIFIED', + 'IMMEDIATE', + 'IN', + 'INCREMENT', + 'INDEX', + 'INITIAL', + 'INSERT', + 'INTEGER', + 'INTERSECT', + 'INTO', + 'IS', + 'LEVEL', + 'LIKE', + 'LOCK', + 'LONG', + 'MAXEXTENTS', + 'MINUS', + 'MLSLABEL', + 'MODE', + 'MODIFY', + 'NOAUDIT', + 'NOCOMPRESS', + 'NOT', + 'NOWAIT', + 'NULL', + 'NUMBER', + 'OF', + 'OFFLINE', + 'ON', + 'ONLINE', + 'OPTION', + 'OR', + 'ORDER', + 'PCTFREE', + 'PRIOR', + 'PRIVILEGES', + 'PUBLIC', + 'RAW', + 'RENAME', + 'RESOURCE', + 'REVOKE', + 'ROW', + 'ROWID', + 'ROWNUM', + 'ROWS', + 'SELECT', + 'SESSION', + 'SET', + 'SHARE', + 'SIZE', + 'SMALLINT', + 'START', + 'SUCCESSFUL', + 'SYNONYM', + 'SYSDATE', + 'TABLE', + 'THEN', + 'TO', + 'TRIGGER', + 'UID', + 'UNION', + 'UNIQUE', + 'UPDATE', + 'USER', + 'VALIDATE', + 'VALUES', + 'VARCHAR', + 'VARCHAR2', + 'VIEW', + 'WHENEVER', + 'WHERE', + 'WITH', +)) + +SQLITE = set(( + 'ABORT', + 'ACTION', + 'ADD', + 'AFTER', + 'ALL', + 'ALTER', + 'ANALYZE', + 'AND', + 'AS', + 'ASC', + 'ATTACH', + 'AUTOINCREMENT', + 'BEFORE', + 'BEGIN', + 'BETWEEN', + 'BY', + 'CASCADE', + 'CASE', + 'CAST', + 'CHECK', + 'COLLATE', + 'COLUMN', + 'COMMIT', + 'CONFLICT', + 'CONSTRAINT', + 'CREATE', + 'CROSS', + 'CURRENT_DATE', + 'CURRENT_TIME', + 'CURRENT_TIMESTAMP', + 'DATABASE', + 'DEFAULT', + 'DEFERRABLE', + 'DEFERRED', + 'DELETE', + 'DESC', + 'DETACH', + 'DISTINCT', + 'DROP', + 'EACH', + 'ELSE', + 'END', + 'ESCAPE', + 'EXCEPT', + 'EXCLUSIVE', + 'EXISTS', + 'EXPLAIN', + 'FAIL', + 'FOR', + 'FOREIGN', + 'FROM', + 'FULL', + 'GLOB', + 'GROUP', + 'HAVING', + 'IF', + 'IGNORE', + 'IMMEDIATE', + 'IN', + 'INDEX', + 'INDEXED', + 'INITIALLY', + 'INNER', + 'INSERT', + 'INSTEAD', + 'INTERSECT', + 'INTO', + 'IS', + 'ISNULL', + 'JOIN', + 'KEY', + 'LEFT', + 'LIKE', + 'LIMIT', + 'MATCH', + 'NATURAL', + 'NO', + 'NOT', + 
'NOTNULL', + 'NULL', + 'OF', + 'OFFSET', + 'ON', + 'OR', + 'ORDER', + 'OUTER', + 'PLAN', + 'PRAGMA', + 'PRIMARY', + 'QUERY', + 'RAISE', + 'REFERENCES', + 'REGEXP', + 'REINDEX', + 'RELEASE', + 'RENAME', + 'REPLACE', + 'RESTRICT', + 'RIGHT', + 'ROLLBACK', + 'ROW', + 'SAVEPOINT', + 'SELECT', + 'SET', + 'TABLE', + 'TEMP', + 'TEMPORARY', + 'THEN', + 'TO', + 'TRANSACTION', + 'TRIGGER', + 'UNION', + 'UNIQUE', + 'UPDATE', + 'USING', + 'VACUUM', + 'VALUES', + 'VIEW', + 'VIRTUAL', + 'WHEN', + 'WHERE', +)) + +# remove from here when you add a list. +JDBCSQLITE = SQLITE +DB2 = INFORMIX = INGRES = JDBCPOSTGRESQL = COMMON + +ADAPTERS = { + 'sqlite': SQLITE, + 'mysql': MYSQL, + 'postgres': POSTGRESQL, + 'postgres_nonreserved': POSTGRESQL_NONRESERVED, + 'oracle': ORACLE, + 'mssql': MSSQL, + 'mssql2': MSSQL, + 'db2': DB2, + 'informix': INFORMIX, + 'firebird': FIREBIRD, + 'firebird_embedded': FIREBIRD, + 'firebird_nonreserved': FIREBIRD_NONRESERVED, + 'ingres': INGRES, + 'ingresu': INGRES, + 'jdbc:sqlite': JDBCSQLITE, + 'jdbc:postgres': JDBCPOSTGRESQL, + 'common': COMMON, +} + +ADAPTERS['all'] = reduce(lambda a,b:a.union(b),(x for x in ADAPTERS.values())) + + + ADDED gluon/restricted.py Index: gluon/restricted.py ================================================================== --- /dev/null +++ gluon/restricted.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import sys +import cPickle +import traceback +import types +import os +import datetime +import logging + +from utils import web2py_uuid +from storage import Storage +from http import HTTP +from html import BEAUTIFY + +logger = logging.getLogger("web2py") + +__all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2'] + +class TicketStorage(Storage): + + """ + defines the ticket object and the default values of its 
members (None) + """ + + def __init__( + self, + db=None, + tablename='web2py_ticket' + ): + self.db = db + self.tablename = tablename + + def store(self, request, ticket_id, ticket_data): + """ + stores the ticket. It will figure out if this must be on disk or in db + """ + if self.db: + self._store_in_db(request, ticket_id, ticket_data) + else: + self._store_on_disk(request, ticket_id, ticket_data) + + def _store_in_db(self, request, ticket_id, ticket_data): + table = self._get_table(self.db, self.tablename, request.application) + table.insert(ticket_id=ticket_id, + ticket_data=cPickle.dumps(ticket_data), + created_datetime=request.now) + logger.error('In FILE: %(layer)s\n\n%(traceback)s\n' % ticket_data) + + def _store_on_disk(self, request, ticket_id, ticket_data): + ef = self._error_file(request, ticket_id, 'wb') + try: + cPickle.dump(ticket_data, ef) + finally: + ef.close() + + def _error_file(self, request, ticket_id, mode, app=None): + root = request.folder + if app: + root = os.path.join(os.path.join(root, '..'), app) + errors_folder = os.path.abspath(os.path.join(root, 'errors'))#.replace('\\', '/') + return open(os.path.join(errors_folder, ticket_id), mode) + + def _get_table(self, db, tablename, app): + tablename = tablename + '_' + app + table = db.get(tablename, None) + if table is None: + db.rollback() # not necessary but one day + # any app may store tickets on DB + table = db.define_table( + tablename, + db.Field('ticket_id', length=100), + db.Field('ticket_data', 'text'), + db.Field('created_datetime', 'datetime'), + ) + return table + + def load( + self, + request, + app, + ticket_id, + ): + if not self.db: + ef = self._error_file(request, ticket_id, 'rb', app) + try: + return cPickle.load(ef) + finally: + ef.close() + table = self._get_table(self.db, self.tablename, app) + rows = self.db(table.ticket_id == ticket_id).select() + if rows: + return cPickle.loads(rows[0].ticket_data) + return None + + +class RestrictedError(Exception): + """ + class 
used to wrap an exception that occurs in the restricted environment + below. the traceback is used to log the exception and generate a ticket. + """ + + def __init__( + self, + layer='', + code='', + output='', + environment=None, + ): + """ + layer here is some description of where in the system the exception + occurred. + """ + if environment is None: environment = {} + self.layer = layer + self.code = code + self.output = output + self.environment = environment + if layer: + try: + self.traceback = traceback.format_exc() + except: + self.traceback = 'no traceback because template parting error' + try: + self.snapshot = snapshot(context=10,code=code, + environment=self.environment) + except: + self.snapshot = {} + else: + self.traceback = '(no error)' + self.snapshot = {} + + def log(self, request): + """ + logs the exception. + """ + + try: + d = { + 'layer': str(self.layer), + 'code': str(self.code), + 'output': str(self.output), + 'traceback': str(self.traceback), + 'snapshot': self.snapshot, + } + ticket_storage = TicketStorage(db=request.tickets_db) + ticket_storage.store(request, request.uuid.split('/',1)[1], d) + return request.uuid + except: + logger.error(self.traceback) + return None + + + def load(self, request, app, ticket_id): + """ + loads a logged exception. + """ + ticket_storage = TicketStorage(db=request.tickets_db) + d = ticket_storage.load(request, app, ticket_id) + + self.layer = d['layer'] + self.code = d['code'] + self.output = d['output'] + self.traceback = d['traceback'] + self.snapshot = d.get('snapshot') + + +def compile2(code,layer): + """ + The +'\n' is necessary else compile fails when code ends in a comment. + """ + return compile(code.rstrip().replace('\r\n','\n')+'\n', layer, 'exec') + +def restricted(code, environment=None, layer='Unknown'): + """ + runs code in environment and returns the output. if an exception occurs + in code it raises a RestrictedError containing the traceback. 
layer is + passed to RestrictedError to identify where the error occurred. + """ + if environment is None: environment = {} + environment['__file__'] = layer + try: + if type(code) == types.CodeType: + ccode = code + else: + ccode = compile2(code,layer) + exec ccode in environment + except HTTP: + raise + except Exception, error: + # XXX Show exception in Wing IDE if running in debugger + if __debug__ and 'WINGDB_ACTIVE' in os.environ: + etype, evalue, tb = sys.exc_info() + sys.excepthook(etype, evalue, tb) + raise RestrictedError(layer, code, '', environment) + +def snapshot(info=None, context=5, code=None, environment=None): + """Return a dict describing a given traceback (based on cgitb.text).""" + import os, types, time, traceback, linecache, inspect, pydoc, cgitb + + # if no exception info given, get current: + etype, evalue, etb = info or sys.exc_info() + + if type(etype) is types.ClassType: + etype = etype.__name__ + + # create a snapshot dict with some basic information + s = {} + s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + s['date'] = time.ctime(time.time()) + + # start to process frames + records = inspect.getinnerframes(etb, context) + s['frames'] = [] + for frame, file, lnum, func, lines, index in records: + file = file and os.path.abspath(file) or '?' 
+ args, varargs, varkw, locals = inspect.getargvalues(frame) + call = '' + if func != '?': + call = inspect.formatargvalues(args, varargs, varkw, locals, + formatvalue=lambda value: '=' + pydoc.text.repr(value)) + + # basic frame information + f = {'file': file, 'func': func, 'call': call, 'lines': {}, 'lnum': lnum} + + highlight = {} + def reader(lnum=[lnum]): + highlight[lnum[0]] = 1 + try: return linecache.getline(file, lnum[0]) + finally: lnum[0] += 1 + vars = cgitb.scanvars(reader, frame, locals) + + # if it is a view, replace with generated code + if file.endswith('html'): + lmin = lnum>context and (lnum-context) or 0 + lmax = lnum+context + lines = code.split("\n")[lmin:lmax] + index = min(context, lnum) - 1 + + if index is not None: + i = lnum - index + for line in lines: + f['lines'][i] = line.rstrip() + i += 1 + + # dump local variables (referenced in current line only) + f['dump'] = {} + for name, where, value in vars: + if name in f['dump']: continue + if value is not cgitb.__UNDEF__: + if where == 'global': name = 'global ' + name + elif where != 'local': name = where + name.split('.')[-1] + f['dump'][name] = pydoc.text.repr(value) + else: + f['dump'][name] = 'undefined' + + s['frames'].append(f) + + # add exception type, value and attributes + s['etype'] = str(etype) + s['evalue'] = str(evalue) + s['exception'] = {} + if isinstance(evalue, BaseException): + for name in dir(evalue): + # prevent py26 DeprecatedWarning: + if name!='message' or sys.version_info<(2.6): + value = pydoc.text.repr(getattr(evalue, name)) + s['exception'][name] = value + + # add all local values (of last frame) to the snapshot + s['locals'] = {} + for name, value in locals.items(): + s['locals'][name] = pydoc.text.repr(value) + + # add web2py environment variables + for k,v in environment.items(): + if k in ('request', 'response', 'session'): + s[k] = BEAUTIFY(v) + + return s + + + ADDED gluon/restricted.pyc Index: gluon/restricted.pyc 
================================================================== --- /dev/null +++ gluon/restricted.pyc cannot compute difference between binary files ADDED gluon/rewrite.py Index: gluon/rewrite.py ================================================================== --- /dev/null +++ gluon/rewrite.py @@ -0,0 +1,1241 @@ +#!/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +gluon.rewrite parses incoming URLs and formats outgoing URLs for gluon.html.URL. + +In addition, it rewrites both incoming and outgoing URLs based on the (optional) user-supplied routes.py, +which also allows for rewriting of certain error messages. + +routes.py supports two styles of URL rewriting, depending on whether 'routers' is defined. +Refer to router.example.py and routes.example.py for additional documentation. + +""" + +import os +import re +import logging +import traceback +import threading +import urllib +from storage import Storage, List +from http import HTTP +from fileutils import abspath, read_file +from settings import global_settings + +logger = logging.getLogger('web2py.rewrite') + +thread = threading.local() # thread-local storage for routing parameters + +def _router_default(): + "return new copy of default base router" + router = Storage( + default_application = 'init', + applications = 'ALL', + default_controller = 'default', + controllers = 'DEFAULT', + default_function = 'index', + functions = dict(), + default_language = None, + languages = None, + root_static = ['favicon.ico', 'robots.txt'], + domains = None, + exclusive_domain = False, + map_hyphen = False, + acfe_match = r'\w+$', # legal app/ctlr/fcn/ext + file_match = r'(\w+[-=./]?)+$', # legal file (path) name + args_match = r'([\w@ -]+[=.]?)*$', # legal arg in args + ) + return router + +def _params_default(app=None): + "return new copy of default 
parameters" + p = Storage() + p.name = app or "BASE" + p.default_application = app or "init" + p.default_controller = "default" + p.default_function = "index" + p.routes_app = [] + p.routes_in = [] + p.routes_out = [] + p.routes_onerror = [] + p.routes_apps_raw = [] + p.error_handler = None + p.error_message = '<html><body><h1>%s</h1></body></html>' + p.error_message_ticket = \ + '<html><body><h1>Internal error</h1>Ticket issued: <a href="/admin/default/ticket/%(ticket)s" target="_blank">%(ticket)s</a></body><!-- this is junk text else IE does not display the page: '+('x'*512)+' //--></html>' + p.routers = None + return p + +params_apps = dict() +params = _params_default(app=None) # regex rewrite parameters +thread.routes = params # default to base regex rewrite parameters +routers = None + +ROUTER_KEYS = set(('default_application', 'applications', 'default_controller', 'controllers', + 'default_function', 'functions', 'default_language', 'languages', + 'domain', 'domains', 'root_static', 'path_prefix', + 'exclusive_domain', 'map_hyphen', 'map_static', + 'acfe_match', 'file_match', 'args_match')) + +ROUTER_BASE_KEYS = set(('applications', 'default_application', 'domains', 'path_prefix')) + +# The external interface to rewrite consists of: +# +# load: load routing configuration file(s) +# url_in: parse and rewrite incoming URL +# url_out: assemble and rewrite outgoing URL +# +# thread.routes.default_application +# thread.routes.error_message +# thread.routes.error_message_ticket +# thread.routes.try_redirect_on_error +# thread.routes.error_handler +# +# filter_url: helper for doctest & unittest +# filter_err: helper for doctest & unittest +# regex_filter_out: doctest + +def url_in(request, environ): + "parse and rewrite incoming URL" + if routers: + return map_url_in(request, environ) + return regex_url_in(request, environ) + +def url_out(request, env, application, controller, function, args, other, scheme, host, port): + "assemble and rewrite outgoing URL" + if 
routers: + acf = map_url_out(request, env, application, controller, function, args, other, scheme, host, port) + url = '%s%s' % (acf, other) + else: + url = '/%s/%s/%s%s' % (application, controller, function, other) + url = regex_filter_out(url, env) + # + # fill in scheme and host if absolute URL is requested + # scheme can be a string, eg 'http', 'https', 'ws', 'wss' + # + if scheme or port is not None: + if host is None: # scheme or port implies host + host = True + if not scheme or scheme is True: + if request and request.env: + scheme = request.env.get('WSGI_URL_SCHEME', 'http').lower() + else: + scheme = 'http' # some reasonable default in case we need it + if host is not None: + if host is True: + host = request.env.http_host + if host: + if port is None: + port = '' + else: + port = ':%s' % port + url = '%s://%s%s%s' % (scheme, host, port, url) + return url + +def try_rewrite_on_error(http_response, request, environ, ticket=None): + """ + called from main.wsgibase to rewrite the http response. + """ + status = int(str(http_response.status).split()[0]) + if status>=399 and thread.routes.routes_onerror: + keys=set(('%s/%s' % (request.application, status), + '%s/*' % (request.application), + '*/%s' % (status), + '*/*')) + for (key,uri) in thread.routes.routes_onerror: + if key in keys: + if uri == '!': + # do nothing! + return http_response, environ + elif '?' 
in uri: + path_info, query_string = uri.split('?',1) + query_string += '&' + else: + path_info, query_string = uri, '' + query_string += \ + 'code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ + (status,ticket,request.env.request_uri,request.url) + if uri.startswith('http://') or uri.startswith('https://'): + # make up a response + url = path_info+'?'+query_string + message = 'You are being redirected <a href="%s">here</a>' + return HTTP(303, message % url, Location=url), environ + elif path_info!=environ['PATH_INFO']: + # rewrite request, call wsgibase recursively, avoid loop + environ['PATH_INFO'] = path_info + environ['QUERY_STRING'] = query_string + return None, environ + # do nothing! + return http_response, environ + +def try_redirect_on_error(http_object, request, ticket=None): + "called from main.wsgibase to rewrite the http response" + status = int(str(http_object.status).split()[0]) + if status>399 and thread.routes.routes_onerror: + keys=set(('%s/%s' % (request.application, status), + '%s/*' % (request.application), + '*/%s' % (status), + '*/*')) + for (key,redir) in thread.routes.routes_onerror: + if key in keys: + if redir == '!': + break + elif '?' in redir: + url = '%s&code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ + (redir,status,ticket,request.env.request_uri,request.url) + else: + url = '%s?code=%s&ticket=%s&requested_uri=%s&request_url=%s' % \ + (redir,status,ticket,request.env.request_uri,request.url) + return HTTP(303, + 'You are being redirected <a href="%s">here</a>' % url, + Location=url) + return http_object + + +def load(routes='routes.py', app=None, data=None, rdict=None): + """ + load: read (if file) and parse routes + store results in params + (called from main.py at web2py initialization time) + If data is present, it's used instead of the routes.py contents. 
+ If rdict is present, it must be a dict to be used for routers (unit test) + """ + global params + global routers + if app is None: + # reinitialize + global params_apps + params_apps = dict() + params = _params_default(app=None) # regex rewrite parameters + thread.routes = params # default to base regex rewrite parameters + routers = None + + if isinstance(rdict, dict): + symbols = dict(routers=rdict) + path = 'rdict' + else: + if data is not None: + path = 'routes' + else: + if app is None: + path = abspath(routes) + else: + path = abspath('applications', app, routes) + if not os.path.exists(path): + return + data = read_file(path).replace('\r\n','\n') + + symbols = {} + try: + exec (data + '\n') in symbols + except SyntaxError, e: + logger.error( + '%s has a syntax error and will not be loaded\n' % path + + traceback.format_exc()) + raise e + + p = _params_default(app) + + for sym in ('routes_app', 'routes_in', 'routes_out'): + if sym in symbols: + for (k, v) in symbols[sym]: + p[sym].append(compile_regex(k, v)) + for sym in ('routes_onerror', 'routes_apps_raw', + 'error_handler','error_message', 'error_message_ticket', + 'default_application','default_controller', 'default_function'): + if sym in symbols: + p[sym] = symbols[sym] + if 'routers' in symbols: + p.routers = Storage(symbols['routers']) + for key in p.routers: + if isinstance(p.routers[key], dict): + p.routers[key] = Storage(p.routers[key]) + + if app is None: + params = p # install base rewrite parameters + thread.routes = params # install default as current routes + # + # create the BASE router if routers in use + # + routers = params.routers # establish routers if present + if isinstance(routers, dict): + routers = Storage(routers) + if routers is not None: + router = _router_default() + if routers.BASE: + router.update(routers.BASE) + routers.BASE = router + + # scan each app in applications/ + # create a router, if routers are in use + # parse the app-specific routes.py if present + # + all_apps 
= [] + for appname in [app for app in os.listdir(abspath('applications')) if not app.startswith('.')]: + if os.path.isdir(abspath('applications', appname)) and \ + os.path.isdir(abspath('applications', appname, 'controllers')): + all_apps.append(appname) + if routers: + router = Storage(routers.BASE) # new copy + if appname in routers: + for key in routers[appname].keys(): + if key in ROUTER_BASE_KEYS: + raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, appname) + router.update(routers[appname]) + routers[appname] = router + if os.path.exists(abspath('applications', appname, routes)): + load(routes, appname) + + if routers: + load_routers(all_apps) + + else: # app + params_apps[app] = p + if routers and p.routers: + if app in p.routers: + routers[app].update(p.routers[app]) + + logger.debug('URL rewrite is on. configuration in %s' % path) + + +regex_at = re.compile(r'(?<!\\)\$[a-zA-Z]\w*') +regex_anything = re.compile(r'(?<!\\)\$anything') + +def compile_regex(k, v): + """ + Preprocess and compile the regular expressions in routes_app/in/out + + The resulting regex will match a pattern of the form: + + [remote address]:[protocol]://[host]:[method] [path] + + We allow abbreviated regexes on input; here we try to complete them. 
+ """ + k0 = k # original k for error reporting + # bracket regex in ^...$ if not already done + if not k[0] == '^': + k = '^%s' % k + if not k[-1] == '$': + k = '%s$' % k + # if there are no :-separated parts, prepend a catch-all for the IP address + if k.find(':') < 0: + # k = '^.*?:%s' % k[1:] + k = '^.*?:https?://[^:/]+:[a-z]+ %s' % k[1:] + # if there's no ://, provide a catch-all for the protocol, host & method + if k.find('://') < 0: + i = k.find(':/') + if i < 0: + raise SyntaxError, "routes pattern syntax error: path needs leading '/' [%s]" % k0 + k = r'%s:https?://[^:/]+:[a-z]+ %s' % (k[:i], k[i+1:]) + # $anything -> ?P<anything>.* + for item in regex_anything.findall(k): + k = k.replace(item, '(?P<anything>.*)') + # $a (etc) -> ?P<a>\w+ + for item in regex_at.findall(k): + k = k.replace(item, r'(?P<%s>\w+)' % item[1:]) + # same for replacement pattern, but with \g + for item in regex_at.findall(v): + v = v.replace(item, r'\g<%s>' % item[1:]) + return (re.compile(k, re.DOTALL), v) + +def load_routers(all_apps): + "load-time post-processing of routers" + + for app in routers.keys(): + # initialize apps with routers that aren't present, on behalf of unit tests + if app not in all_apps: + all_apps.append(app) + router = Storage(routers.BASE) # new copy + if app != 'BASE': + for key in routers[app].keys(): + if key in ROUTER_BASE_KEYS: + raise SyntaxError, "BASE-only key '%s' in router '%s'" % (key, app) + router.update(routers[app]) + routers[app] = router + router = routers[app] + for key in router.keys(): + if key not in ROUTER_KEYS: + raise SyntaxError, "unknown key '%s' in router '%s'" % (key, app) + if not router.controllers: + router.controllers = set() + elif not isinstance(router.controllers, str): + router.controllers = set(router.controllers) + if router.languages: + router.languages = set(router.languages) + else: + router.languages = set() + if app != 'BASE': + for base_only in ROUTER_BASE_KEYS: + router.pop(base_only, None) + if 'domain' in 
router: + routers.BASE.domains[router.domain] = app + if isinstance(router.controllers, str) and router.controllers == 'DEFAULT': + router.controllers = set() + if os.path.isdir(abspath('applications', app)): + cpath = abspath('applications', app, 'controllers') + for cname in os.listdir(cpath): + if os.path.isfile(abspath(cpath, cname)) and cname.endswith('.py'): + router.controllers.add(cname[:-3]) + if router.controllers: + router.controllers.add('static') + router.controllers.add(router.default_controller) + if router.functions: + if isinstance(router.functions, (set, tuple, list)): + functions = set(router.functions) + if isinstance(router.default_function, str): + functions.add(router.default_function) # legacy compatibility + router.functions = { router.default_controller: functions } + for controller in router.functions: + router.functions[controller] = set(router.functions[controller]) + else: + router.functions = dict() + + if isinstance(routers.BASE.applications, str) and routers.BASE.applications == 'ALL': + routers.BASE.applications = list(all_apps) + if routers.BASE.applications: + routers.BASE.applications = set(routers.BASE.applications) + else: + routers.BASE.applications = set() + + for app in routers.keys(): + # set router name + router = routers[app] + router.name = app + # compile URL validation patterns + router._acfe_match = re.compile(router.acfe_match) + router._file_match = re.compile(router.file_match) + if router.args_match: + router._args_match = re.compile(router.args_match) + # convert path_prefix to a list of path elements + if router.path_prefix: + if isinstance(router.path_prefix, str): + router.path_prefix = router.path_prefix.strip('/').split('/') + + # rewrite BASE.domains as tuples + # + # key: 'domain[:port]' -> (domain, port) + # value: 'application[/controller] -> (application, controller) + # (port and controller may be None) + # + domains = dict() + if routers.BASE.domains: + for (domain, app) in [(d.strip(':'), 
a.strip('/')) for (d, a) in routers.BASE.domains.items()]: + port = None + if ':' in domain: + (domain, port) = domain.split(':') + ctlr = None + fcn = None + if '/' in app: + (app, ctlr) = app.split('/', 1) + if ctlr and '/' in ctlr: + (ctlr, fcn) = ctlr.split('/') + if app not in all_apps and app not in routers: + raise SyntaxError, "unknown app '%s' in domains" % app + domains[(domain, port)] = (app, ctlr, fcn) + routers.BASE.domains = domains + +def regex_uri(e, regexes, tag, default=None): + "filter incoming URI against a list of regexes" + path = e['PATH_INFO'] + host = e.get('HTTP_HOST', 'localhost').lower() + i = host.find(':') + if i > 0: + host = host[:i] + key = '%s:%s://%s:%s %s' % \ + (e.get('REMOTE_ADDR','localhost'), + e.get('WSGI_URL_SCHEME', 'http').lower(), host, + e.get('REQUEST_METHOD', 'get').lower(), path) + for (regex, value) in regexes: + if regex.match(key): + rewritten = regex.sub(value, key) + logger.debug('%s: [%s] [%s] -> %s' % (tag, key, value, rewritten)) + return rewritten + logger.debug('%s: [%s] -> %s (not rewritten)' % (tag, key, default)) + return default + +def regex_select(env=None, app=None, request=None): + """ + select a set of regex rewrite params for the current request + """ + if app: + thread.routes = params_apps.get(app, params) + elif env and params.routes_app: + if routers: + map_url_in(request, env, app=True) + else: + app = regex_uri(env, params.routes_app, "routes_app") + thread.routes = params_apps.get(app, params) + else: + thread.routes = params # default to base rewrite parameters + logger.debug("select routing parameters: %s" % thread.routes.name) + return app # for doctest + +def regex_filter_in(e): + "regex rewrite incoming URL" + query = e.get('QUERY_STRING', None) + e['WEB2PY_ORIGINAL_URI'] = e['PATH_INFO'] + (query and ('?' 
+ query) or '') + if thread.routes.routes_in: + path = regex_uri(e, thread.routes.routes_in, "routes_in", e['PATH_INFO']) + items = path.split('?', 1) + e['PATH_INFO'] = items[0] + if len(items) > 1: + if query: + query = items[1] + '&' + query + else: + query = items[1] + e['QUERY_STRING'] = query + e['REQUEST_URI'] = e['PATH_INFO'] + (query and ('?' + query) or '') + return e + + +# pattern to replace spaces with underscore in URL +# also the html escaped variants '+' and '%20' are covered +regex_space = re.compile('(\+|\s|%20)+') + +# pattern to find valid paths in url /application/controller/... +# this could be: +# for static pages: +# /<b:application>/static/<x:file> +# for dynamic pages: +# /<a:application>[/<c:controller>[/<f:function>[.<e:ext>][/<s:args>]]] +# application, controller, function and ext may only contain [a-zA-Z0-9_] +# file and args may also contain '-', '=', '.' and '/' +# apps in routes_apps_raw must parse raw_args into args + +regex_static = re.compile(r''' + (^ # static pages + /(?P<b> \w+) # b=app + /static # /b/static + /(?P<x> (\w[\-\=\./]?)* ) # x=file + $) + ''', re.X) + +regex_url = re.compile(r''' + (^( # (/a/c/f.e/s) + /(?P<a> [\w\s+]+ ) # /a=app + ( # (/c.f.e/s) + /(?P<c> [\w\s+]+ ) # /a/c=controller + ( # (/f.e/s) + /(?P<f> [\w\s+]+ ) # /a/c/f=function + ( # (.e) + \.(?P<e> [\w\s+]+ ) # /a/c/f.e=extension + )? + ( # (/s) + /(?P<r> # /a/c/f.e/r=raw_args + .* + ) + )? + )? + )? + )? + /?$) + ''', re.X) + +regex_args = re.compile(r''' + (^ + (?P<s> + ( [\w@/-][=.]? )* # s=args + )? 
+ /?$) # trailing slash + ''', re.X) + +def regex_url_in(request, environ): + "rewrite and parse incoming URL" + + # ################################################## + # select application + # rewrite URL if routes_in is defined + # update request.env + # ################################################## + + regex_select(env=environ, request=request) + + if thread.routes.routes_in: + environ = regex_filter_in(environ) + + for (key, value) in environ.items(): + request.env[key.lower().replace('.', '_')] = value + + path = request.env.path_info.replace('\\', '/') + + # ################################################## + # serve if a static file + # ################################################## + + match = regex_static.match(regex_space.sub('_', path)) + if match and match.group('x'): + static_file = os.path.join(request.env.applications_parent, + 'applications', match.group('b'), + 'static', match.group('x')) + return (static_file, environ) + + # ################################################## + # parse application, controller and function + # ################################################## + + path = re.sub('%20', ' ', path) + match = regex_url.match(path) + if not match or match.group('c') == 'static': + raise HTTP(400, + thread.routes.error_message % 'invalid request', + web2py_error='invalid path') + + request.application = \ + regex_space.sub('_', match.group('a') or thread.routes.default_application) + request.controller = \ + regex_space.sub('_', match.group('c') or thread.routes.default_controller) + request.function = \ + regex_space.sub('_', match.group('f') or thread.routes.default_function) + group_e = match.group('e') + request.raw_extension = group_e and regex_space.sub('_', group_e) or None + request.extension = request.raw_extension or 'html' + request.raw_args = match.group('r') + request.args = List([]) + if request.application in thread.routes.routes_apps_raw: + # application is responsible for parsing args + request.args = None + 
elif request.raw_args: + match = regex_args.match(request.raw_args.replace(' ', '_')) + if match: + group_s = match.group('s') + request.args = \ + List((group_s and group_s.split('/')) or []) + if request.args and request.args[-1] == '': + request.args.pop() # adjust for trailing empty arg + else: + raise HTTP(400, + thread.routes.error_message % 'invalid request', + web2py_error='invalid path (args)') + return (None, environ) + + +def regex_filter_out(url, e=None): + "regex rewrite outgoing URL" + if not hasattr(thread, 'routes'): + regex_select() # ensure thread.routes is set (for application threads) + if routers: + return url # already filtered + if thread.routes.routes_out: + items = url.split('?', 1) + if e: + host = e.get('http_host', 'localhost').lower() + i = host.find(':') + if i > 0: + host = host[:i] + items[0] = '%s:%s://%s:%s %s' % \ + (e.get('remote_addr', ''), + e.get('wsgi_url_scheme', 'http').lower(), host, + e.get('request_method', 'get').lower(), items[0]) + else: + items[0] = ':http://localhost:get %s' % items[0] + for (regex, value) in thread.routes.routes_out: + if regex.match(items[0]): + rewritten = '?'.join([regex.sub(value, items[0])] + items[1:]) + logger.debug('routes_out: [%s] -> %s' % (url, rewritten)) + return rewritten + logger.debug('routes_out: [%s] not rewritten' % url) + return url + + +def filter_url(url, method='get', remote='0.0.0.0', out=False, app=False, lang=None, + domain=(None,None), env=False, scheme=None, host=None, port=None): + "doctest/unittest interface to regex_filter_in() and regex_filter_out()" + regex_url = re.compile(r'^(?P<scheme>http|https|HTTP|HTTPS)\://(?P<host>[^/]*)(?P<uri>.*)') + match = regex_url.match(url) + urlscheme = match.group('scheme').lower() + urlhost = match.group('host').lower() + uri = match.group('uri') + k = uri.find('?') + if k < 0: + k = len(uri) + (path_info, query_string) = (uri[:k], uri[k+1:]) + path_info = urllib.unquote(path_info) # simulate server + e = { + 'REMOTE_ADDR': remote, 
+ 'REQUEST_METHOD': method, + 'WSGI_URL_SCHEME': urlscheme, + 'HTTP_HOST': urlhost, + 'REQUEST_URI': uri, + 'PATH_INFO': path_info, + 'QUERY_STRING': query_string, + #for filter_out request.env use lowercase + 'remote_addr': remote, + 'request_method': method, + 'wsgi_url_scheme': urlscheme, + 'http_host': urlhost + } + + request = Storage() + e["applications_parent"] = global_settings.applications_parent + request.env = Storage(e) + request.uri_language = lang + + # determine application only + # + if app: + if routers: + return map_url_in(request, e, app=True) + return regex_select(e) + + # rewrite outbound URL + # + if out: + (request.env.domain_application, request.env.domain_controller) = domain + items = path_info.lstrip('/').split('/') + if items[-1] == '': + items.pop() # adjust trailing empty args + assert len(items) >= 3, "at least /a/c/f is required" + a = items.pop(0) + c = items.pop(0) + f = items.pop(0) + if not routers: + return regex_filter_out(uri, e) + acf = map_url_out(request, None, a, c, f, items, None, scheme, host, port) + if items: + url = '%s/%s' % (acf, '/'.join(items)) + if items[-1] == '': + url += '/' + else: + url = acf + if query_string: + url += '?' 
+ query_string + return url + + # rewrite inbound URL + # + (static, e) = url_in(request, e) + if static: + return static + result = "/%s/%s/%s" % (request.application, request.controller, request.function) + if request.extension and request.extension != 'html': + result += ".%s" % request.extension + if request.args: + result += " %s" % request.args + if e['QUERY_STRING']: + result += " ?%s" % e['QUERY_STRING'] + if request.uri_language: + result += " (%s)" % request.uri_language + if env: + return request.env + return result + + +def filter_err(status, application='app', ticket='tkt'): + "doctest/unittest interface to routes_onerror" + if status > 399 and thread.routes.routes_onerror: + keys = set(('%s/%s' % (application, status), + '%s/*' % (application), + '*/%s' % (status), + '*/*')) + for (key,redir) in thread.routes.routes_onerror: + if key in keys: + if redir == '!': + break + elif '?' in redir: + url = redir + '&' + 'code=%s&ticket=%s' % (status,ticket) + else: + url = redir + '?' + 'code=%s&ticket=%s' % (status,ticket) + return url # redirection + return status # no action + +# router support +# +class MapUrlIn(object): + "logic for mapping incoming URLs" + + def __init__(self, request=None, env=None): + "initialize a map-in object" + self.request = request + self.env = env + + self.router = None + self.application = None + self.language = None + self.controller = None + self.function = None + self.extension = 'html' + + self.controllers = set() + self.functions = dict() + self.languages = set() + self.default_language = None + self.map_hyphen = False + self.exclusive_domain = False + + path = self.env['PATH_INFO'] + self.query = self.env.get('QUERY_STRING', None) + path = path.lstrip('/') + self.env['PATH_INFO'] = '/' + path + self.env['WEB2PY_ORIGINAL_URI'] = self.env['PATH_INFO'] + (self.query and ('?' 
+ self.query) or '') + + # to handle empty args, strip exactly one trailing slash, if present + # .../arg1// represents one trailing empty arg + # + if path.endswith('/'): + path = path[:-1] + self.args = List(path and path.split('/') or []) + + # see http://www.python.org/dev/peps/pep-3333/#url-reconstruction for URL composition + self.remote_addr = self.env.get('REMOTE_ADDR','localhost') + self.scheme = self.env.get('WSGI_URL_SCHEME', 'http').lower() + self.method = self.env.get('REQUEST_METHOD', 'get').lower() + self.host = self.env.get('HTTP_HOST') + self.port = None + if not self.host: + self.host = self.env.get('SERVER_NAME') + self.port = self.env.get('SERVER_PORT') + if not self.host: + self.host = 'localhost' + self.port = '80' + if ':' in self.host: + (self.host, self.port) = self.host.split(':') + if not self.port: + if self.scheme == 'https': + self.port = '443' + else: + self.port = '80' + + def map_prefix(self): + "strip path prefix, if present in its entirety" + prefix = routers.BASE.path_prefix + if prefix: + prefixlen = len(prefix) + if prefixlen > len(self.args): + return + for i in xrange(prefixlen): + if prefix[i] != self.args[i]: + return # prefix didn't match + self.args = List(self.args[prefixlen:]) # strip the prefix + + def map_app(self): + "determine application name" + base = routers.BASE # base router + self.domain_application = None + self.domain_controller = None + self.domain_function = None + arg0 = self.harg0 + if (self.host, self.port) in base.domains: + (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, self.port)] + self.env['domain_application'] = self.application + self.env['domain_controller'] = self.domain_controller + self.env['domain_function'] = self.domain_function + elif (self.host, None) in base.domains: + (self.application, self.domain_controller, self.domain_function) = base.domains[(self.host, None)] + self.env['domain_application'] = self.application + 
self.env['domain_controller'] = self.domain_controller + self.env['domain_function'] = self.domain_function + elif base.applications and arg0 in base.applications: + self.application = arg0 + elif arg0 and not base.applications: + self.application = arg0 + else: + self.application = base.default_application or '' + self.pop_arg_if(self.application == arg0) + + if not base._acfe_match.match(self.application): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error="invalid application: '%s'" % self.application) + + if self.application not in routers and \ + (self.application != thread.routes.default_application or self.application == 'welcome'): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error="unknown application: '%s'" % self.application) + + # set the application router + # + logger.debug("select application=%s" % self.application) + self.request.application = self.application + if self.application not in routers: + self.router = routers.BASE # support gluon.main.wsgibase init->welcome + else: + self.router = routers[self.application] # application router + self.controllers = self.router.controllers + self.default_controller = self.domain_controller or self.router.default_controller + self.functions = self.router.functions + self.languages = self.router.languages + self.default_language = self.router.default_language + self.map_hyphen = self.router.map_hyphen + self.exclusive_domain = self.router.exclusive_domain + self._acfe_match = self.router._acfe_match + self._file_match = self.router._file_match + self._args_match = self.router._args_match + + def map_root_static(self): + ''' + handle root-static files (no hyphen mapping) + + a root-static file is one whose incoming URL expects it to be at the root, + typically robots.txt & favicon.ico + ''' + if len(self.args) == 1 and self.arg0 in self.router.root_static: + self.controller = self.request.controller = 'static' + root_static_file = 
os.path.join(self.request.env.applications_parent, + 'applications', self.application, + self.controller, self.arg0) + logger.debug("route: root static=%s" % root_static_file) + return root_static_file + return None + + def map_language(self): + "handle language (no hyphen mapping)" + arg0 = self.arg0 # no hyphen mapping + if arg0 and self.languages and arg0 in self.languages: + self.language = arg0 + else: + self.language = self.default_language + if self.language: + logger.debug("route: language=%s" % self.language) + self.pop_arg_if(self.language == arg0) + arg0 = self.arg0 + + def map_controller(self): + "identify controller" + # handle controller + # + arg0 = self.harg0 # map hyphens + if not arg0 or (self.controllers and arg0 not in self.controllers): + self.controller = self.default_controller or '' + else: + self.controller = arg0 + self.pop_arg_if(arg0 == self.controller) + logger.debug("route: controller=%s" % self.controller) + if not self.router._acfe_match.match(self.controller): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error='invalid controller') + + def map_static(self): + ''' + handle static files + file_match but no hyphen mapping + ''' + if self.controller != 'static': + return None + file = '/'.join(self.args) + if not self.router._file_match.match(file): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error='invalid static file') + # + # support language-specific static subdirectories, + # eg /appname/en/static/filename => applications/appname/static/en/filename + # if language-specific file doesn't exist, try same file in static + # + if self.language: + static_file = os.path.join(self.request.env.applications_parent, + 'applications', self.application, + 'static', self.language, file) + if not self.language or not os.path.isfile(static_file): + static_file = os.path.join(self.request.env.applications_parent, + 'applications', self.application, + 'static', file) + 
logger.debug("route: static=%s" % static_file) + return static_file + + def map_function(self): + "handle function.extension" + arg0 = self.harg0 # map hyphens + functions = self.functions.get(self.controller, set()) + if isinstance(self.router.default_function, dict): + default_function = self.router.default_function.get(self.controller, None) + else: + default_function = self.router.default_function # str or None + default_function = self.domain_function or default_function + if not arg0 or functions and arg0 not in functions: + self.function = default_function or "" + self.pop_arg_if(arg0 and self.function == arg0) + else: + func_ext = arg0.split('.') + if len(func_ext) > 1: + self.function = func_ext[0] + self.extension = func_ext[-1] + else: + self.function = arg0 + self.pop_arg_if(True) + logger.debug("route: function.ext=%s.%s" % (self.function, self.extension)) + + if not self.router._acfe_match.match(self.function): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error='invalid function') + if self.extension and not self.router._acfe_match.match(self.extension): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error='invalid extension') + + def validate_args(self): + ''' + check args against validation pattern + ''' + for arg in self.args: + if not self.router._args_match.match(arg): + raise HTTP(400, thread.routes.error_message % 'invalid request', + web2py_error='invalid arg <%s>' % arg) + + def update_request(self): + ''' + update request from self + build env.request_uri + make lower-case versions of http headers in env + ''' + self.request.application = self.application + self.request.controller = self.controller + self.request.function = self.function + self.request.extension = self.extension + self.request.args = self.args + if self.language: + self.request.uri_language = self.language + uri = '/%s/%s/%s' % (self.application, self.controller, self.function) + if self.map_hyphen: + uri = 
uri.replace('_', '-') + if self.extension != 'html': + uri += '.' + self.extension + if self.language: + uri = '/%s%s' % (self.language, uri) + uri += self.args and urllib.quote('/' + '/'.join([str(x) for x in self.args])) or '' + uri += (self.query and ('?' + self.query) or '') + self.env['REQUEST_URI'] = uri + for (key, value) in self.env.items(): + self.request.env[key.lower().replace('.', '_')] = value + + @property + def arg0(self): + "return first arg" + return self.args(0) + + @property + def harg0(self): + "return first arg with optional hyphen mapping" + if self.map_hyphen and self.args(0): + return self.args(0).replace('-', '_') + return self.args(0) + + def pop_arg_if(self, dopop): + "conditionally remove first arg and return new first arg" + if dopop: + self.args.pop(0) + +class MapUrlOut(object): + "logic for mapping outgoing URLs" + + def __init__(self, request, env, application, controller, function, args, other, scheme, host, port): + "initialize a map-out object" + self.default_application = routers.BASE.default_application + if application in routers: + self.router = routers[application] + else: + self.router = routers.BASE + self.request = request + self.env = env + self.application = application + self.controller = controller + self.function = function + self.args = args + self.other = other + self.scheme = scheme + self.host = host + self.port = port + + self.applications = routers.BASE.applications + self.controllers = self.router.controllers + self.functions = self.router.functions.get(self.controller, set()) + self.languages = self.router.languages + self.default_language = self.router.default_language + self.exclusive_domain = self.router.exclusive_domain + self.map_hyphen = self.router.map_hyphen + self.map_static = self.router.map_static + self.path_prefix = routers.BASE.path_prefix + + self.domain_application = request and self.request.env.domain_application + self.domain_controller = request and self.request.env.domain_controller + if 
isinstance(self.router.default_function, dict): + self.default_function = self.router.default_function.get(self.controller, None) + else: + self.default_function = self.router.default_function + + if (self.router.exclusive_domain and self.domain_application and self.domain_application != self.application and not self.host): + raise SyntaxError, 'cross-domain conflict: must specify host' + + lang = request and request.uri_language + if lang and self.languages and lang in self.languages: + self.language = lang + else: + self.language = None + + self.omit_application = False + self.omit_language = False + self.omit_controller = False + self.omit_function = False + + def omit_lang(self): + "omit language if possible" + + if not self.language or self.language == self.default_language: + self.omit_language = True + + def omit_acf(self): + "omit what we can of a/c/f" + + router = self.router + + # Handle the easy no-args case of tail-defaults: /a/c /a / + # + if not self.args and self.function == self.default_function: + self.omit_function = True + if self.controller == router.default_controller: + self.omit_controller = True + if self.application == self.default_application: + self.omit_application = True + + # omit default application + # (which might be the domain default application) + # + default_application = self.domain_application or self.default_application + if self.application == default_application: + self.omit_application = True + + # omit controller if default controller + # + default_controller = ((self.application == self.domain_application) and self.domain_controller) or router.default_controller or '' + if self.controller == default_controller: + self.omit_controller = True + + # omit function if possible + # + if self.functions and self.function in self.functions and self.function == self.default_function: + self.omit_function = True + + # prohibit ambiguous cases + # + # because we presume the lang string to be unambiguous, its presence protects 
application omission + # + if self.omit_language: + if not self.applications or self.controller in self.applications: + self.omit_application = False + if self.omit_application: + if not self.applications or self.function in self.applications: + self.omit_controller = False + if not self.controllers or self.function in self.controllers: + self.omit_controller = False + if self.args: + if self.args[0] in self.functions or self.args[0] in self.controllers or self.args[0] in self.applications: + self.omit_function = False + if self.omit_controller: + if self.function in self.controllers or self.function in self.applications: + self.omit_controller = False + if self.omit_application: + if self.controller in self.applications: + self.omit_application = False + + # handle static as a special case + # (easier for external static handling) + # + if self.controller == 'static' or self.controller.startswith('static/'): + if not self.map_static: + self.omit_application = False + if self.language: + self.omit_language = False + self.omit_controller = False + self.omit_function = False + + def build_acf(self): + "build acf from components" + acf = '' + if self.map_hyphen: + self.application = self.application.replace('_', '-') + self.controller = self.controller.replace('_', '-') + if self.controller != 'static' and not self.controller.startswith('static/'): + self.function = self.function.replace('_', '-') + if not self.omit_application: + acf += '/' + self.application + if not self.omit_language: + acf += '/' + self.language + if not self.omit_controller: + acf += '/' + self.controller + if not self.omit_function: + acf += '/' + self.function + if self.path_prefix: + acf = '/' + '/'.join(self.path_prefix) + acf + if self.args: + return acf + return acf or '/' + + def acf(self): + "convert components to /app/lang/controller/function" + + if not routers: + return None # use regex filter + self.omit_lang() # try to omit language + self.omit_acf() # try to omit a/c/f + return 
self.build_acf() # build and return the /a/lang/c/f string + + +def map_url_in(request, env, app=False): + "route incoming URL" + + # initialize router-url object + # + thread.routes = params # default to base routes + map = MapUrlIn(request=request, env=env) + map.map_prefix() # strip prefix if present + map.map_app() # determine application + + # configure thread.routes for error rewrite + # + if params.routes_app: + thread.routes = params_apps.get(app, params) + + if app: + return map.application + + root_static_file = map.map_root_static() # handle root-static files + if root_static_file: + return (root_static_file, map.env) + map.map_language() + map.map_controller() + static_file = map.map_static() + if static_file: + return (static_file, map.env) + map.map_function() + map.validate_args() + map.update_request() + return (None, map.env) + +def map_url_out(request, env, application, controller, function, args, other, scheme, host, port): + ''' + supply /a/c/f (or /a/lang/c/f) portion of outgoing url + + The basic rule is that we can only make transformations + that map_url_in can reverse. + + Suppose that the incoming arguments are a,c,f,args,lang + and that the router defaults are da, dc, df, dl. + + We can perform these transformations trivially if args=[] and lang=None or dl: + + /da/dc/df => / + /a/dc/df => /a + /a/c/df => /a/c + + We would also like to be able to strip the default application or application/controller + from URLs with function/args present, thus: + + /da/c/f/args => /c/f/args + /da/dc/f/args => /f/args + + We use [applications] and [controllers] and {functions} to suppress ambiguous omissions. + + We assume that language names do not collide with a/c/f names. 
+ ''' + map = MapUrlOut(request, env, application, controller, function, args, other, scheme, host, port) + return map.acf() + +def get_effective_router(appname): + "return a private copy of the effective router for the specified application" + if not routers or appname not in routers: + return None + return Storage(routers[appname]) # return a copy + + + ADDED gluon/rewrite.pyc Index: gluon/rewrite.pyc ================================================================== --- /dev/null +++ gluon/rewrite.pyc cannot compute difference between binary files ADDED gluon/rocket.py Index: gluon/rocket.py ================================================================== --- /dev/null +++ gluon/rocket.py @@ -0,0 +1,2076 @@ +# -*- coding: utf-8 -*- + +# This file is part of the Rocket Web Server +# Copyright (c) 2011 Timothy Farrell + +# Import System Modules +import sys +import errno +import socket +import logging +import platform +import traceback + +# Define Constants +VERSION = '1.2.4' +SERVER_NAME = socket.gethostname() +SERVER_SOFTWARE = 'Rocket %s' % VERSION +HTTP_SERVER_SOFTWARE = '%s Python/%s' % (SERVER_SOFTWARE, sys.version.split(' ')[0]) +BUF_SIZE = 16384 +SOCKET_TIMEOUT = 1 # in secs +THREAD_STOP_CHECK_INTERVAL = 1 # in secs, How often should threads check for a server stop message? +IS_JYTHON = platform.system() == 'Java' # Handle special cases for Jython +IGNORE_ERRORS_ON_CLOSE = set([errno.ECONNABORTED, errno.ECONNRESET]) +DEFAULT_LISTEN_QUEUE_SIZE = 5 +DEFAULT_MIN_THREADS = 10 +DEFAULT_MAX_THREADS = 0 +DEFAULTS = dict(LISTEN_QUEUE_SIZE = DEFAULT_LISTEN_QUEUE_SIZE, + MIN_THREADS = DEFAULT_MIN_THREADS, + MAX_THREADS = DEFAULT_MAX_THREADS) + +PY3K = sys.version_info[0] > 2 + +class NullHandler(logging.Handler): + "A Logging handler to prevent library errors." + def emit(self, record): + pass + +if PY3K: + def b(val): + """ Convert string/unicode/bytes literals into bytes. This allows for + the same code to run on Python 2.x and 3.x. 
""" + if isinstance(val, str): + return val.encode() + else: + return val + + def u(val, encoding="us-ascii"): + """ Convert bytes into string/unicode. This allows for the + same code to run on Python 2.x and 3.x. """ + if isinstance(val, bytes): + return val.decode(encoding) + else: + return val + +else: + def b(val): + """ Convert string/unicode/bytes literals into bytes. This allows for + the same code to run on Python 2.x and 3.x. """ + if isinstance(val, unicode): + return val.encode() + else: + return val + + def u(val, encoding="us-ascii"): + """ Convert bytes into string/unicode. This allows for the + same code to run on Python 2.x and 3.x. """ + if isinstance(val, str): + return val.decode(encoding) + else: + return val + +# Import Package Modules +# package imports removed in monolithic build + +__all__ = ['VERSION', 'SERVER_SOFTWARE', 'HTTP_SERVER_SOFTWARE', 'BUF_SIZE', + 'IS_JYTHON', 'IGNORE_ERRORS_ON_CLOSE', 'DEFAULTS', 'PY3K', 'b', 'u', + 'Rocket', 'CherryPyWSGIServer', 'SERVER_NAME', 'NullHandler'] + +# Monolithic build...end of module: rocket\__init__.py +# Monolithic build...start of module: rocket\connection.py + +# Import System Modules +import sys +import time +import socket +try: + import ssl + has_ssl = True +except ImportError: + has_ssl = False +# Import Package Modules +# package imports removed in monolithic build +# TODO - This part is still very experimental. 
+#from .filelike import FileLikeSocket + +class Connection(object): + __slots__ = [ + 'setblocking', + 'sendall', + 'shutdown', + 'makefile', + 'fileno', + 'client_addr', + 'client_port', + 'server_port', + 'socket', + 'start_time', + 'ssl', + 'secure', + 'recv', + 'send', + 'read', + 'write' + ] + + def __init__(self, sock_tuple, port, secure=False): + self.client_addr, self.client_port = sock_tuple[1] + self.server_port = port + self.socket = sock_tuple[0] + self.start_time = time.time() + self.ssl = has_ssl and isinstance(self.socket, ssl.SSLSocket) + self.secure = secure + + if IS_JYTHON: + # In Jython we must set TCP_NODELAY here since it does not + # inherit from the listening socket. + # See: http://bugs.jython.org/issue1309 + self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) + + self.socket.settimeout(SOCKET_TIMEOUT) + + self.sendall = self.socket.sendall + self.shutdown = self.socket.shutdown + self.fileno = self.socket.fileno + self.setblocking = self.socket.setblocking + self.recv = self.socket.recv + self.send = self.socket.send + self.makefile = self.socket.makefile + +# FIXME - this is not ready for prime-time yet. 
+# def makefile(self, buf_size=BUF_SIZE): +# return FileLikeSocket(self, buf_size) + + def close(self): + if hasattr(self.socket, '_sock'): + try: + self.socket._sock.close() + except socket.error: + info = sys.exc_info() + if info[1].args[0] != socket.EBADF: + raise info[1] + else: + pass + self.socket.close() + + +# Monolithic build...end of module: rocket\connection.py +# Monolithic build...start of module: rocket\filelike.py + +# Import System Modules +import socket +try: + from io import StringIO +except ImportError: + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO +# Import Package Modules +# package imports removed in monolithic build + +class FileLikeSocket(object): + def __init__(self, conn, buf_size=BUF_SIZE): + self.conn = conn + self.buf_size = buf_size + self.buffer = StringIO() + self.content_length = None + + if self.conn.socket.gettimeout() == 0.0: + self.read = self.non_blocking_read + else: + self.read = self.blocking_read + + def __iter__(self): + return self + + def recv(self, size): + while True: + try: + return self.conn.recv(size) + except socket.error: + exc = sys.exc_info() + e = exc[1] + # FIXME - Don't raise socket_errors_nonblocking or socket_error_eintr + if (e.args[0] not in set()): + raise + + def next(self): + data = self.readline() + if data == '': + raise StopIteration + return data + + def non_blocking_read(self, size=None): + # Shamelessly adapted from Cherrypy! 
+ bufr = self.buffer + bufr.seek(0, 2) + if size is None: + while True: + data = self.recv(self.buf_size) + if not data: + break + bufr.write(data) + + self.buffer = StringIO() + + return bufr.getvalue() + else: + buf_len = self.buffer.tell() + if buf_len >= size: + bufr.seek(0) + data = bufr.read(size) + self.buffer = StringIO(bufr.read()) + return data + + self.buffer = StringIO() + while True: + remaining = size - buf_len + data = self.recv(remaining) + + if not data: + break + + n = len(data) + if n == size and not buf_len: + return data + + if n == remaining: + bufr.write(data) + del data + break + + bufr.write(data) + buf_len += n + del data + + return bufr.getvalue() + + def blocking_read(self, length=None): + if length is None: + if self.content_length is not None: + length = self.content_length + else: + length = 1 + + try: + data = self.conn.recv(length) + except: + data = b('') + + return data + + def readline(self): + data = b("") + char = self.read(1) + while char != b('\n') and char is not b(''): + line = repr(char) + data += char + char = self.read(1) + data += char + return data + + def readlines(self, hint="ignored"): + return list(self) + + def close(self): + self.conn = None + self.content_length = None + +# Monolithic build...end of module: rocket\filelike.py +# Monolithic build...start of module: rocket\futures.py + +# Import System Modules +import time +try: + from concurrent.futures import Future, ThreadPoolExecutor + from concurrent.futures.thread import _WorkItem + has_futures = True +except ImportError: + has_futures = False + + class Future: + pass + + class ThreadPoolExecutor: + pass + + class _WorkItem: + pass + + +class WSGIFuture(Future): + def __init__(self, f_dict, *args, **kwargs): + Future.__init__(self, *args, **kwargs) + + self.timeout = None + + self._mem_dict = f_dict + self._lifespan = 30 + self._name = None + self._start_time = time.time() + + def set_running_or_notify_cancel(self): + if time.time() - self._start_time >= 
self._lifespan: + self.cancel() + else: + return super(WSGIFuture, self).set_running_or_notify_cancel() + + + def remember(self, name, lifespan=None): + self._lifespan = lifespan or self._lifespan + + if name in self._mem_dict: + raise NameError('Cannot remember future by name "%s". ' % name + \ + 'A future already exists with that name.' ) + self._name = name + self._mem_dict[name] = self + + return self + + def forget(self): + if self._name in self._mem_dict and self._mem_dict[self._name] is self: + del self._mem_dict[self._name] + self._name = None + +class _WorkItem(object): + def __init__(self, future, fn, args, kwargs): + self.future = future + self.fn = fn + self.args = args + self.kwargs = kwargs + + def run(self): + if not self.future.set_running_or_notify_cancel(): + return + + try: + result = self.fn(*self.args, **self.kwargs) + except BaseException: + e = sys.exc_info()[1] + self.future.set_exception(e) + else: + self.future.set_result(result) + +class WSGIExecutor(ThreadPoolExecutor): + multithread = True + multiprocess = False + + def __init__(self, *args, **kwargs): + ThreadPoolExecutor.__init__(self, *args, **kwargs) + + self.futures = dict() + + def submit(self, fn, *args, **kwargs): + if self._shutdown_lock.acquire(): + if self._shutdown: + self._shutdown_lock.release() + raise RuntimeError('Cannot schedule new futures after shutdown') + + f = WSGIFuture(self.futures) + w = _WorkItem(f, fn, args, kwargs) + + self._work_queue.put(w) + self._adjust_thread_count() + self._shutdown_lock.release() + return f + else: + return False + +class FuturesMiddleware(object): + "Futures middleware that adds a Futures Executor to the environment" + def __init__(self, app, threads=5): + self.app = app + self.executor = WSGIExecutor(threads) + + def __call__(self, environ, start_response): + environ["wsgiorg.executor"] = self.executor + environ["wsgiorg.futures"] = self.executor.futures + return self.app(environ, start_response) + +# Monolithic build...end of 
module: rocket\futures.py +# Monolithic build...start of module: rocket\listener.py + +# Import System Modules +import os +import socket +import logging +import traceback +from threading import Thread + +try: + import ssl + from ssl import SSLError + has_ssl = True +except ImportError: + has_ssl = False + class SSLError(socket.error): + pass +# Import Package Modules +# package imports removed in monolithic build + +class Listener(Thread): + """The Listener class is a class responsible for accepting connections + and queuing them to be processed by a worker thread.""" + + def __init__(self, interface, queue_size, active_queue, *args, **kwargs): + Thread.__init__(self, *args, **kwargs) + + # Instance variables + self.active_queue = active_queue + self.interface = interface + self.addr = interface[0] + self.port = interface[1] + self.secure = len(interface) >= 4 + self.clientcert_req = (len(interface) == 5 and interface[4]) + + self.thread = None + self.ready = False + + # Error Log + self.err_log = logging.getLogger('Rocket.Errors.Port%i' % self.port) + self.err_log.addHandler(NullHandler()) + + # Build the socket + listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + + if not listener: + self.err_log.error("Failed to get socket.") + return + + if self.secure: + if not has_ssl: + self.err_log.error("ssl module required to serve HTTPS.") + return + elif not os.path.exists(interface[2]): + data = (interface[2], interface[0], interface[1]) + self.err_log.error("Cannot find key file " + "'%s'. Cannot bind to %s:%s" % data) + return + elif not os.path.exists(interface[3]): + data = (interface[3], interface[0], interface[1]) + self.err_log.error("Cannot find certificate file " + "'%s'. Cannot bind to %s:%s" % data) + return + + if self.clientcert_req and not os.path.exists(interface[4]): + data = (interface[4], interface[0], interface[1]) + self.err_log.error("Cannot find root ca certificate file " + "'%s'. 
Cannot bind to %s:%s" % data) + return + + # Set socket options + try: + listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + except: + msg = "Cannot share socket. Using %s:%i exclusively." + self.err_log.warning(msg % (self.addr, self.port)) + + try: + if not IS_JYTHON: + listener.setsockopt(socket.IPPROTO_TCP, + socket.TCP_NODELAY, + 1) + except: + msg = "Cannot set TCP_NODELAY, things might run a little slower" + self.err_log.warning(msg) + + try: + listener.bind((self.addr, self.port)) + except: + msg = "Socket %s:%i in use by other process and it won't share." + self.err_log.error(msg % (self.addr, self.port)) + else: + # We want socket operations to timeout periodically so we can + # check if the server is shutting down + listener.settimeout(THREAD_STOP_CHECK_INTERVAL) + # Listen for new connections allowing queue_size number of + # connections to wait before rejecting a connection. + listener.listen(queue_size) + + self.listener = listener + + self.ready = True + + def wrap_socket(self, sock): + try: + if self.clientcert_req: + ca_certs = self.interface[4] + cert_reqs = ssl.CERT_OPTIONAL + sock = ssl.wrap_socket(sock, + keyfile = self.interface[2], + certfile = self.interface[3], + server_side = True, + cert_reqs = cert_reqs, + ca_certs = ca_certs, + ssl_version = ssl.PROTOCOL_SSLv23) + else: + sock = ssl.wrap_socket(sock, + keyfile = self.interface[2], + certfile = self.interface[3], + server_side = True, + ssl_version = ssl.PROTOCOL_SSLv23) + + except SSLError: + # Generally this happens when an HTTP request is received on a + # secure socket. We don't do anything because it will be detected + # by Worker and dealt with appropriately. 
+ self.err_log.error('SSL Error: %s' % traceback.format_exc()) + pass + + return sock + + def start(self): + if not self.ready: + self.err_log.warning('Listener started when not ready.') + return + + if self.thread is not None and self.thread.isAlive(): + self.err_log.warning('Listener already running.') + return + + self.thread = Thread(target=self.listen, name="Port" + str(self.port)) + + self.thread.start() + + def isAlive(self): + if self.thread is None: + return False + + return self.thread.isAlive() + + def join(self): + if self.thread is None: + return + + self.ready = False + + self.thread.join() + + del self.thread + self.thread = None + self.ready = True + + def listen(self): + if __debug__: + self.err_log.debug('Entering main loop.') + while True: + try: + sock, addr = self.listener.accept() + + if self.secure: + sock = self.wrap_socket(sock) + + self.active_queue.put(((sock, addr), + self.interface[1], + self.secure)) + + except socket.timeout: + # socket.timeout will be raised every THREAD_STOP_CHECK_INTERVAL + # seconds. When that happens, we check if it's time to die. 
+ + if not self.ready: + if __debug__: + self.err_log.debug('Listener exiting.') + return + else: + continue + except: + self.err_log.error(traceback.format_exc()) + +# Monolithic build...end of module: rocket\listener.py +# Monolithic build...start of module: rocket\main.py + +# Import System Modules +import sys +import time +import socket +import logging +import traceback +from threading import Lock +try: + from queue import Queue +except ImportError: + from Queue import Queue + +# Import Package Modules +# package imports removed in monolithic build + + + + + +# Setup Logging +log = logging.getLogger('Rocket') +log.addHandler(NullHandler()) + +class Rocket(object): + """The Rocket class is responsible for handling threads and accepting and + dispatching connections.""" + + def __init__(self, + interfaces = ('127.0.0.1', 8000), + method = 'wsgi', + app_info = None, + min_threads = None, + max_threads = None, + queue_size = None, + timeout = 600, + handle_signals = True): + + self.handle_signals = handle_signals + self.startstop_lock = Lock() + self.timeout = timeout + + if not isinstance(interfaces, list): + self.interfaces = [interfaces] + else: + self.interfaces = interfaces + + if min_threads is None: + min_threads = DEFAULTS['MIN_THREADS'] + + if max_threads is None: + max_threads = DEFAULTS['MAX_THREADS'] + + if not queue_size: + if hasattr(socket, 'SOMAXCONN'): + queue_size = socket.SOMAXCONN + else: + queue_size = DEFAULTS['LISTEN_QUEUE_SIZE'] + + if max_threads and queue_size > max_threads: + queue_size = max_threads + + if isinstance(app_info, dict): + app_info['server_software'] = SERVER_SOFTWARE + + self.monitor_queue = Queue() + self.active_queue = Queue() + + self._threadpool = ThreadPool(get_method(method), + app_info = app_info, + active_queue = self.active_queue, + monitor_queue = self.monitor_queue, + min_threads = min_threads, + max_threads = max_threads) + + # Build our socket listeners + self.listeners = [Listener(i, queue_size, 
self.active_queue) for i in self.interfaces] + for ndx in range(len(self.listeners)-1, 0, -1): + if not self.listeners[ndx].ready: + del self.listeners[ndx] + + if not self.listeners: + log.critical("No interfaces to listen on...closing.") + sys.exit(1) + + def _sigterm(self, signum, frame): + log.info('Received SIGTERM') + self.stop() + + def _sighup(self, signum, frame): + log.info('Received SIGHUP') + self.restart() + + def start(self, background=False): + log.info('Starting %s' % SERVER_SOFTWARE) + + self.startstop_lock.acquire() + + try: + # Set up our shutdown signals + if self.handle_signals: + try: + import signal + signal.signal(signal.SIGTERM, self._sigterm) + signal.signal(signal.SIGUSR1, self._sighup) + except: + log.debug('This platform does not support signals.') + + # Start our worker threads + self._threadpool.start() + + # Start our monitor thread + self._monitor = Monitor(self.monitor_queue, + self.active_queue, + self.timeout, + self._threadpool) + self._monitor.setDaemon(True) + self._monitor.start() + + # I know that EXPR and A or B is bad but I'm keeping it for Py2.4 + # compatibility. 
+ str_extract = lambda l: (l.addr, l.port, l.secure and '*' or '') + + msg = 'Listening on sockets: ' + msg += ', '.join(['%s:%i%s' % str_extract(l) for l in self.listeners]) + log.info(msg) + + for l in self.listeners: + l.start() + + finally: + self.startstop_lock.release() + + if background: + return + + while self._monitor.isAlive(): + try: + time.sleep(THREAD_STOP_CHECK_INTERVAL) + except KeyboardInterrupt: + # Capture a keyboard interrupt when running from a console + break + except: + if self._monitor.isAlive(): + log.error(traceback.format_exc()) + continue + + return self.stop() + + def stop(self, stoplogging = False): + log.info('Stopping %s' % SERVER_SOFTWARE) + + self.startstop_lock.acquire() + + try: + # Stop listeners + for l in self.listeners: + l.ready = False + + # Encourage a context switch + time.sleep(0.01) + + for l in self.listeners: + if l.isAlive(): + l.join() + + # Stop Monitor + self._monitor.stop() + if self._monitor.isAlive(): + self._monitor.join() + + # Stop Worker threads + self._threadpool.stop() + + if stoplogging: + logging.shutdown() + msg = "Calling logging.shutdown() is now the responsibility of \ + the application developer. Please update your \ + applications to no longer call rocket.stop(True)" + try: + import warnings + raise warnings.DeprecationWarning(msg) + except ImportError: + raise RuntimeError(msg) + + finally: + self.startstop_lock.release() + + def restart(self): + self.stop() + self.start() + +def CherryPyWSGIServer(bind_addr, + wsgi_app, + numthreads = 10, + server_name = None, + max = -1, + request_queue_size = 5, + timeout = 10, + shutdown_timeout = 5): + """ A Cherrypy wsgiserver-compatible wrapper. 
""" + max_threads = max + if max_threads < 0: + max_threads = 0 + return Rocket(bind_addr, 'wsgi', {'wsgi_app': wsgi_app}, + min_threads = numthreads, + max_threads = max_threads, + queue_size = request_queue_size, + timeout = timeout) + +# Monolithic build...end of module: rocket\main.py +# Monolithic build...start of module: rocket\monitor.py + +# Import System Modules +import time +import logging +import select +from threading import Thread + +# Import Package Modules +# package imports removed in monolithic build + +class Monitor(Thread): + # Monitor worker class. + + def __init__(self, + monitor_queue, + active_queue, + timeout, + threadpool, + *args, + **kwargs): + + Thread.__init__(self, *args, **kwargs) + + self._threadpool = threadpool + + # Instance Variables + self.monitor_queue = monitor_queue + self.active_queue = active_queue + self.timeout = timeout + + self.log = logging.getLogger('Rocket.Monitor') + self.log.addHandler(NullHandler()) + + self.connections = set() + self.active = False + + def run(self): + self.active = True + conn_list = list() + list_changed = False + + # We need to make sure the queue is empty before we start + while not self.monitor_queue.empty(): + self.monitor_queue.get() + + if __debug__: + self.log.debug('Entering monitor loop.') + + # Enter thread main loop + while self.active: + + # Move the queued connections to the selection pool + while not self.monitor_queue.empty(): + if __debug__: + self.log.debug('In "receive timed-out connections" loop.') + + c = self.monitor_queue.get() + + if c is None: + # A non-client is a signal to die + if __debug__: + self.log.debug('Received a death threat.') + self.stop() + break + + self.log.debug('Received a timed out connection.') + + if __debug__: + assert(c not in self.connections) + + if IS_JYTHON: + # Jython requires a socket to be in Non-blocking mode in + # order to select on it. 
+ c.setblocking(False) + + if __debug__: + self.log.debug('Adding connection to monitor list.') + + self.connections.add(c) + list_changed = True + + # Wait on those connections + if list_changed: + conn_list = list(self.connections) + list_changed = False + + try: + if len(conn_list): + readable = select.select(conn_list, + [], + [], + THREAD_STOP_CHECK_INTERVAL)[0] + else: + time.sleep(THREAD_STOP_CHECK_INTERVAL) + readable = [] + + if not self.active: + break + + # If we have any readable connections, put them back + for r in readable: + if __debug__: + self.log.debug('Restoring readable connection') + + if IS_JYTHON: + # Jython requires a socket to be in Non-blocking mode in + # order to select on it, but the rest of the code requires + # that it be in blocking mode. + r.setblocking(True) + + r.start_time = time.time() + self.active_queue.put(r) + + self.connections.remove(r) + list_changed = True + + except: + if self.active: + raise + else: + break + + # If we have any stale connections, kill them off. + if self.timeout: + now = time.time() + stale = set() + for c in self.connections: + if (now - c.start_time) >= self.timeout: + stale.add(c) + + for c in stale: + if __debug__: + # "EXPR and A or B" kept for Py2.4 compatibility + data = (c.client_addr, c.server_port, c.ssl and '*' or '') + self.log.debug('Flushing stale connection: %s:%i%s' % data) + + self.connections.remove(c) + list_changed = True + + try: + c.close() + finally: + del c + + # Dynamically resize the threadpool to adapt to our changing needs. 
+ self._threadpool.dynamic_resize() + + + def stop(self): + self.active = False + + if __debug__: + self.log.debug('Flushing waiting connections') + + while self.connections: + c = self.connections.pop() + try: + c.close() + finally: + del c + + if __debug__: + self.log.debug('Flushing queued connections') + + while not self.monitor_queue.empty(): + c = self.monitor_queue.get() + + if c is None: + continue + + try: + c.close() + finally: + del c + + # Place a None sentry value to cause the monitor to die. + self.monitor_queue.put(None) + +# Monolithic build...end of module: rocket\monitor.py +# Monolithic build...start of module: rocket\threadpool.py + +# Import System Modules +import logging +# Import Package Modules +# package imports removed in monolithic build + + +# Setup Logging +log = logging.getLogger('Rocket.Errors.ThreadPool') +log.addHandler(NullHandler()) + +class ThreadPool: + """The ThreadPool class is a container class for all the worker threads. It + manages the number of actively running threads.""" + + def __init__(self, + method, + app_info, + active_queue, + monitor_queue, + min_threads=DEFAULTS['MIN_THREADS'], + max_threads=DEFAULTS['MAX_THREADS'], + ): + + if __debug__: + log.debug("Initializing ThreadPool.") + + self.check_for_dead_threads = 0 + self.active_queue = active_queue + + self.worker_class = method + self.min_threads = min_threads + self.max_threads = max_threads + self.monitor_queue = monitor_queue + self.stop_server = False + self.alive = False + + # TODO - Optimize this based on some real-world usage data + self.grow_threshold = int(max_threads/10) + 2 + + if not isinstance(app_info, dict): + app_info = dict() + + if has_futures and app_info.get('futures'): + app_info['executor'] = WSGIExecutor(max([DEFAULTS['MIN_THREADS'], + 2])) + + app_info.update(max_threads=max_threads, + min_threads=min_threads) + + self.min_threads = min_threads + self.app_info = app_info + + self.threads = set() + + def start(self): + self.stop_server = 
False + if __debug__: + log.debug("Starting threads.") + + self.grow(self.min_threads) + + self.alive = True + + def stop(self): + self.alive = False + + if __debug__: + log.debug("Stopping threads.") + + self.stop_server = True + + # Prompt the threads to die + self.shrink(len(self.threads)) + + # Stop futures initially + if has_futures and self.app_info.get('futures'): + if __debug__: + log.debug("Future executor is present. Python will not " + "exit until all jobs have finished.") + self.app_info['executor'].shutdown(wait=False) + + # Give them the gun + #active_threads = [t for t in self.threads if t.isAlive()] + #while active_threads: + # t = active_threads.pop() + # t.kill() + + # Wait until they pull the trigger + for t in self.threads: + if t.isAlive(): + t.join() + + # Clean up the mess + self.bring_out_your_dead() + + def bring_out_your_dead(self): + # Remove dead threads from the pool + + dead_threads = [t for t in self.threads if not t.isAlive()] + for t in dead_threads: + if __debug__: + log.debug("Removing dead thread: %s." % t.getName()) + try: + # Py2.4 complains here so we put it in a try block + self.threads.remove(t) + except: + pass + self.check_for_dead_threads -= len(dead_threads) + + def grow(self, amount=None): + if self.stop_server: + return + + if not amount: + amount = self.max_threads + + if self.alive: + amount = min([amount, self.max_threads - len(self.threads)]) + + if __debug__: + log.debug("Growing by %i." % amount) + + for x in range(amount): + worker = self.worker_class(self.app_info, + self.active_queue, + self.monitor_queue) + + worker.setDaemon(True) + self.threads.add(worker) + worker.start() + + def shrink(self, amount=1): + if __debug__: + log.debug("Shrinking by %i." 
% amount) + + self.check_for_dead_threads += amount + + for x in range(amount): + self.active_queue.put(None) + + def dynamic_resize(self): + if (self.max_threads > self.min_threads or self.max_threads == 0): + if self.check_for_dead_threads > 0: + self.bring_out_your_dead() + + queueSize = self.active_queue.qsize() + threadCount = len(self.threads) + + if __debug__: + log.debug("Examining ThreadPool. %i threads and %i Q'd conxions" + % (threadCount, queueSize)) + + if queueSize == 0 and threadCount > self.min_threads: + self.shrink() + + elif queueSize > self.grow_threshold: + + self.grow(queueSize) + +# Monolithic build...end of module: rocket\threadpool.py +# Monolithic build...start of module: rocket\worker.py + +# Import System Modules +import re +import sys +import socket +import logging +import traceback +from wsgiref.headers import Headers +from threading import Thread +from datetime import datetime + +try: + from urllib import unquote +except ImportError: + from urllib.parse import unquote + +try: + from io import StringIO +except ImportError: + try: + from cStringIO import StringIO + except ImportError: + from StringIO import StringIO + +try: + from ssl import SSLError +except ImportError: + class SSLError(socket.error): + pass +# Import Package Modules +# package imports removed in monolithic build + + +# Define Constants +re_SLASH = re.compile('%2F', re.IGNORECASE) +re_REQUEST_LINE = re.compile(r"""^ +(?P<method>OPTIONS|GET|HEAD|POST|PUT|DELETE|TRACE|CONNECT) # Request Method +\ # (single space) +( + (?P<scheme>[^:/]+) # Scheme + (://) # + (?P<host>[^/]+) # Host +)? # +(?P<path>(\*|/[^ \?]*)) # Path +(\? (?P<query_string>[^ ]+))? 
# Query String +\ # (single space) +(?P<protocol>HTTPS?/1\.[01]) # Protocol +$ +""", re.X) +LOG_LINE = '%(client_ip)s - "%(request_line)s" - %(status)s %(size)s' +RESPONSE = '''\ +HTTP/1.1 %s +Content-Length: %i +Content-Type: %s + +%s +''' +if IS_JYTHON: + HTTP_METHODS = set(['OPTIONS', 'GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'TRACE', 'CONNECT']) + +class Worker(Thread): + """The Worker class is a base class responsible for receiving connections + and (a subclass) will run an application to process the the connection """ + + def __init__(self, + app_info, + active_queue, + monitor_queue, + *args, + **kwargs): + + Thread.__init__(self, *args, **kwargs) + + # Instance Variables + self.app_info = app_info + self.active_queue = active_queue + self.monitor_queue = monitor_queue + + self.size = 0 + self.status = "200 OK" + self.closeConnection = True + self.request_line = "" + + # Request Log + self.req_log = logging.getLogger('Rocket.Requests') + self.req_log.addHandler(NullHandler()) + + # Error Log + self.err_log = logging.getLogger('Rocket.Errors.'+self.getName()) + self.err_log.addHandler(NullHandler()) + + def _handleError(self, typ, val, tb): + if typ == SSLError: + if 'timed out' in val.args[0]: + typ = SocketTimeout + if typ == SocketTimeout: + if __debug__: + self.err_log.debug('Socket timed out') + self.monitor_queue.put(self.conn) + return True + if typ == SocketClosed: + self.closeConnection = True + if __debug__: + self.err_log.debug('Client closed socket') + return False + if typ == BadRequest: + self.closeConnection = True + if __debug__: + self.err_log.debug('Client sent a bad request') + return True + if typ == socket.error: + self.closeConnection = True + if val.args[0] in IGNORE_ERRORS_ON_CLOSE: + if __debug__: + self.err_log.debug('Ignorable socket Error received...' 
+ 'closing connection.') + return False + else: + self.status = "999 Utter Server Failure" + tb_fmt = traceback.format_exception(typ, val, tb) + self.err_log.error('Unhandled Error when serving ' + 'connection:\n' + '\n'.join(tb_fmt)) + return False + + self.closeConnection = True + tb_fmt = traceback.format_exception(typ, val, tb) + self.err_log.error('\n'.join(tb_fmt)) + self.send_response('500 Server Error') + return False + + def run(self): + if __debug__: + self.err_log.debug('Entering main loop.') + + # Enter thread main loop + while True: + conn = self.active_queue.get() + + if not conn: + # A non-client is a signal to die + if __debug__: + self.err_log.debug('Received a death threat.') + return conn + + if isinstance(conn, tuple): + conn = Connection(*conn) + + self.conn = conn + + if conn.ssl != conn.secure: + self.err_log.info('Received HTTP connection on HTTPS port.') + self.send_response('400 Bad Request') + self.closeConnection = True + conn.close() + continue + else: + if __debug__: + self.err_log.debug('Received a connection.') + self.closeConnection = False + + # Enter connection serve loop + while True: + if __debug__: + self.err_log.debug('Serving a request') + try: + self.run_app(conn) + log_info = dict(client_ip = conn.client_addr, + time = datetime.now().strftime('%c'), + status = self.status.split(' ')[0], + size = self.size, + request_line = self.request_line) + self.req_log.info(LOG_LINE % log_info) + except: + exc = sys.exc_info() + handled = self._handleError(*exc) + if handled: + break + else: + if self.request_line: + log_info = dict(client_ip = conn.client_addr, + time = datetime.now().strftime('%c'), + status = self.status.split(' ')[0], + size = self.size, + request_line = self.request_line + ' - not stopping') + self.req_log.info(LOG_LINE % log_info) + + if self.closeConnection: + try: + conn.close() + except: + self.err_log.error(str(traceback.format_exc())) + + break + + def run_app(self, conn): + # Must be overridden with a method 
reads the request from the socket + # and sends a response. + self.closeConnection = True + raise NotImplementedError('Overload this method!') + + def send_response(self, status): + stat_msg = status.split(' ', 1)[1] + msg = RESPONSE % (status, + len(stat_msg), + 'text/plain', + stat_msg) + try: + self.conn.sendall(b(msg)) + except socket.error: + self.closeConnection = True + self.err_log.error('Tried to send "%s" to client but received socket' + ' error' % status) + + #def kill(self): + # if self.isAlive() and hasattr(self, 'conn'): + # try: + # self.conn.shutdown(socket.SHUT_RDWR) + # except socket.error: + # info = sys.exc_info() + # if info[1].args[0] != socket.EBADF: + # self.err_log.debug('Error on shutdown: '+str(info)) + + def read_request_line(self, sock_file): + self.request_line = '' + try: + # Grab the request line + d = sock_file.readline() + if PY3K: + d = d.decode('ISO-8859-1') + + if d == '\r\n': + # Allow an extra NEWLINE at the beginning per HTTP 1.1 spec + if __debug__: + self.err_log.debug('Client sent newline') + + d = sock_file.readline() + if PY3K: + d = d.decode('ISO-8859-1') + except socket.timeout: + raise SocketTimeout("Socket timed out before request.") + + d = d.strip() + + if not d: + if __debug__: + self.err_log.debug('Client did not send a recognizable request.') + raise SocketClosed('Client closed socket.') + + self.request_line = d + + # NOTE: I've replaced the traditional method of procedurally breaking + # apart the request line with a (rather unsightly) regular expression. + # However, Java's regexp support sucks so bad that it actually takes + # longer in Jython to process the regexp than procedurally. So I've + # left the old code here for Jython's sake...for now. 
+ if IS_JYTHON: + return self._read_request_line_jython(d) + + match = re_REQUEST_LINE.match(d) + + if not match: + self.send_response('400 Bad Request') + raise BadRequest + + req = match.groupdict() + for k,v in req.items(): + if not v: + req[k] = "" + if k == 'path': + req['path'] = r'%2F'.join([unquote(x) for x in re_SLASH.split(v)]) + + return req + + def _read_request_line_jython(self, d): + d = d.strip() + try: + method, uri, proto = d.split(' ') + if not proto.startswith('HTTP') or \ + proto[-3:] not in ('1.0', '1.1') or \ + method not in HTTP_METHODS: + self.send_response('400 Bad Request') + raise BadRequest + except ValueError: + self.send_response('400 Bad Request') + raise BadRequest + + req = dict(method=method, protocol = proto) + scheme = '' + host = '' + if uri == '*' or uri.startswith('/'): + path = uri + elif '://' in uri: + scheme, rest = uri.split('://') + host, path = rest.split('/', 1) + path = '/' + path + else: + self.send_response('400 Bad Request') + raise BadRequest + + query_string = '' + if '?' 
in path: + path, query_string = path.split('?', 1) + + path = r'%2F'.join([unquote(x) for x in re_SLASH.split(path)]) + + req.update(path=path, + query_string=query_string, + scheme=scheme.lower(), + host=host) + return req + + + def read_headers(self, sock_file): + try: + headers = dict() + l = sock_file.readline() + + lname = None + lval = None + while True: + if PY3K: + try: + l = str(l, 'ISO-8859-1') + except UnicodeDecodeError: + self.err_log.warning('Client sent invalid header: ' + repr(l)) + + if l == '\r\n': + break + + if l[0] in ' \t' and lname: + # Some headers take more than one line + lval += ',' + l.strip() + else: + # HTTP header values are latin-1 encoded + l = l.split(':', 1) + # HTTP header names are us-ascii encoded + + lname = l[0].strip().upper().replace('-', '_') + lval = l[-1].strip() + headers[str(lname)] = str(lval) + + l = sock_file.readline() + except socket.timeout: + raise SocketTimeout("Socket timed out before request.") + + return headers + +class SocketTimeout(Exception): + "Exception for when a socket times out between requests." + pass + +class BadRequest(Exception): + "Exception for when a client sends an incomprehensible request." + pass + +class SocketClosed(Exception): + "Exception for when a socket is closed by the client." 
+ pass + +class ChunkedReader(object): + def __init__(self, sock_file): + self.stream = sock_file + self.chunk_size = 0 + + def _read_header(self): + chunk_len = "" + try: + while "" == chunk_len: + chunk_len = self.stream.readline().strip() + return int(chunk_len, 16) + except ValueError: + return 0 + + def read(self, size): + data = b('') + chunk_size = self.chunk_size + while size: + if not chunk_size: + chunk_size = self._read_header() + + if size < chunk_size: + data += self.stream.read(size) + chunk_size -= size + break + else: + if not chunk_size: + break + data += self.stream.read(chunk_size) + size -= chunk_size + chunk_size = 0 + + self.chunk_size = chunk_size + return data + + def readline(self): + data = b('') + c = self.read(1) + while c and c != b('\n'): + data += c + c = self.read(1) + data += c + return data + + def readlines(self): + yield self.readline() + +def get_method(method): + + + methods = dict(wsgi=WSGIWorker, + fs=FileSystemWorker) + return methods[method.lower()] + +# Monolithic build...end of module: rocket\worker.py +# Monolithic build...start of module: rocket\methods\__init__.py + +# Monolithic build...end of module: rocket\methods\__init__.py +# Monolithic build...start of module: rocket\methods\fs.py + +# Import System Modules +import os +import time +import mimetypes +from email.utils import formatdate +from wsgiref.headers import Headers +from wsgiref.util import FileWrapper +# Import Package Modules +# package imports removed in monolithic build + + +# Define Constants +CHUNK_SIZE = 2**16 # 64 Kilobyte chunks +HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s''' +INDEX_HEADER = '''\ +<html> +<head><title>Directory Index: %(path)s + + +

    Directory Index: %(path)s

    +
    + +''' +INDEX_ROW = '''''' +INDEX_FOOTER = '''
    Directories
    \r\n''' + +class LimitingFileWrapper(FileWrapper): + def __init__(self, limit=None, *args, **kwargs): + self.limit = limit + FileWrapper.__init__(self, *args, **kwargs) + + def read(self, amt): + if amt > self.limit: + amt = self.limit + self.limit -= amt + return FileWrapper.read(self, amt) + +class FileSystemWorker(Worker): + def __init__(self, *args, **kwargs): + """Builds some instance variables that will last the life of the + thread.""" + + Worker.__init__(self, *args, **kwargs) + + self.root = os.path.abspath(self.app_info['document_root']) + self.display_index = self.app_info['display_index'] + + def serve_file(self, filepath, headers): + filestat = os.stat(filepath) + self.size = filestat.st_size + modtime = time.strftime("%a, %d %b %Y %H:%M:%S GMT", + time.gmtime(filestat.st_mtime)) + self.headers.add_header('Last-Modified', modtime) + if headers.get('if_modified_since') == modtime: + # The browser cache is up-to-date, send a 304. + self.status = "304 Not Modified" + self.data = [] + return + + ct = mimetypes.guess_type(filepath)[0] + self.content_type = ct if ct else 'text/plain' + try: + f = open(filepath, 'rb') + self.headers['Pragma'] = 'cache' + self.headers['Cache-Control'] = 'private' + self.headers['Content-Length'] = str(self.size) + if self.etag: + self.headers.add_header('Etag', self.etag) + if self.expires: + self.headers.add_header('Expires', self.expires) + + try: + # Implement 206 partial file support. 
+ start, end = headers['range'].split('-') + start = 0 if not start.isdigit() else int(start) + end = self.size if not end.isdigit() else int(end) + if self.size < end or start < 0: + self.status = "214 Unsatisfiable Range Requested" + self.data = FileWrapper(f, CHUNK_SIZE) + else: + f.seek(start) + self.data = LimitingFileWrapper(f, CHUNK_SIZE, limit=end) + self.status = "206 Partial Content" + except: + self.data = FileWrapper(f, CHUNK_SIZE) + except IOError: + self.status = "403 Forbidden" + + def serve_dir(self, pth, rpth): + def rel_path(path): + return os.path.normpath(path[len(self.root):] if path.startswith(self.root) else path) + + if not self.display_index: + self.status = '404 File Not Found' + return b('') + else: + self.content_type = 'text/html' + + dir_contents = [os.path.join(pth, x) for x in os.listdir(os.path.normpath(pth))] + dir_contents.sort() + + dirs = [rel_path(x)+'/' for x in dir_contents if os.path.isdir(x)] + files = [rel_path(x) for x in dir_contents if os.path.isfile(x)] + + self.data = [INDEX_HEADER % dict(path='/'+rpth)] + if rpth: + self.data += [INDEX_ROW % dict(name='(parent directory)', cls='dir parent', link='/'.join(rpth[:-1].split('/')[:-1]))] + self.data += [INDEX_ROW % dict(name=os.path.basename(x[:-1]), link=os.path.join(rpth, os.path.basename(x[:-1])).replace('\\', '/'), cls='dir') for x in dirs] + self.data += ['Files'] + self.data += [INDEX_ROW % dict(name=os.path.basename(x), link=os.path.join(rpth, os.path.basename(x)).replace('\\', '/'), cls='file') for x in files] + self.data += [INDEX_FOOTER] + self.headers['Content-Length'] = self.size = str(sum([len(x) for x in self.data])) + self.status = '200 OK' + + def run_app(self, conn): + self.status = "200 OK" + self.size = 0 + self.expires = None + self.etag = None + self.content_type = 'text/plain' + self.content_length = None + + if __debug__: + self.err_log.debug('Getting sock_file') + + # Build our file-like object + sock_file = conn.makefile('rb',BUF_SIZE) + request = 
self.read_request_line(sock_file) + if request['method'].upper() not in ('GET', ): + self.status = "501 Not Implemented" + + try: + # Get our file path + headers = dict([(str(k.lower()), v) for k, v in self.read_headers(sock_file).items()]) + rpath = request.get('path', '').lstrip('/') + filepath = os.path.join(self.root, rpath) + filepath = os.path.abspath(filepath) + if __debug__: + self.err_log.debug('Request for path: %s' % filepath) + + self.closeConnection = headers.get('connection', 'close').lower() == 'close' + self.headers = Headers([('Date', formatdate(usegmt=True)), + ('Server', HTTP_SERVER_SOFTWARE), + ('Connection', headers.get('connection', 'close')), + ]) + + if not filepath.lower().startswith(self.root.lower()): + # File must be within our root directory + self.status = "400 Bad Request" + self.closeConnection = True + elif not os.path.exists(filepath): + self.status = "404 File Not Found" + self.closeConnection = True + elif os.path.isdir(filepath): + self.serve_dir(filepath, rpath) + elif os.path.isfile(filepath): + self.serve_file(filepath, headers) + else: + # It exists but it's not a file or a directory???? + # What is it then? 
+ self.status = "501 Not Implemented" + self.closeConnection = True + + h = self.headers + statcode, statstr = self.status.split(' ', 1) + statcode = int(statcode) + if statcode >= 400: + h.add_header('Content-Type', self.content_type) + self.data = [statstr] + + # Build our output headers + header_data = HEADER_RESPONSE % (self.status, str(h)) + + # Send the headers + if __debug__: + self.err_log.debug('Sending Headers: %s' % repr(header_data)) + self.conn.sendall(b(header_data)) + + for data in self.data: + self.conn.sendall(b(data)) + + if hasattr(self.data, 'close'): + self.data.close() + + finally: + if __debug__: + self.err_log.debug('Finally closing sock_file') + sock_file.close() + +# Monolithic build...end of module: rocket\methods\fs.py +# Monolithic build...start of module: rocket\methods\wsgi.py + +# Import System Modules +import sys +import socket +from wsgiref.headers import Headers +from wsgiref.util import FileWrapper + +# Import Package Modules +# package imports removed in monolithic build + + + +if PY3K: + from email.utils import formatdate +else: + # Caps Utils for Py2.4 compatibility + from email.Utils import formatdate + +# Define Constants +NEWLINE = b('\r\n') +HEADER_RESPONSE = '''HTTP/1.1 %s\r\n%s''' +BASE_ENV = {'SERVER_NAME': SERVER_NAME, + 'SCRIPT_NAME': '', # Direct call WSGI does not need a name + 'wsgi.errors': sys.stderr, + 'wsgi.version': (1, 0), + 'wsgi.multiprocess': False, + 'wsgi.run_once': False, + 'wsgi.file_wrapper': FileWrapper + } + +class WSGIWorker(Worker): + def __init__(self, *args, **kwargs): + """Builds some instance variables that will last the life of the + thread.""" + Worker.__init__(self, *args, **kwargs) + + if isinstance(self.app_info, dict): + multithreaded = self.app_info.get('max_threads') != 1 + else: + multithreaded = False + self.base_environ = dict({'SERVER_SOFTWARE': self.app_info['server_software'], + 'wsgi.multithread': multithreaded, + }) + self.base_environ.update(BASE_ENV) + + # Grab our 
application + self.app = self.app_info.get('wsgi_app') + + if not hasattr(self.app, "__call__"): + raise TypeError("The wsgi_app specified (%s) is not a valid WSGI application." % repr(self.app)) + + # Enable futures + if has_futures and self.app_info.get('futures'): + executor = self.app_info['executor'] + self.base_environ.update({"wsgiorg.executor": executor, + "wsgiorg.futures": executor.futures}) + + def build_environ(self, sock_file, conn): + """ Build the execution environment. """ + # Grab the request line + request = self.read_request_line(sock_file) + + # Copy the Base Environment + environ = self.base_environ.copy() + + # Grab the headers + for k, v in self.read_headers(sock_file).items(): + environ[str('HTTP_'+k)] = v + + # Add CGI Variables + environ['REQUEST_METHOD'] = request['method'] + environ['PATH_INFO'] = request['path'] + environ['SERVER_PROTOCOL'] = request['protocol'] + environ['SERVER_PORT'] = str(conn.server_port) + environ['REMOTE_PORT'] = str(conn.client_port) + environ['REMOTE_ADDR'] = str(conn.client_addr) + environ['QUERY_STRING'] = request['query_string'] + if 'HTTP_CONTENT_LENGTH' in environ: + environ['CONTENT_LENGTH'] = environ['HTTP_CONTENT_LENGTH'] + if 'HTTP_CONTENT_TYPE' in environ: + environ['CONTENT_TYPE'] = environ['HTTP_CONTENT_TYPE'] + + # Save the request method for later + self.request_method = environ['REQUEST_METHOD'] + + # Add Dynamic WSGI Variables + if conn.ssl: + environ['wsgi.url_scheme'] = 'https' + environ['HTTPS'] = 'on' + else: + environ['wsgi.url_scheme'] = 'http' + + if conn.ssl: + try: + peercert = conn.socket.getpeercert(binary_form=True) + environ['SSL_CLIENT_RAW_CERT'] = \ + peercert and ssl.DER_cert_to_PEM_cert(peercert) + except Exception,e: + print e + + if environ.get('HTTP_TRANSFER_ENCODING', '') == 'chunked': + environ['wsgi.input'] = ChunkedReader(sock_file) + else: + environ['wsgi.input'] = sock_file + + return environ + + def send_headers(self, data, sections): + h_set = self.header_set + + # 
Does the app want us to send output chunked? + self.chunked = h_set.get('transfer-encoding', '').lower() == 'chunked' + + # Add a Date header if it's not there already + if not 'date' in h_set: + h_set['Date'] = formatdate(usegmt=True) + + # Add a Server header if it's not there already + if not 'server' in h_set: + h_set['Server'] = HTTP_SERVER_SOFTWARE + + if 'content-length' in h_set: + self.size = int(h_set['content-length']) + else: + s = int(self.status.split(' ')[0]) + if s < 200 or s not in (204, 205, 304): + if not self.chunked: + if sections == 1: + # Add a Content-Length header if it's not there already + h_set['Content-Length'] = str(len(data)) + self.size = len(data) + else: + # If they sent us more than one section, we blow chunks + h_set['Transfer-Encoding'] = 'Chunked' + self.chunked = True + if __debug__: + self.err_log.debug('Adding header...' + 'Transfer-Encoding: Chunked') + + if 'connection' not in h_set: + # If the application did not provide a connection header, fill it in + client_conn = self.environ.get('HTTP_CONNECTION', '').lower() + if self.environ['SERVER_PROTOCOL'] == 'HTTP/1.1': + # HTTP = 1.1 defaults to keep-alive connections + if client_conn: + h_set['Connection'] = client_conn + else: + h_set['Connection'] = 'keep-alive' + else: + # HTTP < 1.1 supports keep-alive but it's quirky so we don't support it + h_set['Connection'] = 'close' + + # Close our connection if we need to. + self.closeConnection = h_set.get('connection', '').lower() == 'close' + + # Build our output headers + header_data = HEADER_RESPONSE % (self.status, str(h_set)) + + # Send the headers + if __debug__: + self.err_log.debug('Sending Headers: %s' % repr(header_data)) + self.conn.sendall(b(header_data)) + self.headers_sent = True + + def write_warning(self, data, sections=None): + self.err_log.warning('WSGI app called write method directly. This is ' + 'deprecated behavior. 
Please update your app.') + return self.write(data, sections) + + def write(self, data, sections=None): + """ Write the data to the output socket. """ + + if self.error[0]: + self.status = self.error[0] + data = b(self.error[1]) + + if not self.headers_sent: + self.send_headers(data, sections) + + if self.request_method != 'HEAD': + try: + if self.chunked: + self.conn.sendall(b('%x\r\n%s\r\n' % (len(data), data))) + else: + self.conn.sendall(data) + except socket.error: + # But some clients will close the connection before that + # resulting in a socket error. + self.closeConnection = True + + def start_response(self, status, response_headers, exc_info=None): + """ Store the HTTP status and headers to be sent when self.write is + called. """ + if exc_info: + try: + if self.headers_sent: + # Re-raise original exception if headers sent + # because this violates WSGI specification. + raise + finally: + exc_info = None + elif self.header_set: + raise AssertionError("Headers already set!") + + if PY3K and not isinstance(status, str): + self.status = str(status, 'ISO-8859-1') + else: + self.status = status + # Make sure headers are bytes objects + try: + self.header_set = Headers(response_headers) + except UnicodeDecodeError: + self.error = ('500 Internal Server Error', + 'HTTP Headers should be bytes') + self.err_log.error('Received HTTP Headers from client that contain' + ' invalid characters for Latin-1 encoding.') + + return self.write_warning + + def run_app(self, conn): + self.size = 0 + self.header_set = Headers([]) + self.headers_sent = False + self.error = (None, None) + self.chunked = False + sections = None + output = None + + if __debug__: + self.err_log.debug('Getting sock_file') + + # Build our file-like object + if PY3K: + sock_file = conn.makefile(mode='rb', buffering=BUF_SIZE) + else: + sock_file = conn.makefile(BUF_SIZE) + + try: + # Read the headers and build our WSGI environment + self.environ = environ = self.build_environ(sock_file, conn) + + # 
Handle 100 Continue + if environ.get('HTTP_EXPECT', '') == '100-continue': + res = environ['SERVER_PROTOCOL'] + ' 100 Continue\r\n\r\n' + conn.sendall(b(res)) + + # Send it to our WSGI application + output = self.app(environ, self.start_response) + + if not hasattr(output, '__len__') and not hasattr(output, '__iter__'): + self.error = ('500 Internal Server Error', + 'WSGI applications must return a list or ' + 'generator type.') + + if hasattr(output, '__len__'): + sections = len(output) + + for data in output: + # Don't send headers until body appears + if data: + self.write(data, sections) + + if self.chunked: + # If chunked, send our final chunk length + self.conn.sendall(b('0\r\n\r\n')) + elif not self.headers_sent: + # Send headers if the body was empty + self.send_headers('', sections) + + # Don't capture exceptions here. The Worker class handles + # them appropriately. + finally: + if __debug__: + self.err_log.debug('Finally closing output and sock_file') + + if hasattr(output,'close'): + output.close() + + sock_file.close() + +# Monolithic build...end of module: rocket\methods\wsgi.py + +# +# the following code is not part of Rocket but was added in web2py for testing purposes +# + +def demo_app(environ, start_response): + global static_folder + import os + types = {'htm': 'text/html','html': 'text/html','gif': 'image/gif', + 'jpg': 'image/jpeg','png': 'image/png','pdf': 'applications/pdf'} + if static_folder: + if not static_folder.startswith('/'): + static_folder = os.path.join(os.getcwd(),static_folder) + path = os.path.join(static_folder, environ['PATH_INFO'][1:] or 'index.html') + type = types.get(path.split('.')[-1],'text') + if os.path.exists(path): + try: + data = open(path,'rb').read() + start_response('200 OK', [('Content-Type', type)]) + except IOError: + start_response('404 NOT FOUND', []) + data = '404 NOT FOUND' + else: + start_response('500 INTERNAL SERVER ERROR', []) + data = '500 INTERNAL SERVER ERROR' + else: + start_response('200 OK', 
[('Content-Type', 'text/html')]) + data = '

    Hello from Rocket Web Server

    ' + return [data] + +def demo(): + from optparse import OptionParser + parser = OptionParser() + parser.add_option("-i", "--ip", dest="ip",default="127.0.0.1", + help="ip address of the network interface") + parser.add_option("-p", "--port", dest="port",default="8000", + help="post where to run web server") + parser.add_option("-s", "--static", dest="static",default=None, + help="folder containing static files") + (options, args) = parser.parse_args() + global static_folder + static_folder = options.static + print 'Rocket running on %s:%s' % (options.ip, options.port) + r=Rocket((options.ip,int(options.port)),'wsgi', {'wsgi_app':demo_app}) + r.start() + +if __name__=='__main__': + demo() + ADDED gluon/rocket.pyc Index: gluon/rocket.pyc ================================================================== --- /dev/null +++ gluon/rocket.pyc cannot compute difference between binary files ADDED gluon/sanitizer.py Index: gluon/sanitizer.py ================================================================== --- /dev/null +++ gluon/sanitizer.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +:: + + # from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496942 + # Title: Cross-site scripting (XSS) defense + # Submitter: Josh Goldfoot (other recipes) + # Last Updated: 2006/08/05 + # Version no: 1.0 + +""" + + +from htmllib import HTMLParser +from cgi import escape +from urlparse import urlparse +from formatter import AbstractFormatter +from htmlentitydefs import entitydefs +from xml.sax.saxutils import quoteattr + +__all__ = ['sanitize'] + + +def xssescape(text): + """Gets rid of < and > and & and, for good measure, :""" + + return escape(text, quote=True).replace(':', ':') + + +class XssCleaner(HTMLParser): + + def __init__( + self, + permitted_tags=[ + 'a', + 'b', + 'blockquote', + 'br/', + 'i', + 'li', + 'ol', + 'ul', + 'p', + 'cite', + 'code', + 'pre', + 'img/', + ], + allowed_attributes={'a': ['href', 'title'], 'img': ['src', 'alt' + 
], 'blockquote': ['type']}, + fmt=AbstractFormatter, + strip_disallowed = False + ): + + HTMLParser.__init__(self, fmt) + self.result = '' + self.open_tags = [] + self.permitted_tags = [i for i in permitted_tags if i[-1] != '/'] + self.requires_no_close = [i[:-1] for i in permitted_tags + if i[-1] == '/'] + self.permitted_tags += self.requires_no_close + self.allowed_attributes = allowed_attributes + + # The only schemes allowed in URLs (for href and src attributes). + # Adding "javascript" or "vbscript" to this list would not be smart. + + self.allowed_schemes = ['http', 'https', 'ftp'] + + #to strip or escape disallowed tags? + self.strip_disallowed = strip_disallowed + self.in_disallowed = False + + def handle_data(self, data): + if data and not self.in_disallowed: + self.result += xssescape(data) + + def handle_charref(self, ref): + if self.in_disallowed: + return + elif len(ref) < 7 and ref.isdigit(): + self.result += '&#%s;' % ref + else: + self.result += xssescape('&#%s' % ref) + + def handle_entityref(self, ref): + if self.in_disallowed: + return + elif ref in entitydefs: + self.result += '&%s;' % ref + else: + self.result += xssescape('&%s' % ref) + + def handle_comment(self, comment): + if self.in_disallowed: + return + elif comment: + self.result += xssescape('' % comment) + + def handle_starttag( + self, + tag, + method, + attrs, + ): + if tag not in self.permitted_tags: + if self.strip_disallowed: + self.in_disallowed = True + else: + self.result += xssescape('<%s>' % tag) + else: + bt = '<' + tag + if tag in self.allowed_attributes: + attrs = dict(attrs) + self.allowed_attributes_here = [x for x in + self.allowed_attributes[tag] if x in attrs + and len(attrs[x]) > 0] + for attribute in self.allowed_attributes_here: + if attribute in ['href', 'src', 'background']: + if self.url_is_acceptable(attrs[attribute]): + bt += ' %s="%s"' % (attribute, + attrs[attribute]) + else: + bt += ' %s=%s' % (xssescape(attribute), + quoteattr(attrs[attribute])) + if bt == 
'" % tag, "<%s />" % tag) + if not escape: + self.strip_disallowed = True + self.result = '' + self.feed(rawstring) + for endtag in self.open_tags: + if endtag not in self.requires_no_close: + self.result += '' % endtag + return self.result + + def xtags(self): + """ + Returns a printable string informing the user which tags are allowed + """ + + tg = '' + for x in sorted(self.permitted_tags): + tg += '<' + x + if x in self.allowed_attributes: + for y in self.allowed_attributes[x]: + tg += ' %s=""' % y + tg += '> ' + return xssescape(tg.strip()) + + +def sanitize(text, permitted_tags=[ + 'a', + 'b', + 'blockquote', + 'br/', + 'i', + 'li', + 'ol', + 'ul', + 'p', + 'cite', + 'code', + 'pre', + 'img/', + 'h1','h2','h3','h4','h5','h6', + 'table','tr','td','div', + ], + allowed_attributes = { + 'a': ['href', 'title'], + 'img': ['src', 'alt'], + 'blockquote': ['type'], + 'td': ['colspan'], + }, + escape=True): + if not isinstance(text, str): return str(text) + return XssCleaner(permitted_tags=permitted_tags, + allowed_attributes=allowed_attributes).strip(text, escape) + + + ADDED gluon/sanitizer.pyc Index: gluon/sanitizer.pyc ================================================================== --- /dev/null +++ gluon/sanitizer.pyc cannot compute difference between binary files ADDED gluon/scheduler.py Index: gluon/scheduler.py ================================================================== --- /dev/null +++ gluon/scheduler.py @@ -0,0 +1,547 @@ +#### WORK IN PROGRESS... NOT SUPPOSED TO WORK YET + +USAGE = """ +## Example + +For any existing app + +Create File: app/models/scheduler.py ====== +from gluon.scheduler import Scheduler + +def demo1(*args,**vars): + print 'you passed args=%s and vars=%s' % (args, vars) + return 'done!' 
+ +def demo2(): + 1/0 + +scheduler = Scheduler(db,dict(demo1=demo1,demo2=demo2)) +## run worker nodes with: + + cd web2py + python gluon/scheduler.py -u sqlite://storage.sqlite \ + -f applications/myapp/databases/ \ + -t mytasks.py +(-h for info) +python scheduler.py -h + +## schedule jobs using +http://127.0.0.1:8000/scheduler/appadmin/insert/db/scheduler_task + +## monitor scheduled jobs +http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_task.id>0 + +## view completed jobs +http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_run.id>0 + +## view workers +http://127.0.0.1:8000/scheduler/appadmin/select/db?query=db.scheduler_worker.id>0 + +## Comments +""" + +import os +import time +import multiprocessing +import sys +import cStringIO +import threading +import traceback +import signal +import socket +import datetime +import logging +import optparse + +try: + from gluon.contrib.simplejson import loads, dumps +except: + from simplejson import loads, dumps + +if 'WEB2PY_PATH' in os.environ: + sys.path.append(os.environ['WEB2PY_PATH']) +else: + os.environ['WEB2PY_PATH'] = os.getcwd() + +from gluon import DAL, Field, IS_NOT_EMPTY, IS_IN_SET +from gluon.utils import web2py_uuid + +QUEUED = 'QUEUED' +ASSIGNED = 'ASSIGNED' +RUNNING = 'RUNNING' +COMPLETED = 'COMPLETED' +FAILED = 'FAILED' +TIMEOUT = 'TIMEOUT' +STOPPED = 'STOPPED' +ACTIVE = 'ACTIVE' +INACTIVE = 'INACTIVE' +DISABLED = 'DISABLED' +SECONDS = 1 +HEARTBEAT = 3*SECONDS + +class Task(object): + def __init__(self,app,function,timeout,args='[]',vars='{}',**kwargs): + logging.debug(' new task allocated: %s.%s' % (app,function)) + self.app = app + self.function = function + self.timeout = timeout + self.args = args # json + self.vars = vars # json + self.__dict__.update(kwargs) + def __str__(self): + return '' % self.function + +class TaskReport(object): + def __init__(self,status,result=None,output=None,tb=None): + logging.debug(' new task report: %s' % status) + if tb: + 
logging.debug(' traceback: %s' % tb) + else: + logging.debug(' result: %s' % result) + self.status = status + self.result = result + self.output = output + self.tb = tb + def __str__(self): + return '' % self.status + +def demo_function(*argv,**kwargs): + """ test function """ + for i in range(argv[0]): + print 'click',i + time.sleep(1) + return 'done' + +#the two functions below deal with simplejson decoding as unicode, esp for the dict decode +#and subsequent usage as function Keyword arguments unicode variable names won't work! +#borrowed from http://stackoverflow.com/questions/956867/how-to-get-string-objects-instead-unicode-ones-from-json-in-python +def _decode_list(lst): + newlist = [] + for i in lst: + if isinstance(i, unicode): + i = i.encode('utf-8') + elif isinstance(i, list): + i = _decode_list(i) + newlist.append(i) + return newlist + +def _decode_dict(dct): + newdict = {} + for k, v in dct.iteritems(): + if isinstance(k, unicode): + k = k.encode('utf-8') + if isinstance(v, unicode): + v = v.encode('utf-8') + elif isinstance(v, list): + v = _decode_list(v) + newdict[k] = v + return newdict + +def executor(queue,task): + """ the background process """ + logging.debug(' task started') + stdout, sys.stdout = sys.stdout, cStringIO.StringIO() + try: + if task.app: + os.chdir(os.environ['WEB2PY_PATH']) + from gluon.shell import env + from gluon.dal import BaseAdapter + from gluon import current + level = logging.getLogger().getEffectiveLevel() + logging.getLogger().setLevel(logging.WARN) + _env = env(task.app,import_models=True) + logging.getLogger().setLevel(level) + scheduler = current._scheduler + scheduler_tasks = current._scheduler.tasks + _function = scheduler_tasks[task.function] + globals().update(_env) + args = loads(task.args) + vars = loads(task.vars, object_hook=_decode_dict) + result = dumps(_function(*args,**vars)) + else: + ### for testing purpose only + result = eval(task.function)(*loads(task.args, list_hook),**loads(task.vars, 
object_hook=_decode_dict)) + stdout, sys.stdout = sys.stdout, stdout + queue.put(TaskReport(COMPLETED, result,stdout.getvalue())) + except BaseException,e: + sys.stdout = stdout + tb = traceback.format_exc() + queue.put(TaskReport(FAILED,tb=tb)) + +class MetaScheduler(threading.Thread): + def __init__(self): + threading.Thread.__init__(self) + self.process = None # the backround process + self.have_heartbeat = True # set to False to kill + def async(self,task): + """ + starts the background process and returns: + ('ok',result,output) + ('error',exception,None) + ('timeout',None,None) + ('terminated',None,None) + """ + queue = multiprocessing.Queue(maxsize=1) + p = multiprocessing.Process(target=executor,args=(queue,task)) + self.process = p + logging.debug(' task starting') + p.start() + try: + p.join(task.timeout) + except: + p.terminate() + p.join() + self.have_heartbeat = False + logging.debug(' task stopped') + return TaskReport(STOPPED) + if p.is_alive(): + p.terminate() + p.join() + logging.debug(' task timeout') + return TaskReport(TIMEOUT) + elif queue.empty(): + self.have_heartbeat = False + logging.debug(' task stopped') + return TaskReport(STOPPED) + else: + logging.debug(' task completed or failed') + return queue.get() + + def die(self): + logging.info('die!') + self.have_heartbeat = False + self.terminate_process() + + def terminate_process(self): + try: + self.process.terminate() + except: + pass # no process to terminate + + def run(self): + """ the thread that sends heartbeat """ + counter = 0 + while self.have_heartbeat: + self.send_heartbeat(counter) + counter += 1 + + def start_heartbeats(self): + self.start() + + def send_heartbeat(self,counter): + print 'thum' + time.sleep(1) + + def pop_task(self): + return Task( + app = None, + function = 'demo_function', + timeout = 7, + args = '[2]', + vars = '{}') + + def report_task(self,task,task_report): + print 'reporting task' + pass + + def sleep(self): + pass + + def loop(self): + try: + 
self.start_heartbeats() + while True and self.have_heartbeat: + logging.debug('looping...') + task = self.pop_task() + if task: + self.report_task(task,self.async(task)) + else: + logging.debug('sleeping...') + self.sleep() + except KeyboardInterrupt: + self.die() + + +TASK_STATUS = (QUEUED, RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED) +RUN_STATUS = (RUNNING, COMPLETED, FAILED, TIMEOUT, STOPPED) +WORKER_STATUS = (ACTIVE,INACTIVE,DISABLED) + +class TYPE(object): + """ + validator that check whether field is valid json and validate its type + """ + + def __init__(self,myclass=list,parse=False): + self.myclass = myclass + self.parse=parse + + def __call__(self,value): + from gluon import current + try: + obj = loads(value) + except: + return (value,current.T('invalid json')) + else: + if isinstance(obj,self.myclass): + if self.parse: + return (obj,None) + else: + return (value,None) + else: + return (value,current.T('Not of type: %s') % self.myclass) + +class Scheduler(MetaScheduler): + def __init__(self,db,tasks={},migrate=True, + worker_name=None,group_names=None,heartbeat=HEARTBEAT): + + MetaScheduler.__init__(self) + + self.db = db + self.db_thread = None + self.tasks = tasks + self.group_names = group_names or ['main'] + self.heartbeat = heartbeat + self.worker_name = worker_name or socket.gethostname()+'#'+str(web2py_uuid()) + + from gluon import current + current._scheduler = self + + self.define_tables(db,migrate=migrate) + + def define_tables(self,db,migrate): + from gluon import current + logging.debug('defining tables (migrate=%s)' % migrate) + now = datetime.datetime.now() + db.define_table( + 'scheduler_task', + Field('application_name',requires=IS_NOT_EMPTY(), + default=None,writable=False), + Field('task_name',requires=IS_NOT_EMPTY()), + Field('group_name',default='main',writable=False), + Field('status',requires=IS_IN_SET(TASK_STATUS), + default=QUEUED,writable=False), + Field('function_name', + requires=IS_IN_SET(sorted(self.tasks.keys()))), + 
Field('args','text',default='[]',requires=TYPE(list)), + Field('vars','text',default='{}',requires=TYPE(dict)), + Field('enabled','boolean',default=True), + Field('start_time','datetime',default=now), + Field('next_run_time','datetime',default=now), + Field('stop_time','datetime',default=now+datetime.timedelta(days=1)), + Field('repeats','integer',default=1,comment="0=unlimted"), + Field('period','integer',default=60,comment='seconds'), + Field('timeout','integer',default=60,comment='seconds'), + Field('times_run','integer',default=0,writable=False), + Field('last_run_time','datetime',writable=False,readable=False), + Field('assigned_worker_name',default='',writable=False), + migrate=migrate,format='%(task_name)s') + if hasattr(current,'request'): + db.scheduler_task.application_name.default=current.request.application + + db.define_table( + 'scheduler_run', + Field('scheduler_task','reference scheduler_task'), + Field('status',requires=IS_IN_SET(RUN_STATUS)), + Field('start_time','datetime'), + Field('stop_time','datetime'), + Field('output','text'), + Field('result','text'), + Field('traceback','text'), + Field('worker_name',default=self.worker_name), + migrate=migrate) + + db.define_table( + 'scheduler_worker', + Field('worker_name'), + Field('first_heartbeat','datetime'), + Field('last_heartbeat','datetime'), + Field('status',requires=IS_IN_SET(WORKER_STATUS)), + migrate=migrate) + db.commit() + + def loop(self,worker_name=None): + MetaScheduler.loop(self) + + def pop_task(self): + now = datetime.datetime.now() + db, ts = self.db, self.db.scheduler_task + try: + logging.debug(' grabbing all queued tasks') + all_available = db(ts.status.belongs((QUEUED,RUNNING)))\ + ((ts.times_runnow)\ + (ts.next_run_time<=now)\ + (ts.enabled==True)\ + (ts.assigned_worker_name.belongs((None,'',self.worker_name))) #None? 
+ number_grabbed = all_available.update( + assigned_worker_name=self.worker_name,status=ASSIGNED) + db.commit() + except: + db.rollback() + logging.debug(' grabbed %s tasks' % number_grabbed) + if number_grabbed: + grabbed = db(ts.assigned_worker_name==self.worker_name)\ + (ts.status==ASSIGNED) + task = grabbed.select(limitby=(0,1), orderby=ts.next_run_time).first() + + logging.debug(' releasing all but one (running)') + if task: + task.update_record(status=RUNNING,last_run_time=now) + grabbed.update(assigned_worker_name='',status=QUEUED) + db.commit() + else: + return None + next_run_time = task.last_run_time + datetime.timedelta(seconds=task.period) + times_run = task.times_run + 1 + if times_run < task.repeats or task.repeats==0: + run_again = True + else: + run_again = False + logging.debug(' new scheduler_run record') + while True: + try: + run_id = db.scheduler_run.insert( + scheduler_task = task.id, + status=RUNNING, + start_time=now, + worker_name=self.worker_name) + db.commit() + break + except: + db.rollback + logging.info('new task %(id)s "%(task_name)s" %(application_name)s.%(function_name)s' % task) + return Task( + app = task.application_name, + function = task.function_name, + timeout = task.timeout, + args = task.args, #in json + vars = task.vars, #in json + task_id = task.id, + run_id = run_id, + run_again = run_again, + next_run_time=next_run_time, + times_run = times_run) + + def report_task(self,task,task_report): + logging.debug(' recording task report in db (%s)' % task_report.status) + db = self.db + db(db.scheduler_run.id==task.run_id).update( + status = task_report.status, + stop_time = datetime.datetime.now(), + result = task_report.result, + output = task_report.output, + traceback = task_report.tb) + if task_report.status == COMPLETED: + d = dict(status = task.run_again and QUEUED or COMPLETED, + next_run_time = task.next_run_time, + times_run = task.times_run, + assigned_worker_name = '') + else: + d = dict( + assigned_worker_name = '', 
+ status = {'FAILED':'FAILED', + 'TIMEOUT':'TIMEOUT', + 'STOPPED':'QUEUED'}[task_report.status]) + db(db.scheduler_task.id==task.task_id)\ + (db.scheduler_task.status==RUNNING).update(**d) + db.commit() + logging.info('task completed (%s)' % task_report.status) + + def send_heartbeat(self,counter): + if not self.db_thread: + logging.debug('thread building own DAL object') + self.db_thread = DAL(self.db._uri,folder = self.db._adapter.folder) + self.define_tables(self.db_thread,migrate=False) + try: + db = self.db_thread + sw, st = db.scheduler_worker, db.scheduler_task + now = datetime.datetime.now() + expiration = now-datetime.timedelta(seconds=self.heartbeat*3) + # record heartbeat + logging.debug('........recording heartbeat') + if not db(sw.worker_name==self.worker_name)\ + .update(last_heartbeat = now, status = ACTIVE): + sw.insert(status = ACTIVE,worker_name = self.worker_name, + first_heartbeat = now,last_heartbeat = now) + if counter % 10 == 0: + # deallocate jobs assigned to inactive workers and requeue them + logging.debug(' freeing workers that have not sent heartbeat') + inactive_workers = db(sw.last_heartbeat +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" +import datetime +from storage import Storage +from html import TAG +from html import xmlescape +from languages import lazyT +import contrib.rss2 as rss2 + +try: + import json as json_parser # try stdlib (Python 2.6) +except ImportError: + try: + import simplejson as json_parser # try external module + except: + import contrib.simplejson as json_parser # fallback to pure-Python module + +def custom_json(o): + if hasattr(o,'custom_json') and callable(o.custom_json): + return o.custom_json() + if isinstance(o, (datetime.date, + datetime.datetime, + datetime.time)): + return o.isoformat()[:19].replace('T',' ') + elif isinstance(o, (int, long)): + return int(o) + elif isinstance(o, lazyT): + return str(o) + elif hasattr(o,'as_list') and callable(o.as_list): + return o.as_list() + elif 
hasattr(o,'as_dict') and callable(o.as_dict): + return o.as_dict() + else: + raise TypeError(repr(o) + " is not JSON serializable") + + +def xml_rec(value, key): + if hasattr(value,'custom_xml') and callable(value.custom_xml): + return value.custom_xml() + elif isinstance(value, (dict, Storage)): + return TAG[key](*[TAG[k](xml_rec(v, '')) for k, v in value.items()]) + elif isinstance(value, list): + return TAG[key](*[TAG.item(xml_rec(item, '')) for item in value]) + elif hasattr(value,'as_list') and callable(value.as_list): + return str(xml_rec(value.as_list(),'')) + elif hasattr(value,'as_dict') and callable(value.as_dict): + return str(xml_rec(value.as_dict(),'')) + else: + return xmlescape(value) + + +def xml(value, encoding='UTF-8', key='document'): + return ('' % encoding) + str(xml_rec(value,key)) + + +def json(value,default=custom_json): + return json_parser.dumps(value,default=default) + + +def csv(value): + return '' + + +def rss(feed): + if not 'entries' in feed and 'items' in feed: + feed['entries'] = feed['items'] + now=datetime.datetime.now() + rss = rss2.RSS2(title = feed['title'], + link = str(feed['link']), + description = feed['description'], + lastBuildDate = feed.get('created_on', now), + items = [rss2.RSSItem(\ + title=entry['title'], + link=str(entry['link']), + description=entry['description'], + pubDate=entry.get('created_on', now) + )\ + for entry in feed['entries'] + ] + ) + return rss2.dumps(rss) + + + ADDED gluon/serializers.pyc Index: gluon/serializers.pyc ================================================================== --- /dev/null +++ gluon/serializers.pyc cannot compute difference between binary files ADDED gluon/settings.py Index: gluon/settings.py ================================================================== --- /dev/null +++ gluon/settings.py @@ -0,0 +1,13 @@ +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +from storage 
import Storage + +global_settings = Storage() +settings = global_settings # legacy compatibility + + + ADDED gluon/settings.pyc Index: gluon/settings.pyc ================================================================== --- /dev/null +++ gluon/settings.pyc cannot compute difference between binary files ADDED gluon/shell.py Index: gluon/shell.py ================================================================== --- /dev/null +++ gluon/shell.py @@ -0,0 +1,421 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Developed by Massimo Di Pierro , +limodou and srackham . +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +""" + +import os +import sys +import code +import logging +import types +import re +import optparse +import glob +import traceback +import fileutils +import settings +from utils import web2py_uuid +from compileapp import build_environment, read_pyc, run_models_in +from restricted import RestrictedError +from globals import Request, Response, Session +from storage import Storage +from admin import w2p_unpack +from dal import BaseAdapter + + +logger = logging.getLogger("web2py") + +def exec_environment( + pyfile='', + request=None, + response=None, + session=None, + ): + """ + .. function:: gluon.shell.exec_environment([pyfile=''[, request=Request() + [, response=Response[, session=Session()]]]]) + + Environment builder and module loader. + + + Builds a web2py environment and optionally executes a Python + file into the environment. + A Storage dictionary containing the resulting environment is returned. + The working directory must be web2py root -- this is the web2py default. 
+ + """ + + if request is None: request = Request() + if response is None: response = Response() + if session is None: session = Session() + + if request.folder is None: + mo = re.match(r'(|.*/)applications/(?P[^/]+)', pyfile) + if mo: + appname = mo.group('appname') + request.folder = os.path.join('applications', appname) + else: + request.folder = '' + env = build_environment(request, response, session, store_current=False) + if pyfile: + pycfile = pyfile + 'c' + if os.path.isfile(pycfile): + exec read_pyc(pycfile) in env + else: + execfile(pyfile, env) + return Storage(env) + + +def env( + a, + import_models=False, + c=None, + f=None, + dir='', + extra_request={}, + ): + """ + Return web2py execution environment for application (a), controller (c), + function (f). + If import_models is True the exec all application models into the + environment. + + extra_request allows you to pass along any extra + variables to the request object before your models + get executed. This was mainly done to support + web2py_utils.test_runner, however you can use it + with any wrapper scripts that need access to the + web2py environment. + """ + + request = Request() + response = Response() + session = Session() + request.application = a + + # Populate the dummy environment with sensible defaults. + + if not dir: + request.folder = os.path.join('applications', a) + else: + request.folder = dir + request.controller = c or 'default' + request.function = f or 'index' + response.view = '%s/%s.html' % (request.controller, + request.function) + request.env.path_info = '/%s/%s/%s' % (a, c, f) + request.env.http_host = '127.0.0.1:8000' + request.env.remote_addr = '127.0.0.1' + request.env.web2py_runtime_gae = settings.global_settings.web2py_runtime_gae + + for k,v in extra_request.items(): + request[k] = v + + # Monkey patch so credentials checks pass. 
+ + def check_credentials(request, other_application='admin'): + return True + + fileutils.check_credentials = check_credentials + + environment = build_environment(request, response, session) + + if import_models: + try: + run_models_in(environment) + except RestrictedError, e: + sys.stderr.write(e.traceback+'\n') + sys.exit(1) + + environment['__name__'] = '__main__' + return environment + + +def exec_pythonrc(): + pythonrc = os.environ.get('PYTHONSTARTUP') + if pythonrc and os.path.isfile(pythonrc): + try: + execfile(pythonrc) + except NameError: + pass + + +def run( + appname, + plain=False, + import_models=False, + startfile=None, + bpython=False, + python_code=False + ): + """ + Start interactive shell or run Python script (startfile) in web2py + controller environment. appname is formatted like: + + a web2py application name + a/c exec the controller c into the application environment + """ + + (a, c, f) = parse_path_info(appname) + errmsg = 'invalid application name: %s' % appname + if not a: + die(errmsg) + adir = os.path.join('applications', a) + if not os.path.exists(adir): + if raw_input('application %s does not exist, create (y/n)?' 
+ % a).lower() in ['y', 'yes']: + os.mkdir(adir) + w2p_unpack('welcome.w2p', adir) + for subfolder in ['models','views','controllers', 'databases', + 'modules','cron','errors','sessions', + 'languages','static','private','uploads']: + subpath = os.path.join(adir,subfolder) + if not os.path.exists(subpath): + os.mkdir(subpath) + db = os.path.join(adir,'models/db.py') + if os.path.exists(db): + data = fileutils.read_file(db) + data = data.replace('','sha512:'+web2py_uuid()) + fileutils.write_file(db, data) + + if c: + import_models = True + _env = env(a, c=c, import_models=import_models) + if c: + cfile = os.path.join('applications', a, 'controllers', c + '.py') + if not os.path.isfile(cfile): + cfile = os.path.join('applications', a, 'compiled', "controllers_%s_%s.pyc" % (c,f)) + if not os.path.isfile(cfile): + die(errmsg) + else: + exec read_pyc(cfile) in _env + else: + execfile(cfile, _env) + + if f: + exec ('print %s()' % f, _env) + elif startfile: + exec_pythonrc() + try: + execfile(startfile, _env) + if import_models: BaseAdapter.close_all_instances('commit') + except Exception, e: + print traceback.format_exc() + if import_models: BaseAdapter.close_all_instances('rollback') + elif python_code: + exec_pythonrc() + try: + exec(python_code, _env) + if import_models: BaseAdapter.close_all_instances('commit') + except Exception, e: + print traceback.format_exc() + if import_models: BaseAdapter.close_all_instances('rollback') + else: + if not plain: + if bpython: + try: + import bpython + bpython.embed(locals_=_env) + return + except: + logger.warning( + 'import bpython error; trying ipython...') + else: + try: + import IPython + if IPython.__version__ >= '0.11': + from IPython.frontend.terminal.embed import InteractiveShellEmbed + shell = InteractiveShellEmbed(user_ns=_env) + shell() + return + else: + # following 2 lines fix a problem with + # IPython; thanks Michael Toomim + if '__builtins__' in _env: + del _env['__builtins__'] + shell = 
IPython.Shell.IPShell(argv=[],user_ns=_env) + shell.mainloop() + return + except: + logger.warning( + 'import IPython error; use default python shell') + try: + import readline + import rlcompleter + except ImportError: + pass + else: + readline.set_completer(rlcompleter.Completer(_env).complete) + readline.parse_and_bind('tab:complete') + exec_pythonrc() + code.interact(local=_env) + + +def parse_path_info(path_info): + """ + Parse path info formatted like a/c/f where c and f are optional + and a leading / accepted. + Return tuple (a, c, f). If invalid path_info a is set to None. + If c or f are omitted they are set to None. + """ + + mo = re.match(r'^/?(?P
    \w+)(/(?P\w+)(/(?P\w+))?)?$', + path_info) + if mo: + return (mo.group('a'), mo.group('c'), mo.group('f')) + else: + return (None, None, None) + + +def die(msg): + print >> sys.stderr, msg + sys.exit(1) + + +def test(testpath, import_models=True, verbose=False): + """ + Run doctests in web2py environment. testpath is formatted like: + + a tests all controllers in application a + a/c tests controller c in application a + a/c/f test function f in controller c, application a + + Where a, c and f are application, controller and function names + respectively. If the testpath is a file name the file is tested. + If a controller is specified models are executed by default. + """ + + import doctest + if os.path.isfile(testpath): + mo = re.match(r'(|.*/)applications/(?P[^/]+)', testpath) + if not mo: + die('test file is not in application directory: %s' + % testpath) + a = mo.group('a') + c = f = None + files = [testpath] + else: + (a, c, f) = parse_path_info(testpath) + errmsg = 'invalid test path: %s' % testpath + if not a: + die(errmsg) + cdir = os.path.join('applications', a, 'controllers') + if not os.path.isdir(cdir): + die(errmsg) + if c: + cfile = os.path.join(cdir, c + '.py') + if not os.path.isfile(cfile): + die(errmsg) + files = [cfile] + else: + files = glob.glob(os.path.join(cdir, '*.py')) + for testfile in files: + globs = env(a, import_models) + ignores = globs.keys() + execfile(testfile, globs) + + def doctest_object(name, obj): + """doctest obj and enclosed methods and classes.""" + + if type(obj) in (types.FunctionType, types.TypeType, + types.ClassType, types.MethodType, + types.UnboundMethodType): + + # Reload environment before each test. + + globs = env(a, c=c, f=f, import_models=import_models) + execfile(testfile, globs) + doctest.run_docstring_examples(obj, globs=globs, + name='%s: %s' % (os.path.basename(testfile), + name), verbose=verbose) + if type(obj) in (types.TypeType, types.ClassType): + for attr_name in dir(obj): + + # Execute . 
operator so decorators are executed. + + o = eval('%s.%s' % (name, attr_name), globs) + doctest_object(attr_name, o) + + for (name, obj) in globs.items(): + if name not in ignores and (f is None or f == name): + doctest_object(name, obj) + + +def get_usage(): + usage = """ + %prog [options] pythonfile +""" + return usage + + +def execute_from_command_line(argv=None): + if argv is None: + argv = sys.argv + + parser = optparse.OptionParser(usage=get_usage()) + + parser.add_option('-S', '--shell', dest='shell', metavar='APPNAME', + help='run web2py in interactive shell or IPython(if installed) ' + \ + 'with specified appname') + msg = 'run web2py in interactive shell or bpython (if installed) with' + msg += ' specified appname (if app does not exist it will be created).' + msg += '\n Use combined with --shell' + parser.add_option( + '-B', + '--bpython', + action='store_true', + default=False, + dest='bpython', + help=msg, + ) + parser.add_option( + '-P', + '--plain', + action='store_true', + default=False, + dest='plain', + help='only use plain python shell, should be used with --shell option', + ) + parser.add_option( + '-M', + '--import_models', + action='store_true', + default=False, + dest='import_models', + help='auto import model files, default is False, ' + \ + ' should be used with --shell option', + ) + parser.add_option( + '-R', + '--run', + dest='run', + metavar='PYTHON_FILE', + default='', + help='run PYTHON_FILE in web2py environment, ' + \ + 'should be used with --shell option', + ) + + (options, args) = parser.parse_args(argv[1:]) + + if len(sys.argv) == 1: + parser.print_help() + sys.exit(0) + + if len(args) > 0: + startfile = args[0] + else: + startfile = '' + run(options.shell, options.plain, startfile=startfile, bpython=options.bpython) + + +if __name__ == '__main__': + execute_from_command_line() + + + ADDED gluon/shell.pyc Index: gluon/shell.pyc ================================================================== --- /dev/null +++ gluon/shell.pyc 
# =========================================================================
# NOTE(review): this region of the dump interleaves two ADDED files from a
# VCS changeset.  File boundaries are marked below; non-code dump residue
# ("cannot compute difference between binary files", Index:/diff headers,
# '+' diff prefixes) has been dropped.
# =========================================================================

# ----- begin gluon/sql.py ------------------------------------------------
# this file exists for backward compatibility

__all__ = ['DAL', 'Field', 'drivers']

from dal import (DAL, Field, Table, Query, Set, Expression, Row, Rows,
                 drivers, BaseAdapter, SQLField, SQLTable, SQLXorable,
                 SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB,
                 SQLALL, SQLCustomType)
# ----- end gluon/sql.py --------------------------------------------------

# ----- begin gluon/sqlhtml.py --------------------------------------------
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Holds:

- SQLFORM: provide a form for a table (with/without record)
- SQLTABLE: provides a table for a set of records
- form_factory: provides a SQLFORM for an non-db backed table

"""

from http import HTTP
from html import XML, SPAN, TAG, A, DIV, CAT, UL, LI, TEXTAREA, BR, IMG, SCRIPT
from html import FORM, INPUT, LABEL, OPTION, SELECT
from html import TABLE, THEAD, TBODY, TR, TD, TH
from html import URL
from dal import DAL, Table, Row, CALLABLETYPES, smart_query
from storage import Storage
from utils import md5_hash
from validators import IS_EMPTY_OR

import urllib
import re
import cStringIO

# raw strings: '\w' and '\.' are regex escapes, not string escapes, and the
# non-raw form emits DeprecationWarnings on modern interpreters
table_field = re.compile(r'[\w_]+\.[\w_]+')
widget_class = re.compile(r'^\w*')


def represent(field, value, record):
    """
    Render ``value`` using ``field.represent``.

    ``field.represent`` may take either one positional argument (the value)
    or two (the value and the whole record); dispatch on its number of
    required positional arguments.

    :param field: the field whose ``represent`` callable is used
    :param value: the raw value to render
    :param record: the row the value belongs to (passed to 2-arg representers)
    :raises RuntimeError: if the representer takes any other number of args
    """
    f = field.represent
    if not callable(f):
        return str(value)
    # required positional args = co_argcount minus number of defaults.
    # __code__/__defaults__ (Python 2.6+) instead of py2-only func_code.
    n = f.__code__.co_argcount - len(f.__defaults__ or [])
    if n == 1:
        return f(value)
    elif n == 2:
        return f(value, record)
    else:
        raise RuntimeError("field representation must take 1 or 2 args")


def safe_int(x):
    """Coerce ``x`` to int, returning 0 instead of raising on bad input."""
    try:
        return int(x)
    except ValueError:
        return 0


def safe_float(x):
    """Coerce ``x`` to float, returning 0 instead of raising on bad input."""
    try:
        return float(x)
    except ValueError:
        return 0


class FormWidget(object):
    """
    helper for SQLFORM to generate form input fields (widget),
    related to the fieldtype
    """

    @staticmethod
    def _attributes(field, widget_attributes, **attributes):
        """
        helper to build a common set of attributes

        :param field: the field involved, some attributes are derived from this
        :param widget_attributes: widget related attributes
        :param attributes: any other supplied attributes
        """
        attr = dict(
            _id='%s_%s' % (field._tablename, field.name),
            # css class = leading word of the field type (e.g. 'list' for
            # 'list:string', 'reference' for 'reference t')
            _class=widget_class.match(str(field.type)).group(),
            _name=field.name,
            requires=field.requires,
            )
        attr.update(widget_attributes)
        attr.update(attributes)
        return attr

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates the widget for the field.

        When serialized, will provide an INPUT tag:

        - id = tablename_fieldname
        - class = field.type
        - name = fieldname

        :param field: the field needing the widget
        :param value: value
        :param attributes: any other attributes to be applied
        """

        raise NotImplementedError


class StringWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates an INPUT text tag.

        see also: :meth:`FormWidget.widget`
        """

        default = dict(
            _type='text',
            value=(not value is None and str(value)) or '',
            )
        attr = StringWidget._attributes(field, default, **attributes)

        return INPUT(**attr)


# the following widgets render exactly like StringWidget; they exist so that
# the generated INPUT carries the field-type-specific css class
class IntegerWidget(StringWidget):

    pass


class DoubleWidget(StringWidget):

    pass


class DecimalWidget(StringWidget):

    pass


class TimeWidget(StringWidget):

    pass


class DateWidget(StringWidget):

    pass


class DatetimeWidget(StringWidget):

    pass


class TextWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a TEXTAREA tag.

        see also: :meth:`FormWidget.widget`
        """

        default = dict(
            value=value,
            )
        attr = TextWidget._attributes(field, default, **attributes)

        return TEXTAREA(**attr)


class BooleanWidget(FormWidget):

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates an INPUT checkbox tag.

        see also: :meth:`FormWidget.widget`
        """

        default = dict(
            _type='checkbox',
            value=value,
            )
        attr = BooleanWidget._attributes(field, default, **attributes)

        return INPUT(**attr)


class OptionsWidget(FormWidget):

    @staticmethod
    def has_options(field):
        """
        checks if the field has selectable options

        :param field: the field needing checking
        :returns: True if the field has options
        """

        return hasattr(field.requires, 'options')

    @staticmethod
    def widget(field, value, **attributes):
        """
        generates a SELECT tag, including OPTIONs (only 1 option allowed)

        see also: :meth:`FormWidget.widget`

        :raises SyntaxError: if the field's first validator exposes no
            ``options`` callable
        """
        default = dict(
            value=value,
            )
        attr = OptionsWidget._attributes(field, default, **attributes)

        requires = field.requires
        if not isinstance(requires, (list, tuple)):
            requires = [requires]
        if requires:
            if hasattr(requires[0], 'options'):
                options = requires[0].options()
            else:
                raise SyntaxError(
                    'widget cannot determine options of %s' % field)
        opts = [OPTION(v, _value=k) for (k, v) in options]

        return SELECT(*opts, **attr)


# =========================================================================
# NOTE(review): `ListWidget` is truncated at this point in the dump -- the
# tag-stripping that produced this chunk destroyed the inline jQuery
# <script> body and the method's return statement.  The visible fragment is
# preserved below as a comment; restore the full class from upstream web2py
# before use.  Do NOT treat the fragment as complete.
#
# class ListWidget(StringWidget):
#     @staticmethod
#     def widget(field, value, **attributes):
#         _id = '%s_%s' % (field._tablename, field.name)
#         _name = field.name
#         if field.type == 'list:integer':
#             _class = 'integer'
#         else:
#             _class = 'string'
#         items = [LI(INPUT(_id=_id, _class=_class, _name=_name,
#                           value=v, hideerror=True))
#                  for v in value or ['']]
#         script = SCRIPT("""
# // from http://refactormycode.com/codes/694-expanding-input-list-using-jquery
# (function(){
# jQuery.fn.grow_input = function() {
#   return this.each(function() {
#     var ul = this;
#     jQuery(ul).find(":text").after('   <-- dump cuts off mid-string here
# =========================================================================
otherwise with download url: file + - otherwise: file + + :param field: the field + :param value: the field value + :param download_url: url for the file download (default = None) + """ + + inp = UploadWidget.GENERIC_DESCRIPTION + + if download_url and value: + if callable(download_url): + url = download_url(value) + else: + url = download_url + '/' + value + if UploadWidget.is_image(value): + inp = IMG(_src = url, _width = UploadWidget.DEFAULT_WIDTH) + inp = A(inp, _href = url) + + return inp + + @staticmethod + def is_image(value): + """ + Tries to check if the filename provided references to an image + + Checking is based on filename extension. Currently recognized: + gif, png, jp(e)g, bmp + + :param value: filename + """ + + extension = value.split('.')[-1].lower() + if extension in ['gif', 'png', 'jpg', 'jpeg', 'bmp']: + return True + return False + + +class AutocompleteWidget(object): + + def __init__(self, request, field, id_field=None, db=None, + orderby=None, limitby=(0,10), + keyword='_autocomplete_%(fieldname)s', + min_length=2): + self.request = request + self.keyword = keyword % dict(fieldname=field.name) + self.db = db or field._db + self.orderby = orderby + self.limitby = limitby + self.min_length = min_length + self.fields=[field] + if id_field: + self.is_reference = True + self.fields.append(id_field) + else: + self.is_reference = False + if hasattr(request,'application'): + self.url = URL(args=request.args) + self.callback() + else: + self.url = request + def callback(self): + if self.keyword in self.request.vars: + field = self.fields[0] + rows = self.db(field.like(self.request.vars[self.keyword]+'%'))\ + .select(orderby=self.orderby,limitby=self.limitby,*self.fields) + if rows: + if self.is_reference: + id_field = self.fields[1] + raise HTTP(200,SELECT(_id=self.keyword,_class='autocomplete', + _size=len(rows),_multiple=(len(rows)==1), + *[OPTION(s[field.name],_value=s[id_field.name], + _selected=(k==0)) \ + for k,s in enumerate(rows)]).xml()) + 
else: + raise HTTP(200,SELECT(_id=self.keyword,_class='autocomplete', + _size=len(rows),_multiple=(len(rows)==1), + *[OPTION(s[field.name], + _selected=(k==0)) \ + for k,s in enumerate(rows)]).xml()) + else: + + raise HTTP(200,'') + def __call__(self,field,value,**attributes): + default = dict( + _type = 'text', + value = (not value is None and str(value)) or '', + ) + attr = StringWidget._attributes(field, default, **attributes) + div_id = self.keyword+'_div' + attr['_autocomplete']='off' + if self.is_reference: + key2 = self.keyword+'_aux' + key3 = self.keyword+'_auto' + attr['_class']='string' + name = attr['_name'] + if 'requires' in attr: del attr['requires'] + attr['_name'] = key2 + value = attr['value'] + record = self.db(self.fields[1]==value).select(self.fields[0]).first() + attr['value'] = record and record[self.fields[0].name] + attr['_onblur']="jQuery('#%(div_id)s').delay(3000).fadeOut('slow');" % \ + dict(div_id=div_id,u='F'+self.keyword) + attr['_onkeyup'] = "jQuery('#%(key3)s').val('');var e=event.which?event.which:event.keyCode; function %(u)s(){jQuery('#%(id)s').val(jQuery('#%(key)s :selected').text());jQuery('#%(key3)s').val(jQuery('#%(key)s').val())}; if(e==39) %(u)s(); else if(e==40) {if(jQuery('#%(key)s option:selected').next().length)jQuery('#%(key)s option:selected').attr('selected',null).next().attr('selected','selected'); %(u)s();} else if(e==38) {if(jQuery('#%(key)s option:selected').prev().length)jQuery('#%(key)s option:selected').attr('selected',null).prev().attr('selected','selected'); %(u)s();} else if(jQuery('#%(id)s').val().length>=%(min_length)s) jQuery.get('%(url)s?%(key)s='+escape(jQuery('#%(id)s').val()),function(data){if(data=='')jQuery('#%(key3)s').val('');else{jQuery('#%(id)s').next('.error').hide();jQuery('#%(div_id)s').html(data).show().focus();jQuery('#%(div_id)s 
select').css('width',jQuery('#%(id)s').css('width'));jQuery('#%(key3)s').val(jQuery('#%(key)s').val());jQuery('#%(key)s').change(%(u)s);jQuery('#%(key)s').click(%(u)s);};}); else jQuery('#%(div_id)s').fadeOut('slow');" % \ + dict(url=self.url,min_length=self.min_length, + key=self.keyword,id=attr['_id'],key2=key2,key3=key3, + name=name,div_id=div_id,u='F'+self.keyword) + if self.min_length==0: + attr['_onfocus'] = attr['_onkeyup'] + return TAG[''](INPUT(**attr),INPUT(_type='hidden',_id=key3,_value=value, + _name=name,requires=field.requires), + DIV(_id=div_id,_style='position:absolute;')) + else: + attr['_name']=field.name + attr['_onblur']="jQuery('#%(div_id)s').delay(3000).fadeOut('slow');" % \ + dict(div_id=div_id,u='F'+self.keyword) + attr['_onkeyup'] = "var e=event.which?event.which:event.keyCode; function %(u)s(){jQuery('#%(id)s').val(jQuery('#%(key)s').val())}; if(e==39) %(u)s(); else if(e==40) {if(jQuery('#%(key)s option:selected').next().length)jQuery('#%(key)s option:selected').attr('selected',null).next().attr('selected','selected'); %(u)s();} else if(e==38) {if(jQuery('#%(key)s option:selected').prev().length)jQuery('#%(key)s option:selected').attr('selected',null).prev().attr('selected','selected'); %(u)s();} else if(jQuery('#%(id)s').val().length>=%(min_length)s) jQuery.get('%(url)s?%(key)s='+escape(jQuery('#%(id)s').val()),function(data){jQuery('#%(id)s').next('.error').hide();jQuery('#%(div_id)s').html(data).show().focus();jQuery('#%(div_id)s select').css('width',jQuery('#%(id)s').css('width'));jQuery('#%(key)s').change(%(u)s);jQuery('#%(key)s').click(%(u)s);}); else jQuery('#%(div_id)s').fadeOut('slow');" % \ + dict(url=self.url,min_length=self.min_length, + key=self.keyword,id=attr['_id'],div_id=div_id,u='F'+self.keyword) + if self.min_length==0: + attr['_onfocus'] = attr['_onkeyup'] + return TAG[''](INPUT(**attr),DIV(_id=div_id,_style='position:absolute;')) + + +class SQLFORM(FORM): + + """ + SQLFORM is used to map a table (and a current record) 
into an HTML form + + given a SQLTable stored in db.table + + generates an insert form:: + + SQLFORM(db.table) + + generates an update form:: + + record=db.table[some_id] + SQLFORM(db.table, record) + + generates an update with a delete button:: + + SQLFORM(db.table, record, deletable=True) + + if record is an int:: + + record=db.table[record] + + optional arguments: + + :param fields: a list of fields that should be placed in the form, + default is all. + :param labels: a dictionary with labels for each field, keys are the field + names. + :param col3: a dictionary with content for an optional third column + (right of each field). keys are field names. + :param linkto: the URL of a controller/function to access referencedby + records + see controller appadmin.py for examples + :param upload: the URL of a controller/function to download an uploaded file + see controller appadmin.py for examples + + any named optional attribute is passed to the
    tag + for example _class, _id, _style, _action, _method, etc. + + """ + + # usability improvements proposal by fpp - 4 May 2008 : + # - correct labels (for points to field id, not field name) + # - add label for delete checkbox + # - add translatable label for record ID + # - add third column to right of fields, populated from the col3 dict + + widgets = Storage(dict( + string = StringWidget, + text = TextWidget, + password = PasswordWidget, + integer = IntegerWidget, + double = DoubleWidget, + decimal = DecimalWidget, + time = TimeWidget, + date = DateWidget, + datetime = DatetimeWidget, + upload = UploadWidget, + boolean = BooleanWidget, + blob = None, + options = OptionsWidget, + multiple = MultipleOptionsWidget, + radio = RadioWidget, + checkboxes = CheckboxesWidget, + autocomplete = AutocompleteWidget, + list = ListWidget, + )) + + FIELDNAME_REQUEST_DELETE = 'delete_this_record' + FIELDKEY_DELETE_RECORD = 'delete_record' + ID_LABEL_SUFFIX = '__label' + ID_ROW_SUFFIX = '__row' + + def __init__( + self, + table, + record = None, + deletable = False, + linkto = None, + upload = None, + fields = None, + labels = None, + col3 = {}, + submit_button = 'Submit', + delete_label = 'Check to delete:', + showid = True, + readonly = False, + comments = True, + keepopts = [], + ignore_rw = False, + record_id = None, + formstyle = 'table3cols', + buttons = ['submit'], + separator = ': ', + **attributes + ): + """ + SQLFORM(db.table, + record=None, + fields=['name'], + labels={'name': 'Your name'}, + linkto=URL(f='table/db/') + """ + + self.ignore_rw = ignore_rw + self.formstyle = formstyle + nbsp = XML(' ') # Firefox2 does not display fields with blanks + FORM.__init__(self, *[], **attributes) + ofields = fields + keyed = hasattr(table,'_primarykey') + + # if no fields are provided, build it from the provided table + # will only use writable or readable fields, unless forced to ignore + if fields is None: + fields = [f.name for f in table if (ignore_rw or f.writable or 
f.readable) and not f.compute] + self.fields = fields + + # make sure we have an id + if self.fields[0] != table.fields[0] and \ + isinstance(table,Table) and not keyed: + self.fields.insert(0, table.fields[0]) + + self.table = table + + # try to retrieve the indicated record using its id + # otherwise ignore it + if record and isinstance(record, (int, long, str, unicode)): + if not str(record).isdigit(): + raise HTTP(404, "Object not found") + record = table._db(table._id == record).select().first() + if not record: + raise HTTP(404, "Object not found") + self.record = record + + self.record_id = record_id + if keyed: + if record: + self.record_id = dict([(k,record[k]) for k in table._primarykey]) + else: + self.record_id = dict([(k,None) for k in table._primarykey]) + self.field_parent = {} + xfields = [] + self.fields = fields + self.custom = Storage() + self.custom.dspval = Storage() + self.custom.inpval = Storage() + self.custom.label = Storage() + self.custom.comment = Storage() + self.custom.widget = Storage() + self.custom.linkto = Storage() + + sep = separator or '' + + for fieldname in self.fields: + if fieldname.find('.') >= 0: + continue + + field = self.table[fieldname] + comment = None + + if comments: + comment = col3.get(fieldname, field.comment) + if comment is None: + comment = '' + self.custom.comment[fieldname] = comment + + if not labels is None and fieldname in labels: + label = labels[fieldname] + else: + label = field.label + self.custom.label[fieldname] = label + + field_id = '%s_%s' % (table._tablename, fieldname) + + label = LABEL(label, label and sep, _for=field_id, + _id=field_id+SQLFORM.ID_LABEL_SUFFIX) + + row_id = field_id+SQLFORM.ID_ROW_SUFFIX + if field.type == 'id': + self.custom.dspval.id = nbsp + self.custom.inpval.id = '' + widget = '' + if record: + if showid and 'id' in fields and field.readable: + v = record['id'] + widget = SPAN(v, _id=field_id) + self.custom.dspval.id = str(v) + xfields.append((row_id,label, 
widget,comment)) + self.record_id = str(record['id']) + self.custom.widget.id = widget + continue + + if readonly and not ignore_rw and not field.readable: + continue + + if record: + default = record[fieldname] + else: + default = field.default + if isinstance(default,CALLABLETYPES): + default=default() + + cond = readonly or \ + (not ignore_rw and not field.writable and field.readable) + + if default and not cond: + default = field.formatter(default) + dspval = default + inpval = default + + if cond: + + # ## if field.represent is available else + # ## ignore blob and preview uploaded images + # ## format everything else + + if field.represent: + inp = represent(field,default,record) + elif field.type in ['blob']: + continue + elif field.type == 'upload': + inp = UploadWidget.represent(field, default, upload) + elif field.type == 'boolean': + inp = self.widgets.boolean.widget(field, default, _disabled=True) + else: + inp = field.formatter(default) + elif field.type == 'upload': + if hasattr(field, 'widget') and field.widget: + inp = field.widget(field, default, upload) + else: + inp = self.widgets.upload.widget(field, default, upload) + elif hasattr(field, 'widget') and field.widget: + inp = field.widget(field, default) + elif field.type == 'boolean': + inp = self.widgets.boolean.widget(field, default) + if default: + inpval = 'checked' + else: + inpval = '' + elif OptionsWidget.has_options(field): + if not field.requires.multiple: + inp = self.widgets.options.widget(field, default) + else: + inp = self.widgets.multiple.widget(field, default) + if fieldname in keepopts: + inpval = TAG[''](*inp.components) + elif field.type.startswith('list:'): + inp = self.widgets.list.widget(field,default) + elif field.type == 'text': + inp = self.widgets.text.widget(field, default) + elif field.type == 'password': + inp = self.widgets.password.widget(field, default) + if self.record: + dspval = PasswordWidget.DEFAULT_PASSWORD_DISPLAY + else: + dspval = '' + elif field.type == 
'blob': + continue + else: + inp = self.widgets.string.widget(field, default) + + xfields.append((row_id,label,inp,comment)) + self.custom.dspval[fieldname] = dspval or nbsp + self.custom.inpval[fieldname] = inpval or '' + self.custom.widget[fieldname] = inp + + # if a record is provided and found, as is linkto + # build a link + if record and linkto: + db = linkto.split('/')[-1] + for (rtable, rfield) in table._referenced_by: + if keyed: + rfld = table._db[rtable][rfield] + query = urllib.quote('%s.%s==%s' % (db,rfld,record[rfld.type[10:].split('.')[1]])) + else: + query = urllib.quote('%s.%s==%s' % (db,table._db[rtable][rfield],record.id)) + lname = olname = '%s.%s' % (rtable, rfield) + if ofields and not olname in ofields: + continue + if labels and lname in labels: + lname = labels[lname] + widget = A(lname, + _class='reference', + _href='%s/%s?query=%s' % (linkto, rtable, query)) + xfields.append((olname.replace('.', '__')+SQLFORM.ID_ROW_SUFFIX, + '',widget,col3.get(olname,''))) + self.custom.linkto[olname.replace('.', '__')] = widget +# + + # when deletable, add delete? 
checkbox + self.custom.deletable = '' + if record and deletable: + widget = INPUT(_type='checkbox', + _class='delete', + _id=self.FIELDKEY_DELETE_RECORD, + _name=self.FIELDNAME_REQUEST_DELETE, + ) + xfields.append((self.FIELDKEY_DELETE_RECORD+SQLFORM.ID_ROW_SUFFIX, + LABEL( + delete_label, + _for=self.FIELDKEY_DELETE_RECORD, + _id=self.FIELDKEY_DELETE_RECORD+SQLFORM.ID_LABEL_SUFFIX), + widget, + col3.get(self.FIELDKEY_DELETE_RECORD, ''))) + self.custom.deletable = widget + # when writable, add submit button + self.custom.submit = '' + if (not readonly) and ('submit' in buttons): + widget = INPUT(_type='submit', + _value=submit_button) + xfields.append(('submit_record'+SQLFORM.ID_ROW_SUFFIX, + '', widget,col3.get('submit_button', ''))) + self.custom.submit = widget + # if a record is provided and found + # make sure it's id is stored in the form + if record: + if not self['hidden']: + self['hidden'] = {} + if not keyed: + self['hidden']['id'] = record['id'] + + (begin, end) = self._xml() + self.custom.begin = XML("<%s %s>" % (self.tag, begin)) + self.custom.end = XML("%s" % (end, self.tag)) + table = self.createform(xfields) + self.components = [table] + + def createform(self, xfields): + if self.formstyle == 'table3cols': + table = TABLE() + for id,a,b,c in xfields: + td_b = self.field_parent[id] = TD(b,_class='w2p_fw') + table.append(TR(TD(a,_class='w2p_fl'), + td_b, + TD(c,_class='w2p_fc'),_id=id)) + elif self.formstyle == 'table2cols': + table = TABLE() + for id,a,b,c in xfields: + td_b = self.field_parent[id] = TD(b,_class='w2p_fw',_colspan="2") + table.append(TR(TD(a,_class='w2p_fl'), + TD(c,_class='w2p_fc'),_id=id + +'1',_class='even')) + table.append(TR(td_b,_id=id+'2',_class='odd')) + elif self.formstyle == 'divs': + table = TAG['']() + for id,a,b,c in xfields: + div_b = self.field_parent[id] = DIV(b,_class='w2p_fw') + table.append(DIV(DIV(a,_class='w2p_fl'), + div_b, + DIV(c,_class='w2p_fc'),_id=id)) + elif self.formstyle == 'ul': + table = UL() + for 
id,a,b,c in xfields: + div_b = self.field_parent[id] = DIV(b,_class='w2p_fw') + table.append(LI(DIV(a,_class='w2p_fl'), + div_b, + DIV(c,_class='w2p_fc'),_id=id)) + elif type(self.formstyle) == type(lambda:None): + table = TABLE() + for id,a,b,c in xfields: + td_b = self.field_parent[id] = TD(b,_class='w2p_fw') + newrows = self.formstyle(id,a,td_b,c) + if type(newrows).__name__ != "tuple": + newrows = [newrows] + for newrow in newrows: + table.append(newrow) + else: + raise RuntimeError, 'formstyle not supported' + return table + + + def accepts( + self, + request_vars, + session=None, + formname='%(tablename)s/%(record_id)s', + keepvalues=False, + onvalidation=None, + dbio=True, + hideerror=False, + detect_record_change=False, + ): + + """ + similar FORM.accepts but also does insert, update or delete in DAL. + but if detect_record_change == True than: + form.record_changed = False (record is properly validated/submitted) + form.record_changed = True (record cannot be submitted because changed) + elseif detect_record_change == False than: + form.record_changed = None + """ + + if request_vars.__class__.__name__ == 'Request': + request_vars = request_vars.post_vars + + keyed = hasattr(self.table, '_primarykey') + + # implement logic to detect whether record exist but has been modified + # server side + self.record_changed = None + if detect_record_change: + if self.record: + self.record_changed = False + serialized = '|'.join(str(self.record[k]) for k in self.table.fields()) + self.record_hash = md5_hash(serialized) + + # logic to deal with record_id for keyed tables + if self.record: + if keyed: + formname_id = '.'.join(str(self.record[k]) + for k in self.table._primarykey + if hasattr(self.record,k)) + record_id = dict((k, request_vars[k]) for k in self.table._primarykey) + else: + (formname_id, record_id) = (self.record.id, + request_vars.get('id', None)) + keepvalues = True + else: + if keyed: + formname_id = 'create' + record_id = dict([(k, None) for k in 
self.table._primarykey]) + else: + (formname_id, record_id) = ('create', None) + + if not keyed and isinstance(record_id, (list, tuple)): + record_id = record_id[0] + + if formname: + formname = formname % dict(tablename = self.table._tablename, + record_id = formname_id) + + # ## THIS IS FOR UNIQUE RECORDS, read IS_NOT_IN_DB + + for fieldname in self.fields: + field = self.table[fieldname] + requires = field.requires or [] + if not isinstance(requires, (list, tuple)): + requires = [requires] + [item.set_self_id(self.record_id) for item in requires + if hasattr(item, 'set_self_id') and self.record_id] + + # ## END + + fields = {} + for key in self.vars: + fields[key] = self.vars[key] + + ret = FORM.accepts( + self, + request_vars, + session, + formname, + keepvalues, + onvalidation, + hideerror=hideerror, + ) + + if not ret and self.record and self.errors: + ### if there are errors in update mode + # and some errors refers to an already uploaded file + # delete error if + # - user not trying to upload a new file + # - there is existing file and user is not trying to delete it + # this is because removing the file may not pass validation + for key in self.errors.keys(): + if key in self.table \ + and self.table[key].type == 'upload' \ + and request_vars.get(key, None) in (None, '') \ + and self.record[key] \ + and not key + UploadWidget.ID_DELETE_SUFFIX in request_vars: + del self.errors[key] + if not self.errors: + ret = True + + requested_delete = \ + request_vars.get(self.FIELDNAME_REQUEST_DELETE, False) + + self.custom.end = TAG[''](self.hidden_fields(), self.custom.end) + + auch = record_id and self.errors and requested_delete + + # auch is true when user tries to delete a record + # that does not pass validation, yet it should be deleted + + if not ret and not auch: + for fieldname in self.fields: + field = self.table[fieldname] + ### this is a workaround! widgets should always have default not None! 
+ if not field.widget and field.type.startswith('list:') and \ + not OptionsWidget.has_options(field): + field.widget = self.widgets.list.widget + if hasattr(field, 'widget') and field.widget and fieldname in request_vars: + if fieldname in self.vars: + value = self.vars[fieldname] + elif self.record: + value = self.record[fieldname] + else: + value = self.table[fieldname].default + if field.type.startswith('list:') and \ + isinstance(value, str): + value = [value] + row_id = '%s_%s%s' % (self.table, fieldname, SQLFORM.ID_ROW_SUFFIX) + widget = field.widget(field, value) + self.field_parent[row_id].components = [ widget ] + if not field.type.startswith('list:'): + self.field_parent[row_id]._traverse(False, hideerror) + self.custom.widget[ fieldname ] = widget + self.accepted = ret + return ret + + if record_id and str(record_id) != str(self.record_id): + raise SyntaxError, 'user is tampering with form\'s record_id: ' \ + '%s != %s' % (record_id, self.record_id) + + if record_id and dbio and not keyed: + self.vars.id = self.record.id + + if requested_delete and self.custom.deletable: + if dbio: + if keyed: + qry = reduce(lambda x, y: x & y, + [self.table[k] == record_id[k] for k in self.table._primarykey]) + else: + qry = self.table._id == self.record.id + self.table._db(qry).delete() + self.errors.clear() + for component in self.elements('input, select, textarea'): + component['_disabled'] = True + self.accepted = True + return True + + for fieldname in self.fields: + if not fieldname in self.table.fields: + continue + + if not self.ignore_rw and not self.table[fieldname].writable: + ### this happens because FORM has no knowledge of writable + ### and thinks that a missing boolean field is a None + if self.table[fieldname].type == 'boolean' and \ + self.vars.get(fieldname, True) is None: + del self.vars[fieldname] + continue + + field = self.table[fieldname] + if field.type == 'id': + continue + if field.type == 'boolean': + if self.vars.get(fieldname, False): + 
self.vars[fieldname] = fields[fieldname] = True + else: + self.vars[fieldname] = fields[fieldname] = False + elif field.type == 'password' and self.record\ + and request_vars.get(fieldname, None) == \ + PasswordWidget.DEFAULT_PASSWORD_DISPLAY: + continue # do not update if password was not changed + elif field.type == 'upload': + f = self.vars[fieldname] + fd = '%s__delete' % fieldname + if f == '' or f is None: + if self.vars.get(fd, False) or not self.record: + fields[fieldname] = '' + else: + fields[fieldname] = self.record[fieldname] + self.vars[fieldname] = fields[fieldname] + continue + elif hasattr(f, 'file'): + (source_file, original_filename) = (f.file, f.filename) + elif isinstance(f, (str, unicode)): + ### do not know why this happens, it should not + (source_file, original_filename) = \ + (cStringIO.StringIO(f), 'file.txt') + newfilename = field.store(source_file, original_filename) + # this line is for backward compatibility only + self.vars['%s_newfilename' % fieldname] = newfilename + fields[fieldname] = newfilename + if isinstance(field.uploadfield, str): + fields[field.uploadfield] = source_file.read() + # proposed by Hamdy (accept?) do we need fields at this point? 
+ self.vars[fieldname] = fields[fieldname] + continue + elif fieldname in self.vars: + fields[fieldname] = self.vars[fieldname] + elif field.default is None and field.type != 'blob': + self.errors[fieldname] = 'no data' + self.accepted = False + return False + value = fields.get(fieldname,None) + if field.type == 'list:string': + if not isinstance(value, (tuple, list)): + fields[fieldname] = value and [value] or [] + elif isinstance(field.type,str) and field.type.startswith('list:'): + if not isinstance(value, list): + fields[fieldname] = [safe_int(x) for x in (value and [value] or [])] + elif field.type == 'integer': + if not value is None: + fields[fieldname] = safe_int(value) + elif field.type.startswith('reference'): + if not value is None and isinstance(self.table, Table) and not keyed: + fields[fieldname] = safe_int(value) + elif field.type == 'double': + if not value is None: + fields[fieldname] = safe_float(value) + + for fieldname in self.vars: + if fieldname != 'id' and fieldname in self.table.fields\ + and not fieldname in fields and not fieldname\ + in request_vars: + fields[fieldname] = self.vars[fieldname] + + if dbio: + if 'delete_this_record' in fields: + # this should never happen but seems to happen to some + del fields['delete_this_record'] + for field in self.table: + if not field.name in fields and field.writable==False \ + and field.update is None: + if record_id: + fields[field.name] = self.record[field.name] + elif not self.table[field.name].default is None: + fields[field.name] = self.table[field.name].default + if keyed: + if reduce(lambda x, y: x and y, record_id.values()): # if record_id + if fields: + qry = reduce(lambda x, y: x & y, + [self.table[k] == self.record[k] for k in self.table._primarykey]) + self.table._db(qry).update(**fields) + else: + pk = self.table.insert(**fields) + if pk: + self.vars.update(pk) + else: + ret = False + else: + if record_id: + self.vars.id = self.record.id + if fields: + self.table._db(self.table._id == 
self.record.id).update(**fields) + else: + self.vars.id = self.table.insert(**fields) + self.accepted = ret + return ret + + @staticmethod + def factory(*fields, **attributes): + """ + generates a SQLFORM for the given fields. + + Internally will build a non-database based data model + to hold the fields. + """ + # Define a table name, this way it can be logical to our CSS. + # And if you switch from using SQLFORM to SQLFORM.factory + # your same css definitions will still apply. + + table_name = attributes.get('table_name', 'no_table') + + # So it won't interfear with SQLDB.define_table + if 'table_name' in attributes: + del attributes['table_name'] + + return SQLFORM(DAL(None).define_table(table_name, *fields), + **attributes) + + @staticmethod + def grid(query, + fields=None, + field_id=None, + left=None, + headers={}, + columns=None, + orderby=None, + searchable=True, + sortable=True, + paginate=20, + deletable=True, + editable=True, + details=True, + selectable=None, + create=True, + csv=True, + links=None, + upload = '', + args=[], + user_signature = True, + maxtextlengths={}, + maxtextlength=20, + onvalidation=None, + oncreate=None, + onupdate=None, + ondelete=None, + sorter_icons=('[^]','[v]'), + ui = 'web2py', + showbuttontext=True, + _class="web2py_grid", + formname='web2py_grid', + ): + + # jQuery UI ThemeRoller classes (empty if ui is disabled) + if ui == 'jquery-ui': + ui = dict(widget='ui-widget', + header='ui-widget-header', + content='ui-widget-content', + default='ui-state-default', + cornerall='ui-corner-all', + cornertop='ui-corner-top', + cornerbottom='ui-corner-bottom', + button='ui-button-text-icon-primary', + buttontext='ui-button-text', + buttonadd='ui-icon ui-icon-plusthick', + buttonback='ui-icon ui-icon-arrowreturnthick-1-w', + buttonexport='ui-icon ui-icon-transferthick-e-w', + buttondelete='ui-icon ui-icon-trash', + buttonedit='ui-icon ui-icon-pencil', + buttontable='ui-icon ui-icon-triangle-1-e', + buttonview='ui-icon ui-icon-zoomin', 
+ ) + elif ui == 'web2py': + ui = dict(widget='', + header='', + content='', + default='', + cornerall='', + cornertop='', + cornerbottom='', + button='button', + buttontext='buttontext button', + buttonadd='icon plus', + buttonback='icon leftarrow', + buttonexport='icon downarrow', + buttondelete='icon trash', + buttonedit='icon pen', + buttontable='icon rightarrow', + buttonview='icon magnifier', + ) + elif not isinstance(ui,dict): + raise RuntimeError,'SQLFORM.grid ui argument must be a dictionary' + + from gluon import current, redirect + db = query._db + T = current.T + request = current.request + session = current.session + response = current.response + wenabled = (not user_signature or (session.auth and session.auth.user)) + #create = wenabled and create + #editable = wenabled and editable + deletable = wenabled and deletable + def url(**b): + b['args'] = args+b.get('args',[]) + b['user_signature'] = user_signature + return URL(**b) + + def gridbutton(buttonclass='buttonadd',buttontext='Add',buttonurl=url(args=[]),callback=None,delete=None): + if showbuttontext: + if callback: + return A(SPAN(_class=ui.get(buttonclass,'')), + SPAN(T(buttontext),_title=buttontext, + _class=ui.get('buttontext','')), + callback=callback,delete=delete, + _class=ui.get('button','')) + else: + return A(SPAN(_class=ui.get(buttonclass,'')), + SPAN(T(buttontext),_title=buttontext, + _class=ui.get('buttontext','')), + _href=buttonurl,_class=ui.get('button','')) + else: + if callback: + return A(SPAN(_class=ui.get(buttonclass,'')), + callback=callback,delete=delete, + _title=buttontext,_class=ui.get('buttontext','')) + else: + return A(SPAN(_class=ui.get(buttonclass,'')), + _href=buttonurl,_title=buttontext, + _class=ui.get('buttontext','')) + + dbset = db(query) + tables = [db[tablename] for tablename in db._adapter.tables( + dbset.query)] + if not fields: + fields = reduce(lambda a,b:a+b, + [[field for field in table] for table in tables]) + if not field_id: + field_id = 
tables[0]._id + table = field_id.table + tablename = table._tablename + referrer = session.get('_web2py_grid_referrer_'+formname, url()) + def check_authorization(): + if user_signature: + if not URL.verify(request,user_signature=user_signature): + session.flash = T('not authorized') + redirect(referrer) + if upload=='': + upload = lambda filename: url(args=['download',filename]) + if len(request.args)>1 and request.args[-2]=='download': + check_authorization() + stream = response.download(request,db) + raise HTTP(200,stream,**response.headers) + + def buttons(edit=False,view=False,record=None): + buttons = DIV(gridbutton('buttonback', 'Back', referrer), + _class='form_header row_buttons %(header)s %(cornertop)s' % ui) + if edit: + args = ['edit',table._tablename,request.args[-1]] + buttons.append(gridbutton('buttonedit', 'Edit', + url(args=args))) + if view: + args = ['view',table._tablename,request.args[-1]] + buttons.append(gridbutton('buttonview', 'View', + url(args=args))) + if record and links: + for link in links: + buttons.append(link(record)) + return buttons + + formfooter = DIV( + _class='form_footer row_buttons %(header)s %(cornerbottom)s' % ui) + + create_form = edit_form = None + + if create and len(request.args)>1 and request.args[-2]=='new': + check_authorization() + table = db[request.args[-1]] + create_form = SQLFORM( + table, + _class='web2py_form' + ).process(next=referrer, + onvalidation=onvalidation, + onsuccess=oncreate, + formname=formname) + res = DIV(buttons(),create_form,formfooter,_class=_class) + res.create_form = create_form + res.edit_form = None + res.update_form = None + return res + elif details and len(request.args)>2 and request.args[-3]=='view': + check_authorization() + table = db[request.args[-2]] + record = table(request.args[-1]) or redirect(URL('error')) + form = SQLFORM(table,record,upload=upload, + readonly=True,_class='web2py_form') + res = DIV(buttons(edit=editable,record=record),form, + formfooter,_class=_class) + 
res.create_form = None + res.edit_form = None + res.update_form = None + return res + elif editable and len(request.args)>2 and request.args[-3]=='edit': + check_authorization() + table = db[request.args[-2]] + record = table(request.args[-1]) or redirect(URL('error')) + edit_form = SQLFORM(table,record,upload=upload, + deletable=deletable, + _class='web2py_form') + edit_form.process(formname=formname, + onvalidation=onvalidation, + onsuccess=onupdate, + next=referrer) + res = DIV(buttons(view=details,record=record), + edit_form,formfooter,_class=_class) + res.create_form = None + res.edit_form = edit_form + res.update_form = None + return res + elif deletable and len(request.args)>2 and request.args[-3]=='delete': + check_authorization() + table = db[request.args[-2]] + ret = db(table.id==request.args[-1]).delete() + if ondelete: + return ondelete(table,request.args[-2],ret) + return ret + elif csv and len(request.args)>0 and request.args[-1]=='csv': + check_authorization() + response.headers['Content-Type'] = 'text/csv' + response.headers['Content-Disposition'] = \ + 'attachment;filename=rows.csv;' + raise HTTP(200,str(dbset.select()), + **{'Content-Type':'text/csv', + 'Content-Disposition':'attachment;filename=rows.csv;'}) + elif request.vars.records and not isinstance( + request.vars.records,list): + request.vars.records=[request.vars.records] + elif not request.vars.records: + request.vars.records=[] + def OR(a,b): return a|b + def AND(a,b): return a&b + + session['_web2py_grid_referrer_'+formname] = \ + URL(args=request.args,vars=request.vars, + user_signature=user_signature) + console = DIV(_class='web2py_console %(header)s %(cornertop)s' % ui) + error = None + search_form = None + if searchable: + form = FORM(INPUT(_name='keywords',_value=request.vars.keywords, + _id='web2py_keywords'), + INPUT(_type='submit',_value=T('Search')), + INPUT(_type='submit',_value=T('Clear'), + _onclick="jQuery('#web2py_keywords').val('');"), + _method="GET",_action=url()) + 
search_form = form + console.append(form) + key = request.vars.get('keywords','').strip() + if searchable==True: + subquery = None + if key and not ' ' in key: + SEARCHABLE_TYPES = ('string','text','list:string') + parts = [field.contains(key) for field in fields \ + if field.type in SEARCHABLE_TYPES] + else: + parts = None + if parts: + subquery = reduce(OR,parts) + else: + try: + subquery = smart_query(fields,key) + except RuntimeError: + subquery = None + error = T('Invalid query') + else: + subquery = searchable(key,fields) + if subquery: + dbset = dbset(subquery) + try: + if left: + nrows = dbset.select('count(*)',left=left).first()['count(*)'] + else: + nrows = dbset.count() + except: + nrows = 0 + error = T('Unsupported query') + + search_actions = DIV(_class='web2py_search_actions') + if create: + search_actions.append(gridbutton( + buttonclass='buttonadd', + buttontext='Add', + buttonurl=url(args=['new',tablename]))) + if csv: + search_actions.append(gridbutton( + buttonclass='buttonexport', + buttontext='Export', + buttonurl=url(args=['csv']))) + + console.append(search_actions) + + message = error or T('%(nrows)s records found' % dict(nrows=nrows)) + + console.append(DIV(message,_class='web2py_counter')) + + order = request.vars.order or '' + if sortable: + if order and not order=='None': + if order[:1]=='~': + sign, rorder = '~', order[1:] + else: + sign, rorder = '', order + tablename,fieldname = rorder.split('.',1) + if sign=='~': + orderby=~db[tablename][fieldname] + else: + orderby=db[tablename][fieldname] + + head = TR(_class=ui.get('header','')) + if selectable: + head.append(TH(_class=ui.get('default',''))) + for field in fields: + if columns and not str(field) in columns: continue + if not field.readable: continue + key = str(field) + header = headers.get(str(field), + hasattr(field,'label') and field.label or key) + if sortable: + if key == order: + key, marker = '~'+order, sorter_icons[0] + elif key == order[1:]: + marker = sorter_icons[1] + 
else: + marker = '' + header = A(header,marker,_href=url(vars=dict( + keywords=request.vars.keywords or '', + order=key))) + head.append(TH(header, _class=ui.get('default',''))) + + for link in links or []: + if isinstance(link,dict): + head.append(TH(link['header'], _class=ui.get('default',''))) + + head.append(TH(_class=ui.get('default',''))) + + paginator = UL() + if paginate and paginate0: + paginator.append(LI(self_link('<<',0))) + if page>1: + paginator.append(LI(self_link('<',page-1))) + pages = range(max(0,page-5),min(page+5,npages-1)) + for p in pages: + if p == page: + paginator.append(LI(A(p+1,_onclick='return false'), + _class='current')) + else: + paginator.append(LI(self_link(p+1,p))) + if page',page+1))) + if page>',npages-1))) + else: + limitby = None + + rows = dbset.select(left=left,orderby=orderby,limitby=limitby,*fields) + if not searchable and not rows: return DIV(T('No records found')) + if rows: + htmltable = TABLE(THEAD(head)) + tbody = TBODY() + numrec=0 + for row in rows: + if numrec % 2 == 0: + classtr = 'even' + else: + classtr = 'odd' + numrec+=1 + id = row[field_id] + if len(tables)>1 or row.get('_extra',None): + rrow = row[field._tablename] + else: + rrow = row + tr = TR(_class=classtr) + if selectable: + tr.append(INPUT(_type="checkbox",_name="records",_value=id, + value=request.vars.records)) + for field in fields: + if columns and not str(field) in columns: continue + if not field.readable: continue + if field.type=='blob': continue + value = row[field] + if field.represent: + try: + value=field.represent(value,rrow) + except KeyError: + pass + elif field.type=='boolean': + value = INPUT(_type="checkbox",_checked = value, + _disabled=True) + elif field.type=='upload': + if value: + if callable(upload): + value = A('File', _href=upload(value)) + elif upload: + value = A('File', + _href='%s/%s' % (upload, value)) + else: + value = '' + elif isinstance(value,str) and len(value)>maxtextlength: + 
value=value[:maxtextlengths.get(str(field),maxtextlength)]+'...' + else: + value=field.formatter(value) + tr.append(TD(value)) + row_buttons = TD(_class='row_buttons') + for link in links or []: + if isinstance(link, dict): + tr.append(TD(link['body'](row))) + else: + row_buttons.append(link(row)) + if details and (not callable(details) or details(row)): + row_buttons.append(gridbutton( + 'buttonview', 'View', + url(args=['view',tablename,id]))) + if editable and (not callable(editable) or editable(row)): + row_buttons.append(gridbutton( + 'buttonedit', 'Edit', + url(args=['edit',tablename,id]))) + if deletable and (not callable(deletable) or deletable(row)): + row_buttons.append(gridbutton( + 'buttondelete', 'Delete', + callback=url(args=['delete',tablename,id]), + delete='tr')) + tr.append(row_buttons) + tbody.append(tr) + htmltable.append(tbody) + if selectable: + htmltable = FORM(htmltable,INPUT(_type="submit")) + if htmltable.process(formname=formname).accepted: + records = [int(r) for r in htmltable.vars.records or []] + selectable(records) + redirect(referrer) + else: + htmltable = DIV(T('No records found')) + res = DIV(console, + DIV(htmltable,_class="web2py_table"), + DIV(paginator,_class=\ + "web2py_paginator %(header)s %(cornerbottom)s" % ui), + _class='%s %s' % (_class, ui.get('widget',''))) + res.create_form = create_form + res.edit_form = edit_form + res.search_form = search_form + return res + + @staticmethod + def smartgrid(table, constraints=None, links=None, + linked_tables=None, user_signature=True, + **kwargs): + """ + @auth.requires_login() + def index(): + db.define_table('person',Field('name'),format='%(name)s') + db.define_table('dog', + Field('name'),Field('owner',db.person),format='%(name)s') + db.define_table('comment',Field('body'),Field('dog',db.dog)) + if db(db.person).isempty(): + from gluon.contrib.populate import populate + populate(db.person,300) + populate(db.dog,300) + populate(db.comment,1000) + db.commit() + 
form=SQLFORM.smartgrid(db[request.args(0) or 'person']) #*** + return dict(form=form) + + *** builds a complete interface to navigate all tables links + to the request.args(0) + table: pagination, search, view, edit, delete, + children, parent, etc. + + constraints is a dict {'table',query} that limits which + records can be accessible + links is a list of lambda row: A(....) that will add buttons + linked_tables is a optional list of tablenames of tables to be linked + + """ + from gluon import current, A, URL, DIV, H3, redirect + request, T = current.request, current.T + db = table._db + if links is None: links = [] + if constraints is None: constraints = {} + breadcrumbs = [] + if request.args(0) != table._tablename: + request.args=[table._tablename] + try: + args = 1 + previous_tablename,previous_fieldname,previous_id = \ + table._tablename,None,None + while len(request.args)>args: + key = request.args(args) + if '.' in key: + id = request.args(args+1) + tablename,fieldname = key.split('.',1) + table = db[tablename] + field = table[fieldname] + field.default = id + referee = field.type[10:] + if referee!=previous_tablename: + raise HTTP(400) + cond = constraints.get(referee,None) + if cond: + record = db(db[referee].id==id)(cond).select().first() + else: + record = db[referee](id) + if previous_id: + if record[previous_fieldname] != int(previous_id): + raise HTTP(400) + previous_tablename,previous_fieldname,previous_id = \ + tablename,fieldname,id + try: + name = db[referee]._format % record + except TypeError: + name = id + breadcrumbs += [A(T(referee), + _href=URL(args=request.args[:args])),' ', + A(name, + _href=URL(args=request.args[:args]+[ + 'view',referee,id],user_signature=True)), + ' > '] + args+=2 + else: + break + if args>1: + query = (field == id) + if linked_tables is None or referee in linked_tables: + field.represent = lambda id,r=None,referee=referee,rep=field.represent: A(rep(id),_href=URL(args=request.args[:args]+['view',referee,id], 
user_signature=user_signature)) + except (KeyError,ValueError,TypeError): + redirect(URL(args=table._tablename)) + if args==1: + query = table.id>0 + if table._tablename in constraints: + query = query&constraints[table._tablename] + for tablename,fieldname in table._referenced_by: + if linked_tables is None or tablename in linked_tables: + args0 = tablename+'.'+fieldname + links.append(lambda row,t=T(tablename),args=args,args0=args0:\ + A(SPAN(t),_href=URL(args=request.args[:args]+[args0,row.id]))) + grid=SQLFORM.grid(query,args=request.args[:args],links=links, + user_signature=user_signature,**kwargs) + if isinstance(grid,DIV): + breadcrumbs.append(A(T(table._tablename), + _href=URL(args=request.args[:args]))) + grid.insert(0,DIV(H3(*breadcrumbs),_class='web2py_breadcrumbs')) + return grid + + +class SQLTABLE(TABLE): + + """ + given a Rows object, as returned by a db().select(), generates + an html table with the rows. + + optional arguments: + + :param linkto: URL (or lambda to generate a URL) to edit individual records + :param upload: URL to download uploaded files + :param orderby: Add an orderby link to column headers. + :param headers: dictionary of headers to headers redefinions + headers can also be a string to gerenare the headers from data + for now only headers="fieldname:capitalize", + headers="labels" and headers=None are supported + :param truncate: length at which to truncate text in table cells. + Defaults to 16 characters. 
+ :param columns: a list or dict contaning the names of the columns to be shown + Defaults to all + + Optional names attributes for passed to the tag + + The keys of headers and columns must be of the form "tablename.fieldname" + + Simple linkto example:: + + rows = db.select(db.sometable.ALL) + table = SQLTABLE(rows, linkto='someurl') + + This will link rows[id] to .../sometable/value_of_id + + + More advanced linkto example:: + + def mylink(field, type, ref): + return URL(args=[field]) + + rows = db.select(db.sometable.ALL) + table = SQLTABLE(rows, linkto=mylink) + + This will link rows[id] to + current_app/current_controlle/current_function/value_of_id + + New Implements: 24 June 2011: + ----------------------------- + + :param selectid: The id you want to select + :param renderstyle: Boolean render the style with the table + + :param extracolums = [{'label':A('Extra',_href='#'), + 'class': '', #class name of the header + 'width':'', #width in pixels or % + 'content':lambda row, rc: A('Edit',_href='edit/%s'%row.id), + 'selected': False #agregate class selected to this column + }] + + + :param headers = {'table.id':{'label':'Id', + 'class':'', #class name of the header + 'width':'', #width in pixels or % + 'truncate': 16, #truncate the content to... + 'selected': False #agregate class selected to this column + }, + 'table.myfield':{'label':'My field', + 'class':'', #class name of the header + 'width':'', #width in pixels or % + 'truncate': 16, #truncate the content to... 
+ 'selected': False #agregate class selected to this column + }, + } + + table = SQLTABLE(rows, headers=headers, extracolums=extracolums) + + + """ + + def __init__( + self, + sqlrows, + linkto=None, + upload=None, + orderby=None, + headers={}, + truncate=16, + columns=None, + th_link='', + extracolumns=None, + selectid=None, + renderstyle=False, + **attributes + ): + + TABLE.__init__(self, **attributes) + + self.components = [] + self.attributes = attributes + self.sqlrows = sqlrows + (components, row) = (self.components, []) + if not sqlrows: + return + if not columns: + columns = sqlrows.colnames + if headers=='fieldname:capitalize': + headers = {} + for c in columns: + headers[c] = ' '.join([w.capitalize() for w in c.split('.')[-1].split('_')]) + elif headers=='labels': + headers = {} + for c in columns: + (t,f) = c.split('.') + field = sqlrows.db[t][f] + headers[c] = field.label + if not headers is None: + for c in columns:#new implement dict + if isinstance(headers.get(c, c), dict): + coldict = headers.get(c, c) + attrcol = dict() + if coldict['width']!="": + attrcol.update(_width=coldict['width']) + if coldict['class']!="": + attrcol.update(_class=coldict['class']) + row.append(TH(coldict['label'],**attrcol)) + elif orderby: + row.append(TH(A(headers.get(c, c), + _href=th_link+'?orderby=' + c))) + else: + row.append(TH(headers.get(c, c))) + + if extracolumns:#new implement dict + for c in extracolumns: + attrcol = dict() + if c['width']!="": + attrcol.update(_width=c['width']) + if c['class']!="": + attrcol.update(_class=c['class']) + row.append(TH(c['label'],**attrcol)) + + components.append(THEAD(TR(*row))) + + + tbody = [] + for (rc, record) in enumerate(sqlrows): + row = [] + if rc % 2 == 0: + _class = 'even' + else: + _class = 'odd' + + if not selectid is None: #new implement + if record.id==selectid: + _class += ' rowselected' + + for colname in columns: + if not table_field.match(colname): + if "_extra" in record and colname in record._extra: + r = 
record._extra[colname] + row.append(TD(r)) + continue + else: + raise KeyError("Column %s not found (SQLTABLE)" % colname) + (tablename, fieldname) = colname.split('.') + try: + field = sqlrows.db[tablename][fieldname] + except KeyError: + field = None + if tablename in record \ + and isinstance(record,Row) \ + and isinstance(record[tablename],Row): + r = record[tablename][fieldname] + elif fieldname in record: + r = record[fieldname] + else: + raise SyntaxError, 'something wrong in Rows object' + r_old = r + if not field: + pass + elif linkto and field.type == 'id': + try: + href = linkto(r, 'table', tablename) + except TypeError: + href = '%s/%s/%s' % (linkto, tablename, r_old) + r = A(r, _href=href) + elif field.type.startswith('reference'): + if linkto: + ref = field.type[10:] + try: + href = linkto(r, 'reference', ref) + except TypeError: + href = '%s/%s/%s' % (linkto, ref, r_old) + if ref.find('.') >= 0: + tref,fref = ref.split('.') + if hasattr(sqlrows.db[tref],'_primarykey'): + href = '%s/%s?%s' % (linkto, tref, urllib.urlencode({fref:r})) + r = A(represent(field,r,record), _href=str(href)) + elif field.represent: + r = represent(field,r,record) + elif linkto and hasattr(field._table,'_primarykey') and fieldname in field._table._primarykey: + # have to test this with multi-key tables + key = urllib.urlencode(dict( [ \ + ((tablename in record \ + and isinstance(record, Row) \ + and isinstance(record[tablename], Row)) and + (k, record[tablename][k])) or (k, record[k]) \ + for k in field._table._primarykey ] )) + r = A(r, _href='%s/%s?%s' % (linkto, tablename, key)) + elif field.type.startswith('list:'): + r = represent(field,r or [],record) + elif field.represent: + r = represent(field,r,record) + elif field.type == 'blob' and r: + r = 'DATA' + elif field.type == 'upload': + if upload and r: + r = A('file', _href='%s/%s' % (upload, r)) + elif r: + r = 'file' + else: + r = '' + elif field.type in ['string','text']: + r = str(field.formatter(r)) + ur = 
unicode(r, 'utf8') + if headers!={}: #new implement dict + if isinstance(headers[colname],dict): + if isinstance(headers[colname]['truncate'], int) \ + and len(ur)>headers[colname]['truncate']: + r = ur[:headers[colname]['truncate'] - 3] + r = r.encode('utf8') + '...' + elif not truncate is None and len(ur) > truncate: + r = ur[:truncate - 3].encode('utf8') + '...' + + attrcol = dict()#new implement dict + if headers!={}: + if isinstance(headers[colname],dict): + colclass=headers[colname]['class'] + if headers[colname]['selected']: + colclass= str(headers[colname]['class'] + " colselected").strip() + if colclass!="": + attrcol.update(_class=colclass) + + row.append(TD(r,**attrcol)) + + if extracolumns:#new implement dict + for c in extracolumns: + attrcol = dict() + colclass=c['class'] + if c['selected']: + colclass= str(c['class'] + " colselected").strip() + if colclass!="": + attrcol.update(_class=colclass) + contentfunc = c['content'] + row.append(TD(contentfunc(record, rc),**attrcol)) + + tbody.append(TR(_class=_class, *row)) + + if renderstyle: + components.append(STYLE(self.style())) + + components.append(TBODY(*tbody)) + + + def style(self): + + css = ''' + table tbody tr.odd { + background-color: #DFD; + } + table tbody tr.even { + background-color: #EFE; + } + table tbody tr.rowselected { + background-color: #FDD; + } + table tbody tr td.colselected { + background-color: #FDD; + } + table tbody tr:hover { + background: #DDF; + } + ''' + + return css + +form_factory = SQLFORM.factory # for backward compatibility, deprecated + + + + ADDED gluon/sqlhtml.pyc Index: gluon/sqlhtml.pyc ================================================================== --- /dev/null +++ gluon/sqlhtml.pyc cannot compute difference between binary files ADDED gluon/storage.py Index: gluon/storage.py ================================================================== --- /dev/null +++ gluon/storage.py @@ -0,0 +1,226 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file 
is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Provides: + +- List; like list but returns None instead of IndexOutOfBounds +- Storage; like dictionary allowing also for `obj.foo` for `obj['foo']` +""" + +import cPickle +import portalocker + +__all__ = ['List', 'Storage', 'Settings', 'Messages', + 'StorageList', 'load_storage', 'save_storage'] + + +class List(list): + """ + Like a regular python list but a[i] if i is out of bounds return None + instead of IndexOutOfBounds + """ + + def __call__(self, i, default=None): + if 0<=i>> o = Storage(a=1) + >>> print o.a + 1 + + >>> o['a'] + 1 + + >>> o.a = 2 + >>> print o['a'] + 2 + + >>> del o.a + >>> print o.a + None + + """ + + def __getattr__(self, key): + if key in self: + return self[key] + else: + return None + + def __setattr__(self, key, value): + if value is None: + if key in self: + del self[key] + else: + self[key] = value + + def __delattr__(self, key): + if key in self: + del self[key] + else: + raise AttributeError, "missing key=%s" % key + + def __repr__(self): + return '' + + def __getstate__(self): + return dict(self) + + def __setstate__(self, value): + for (k, v) in value.items(): + self[k] = v + + def getlist(self, key): + """Return a Storage value as a list. + + If the value is a list it will be returned as-is. + If object is None, an empty list will be returned. + Otherwise, [value] will be returned. 
+ + Example output for a query string of ?x=abc&y=abc&y=def + >>> request = Storage() + >>> request.vars = Storage() + >>> request.vars.x = 'abc' + >>> request.vars.y = ['abc', 'def'] + >>> request.vars.getlist('x') + ['abc'] + >>> request.vars.getlist('y') + ['abc', 'def'] + >>> request.vars.getlist('z') + [] + + """ + value = self.get(key, None) + if isinstance(value, (list, tuple)): + return value + elif value is None: + return [] + return [value] + + def getfirst(self, key): + """Return the first or only value when given a request.vars-style key. + + If the value is a list, its first item will be returned; + otherwise, the value will be returned as-is. + + Example output for a query string of ?x=abc&y=abc&y=def + >>> request = Storage() + >>> request.vars = Storage() + >>> request.vars.x = 'abc' + >>> request.vars.y = ['abc', 'def'] + >>> request.vars.getfirst('x') + 'abc' + >>> request.vars.getfirst('y') + 'abc' + >>> request.vars.getfirst('z') + + """ + value = self.getlist(key) + if len(value): + return value[0] + return None + + def getlast(self, key): + """Returns the last or only single value when given a request.vars-style key. + + If the value is a list, the last item will be returned; + otherwise, the value will be returned as-is. 
+ + Simulated output with a query string of ?x=abc&y=abc&y=def + >>> request = Storage() + >>> request.vars = Storage() + >>> request.vars.x = 'abc' + >>> request.vars.y = ['abc', 'def'] + >>> request.vars.getlast('x') + 'abc' + >>> request.vars.getlast('y') + 'def' + >>> request.vars.getlast('z') + + """ + value = self.getlist(key) + if len(value): + return value[-1] + return None + +class StorageList(Storage): + """ + like Storage but missing elements default to [] instead of None + """ + def __getattr__(self, key): + if key in self: + return self[key] + else: + self[key] = [] + return self[key] + +def load_storage(filename): + fp = open(filename, 'rb') + try: + portalocker.lock(fp, portalocker.LOCK_EX) + storage = cPickle.load(fp) + portalocker.unlock(fp) + finally: + fp.close() + return Storage(storage) + + +def save_storage(storage, filename): + fp = open(filename, 'wb') + try: + portalocker.lock(fp, portalocker.LOCK_EX) + cPickle.dump(dict(storage), fp) + portalocker.unlock(fp) + finally: + fp.close() + + +class Settings(Storage): + + def __setattr__(self, key, value): + if key != 'lock_keys' and self.get('lock_keys', None)\ + and not key in self: + raise SyntaxError, 'setting key \'%s\' does not exist' % key + if key != 'lock_values' and self.get('lock_values', None): + raise SyntaxError, 'setting value cannot be changed: %s' % key + self[key] = value + + +class Messages(Storage): + + def __init__(self, T): + self['T'] = T + + def __setattr__(self, key, value): + if key != 'lock_keys' and self.get('lock_keys', None)\ + and not key in self: + raise SyntaxError, 'setting key \'%s\' does not exist' % key + if key != 'lock_values' and self.get('lock_values', None): + raise SyntaxError, 'setting value cannot be changed: %s' % key + self[key] = value + + def __getattr__(self, key): + value = self[key] + if isinstance(value, str): + return str(self['T'](value)) + return value + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + ADDED 
gluon/storage.pyc Index: gluon/storage.pyc ================================================================== --- /dev/null +++ gluon/storage.pyc cannot compute difference between binary files ADDED gluon/streamer.py Index: gluon/streamer.py ================================================================== --- /dev/null +++ gluon/streamer.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import os +import stat +import time +import re +import errno +import rewrite +from http import HTTP +from contenttype import contenttype + + +regex_start_range = re.compile('\d+(?=\-)') +regex_stop_range = re.compile('(?<=\-)\d+') + +DEFAULT_CHUNK_SIZE = 64*1024 + +def streamer(stream, chunk_size = DEFAULT_CHUNK_SIZE, bytes = None): + offset = 0 + while bytes is None or offset < bytes: + if not bytes is None and bytes - offset < chunk_size: + chunk_size = bytes - offset + data = stream.read(chunk_size) + length = len(data) + if not length: + break + else: + yield data + if length < chunk_size: + break + offset += length + stream.close() + +def stream_file_or_304_or_206( + static_file, + chunk_size = DEFAULT_CHUNK_SIZE, + request = None, + headers = {}, + error_message = None, + ): + if error_message is None: + error_message = rewrite.thread.routes.error_message % 'invalid request' + try: + fp = open(static_file) + except IOError, e: + if e[0] == errno.EISDIR: + raise HTTP(403, error_message, web2py_error='file is a directory') + elif e[0] == errno.EACCES: + raise HTTP(403, error_message, web2py_error='inaccessible file') + else: + raise HTTP(404, error_message, web2py_error='invalid file') + else: + fp.close() + stat_file = os.stat(static_file) + fsize = stat_file[stat.ST_SIZE] + mtime = time.strftime('%a, %d %b %Y %H:%M:%S GMT', + time.gmtime(stat_file[stat.ST_MTIME])) + headers['Content-Type'] = 
contenttype(static_file) + headers['Last-Modified'] = mtime + headers['Pragma'] = 'cache' + headers['Cache-Control'] = 'private' + + if request and request.env.http_if_modified_since == mtime: + raise HTTP(304, **{'Content-Type': headers['Content-Type']}) + + elif request and request.env.http_range: + start_items = regex_start_range.findall(request.env.http_range) + if not start_items: + start_items = [0] + stop_items = regex_stop_range.findall(request.env.http_range) + if not stop_items or int(stop_items[0]) > fsize - 1: + stop_items = [fsize - 1] + part = (int(start_items[0]), int(stop_items[0]), fsize) + bytes = part[1] - part[0] + 1 + try: + stream = open(static_file, 'rb') + except IOError, e: + if e[0] in (errno.EISDIR, errno.EACCES): + raise HTTP(403) + else: + raise HTTP(404) + stream.seek(part[0]) + headers['Content-Range'] = 'bytes %i-%i/%i' % part + headers['Content-Length'] = '%i' % bytes + status = 206 + else: + try: + stream = open(static_file, 'rb') + except IOError, e: + if e[0] in (errno.EISDIR, errno.EACCES): + raise HTTP(403) + else: + raise HTTP(404) + headers['Content-Length'] = fsize + bytes = None + status = 200 + if request and request.env.web2py_use_wsgi_file_wrapper: + wrapped = request.env.wsgi_file_wrapper(stream, chunk_size) + else: + wrapped = streamer(stream, chunk_size=chunk_size, bytes=bytes) + raise HTTP(status, wrapped, **headers) + + + ADDED gluon/streamer.pyc Index: gluon/streamer.pyc ================================================================== --- /dev/null +++ gluon/streamer.pyc cannot compute difference between binary files ADDED gluon/template.py Index: gluon/template.py ================================================================== --- /dev/null +++ gluon/template.py @@ -0,0 +1,933 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework (Copyrighted, 2007-2011). 
+License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Author: Thadeus Burgess + +Contributors: + +- Thank you to Massimo Di Pierro for creating the original gluon/template.py +- Thank you to Jonathan Lundell for extensively testing the regex on Jython. +- Thank you to Limodou (creater of uliweb) who inspired the block-element support for web2py. +""" + +import os +import re +import cgi +import cStringIO +import logging +try: + from restricted import RestrictedError +except: + def RestrictedError(a,b,c): + logging.error(str(a)+':'+str(b)+':'+str(c)) + return RuntimeError + +class Node(object): + """ + Basic Container Object + """ + def __init__(self, value = None, pre_extend = False): + self.value = value + self.pre_extend = pre_extend + + def __str__(self): + return str(self.value) + +class SuperNode(Node): + def __init__(self, name = '', pre_extend = False): + self.name = name + self.value = None + self.pre_extend = pre_extend + + def __str__(self): + if self.value: + return str(self.value) + else: + raise SyntaxError("Undefined parent block ``%s``. \n" % self.name + \ +"You must define a block before referencing it.\nMake sure you have not left out an ``{{end}}`` tag." ) + + def __repr__(self): + return "%s->%s" % (self.name, self.value) + +class BlockNode(Node): + """ + Block Container. + + This Node can contain other Nodes and will render in a hierarchical order + of when nodes were added. + + ie:: + + {{ block test }} + This is default block test + {{ end }} + """ + def __init__(self, name = '', pre_extend = False, delimiters = ('{{','}}')): + """ + name - Name of this Node. 
+ """ + self.nodes = [] + self.name = name + self.pre_extend = pre_extend + self.left, self.right = delimiters + + def __repr__(self): + lines = ['%sblock %s%s' % (self.left,self.name,self.right)] + for node in self.nodes: + lines.append(str(node)) + lines.append('%send%s' % (self.left, self.right)) + return ''.join(lines) + + def __str__(self): + """ + Get this BlockNodes content, not including child Nodes + """ + lines = [] + for node in self.nodes: + if not isinstance(node, BlockNode): + lines.append(str(node)) + return ''.join(lines) + + def append(self, node): + """ + Add an element to the nodes. + + Keyword Arguments + + - node -- Node object or string to append. + """ + if isinstance(node, str) or isinstance(node, Node): + self.nodes.append(node) + else: + raise TypeError("Invalid type; must be instance of ``str`` or ``BlockNode``. %s" % node) + + def extend(self, other): + """ + Extend the list of nodes with another BlockNode class. + + Keyword Arguments + + - other -- BlockNode or Content object to extend from. + """ + if isinstance(other, BlockNode): + self.nodes.extend(other.nodes) + else: + raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other) + + def output(self, blocks): + """ + Merges all nodes into a single string. + + blocks -- Dictionary of blocks that are extending + from this template. + """ + lines = [] + # Get each of our nodes + for node in self.nodes: + # If we have a block level node. + if isinstance(node, BlockNode): + # If we can override this block. + if node.name in blocks: + # Override block from vars. + lines.append(blocks[node.name].output(blocks)) + # Else we take the default + else: + lines.append(node.output(blocks)) + # Else its just a string + else: + lines.append(str(node)) + # Now combine all of our lines together. + return ''.join(lines) + +class Content(BlockNode): + """ + Parent Container -- Used as the root level BlockNode. + + Contains functions that operate as such. 
+ """ + def __init__(self, name = "ContentBlock", pre_extend = False): + """ + Keyword Arguments + + name -- Unique name for this BlockNode + """ + self.name = name + self.nodes = [] + self.blocks = {} + self.pre_extend = pre_extend + + def __str__(self): + lines = [] + # For each of our nodes + for node in self.nodes: + # If it is a block node. + if isinstance(node, BlockNode): + # And the node has a name that corresponds with a block in us + if node.name in self.blocks: + # Use the overriding output. + lines.append(self.blocks[node.name].output(self.blocks)) + else: + # Otherwise we just use the nodes output. + lines.append(node.output(self.blocks)) + else: + # It is just a string, so include it. + lines.append(str(node)) + # Merge our list together. + return ''.join(lines) + + def _insert(self, other, index = 0): + """ + Inserts object at index. + """ + if isinstance(other, str) or isinstance(other, Node): + self.nodes.insert(index, other) + else: + raise TypeError("Invalid type, must be instance of ``str`` or ``Node``.") + + def insert(self, other, index = 0): + """ + Inserts object at index. + + You may pass a list of objects and have them inserted. + """ + if isinstance(other, (list, tuple)): + # Must reverse so the order stays the same. + other.reverse() + for item in other: + self._insert(item, index) + else: + self._insert(other, index) + + def append(self, node): + """ + Adds a node to list. If it is a BlockNode then we assign a block for it. + """ + if isinstance(node, str) or isinstance(node, Node): + self.nodes.append(node) + if isinstance(node, BlockNode): + self.blocks[node.name] = node + else: + raise TypeError("Invalid type, must be instance of ``str`` or ``BlockNode``. 
%s" % node) + + def extend(self, other): + """ + Extends the objects list of nodes with another objects nodes + """ + if isinstance(other, BlockNode): + self.nodes.extend(other.nodes) + self.blocks.update(other.blocks) + else: + raise TypeError("Invalid type; must be instance of ``BlockNode``. %s" % other) + + def clear_content(self): + self.nodes = [] + +class TemplateParser(object): + + r_tag = re.compile(r'(\{\{.*?\}\})', re.DOTALL) + + r_multiline = re.compile(r'(""".*?""")|(\'\'\'.*?\'\'\')', re.DOTALL) + + # These are used for re-indentation. + # Indent + 1 + re_block = re.compile('^(elif |else:|except:|except |finally:).*$', + re.DOTALL) + # Indent - 1 + re_unblock = re.compile('^(return|continue|break|raise)( .*)?$', re.DOTALL) + # Indent - 1 + re_pass = re.compile('^pass( .*)?$', re.DOTALL) + + def __init__(self, text, + name = "ParserContainer", + context = dict(), + path = 'views/', + writer = 'response.write', + lexers = {}, + delimiters = ('{{','}}'), + _super_nodes = [], + ): + """ + text -- text to parse + context -- context to parse in + path -- folder path to templates + writer -- string of writer class to use + lexers -- dict of custom lexers to use. + delimiters -- for example ('{{','}}') + _super_nodes -- a list of nodes to check for inclusion + this should only be set by "self.extend" + It contains a list of SuperNodes from a child + template that need to be handled. + """ + + # Keep a root level name. + self.name = name + # Raw text to start parsing. + self.text = text + # Writer to use (refer to the default for an example). + # This will end up as + # "%s(%s, escape=False)" % (self.writer, value) + self.writer = writer + + # Dictionary of custom name lexers to use. + if isinstance(lexers, dict): + self.lexers = lexers + else: + self.lexers = {} + + # Path of templates + self.path = path + # Context for templates. 
+ self.context = context + + # allow optional alternative delimiters + self.delimiters = delimiters + if delimiters!=('{{','}}'): + escaped_delimiters = (re.escape(delimiters[0]),re.escape(delimiters[1])) + self.r_tag = re.compile(r'(%s.*?%s)' % escaped_delimiters, re.DOTALL) + + + # Create a root level Content that everything will go into. + self.content = Content(name=name) + + # Stack will hold our current stack of nodes. + # As we descend into a node, it will be added to the stack + # And when we leave, it will be removed from the stack. + # self.content should stay on the stack at all times. + self.stack = [self.content] + + # This variable will hold a reference to every super block + # that we come across in this template. + self.super_nodes = [] + + # This variable will hold a reference to the child + # super nodes that need handling. + self.child_super_nodes = _super_nodes + + # This variable will hold a reference to every block + # that we come across in this template + self.blocks = {} + + # Begin parsing. + self.parse(text) + + def to_string(self): + """ + Return the parsed template with correct indentation. + + Used to make it easier to port to python3. + """ + return self.reindent(str(self.content)) + + def __str__(self): + "Make sure str works exactly the same as python 3" + return self.to_string() + + def __unicode__(self): + "Make sure str works exactly the same as python 3" + return self.to_string() + + def reindent(self, text): + """ + Reindents a string of unindented python code. + """ + + # Get each of our lines into an array. + lines = text.split('\n') + + # Our new lines + new_lines = [] + + # Keeps track of how many indents we have. + # Used for when we need to drop a level of indentation + # only to reindent on the next line. + credit = 0 + + # Current indentation + k = 0 + + ################# + # THINGS TO KNOW + ################# + + # k += 1 means indent + # k -= 1 means unindent + # credit = 1 means unindent on the next line. 
+ + for raw_line in lines: + line = raw_line.strip() + + # ignore empty lines + if not line: + continue + + # If we have a line that contains python code that + # should be unindented for this line of code. + # and then reindented for the next line. + if TemplateParser.re_block.match(line): + k = k + credit - 1 + + # We obviously can't have a negative indentation + k = max(k,0) + + # Add the indentation! + new_lines.append(' '*(4*k)+line) + + # Bank account back to 0 again :( + credit = 0 + + # If we are a pass block, we obviously de-dent. + if TemplateParser.re_pass.match(line): + k -= 1 + + # If we are any of the following, de-dent. + # However, we should stay on the same level + # But the line right after us will be de-dented. + # So we add one credit to keep us at the level + # while moving back one indentation level. + if TemplateParser.re_unblock.match(line): + credit = 1 + k -= 1 + + # If we are an if statement, a try, or a semi-colon we + # probably need to indent the next line. + if line.endswith(':') and not line.startswith('#'): + k += 1 + + # This must come before so that we can raise an error with the + # right content. + new_text = '\n'.join(new_lines) + + if k > 0: + self._raise_error('missing "pass" in view', new_text) + elif k < 0: + self._raise_error('too many "pass" in view', new_text) + + return new_text + + def _raise_error(self, message='', text=None): + """ + Raise an error using itself as the filename and textual content. + """ + raise RestrictedError(self.name, text or self.text, message) + + def _get_file_text(self, filename): + """ + Attempt to open ``filename`` and retrieve its text. + + This will use self.path to search for the file. + """ + + # If they didn't specify a filename, how can we find one! + if not filename.strip(): + self._raise_error('Invalid template filename') + + # Get the filename; filename looks like ``"template.html"``. + # We need to eval to remove the quotes and get the string type. 
+ filename = eval(filename, self.context) + + # Get the path of the file on the system. + filepath = os.path.join(self.path, filename) + + # try to read the text. + try: + fileobj = open(filepath, 'rb') + text = fileobj.read() + fileobj.close() + except IOError: + self._raise_error('Unable to open included view file: ' + filepath) + + return text + + def include(self, content, filename): + """ + Include ``filename`` here. + """ + text = self._get_file_text(filename) + + t = TemplateParser(text, + name = filename, + context = self.context, + path = self.path, + writer = self.writer, + delimiters = self.delimiters) + + content.append(t.content) + + def extend(self, filename): + """ + Extend ``filename``. Anything not declared in a block defined by the + parent will be placed in the parent templates ``{{include}}`` block. + """ + text = self._get_file_text(filename) + + # Create out nodes list to send to the parent + super_nodes = [] + # We want to include any non-handled nodes. + super_nodes.extend(self.child_super_nodes) + # And our nodes as well. + super_nodes.extend(self.super_nodes) + + t = TemplateParser(text, + name = filename, + context = self.context, + path = self.path, + writer = self.writer, + delimiters = self.delimiters, + _super_nodes = super_nodes) + + # Make a temporary buffer that is unique for parent + # template. + buf = BlockNode(name='__include__' + filename, delimiters=self.delimiters) + pre = [] + + # Iterate through each of our nodes + for node in self.content.nodes: + # If a node is a block + if isinstance(node, BlockNode): + # That happens to be in the parent template + if node.name in t.content.blocks: + # Do not include it + continue + + if isinstance(node, Node): + # Or if the node was before the extension + # we should not include it + if node.pre_extend: + pre.append(node) + continue + + # Otherwise, it should go int the + # Parent templates {{include}} section. 
+ buf.append(node) + else: + buf.append(node) + + # Clear our current nodes. We will be replacing this with + # the parent nodes. + self.content.nodes = [] + + # Set our include, unique by filename + t.content.blocks['__include__' + filename] = buf + + # Make sure our pre_extended nodes go first + t.content.insert(pre) + + # Then we extend our blocks + t.content.extend(self.content) + + # Work off the parent node. + self.content = t.content + + def parse(self, text): + + # Basically, r_tag.split will split the text into + # an array containing, 'non-tag', 'tag', 'non-tag', 'tag' + # so if we alternate this variable, we know + # what to look for. This is alternate to + # line.startswith("{{") + in_tag = False + extend = None + pre_extend = True + + # Use a list to store everything in + # This is because later the code will "look ahead" + # for missing strings or brackets. + ij = self.r_tag.split(text) + # j = current index + # i = current item + for j in range(len(ij)): + i = ij[j] + + if i: + if len(self.stack) == 0: + self._raise_error('The "end" tag is unmatched, please check if you have a starting "block" tag') + + # Our current element in the stack. + top = self.stack[-1] + + if in_tag: + line = i + + # If we are missing any strings!!!! + # This usually happens with the following example + # template code + # + # {{a = '}}'}} + # or + # {{a = '}}blahblah{{'}} + # + # This will fix these + # This is commented out because the current template + # system has this same limitation. Since this has a + # performance hit on larger templates, I do not recommend + # using this code on production systems. This is still here + # for "i told you it *can* be fixed" purposes. 
+ # + # +# if line.count("'") % 2 != 0 or line.count('"') % 2 != 0: +# +# # Look ahead +# la = 1 +# nextline = ij[j+la] +# +# # As long as we have not found our ending +# # brackets keep going +# while '}}' not in nextline: +# la += 1 +# nextline += ij[j+la] +# # clear this line, so we +# # don't attempt to parse it +# # this is why there is an "if i" +# # around line 530 +# ij[j+la] = '' +# +# # retrieve our index. +# index = nextline.index('}}') +# +# # Everything before the new brackets +# before = nextline[:index+2] +# +# # Everything after +# after = nextline[index+2:] +# +# # Make the next line everything after +# # so it parses correctly, this *should* be +# # all html +# ij[j+1] = after +# +# # Add everything before to the current line +# line += before + + # Get rid of '{{' and '}}' + line = line[2:-2].strip() + + # This is bad juju, but let's do it anyway + if not line: + continue + + # We do not want to replace the newlines in code, + # only in block comments. + def remove_newline(re_val): + # Take the entire match and replace newlines with + # escaped newlines. + return re_val.group(0).replace('\n', '\\n') + + # Perform block comment escaping. + # This performs escaping ON anything + # in between """ and """ + line = re.sub(TemplateParser.r_multiline, + remove_newline, + line) + + if line.startswith('='): + # IE: {{=response.title}} + name, value = '=', line[1:].strip() + else: + v = line.split(' ', 1) + if len(v) == 1: + # Example + # {{ include }} + # {{ end }} + name = v[0] + value = '' + else: + # Example + # {{ block pie }} + # {{ include "layout.html" }} + # {{ for i in range(10): }} + name = v[0] + value = v[1] + + # This will replace newlines in block comments + # with the newline character. This is so that they + # retain their formatting, but squish down to one + # line in the rendered template. 
+ + # First check if we have any custom lexers + if name in self.lexers: + # Pass the information to the lexer + # and allow it to inject in the environment + + # You can define custom names such as + # '{{<.< + tokens = line.split('\n') + + # We need to look for any instances of + # for i in range(10): + # = i + # pass + # So we can properly put a response.write() in place. + continuation = False + len_parsed = 0 + for k in range(len(tokens)): + + tokens[k] = tokens[k].strip() + len_parsed += len(tokens[k]) + + if tokens[k].startswith('='): + if tokens[k].endswith('\\'): + continuation = True + tokens[k] = "\n%s(%s" % (self.writer, tokens[k][1:].strip()) + else: + tokens[k] = "\n%s(%s)" % (self.writer, tokens[k][1:].strip()) + elif continuation: + tokens[k] += ')' + continuation = False + + + buf = "\n%s" % '\n'.join(tokens) + top.append(Node(buf, pre_extend = pre_extend)) + + else: + # It is HTML so just include it. + buf = "\n%s(%r, escape=False)" % (self.writer, i) + top.append(Node(buf, pre_extend = pre_extend)) + + # Remember: tag, not tag, tag, not tag + in_tag = not in_tag + + # Make a list of items to remove from child + to_rm = [] + + # Go through each of the children nodes + for node in self.child_super_nodes: + # If we declared a block that this node wants to include + if node.name in self.blocks: + # Go ahead and include it! + node.value = self.blocks[node.name] + # Since we processed this child, we don't need to + # pass it along to the parent + to_rm.append(node) + + # Remove some of the processed nodes + for node in to_rm: + # Since this is a pointer, it works beautifully. + # Sometimes I miss C-Style pointers... I want my asterisk... + self.child_super_nodes.remove(node) + + # If we need to extend a template. 
+ if extend: + self.extend(extend) + +# We need this for integration with gluon +def parse_template(filename, + path = 'views/', + context = dict(), + lexers = {}, + delimiters = ('{{','}}') + ): + """ + filename can be a view filename in the views folder or an input stream + path is the path of a views folder + context is a dictionary of symbols used to render the template + """ + + # First, if we have a str try to open the file + if isinstance(filename, str): + try: + fp = open(os.path.join(path, filename), 'rb') + text = fp.read() + fp.close() + except IOError: + raise RestrictedError(filename, '', 'Unable to find the file') + else: + text = filename.read() + + # Use the file contents to get a parsed template and return it. + return str(TemplateParser(text, context=context, path=path, lexers=lexers, delimiters=delimiters)) + +def get_parsed(text): + """ + Returns the indented python code of text. Useful for unit testing. + + """ + return str(TemplateParser(text)) + +# And this is a generic render function. +# Here for integration with gluon. +def render(content = "hello world", + stream = None, + filename = None, + path = None, + context = {}, + lexers = {}, + delimiters = ('{{','}}') + ): + """ + >>> render() + 'hello world' + >>> render(content='abc') + 'abc' + >>> render(content='abc\\'') + "abc'" + >>> render(content='a"\\'bc') + 'a"\\'bc' + >>> render(content='a\\nbc') + 'a\\nbc' + >>> render(content='a"bcd"e') + 'a"bcd"e' + >>> render(content="'''a\\nc'''") + "'''a\\nc'''" + >>> render(content="'''a\\'c'''") + "'''a\'c'''" + >>> render(content='{{for i in range(a):}}{{=i}}
    {{pass}}', context=dict(a=5)) + '0
    1
    2
    3
    4
    ' + >>> render(content='{%for i in range(a):%}{%=i%}
    {%pass%}', context=dict(a=5),delimiters=('{%','%}')) + '0
    1
    2
    3
    4
    ' + >>> render(content="{{='''hello\\nworld'''}}") + 'hello\\nworld' + >>> render(content='{{for i in range(3):\\n=i\\npass}}') + '012' + """ + # Here to avoid circular Imports + try: + from globals import Response + except: + # Working standalone. Build a mock Response object. + class Response(): + def __init__(self): + self.body = cStringIO.StringIO() + def write(self, data, escape=True): + if not escape: + self.body.write(str(data)) + elif hasattr(data,'xml') and callable(data.xml): + self.body.write(data.xml()) + else: + # make it a string + if not isinstance(data, (str, unicode)): + data = str(data) + elif isinstance(data, unicode): + data = data.encode('utf8', 'xmlcharrefreplace') + data = cgi.escape(data, True).replace("'","'") + self.body.write(data) + + # A little helper to avoid escaping. + class NOESCAPE(): + def __init__(self, text): + self.text = text + def xml(self): + return self.text + # Add it to the context so we can use it. + context['NOESCAPE'] = NOESCAPE + + # If we don't have anything to render, why bother? + if not content and not stream and not filename: + raise SyntaxError, "Must specify a stream or filename or content" + + # Here for legacy purposes, probably can be reduced to something more simple. + close_stream = False + if not stream: + if filename: + stream = open(filename, 'rb') + close_stream = True + elif content: + stream = cStringIO.StringIO(content) + + # Get a response class. + context['response'] = Response() + + # Execute the template. + code = str(TemplateParser(stream.read(), context=context, path=path, lexers=lexers, delimiters=delimiters)) + try: + exec(code) in context + except Exception: + # for i,line in enumerate(code.split('\n')): print i,line + raise + + if close_stream: + stream.close() + + # Returned the rendered content. 
+ return context['response'].body.getvalue() + + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + ADDED gluon/template.pyc Index: gluon/template.pyc ================================================================== --- /dev/null +++ gluon/template.pyc cannot compute difference between binary files ADDED gluon/tests/__init__.py Index: gluon/tests/__init__.py ================================================================== --- /dev/null +++ gluon/tests/__init__.py @@ -0,0 +1,1 @@ + ADDED gluon/tests/test.sh Index: gluon/tests/test.sh ================================================================== --- /dev/null +++ gluon/tests/test.sh @@ -0,0 +1,45 @@ +#!/bin/sh +# +# run unit tests under nose if available, +# optionally with coverage +# +# test.sh [cover [gluon.rewrite]] +# +# easy_install nose +# easy_install coverage +# +NOSETESTS=nosetests +COVER=gluon # change to (eg) gluon.rewrite to collect coverage stats on a single module +PROCESSES=4 + +WHICH=`which $NOSETESTS` +if [ "$WHICH" == "" ]; then + # if nose isn't available, run the tests directly + for testmod in test_*.py; do + python $testmod + done +else + if [ "$1" = "cover" ]; then + # note: coverage doesn't handle multiple processes + if [ "$2" != "" ]; then + COVER=$2 + fi + $NOSETESTS --with-coverage --cover-package=$COVER --cover-erase + elif [ "$1" = "doctest" ]; then + # this has to run in gluon's parent; needs work + # + # the problem is that doctests run this way have a very different environment, + # apparently due to imports that don't happen in the normal course of running + # doctest via __main__. + # + echo doctest not supported >&2 + exit 1 + if [ ! -d gluon ]; then + cd ../.. 
+ fi + $NOSETESTS --with-doctest + else + $NOSETESTS --processes=$PROCESSES + fi +fi + ADDED gluon/tests/test_dal.py Index: gluon/tests/test_dal.py ================================================================== --- /dev/null +++ gluon/tests/test_dal.py @@ -0,0 +1,486 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + Unit tests for gluon.sql +""" + +import sys +import os +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) +else: + sys.path.append(os.path.realpath('../')) + +import unittest +import datetime +from dal import DAL, Field, Table, SQLALL + +ALLOWED_DATATYPES = [ + 'string', + 'text', + 'integer', + 'boolean', + 'double', + 'blob', + 'date', + 'time', + 'datetime', + 'upload', + 'password', + ] + + +def setUpModule(): + pass + +def tearDownModule(): + if os.path.isfile('sql.log'): + os.unlink('sql.log') + + +class TestFields(unittest.TestCase): + + def testFieldName(self): + + # Check that Fields cannot start with underscores + self.assertRaises(SyntaxError, Field, '_abc', 'string') + + # Check that Fields cannot contain punctuation other than underscores + self.assertRaises(SyntaxError, Field, 'a.bc', 'string') + + # Check that Fields cannot be a name of a method or property of Table + for x in ['drop', 'on', 'truncate']: + self.assertRaises(SyntaxError, Field, x, 'string') + + # Check that Fields allows underscores in the body of a field name. + self.assert_(Field('a_bc', 'string'), + "Field isn't allowing underscores in fieldnames. 
It should.") + + def testFieldTypes(self): + + # Check that string, text, and password default length is 512 + for typ in ['string', 'password']: + self.assert_(Field('abc', typ).length == 512, + "Default length for type '%s' is not 512 or 255" % typ) + + # Check that upload default length is 512 + self.assert_(Field('abc', 'upload').length == 512, + "Default length for type 'upload' is not 128") + + # Check that Tables passed in the type creates a reference + self.assert_(Field('abc', Table(None, 'temp')).type + == 'reference temp', + 'Passing an Table does not result in a reference type.') + + def testFieldLabels(self): + + # Check that a label is successfully built from the supplied fieldname + self.assert_(Field('abc', 'string').label == 'Abc', + 'Label built is incorrect') + self.assert_(Field('abc_def', 'string').label == 'Abc Def', + 'Label built is incorrect') + + def testFieldFormatters(self): # Formatter should be called Validator + + # Test the default formatters + for typ in ALLOWED_DATATYPES: + f = Field('abc', typ) + if typ not in ['date', 'time', 'datetime']: + isinstance(f.formatter('test'), str) + else: + isinstance(f.formatter(datetime.datetime.now()), str) + + def testRun(self): + db = DAL('sqlite:memory:') + for ft in ['string', 'text', 'password', 'upload', 'blob']: + db.define_table('t', Field('a', ft, default='')) + self.assertEqual(db.t.insert(a='x'), 1) + self.assertEqual(db().select(db.t.a)[0].a, 'x') + db.t.drop() + db.define_table('t', Field('a', 'integer', default=1)) + self.assertEqual(db.t.insert(a=3), 1) + self.assertEqual(db().select(db.t.a)[0].a, 3) + db.t.drop() + db.define_table('t', Field('a', 'double', default=1)) + self.assertEqual(db.t.insert(a=3.1), 1) + self.assertEqual(db().select(db.t.a)[0].a, 3.1) + db.t.drop() + db.define_table('t', Field('a', 'boolean', default=True)) + self.assertEqual(db.t.insert(a=True), 1) + self.assertEqual(db().select(db.t.a)[0].a, True) + db.t.drop() + db.define_table('t', Field('a', 'date', + 
default=datetime.date.today())) + t0 = datetime.date.today() + self.assertEqual(db.t.insert(a=t0), 1) + self.assertEqual(db().select(db.t.a)[0].a, t0) + db.t.drop() + db.define_table('t', Field('a', 'datetime', + default=datetime.datetime.today())) + t0 = datetime.datetime( + 1971, + 12, + 21, + 10, + 30, + 55, + 0, + ) + self.assertEqual(db.t.insert(a=t0), 1) + self.assertEqual(db().select(db.t.a)[0].a, t0) + db.t.drop() + db.define_table('t', Field('a', 'time', default='11:30')) + t0 = datetime.time(10, 30, 55) + self.assertEqual(db.t.insert(a=t0), 1) + self.assertEqual(db().select(db.t.a)[0].a, t0) + db.t.drop() + + +class TestAll(unittest.TestCase): + + def setUp(self): + self.pt = Table(None,'PseudoTable',Field('name'),Field('birthdate')) + + def testSQLALL(self): + ans = 'PseudoTable.id, PseudoTable.name, PseudoTable.birthdate' + self.assertEqual(str(SQLALL(self.pt)), ans) + + +class TestTable(unittest.TestCase): + + def testTableCreation(self): + + # Check for error when not passing type other than Field or Table + + self.assertRaises(SyntaxError, Table, None, 'test', None) + + persons = Table(None, 'persons', + Field('firstname','string'), + Field('lastname', 'string')) + + # Does it have the correct fields? 
+ + self.assert_(set(persons.fields).issuperset(set(['firstname', + 'lastname']))) + + # ALL is set correctly + + self.assert_('persons.firstname, persons.lastname' + in str(persons.ALL)) + + def testTableAlias(self): + db = DAL('sqlite:memory:') + persons = Table(db, 'persons', Field('firstname', + 'string'), Field('lastname', 'string')) + aliens = persons.with_alias('aliens') + + # Are the different table instances with the same fields + + self.assert_(persons is not aliens) + self.assert_(set(persons.fields) == set(aliens.fields)) + + def testTableInheritance(self): + persons = Table(None, 'persons', Field('firstname', + 'string'), Field('lastname', 'string')) + customers = Table(None, 'customers', + Field('items_purchased', 'integer'), + persons) + self.assert_(set(customers.fields).issuperset(set( + ['items_purchased', 'firstname', 'lastname']))) + + +class TestInsert(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + self.assertEqual(db.t.insert(a='1'), 1) + self.assertEqual(db.t.insert(a='1'), 2) + self.assertEqual(db.t.insert(a='1'), 3) + self.assertEqual(db(db.t.a == '1').count(), 3) + self.assertEqual(db(db.t.a == '1').update(a='2'), 3) + self.assertEqual(db(db.t.a == '2').count(), 3) + self.assertEqual(db(db.t.a == '2').delete(), 3) + db.t.drop() + + +class TestSelect(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + self.assertEqual(db.t.insert(a='1'), 1) + self.assertEqual(db.t.insert(a='2'), 2) + self.assertEqual(db.t.insert(a='3'), 3) + self.assertEqual(len(db(db.t.id > 0).select()), 3) + self.assertEqual(db(db.t.id > 0).select(orderby=~db.t.a + | db.t.id)[0].a, '3') + self.assertEqual(len(db(db.t.id > 0).select(limitby=(1, 2))), 1) + self.assertEqual(db(db.t.id > 0).select(limitby=(1, 2))[0].a, + '2') + self.assertEqual(len(db().select(db.t.ALL)), 3) + self.assertEqual(len(db(db.t.a == None).select()), 0) + 
self.assertEqual(len(db(db.t.a != None).select()), 3) + self.assertEqual(len(db(db.t.a > '1').select()), 2) + self.assertEqual(len(db(db.t.a >= '1').select()), 3) + self.assertEqual(len(db(db.t.a == '1').select()), 1) + self.assertEqual(len(db(db.t.a != '1').select()), 2) + self.assertEqual(len(db(db.t.a < '3').select()), 2) + self.assertEqual(len(db(db.t.a <= '3').select()), 3) + self.assertEqual(len(db(db.t.a > '1')(db.t.a < '3').select()), 1) + self.assertEqual(len(db((db.t.a > '1') & (db.t.a < '3')).select()), 1) + self.assertEqual(len(db((db.t.a > '1') | (db.t.a < '3')).select()), 3) + self.assertEqual(len(db((db.t.a > '1') & ~(db.t.a > '2')).select()), 1) + self.assertEqual(len(db(~(db.t.a > '1') & (db.t.a > '2')).select()), 0) + db.t.drop() + + +class TestBelongs(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + self.assertEqual(db.t.insert(a='1'), 1) + self.assertEqual(db.t.insert(a='2'), 2) + self.assertEqual(db.t.insert(a='3'), 3) + self.assertEqual(len(db(db.t.a.belongs(('1', '3'))).select()), + 2) + self.assertEqual(len(db(db.t.a.belongs(db(db.t.id + > 2)._select(db.t.a))).select()), 1) + self.assertEqual(len(db(db.t.a.belongs(db(db.t.a.belongs(('1', + '3')))._select(db.t.a))).select()), 2) + self.assertEqual(len(db(db.t.a.belongs(db(db.t.a.belongs(db + (db.t.a.belongs(('1', '3')))._select(db.t.a)))._select( + db.t.a))).select()), + 2) + db.t.drop() + + +class TestLike(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + self.assertEqual(db.t.insert(a='abc'), 1) + self.assertEqual(len(db(db.t.a.like('a%')).select()), 1) + self.assertEqual(len(db(db.t.a.like('%b%')).select()), 1) + self.assertEqual(len(db(db.t.a.like('%c')).select()), 1) + self.assertEqual(len(db(db.t.a.like('%d%')).select()), 0) + self.assertEqual(len(db(db.t.a.lower().like('A%')).select()), 1) + self.assertEqual(len(db(db.t.a.lower().like('%B%')).select()), + 1) + 
self.assertEqual(len(db(db.t.a.lower().like('%C')).select()), 1) + self.assertEqual(len(db(db.t.a.upper().like('A%')).select()), 1) + self.assertEqual(len(db(db.t.a.upper().like('%B%')).select()), + 1) + self.assertEqual(len(db(db.t.a.upper().like('%C')).select()), 1) + db.t.drop() + + +class TestDatetime(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a', 'datetime')) + self.assertEqual(db.t.insert(a=datetime.datetime(1971, 12, 21, + 11, 30)), 1) + self.assertEqual(db.t.insert(a=datetime.datetime(1971, 11, 21, + 10, 30)), 2) + self.assertEqual(db.t.insert(a=datetime.datetime(1970, 12, 21, + 9, 30)), 3) + self.assertEqual(len(db(db.t.a == datetime.datetime(1971, 12, + 21, 11, 30)).select()), 1) + self.assertEqual(len(db(db.t.a.year() == 1971).select()), 2) + self.assertEqual(len(db(db.t.a.month() == 12).select()), 2) + self.assertEqual(len(db(db.t.a.day() == 21).select()), 3) + self.assertEqual(len(db(db.t.a.hour() == 11).select()), 1) + self.assertEqual(len(db(db.t.a.minutes() == 30).select()), 3) + self.assertEqual(len(db(db.t.a.seconds() == 0).select()), 3) + db.t.drop() + + +class TestExpressions(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a', 'integer')) + self.assertEqual(db.t.insert(a=1), 1) + self.assertEqual(db.t.insert(a=2), 2) + self.assertEqual(db.t.insert(a=3), 3) + self.assertEqual(db(db.t.a == 3).update(a=db.t.a + 1), 1) + self.assertEqual(len(db(db.t.a == 4).select()), 1) + db.t.drop() + + +class TestJoin(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t1', Field('a')) + db.define_table('t2', Field('a'), Field('b', db.t1)) + i1 = db.t1.insert(a='1') + i2 = db.t1.insert(a='2') + i3 = db.t1.insert(a='3') + db.t2.insert(a='4', b=i1) + db.t2.insert(a='5', b=i2) + db.t2.insert(a='6', b=i2) + self.assertEqual(len(db(db.t1.id + == db.t2.b).select(orderby=db.t1.a + | db.t2.a)), 3) + 
self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.a + | db.t2.a)[2].t1.a, '2') + self.assertEqual(db(db.t1.id == db.t2.b).select(orderby=db.t1.a + | db.t2.a)[2].t2.a, '6') + self.assertEqual(len(db().select(db.t1.ALL, db.t2.ALL, + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a)), 4) + self.assertEqual(db().select(db.t1.ALL, db.t2.ALL, + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a)[2].t1.a, '2') + self.assertEqual(db().select(db.t1.ALL, db.t2.ALL, + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a)[2].t2.a, '6') + self.assertEqual(db().select(db.t1.ALL, db.t2.ALL, + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a)[3].t1.a, '3') + self.assertEqual(db().select(db.t1.ALL, db.t2.ALL, + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a)[3].t2.a, None) + self.assertEqual(len(db().select(db.t1.ALL, db.t2.id.count(), + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a, groupby=db.t1.a)), + 3) + self.assertEqual(db().select(db.t1.ALL, db.t2.id.count(), + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a, + groupby=db.t1.a)[0]._extra[db.t2.id.count()], + 1) + self.assertEqual(db().select(db.t1.ALL, db.t2.id.count(), + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a, + groupby=db.t1.a)[1]._extra[db.t2.id.count()], + 2) + self.assertEqual(db().select(db.t1.ALL, db.t2.id.count(), + left=db.t2.on(db.t1.id == db.t2.b), + orderby=db.t1.a | db.t2.a, + groupby=db.t1.a)[2]._extra[db.t2.id.count()], + 0) + db.t1.drop() + db.t2.drop() + + +class TestMinMaxSum(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a', 'integer')) + self.assertEqual(db.t.insert(a=1), 1) + self.assertEqual(db.t.insert(a=2), 2) + self.assertEqual(db.t.insert(a=3), 3) + s = db.t.a.min() + self.assertEqual(db(db.t.id > 0).select(s)[0]._extra[s], 1) + s = db.t.a.max() + self.assertEqual(db(db.t.id > 0).select(s)[0]._extra[s], 3) 
+ s = db.t.a.sum() + self.assertEqual(db(db.t.id > 0).select(s)[0]._extra[s], 6) + s = db.t.a.count() + self.assertEqual(db(db.t.id > 0).select(s)[0]._extra[s], 3) + db.t.drop() + + +#class TestCache(unittest. +# def testRun(self): +# cache = cache.ram +# db = DAL('sqlite:memory:') +# db.define_table('t', Field('a')) +# db.t.insert(a='1') +# r1 = db().select(db.t.ALL, cache=(cache, 1000)) +# db.t.insert(a='1') +# r2 = db().select(db.t.ALL, cache=(cache, 1000)) +# self.assertEqual(r1.response, r2.response) +# db.t.drop() + + +class TestMigrations(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite://.storage.db') + db.define_table('t', Field('a'), migrate='.storage.table') + db.commit() + db = DAL('sqlite://.storage.db') + db.define_table('t', Field('a'), Field('b'), + migrate='.storage.table') + db.commit() + db = DAL('sqlite://.storage.db') + db.define_table('t', Field('a'), Field('b', 'text'), + migrate='.storage.table') + db.commit() + db = DAL('sqlite://.storage.db') + db.define_table('t', Field('a'), migrate='.storage.table') + db.t.drop() + db.commit() + + def tearDown(self): + os.unlink('.storage.db') + +class TestReferece(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('name'), Field('a','reference t')) + db.commit() + x = db.t.insert(name='max') + assert x.id == 1 + assert x['id'] == 1 + x.a = x + assert x.a == 1 + x.update_record() + y = db.t[1] + assert y.a == 1 + assert y.a.a.a.a.a.a.name == 'max' + z=db.t.insert(name='xxx', a = y) + assert z.a == y.id + db.t.drop() + db.commit() + +class TestClientLevelOps(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + db.commit() + db.t.insert(a="test") + rows1 = db(db.t.id>0).select() + rows2 = db(db.t.id>0).select() + rows3 = rows1 & rows2 + assert len(rows3) == 2 + rows4 = rows1 | rows2 + assert len(rows4) == 1 + rows5 = rows1.find(lambda row: row.a=="test") + assert len(rows5) == 1 + rows6 = 
rows2.exclude(lambda row: row.a=="test") + assert len(rows6) == 1 + rows7 = rows5.sort(lambda row: row.a) + assert len(rows7) == 1 + db.t.drop() + db.commit() + + +class TestVirtualFields(unittest.TestCase): + + def testRun(self): + db = DAL('sqlite:memory:') + db.define_table('t', Field('a')) + db.commit() + db.t.insert(a="test") + class Compute: + def a_upper(row): return row.t.a.upper() + db.t.virtualfields.append(Compute()) + assert db(db.t.id>0).select().first().a_upper == 'TEST' + db.t.drop() + db.commit() + + +if __name__ == '__main__': + unittest.main() + tearDownModule() ADDED gluon/tests/test_html.py Index: gluon/tests/test_html.py ================================================================== --- /dev/null +++ gluon/tests/test_html.py @@ -0,0 +1,122 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +""" + Unit tests for gluon.html +""" + +import sys +import os +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) +else: + sys.path.append(os.path.realpath('../')) + +import unittest +from html import * + + +class TestBareHelpers(unittest.TestCase): + + def testRun(self): + self.assertEqual(BR(_a='1', _b='2').xml(), '
    ') + self.assertEqual(EMBED(_a='1', _b='2').xml(), + '') + self.assertEqual(HR(_a='1', _b='2').xml(), '
    ') + self.assertEqual(IMG(_a='1', _b='2').xml(), + '') + self.assertEqual(INPUT(_a='1', _b='2').xml(), + '') + self.assertEqual(LINK(_a='1', _b='2').xml(), + '') + self.assertEqual(META(_a='1', _b='2').xml(), + '') + + self.assertEqual(A('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(B('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(BODY('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(CENTER('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(DIV('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(EM('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(FIELDSET('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(FORM('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(H1('<>', _a='1', _b='2').xml(), + '

    <>

    ') + self.assertEqual(H2('<>', _a='1', _b='2').xml(), + '

    <>

    ') + self.assertEqual(H3('<>', _a='1', _b='2').xml(), + '

    <>

    ') + self.assertEqual(H4('<>', _a='1', _b='2').xml(), + '

    <>

    ') + self.assertEqual(H5('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(H6('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(HEAD('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(HTML('<>', _a='1', _b='2').xml(), + '\n<>') + self.assertEqual(IFRAME('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(LABEL('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(LI('<>', _a='1', _b='2').xml(), + '
  • <>
  • ') + self.assertEqual(OBJECT('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(OL('<>', _a='1', _b='2').xml(), + '
    1. <>
    ') + self.assertEqual(OPTION('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(P('<>', _a='1', _b='2').xml(), + '

    <>

    ') + self.assertEqual(PRE('<>', _a='1', _b='2').xml(), + '
    <>
    ') + self.assertEqual(SCRIPT('<>', _a='1', _b='2').xml(), + '''''') + self.assertEqual(SELECT('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(SPAN('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(STYLE('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(TABLE('<>', _a='1', _b='2').xml(), + '
    ' + \ + '
    <>
    ') + self.assertEqual(TBODY('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TD('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TEXTAREA('<>', _a='1', _b='2').xml(), + '') + self.assertEqual(TFOOT('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TH('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(THEAD('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TITLE('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TR('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(TT('<>', _a='1', _b='2').xml(), + '<>') + self.assertEqual(UL('<>', _a='1', _b='2').xml(), + '
    • <>
    ') + + +if __name__ == '__main__': + unittest.main() ADDED gluon/tests/test_is_url.py Index: gluon/tests/test_is_url.py ================================================================== --- /dev/null +++ gluon/tests/test_is_url.py @@ -0,0 +1,644 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Unit tests for IS_URL() +""" + +import sys +import os +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) +else: + sys.path.append(os.path.realpath('../')) + +import unittest +from validators import IS_URL, IS_HTTP_URL, IS_GENERIC_URL, \ + unicode_to_ascii_authority + + +class TestIsUrl(unittest.TestCase): + + def testModeHttp(self): + + # defaults to mode='http' + + x = IS_URL() + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('google.ca'), ('http://google.ca', None)) + self.assertEqual(x('google.ca:80'), ('http://google.ca:80', + None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + self.assertEqual(x('google..ca'), ('google..ca', 'enter a valid URL')) + self.assertEqual(x('google.ca..'), ('google.ca..', 'enter a valid URL')) + + # explicit use of 'http' mode + + x = IS_URL(mode='http') + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('google.ca'), ('http://google.ca', None)) + self.assertEqual(x('google.ca:80'), ('http://google.ca:80', + None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # prepends 'https' instead of 'http' + + x = IS_URL(mode='http', prepend_scheme='https') + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('google.ca'), ('https://google.ca', None)) + self.assertEqual(x('google.ca:80'), ('https://google.ca:80', + None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # prepending disabled + + x = IS_URL(prepend_scheme=None) + self.assertEqual(x('http://google.ca'), 
('http://google.ca', + None)) + self.assertEqual(x('google.ca'), ('google.ca', None)) + self.assertEqual(x('google.ca:80'), ('google.ca:80', None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # custom allowed_schemes + + x = IS_URL(mode='http', allowed_schemes=[None, 'http']) + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('https://google.ca'), ('https://google.ca', + 'enter a valid URL')) + self.assertEqual(x('google.ca'), ('http://google.ca', None)) + self.assertEqual(x('google.ca:80'), ('http://google.ca:80', + None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # custom allowed_schemes, excluding None + + x = IS_URL(allowed_schemes=['http']) + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('https://google.ca'), ('https://google.ca', + 'enter a valid URL')) + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + self.assertEqual(x('google.ca:80'), ('google.ca:80', + 'enter a valid URL')) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # custom allowed_schemes and prepend_scheme + + x = IS_URL(allowed_schemes=[None, 'https'], + prepend_scheme='https') + self.assertEqual(x('http://google.ca'), ('http://google.ca', + 'enter a valid URL')) + self.assertEqual(x('https://google.ca'), ('https://google.ca', + None)) + self.assertEqual(x('google.ca'), ('https://google.ca', None)) + self.assertEqual(x('google.ca:80'), ('https://google.ca:80', + None)) + self.assertEqual(x('unreal.blargg'), ('unreal.blargg', + 'enter a valid URL')) + + # Now any URL requiring prepending will fail, but prepending is still + # enabled! 
+ + x = IS_URL(allowed_schemes=['http']) + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + + def testModeGeneric(self): + + # 'generic' mode + + x = IS_URL(mode='generic') + self.assertEqual(x('http://google.ca'), ('http://google.ca',None)) + self.assertEqual(x('google.ca'), ('google.ca', None)) + self.assertEqual(x('google.ca:80'), ('http://google.ca:80',None)) + self.assertEqual(x('blargg://unreal'), ('blargg://unreal', + 'enter a valid URL')) + + # 'generic' mode with custom allowed_schemes that still includes + # 'http' (the default for prepend_scheme) + + x = IS_URL(mode='generic', allowed_schemes=['http', 'blargg']) + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca', + 'enter a valid URL')) + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + self.assertEqual(x('google.ca:80'), ('google.ca:80', + 'enter a valid URL')) + self.assertEqual(x('blargg://unreal'), ('blargg://unreal', + None)) + + # 'generic' mode with overriden prepend_scheme + + x = IS_URL(mode='generic', prepend_scheme='ftp') + self.assertEqual(x('http://google.ca'), ('http://google.ca', + None)) + self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca', + None)) + self.assertEqual(x('google.ca'), ('google.ca', None)) + self.assertEqual(x('google.ca:80'), ('ftp://google.ca:80', + None)) + self.assertEqual(x('blargg://unreal'), ('blargg://unreal', + 'enter a valid URL')) + + # 'generic' mode with overriden allowed_schemes and prepend_scheme + + x = IS_URL(mode='generic', allowed_schemes=[None, 'ftp', 'ftps' + ], prepend_scheme='ftp') + self.assertEqual(x('http://google.ca'), ('http://google.ca', + 'enter a valid URL')) + self.assertEqual(x('google.ca'), ('google.ca', None)) + self.assertEqual(x('ftp://google.ca'), ('ftp://google.ca', + None)) + self.assertEqual(x('google.ca:80'), ('ftp://google.ca:80', + None)) + self.assertEqual(x('blargg://unreal'), ('blargg://unreal', + 
'enter a valid URL')) + + # Now any URL requiring prepending will fail, but prepending is still + # enabled! + + x = IS_URL(mode='generic', allowed_schemes=['http']) + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + + def testExceptionalUse(self): + + # mode must be in set ['http', 'generic'] + + try: + x = IS_URL(mode='ftp') + x('http://www.google.ca') + except Exception, e: + if str(e) != "invalid mode 'ftp' in IS_URL": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid mode: 'ftp'") + + # allowed_schemes in 'http' mode must be in set [None, 'http', 'https'] + + try: + x = IS_URL(allowed_schemes=[None, 'ftp', 'ftps'], + prepend_scheme='ftp') + x('http://www.benn.ca') # we can only reasonably know about the + # error at calling time + except Exception, e: + if str(e)\ + != "allowed_scheme value 'ftp' is not in [None, 'http', 'https']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid allowed_schemes: [None, 'ftp', 'ftps']") + + # prepend_scheme's value must be in allowed_schemes (default for 'http' + # mode is [None, 'http', 'https']) + + try: + x = IS_URL(prepend_scheme='ftp') + x('http://www.benn.ca') # we can only reasonably know about the + # error at calling time + except Exception, e: + if str(e)\ + != "prepend_scheme='ftp' is not in allowed_schemes=[None, 'http', 'https']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'ftp'") + + # custom allowed_schemes that excludes 'http', so prepend_scheme must be + # specified! 
+ + try: + x = IS_URL(allowed_schemes=[None, 'https']) + except Exception, e: + if str(e)\ + != "prepend_scheme='http' is not in allowed_schemes=[None, 'https']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'http'") + + # prepend_scheme must be in allowed_schemes + + try: + x = IS_URL(allowed_schemes=[None, 'http'], + prepend_scheme='https') + except Exception, e: + if str(e)\ + != "prepend_scheme='https' is not in allowed_schemes=[None, 'http']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'https'") + + # prepend_scheme's value (default is 'http') must be in allowed_schemes + + try: + x = IS_URL(mode='generic', allowed_schemes=[None, 'ftp', + 'ftps']) + except Exception, e: + if str(e)\ + != "prepend_scheme='http' is not in allowed_schemes=[None, 'ftp', 'ftps']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'http'") + + # prepend_scheme's value must be in allowed_schemes, which by default + # is all schemes that really exist + + try: + x = IS_URL(mode='generic', prepend_scheme='blargg') + x('http://www.google.ca') # we can only reasonably know about the error at calling time + except Exception, e: + if not str(e).startswith( + "prepend_scheme='blargg' is not in allowed_schemes="): + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'blargg'") + + # prepend_scheme's value must be in allowed_schemes + + try: + x = IS_URL(mode='generic', allowed_schemes=[None, 'http'], + prepend_scheme='blargg') + except Exception, e: + if str(e)\ + != "prepend_scheme='blargg' is not in allowed_schemes=[None, 'http']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Accepted invalid prepend_scheme: 'blargg'") + + # Not inluding None in the allowed_schemes essentially disabled + # prepending, so even though + # prepend_scheme has the invalid value 'http', we don't care! 
+ + x = IS_URL(allowed_schemes=['https'], prepend_scheme='https') + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + + # Not inluding None in the allowed_schemes essentially disabled prepending, so even though + # prepend_scheme has the invalid value 'http', we don't care! + + x = IS_URL(mode='generic', allowed_schemes=['https'], + prepend_scheme='https') + self.assertEqual(x('google.ca'), ('google.ca', 'enter a valid URL')) + + +# ############################################################################## + + +class TestIsGenericUrl(unittest.TestCase): + + x = IS_GENERIC_URL() + + def testInvalidUrls(self): + urlsToCheckA = [] + for i in range(0, 32) + [127]: + + # Control characters are disallowed in any part of a URL + + urlsToCheckA.append('http://www.benn' + chr(i) + '.ca') + + urlsToCheckB = [ + None, + '', + 'http://www.no spaces allowed.com', + 'http://www.benn.ca/no spaces allowed/', + 'http://www.benn.ca/angle_bracket/', + 'http://www.benn.ca/invalid%character', + 'http://www.benn.ca/illegal%%20use', + 'http://www.benn.ca/illegaluse%', + 'http://www.benn.ca/illegaluse%0', + 'http://www.benn.ca/illegaluse%x', + 'http://www.benn.ca/ill%egaluse%x', + 'http://www.benn.ca/double"quote/', + 'http://www.curly{brace.com', + 'http://www.benn.ca/curly}brace/', + 'http://www.benn.ca/or|symbol/', + 'http://www.benn.ca/back\slash', + 'http://www.benn.ca/the^carat', + 'http://left[bracket.me', + 'http://www.benn.ca/right]bracket', + 'http://www.benn.ca/angle`quote', + '-ttp://www.benn.ca', + '+ttp://www.benn.ca', + '.ttp://www.benn.ca', + '9ttp://www.benn.ca', + 'ht;tp://www.benn.ca', + 'ht@tp://www.benn.ca', + 'ht&tp://www.benn.ca', + 'ht=tp://www.benn.ca', + 'ht$tp://www.benn.ca', + 'ht,tp://www.benn.ca', + 'ht:tp://www.benn.ca', + 'htp://invalid_scheme.com', + ] + + failures = [] + + for url in urlsToCheckA + urlsToCheckB: + if self.x(url)[1] == None: + failures.append('Incorrectly accepted: ' + str(url)) + + if len(failures) > 0: + 
self.fail(failures) + + def testValidUrls(self): + urlsToCheck = [ + 'ftp://ftp.is.co.za/rfc/rfc1808.txt', + 'gopher://spinaltap.micro.umn.edu/00/Weather/California/Los%20Angeles', + 'http://www.math.uio.no/faq/compression-faq/part1.html', + 'mailto:mduerst@ifi.unizh.ch', + 'news:comp.infosystems.www.servers.unix', + 'telnet://melvyl.ucop.edu/', + 'hTTp://www.benn.ca', + '%66%74%70://ftp.is.co.za/rfc/rfc1808.txt', + '%46%74%70://ftp.is.co.za/rfc/rfc1808.txt', + '/faq/compression-faq/part1.html', + 'google.com', + 'www.google.com:8080', + '128.127.123.250:8080', + 'blargg:ping', + 'http://www.benn.ca', + 'http://benn.ca', + 'http://amazon.com/books/', + 'https://amazon.com/movies', + 'rtsp://idontknowthisprotocol', + 'HTTP://allcaps.com', + 'http://localhost', + 'http://localhost#fragment', + 'http://localhost/hello', + 'http://localhost/hello?query=True', + 'http://localhost/hello/', + 'http://localhost:8080', + 'http://localhost:8080/', + 'http://localhost:8080/hello', + 'http://localhost:8080/hello/', + 'file:///C:/Documents%20and%20Settings/Jonathan/Desktop/view.py' + , + ] + + failures = [] + + for url in urlsToCheck: + if self.x(url)[1] != None: + failures.append('Incorrectly rejected: ' + str(url)) + + if len(failures) > 0: + self.fail(failures) + + def testPrepending(self): + # Does not prepend scheme for abbreviated domains + self.assertEqual(self.x('google.ca'), ('google.ca', None)) + + # Does not prepend scheme for abbreviated domains + self.assertEqual(self.x('google.ca:8080'), ('google.ca:8080', None)) + + # Does not prepend when scheme already exists + self.assertEqual(self.x('https://google.ca'), + ('https://google.ca', None)) + + # Does not prepend if None type is not specified in allowed_scheme, + # because a scheme is required + + y = IS_GENERIC_URL(allowed_schemes=['http', 'blargg'], + prepend_scheme='http') + self.assertEqual(y('google.ca'), ('google.ca', 'enter a valid URL')) + + +# 
############################################################################## + + +class TestIsHttpUrl(unittest.TestCase): + + x = IS_HTTP_URL() + + def testInvalidUrls(self): + urlsToCheck = [ + None, + '', + 'http://invalid' + chr(2) + '.com', + 'htp://invalid_scheme.com', + 'blargg://invalid_scheme.com', + 'http://-123.com', + 'http://abcd-.ca', + 'http://-abc123-.me', + 'http://www.dom&ain.com/', + 'http://www.dom=ain.com/', + 'http://www.benn.ca&', + 'http://%62%65%6E%6E%2E%63%61/path', + 'http://.domain.com', + 'http://.domain.com./path', + 'http://domain..com', + 'http://domain...at..com', + 'http://domain.com..', + 'http://domain.com../path', + 'http://domain.3m', + 'http://domain.-3m', + 'http://domain.3m-', + 'http://domain.-3m-', + 'http://domain.co&m', + 'http://domain.m3456', + 'http://domain.m-3/path#fragment', + 'http://domain.m---k/path?query=value', + 'http://23.32..', + 'http://23..32.56.0', + 'http://38997.222.999', + 'http://23.32.56.99.', + 'http://.23.32.56.99', + 'http://.23.32.56.99.', + 'http://w127.123.0.256:8080', + 'http://23.32.56.99:abcd', + 'http://23.32.56.99:23cd', + 'http://google.com:cd22', + 'http://23.32:1300.56.99', + 'http://www.yahoo:1600.com', + 'path/segment/without/starting/slash', + 'http://www.math.uio.no;param=3', + '://ABC.com:/%7esmith/home.html', + ] + + failures = [] + + for url in urlsToCheck: + if self.x(url)[1] == None: + failures.append('Incorrectly accepted: ' + str(url)) + + if len(failures) > 0: + self.fail(failures) + + def testValidUrls(self): + + urlsToCheck = [ + 'http://abc.com:80/~smith/home.html', + 'http://ABC.com/%7Esmith/home.html', + 'http://ABC.com:/%7esmith/home.html', + 'http://www.math.uio.no/faq/compression-faq/part1.html', + '//google.ca/faq/compression-faq/part1.html', + '//google.ca/faq;param=3', + '//google.ca/faq/index.html?query=5', + '//google.ca/faq/index.html;param=value?query=5', + '/faq/compression-faq/part1.html', + '/faq;param=3', + '/faq/index.html?query=5', + 
'/faq/index.html;param=value?query=5', + 'google.com', + 'benn.ca/init/default', + 'benn.ca/init;param=value/default?query=value', + 'http://host-name---with-dashes.me', + 'http://www.host-name---with-dashes.me', + 'http://a.com', + 'http://a.3.com', + 'http://a.bl-ck.com', + 'http://bl-e.b.com', + 'http://host123with456numbers.ca', + 'http://1234567890.com.', + 'http://1234567890.com./path', + 'http://google.com./path', + 'http://domain.xn--0zwm56d', + 'http://127.123.0.256', + 'http://127.123.0.256/document/drawer', + '127.123.0.256/document/', + '156.212.123.100', + 'http://www.google.com:180200', + 'http://www.google.com:8080/path', + 'http://www.google.com:8080', + '//www.google.com:8080', + 'www.google.com:8080', + 'http://127.123.0.256:8080/path', + '//127.123.0.256:8080', + '127.123.0.256:8080', + 'http://example.me??query=value?', + 'http://a.com', + 'http://3.com', + 'http://www.benn.ca', + 'http://benn.ca', + 'http://amazon.com/books/', + 'https://amazon.com/movies', + 'hTTp://allcaps.com', + 'http://localhost', + 'HTTPS://localhost.', + 'http://localhost#fragment', + 'http://localhost/hello;param=value', + 'http://localhost/hello;param=value/hi;param2=value2;param3=value3' + , + 'http://localhost/hello?query=True', + 'http://www.benn.ca/hello;param=value/hi;param2=value2;param3=value3/index.html?query=3', + 'http://localhost/hello/?query=1500&five=6', + 'http://localhost:8080', + 'http://localhost:8080/', + 'http://localhost:8080/hello', + 'http://localhost:8080/hello%20world/', + 'http://www.a.3.be-nn.5.ca', + 'http://www.amazon.COM', + ] + + failures = [] + + for url in urlsToCheck: + if self.x(url)[1] != None: + failures.append('Incorrectly rejected: ' + str(url)) + + if len(failures) > 0: + self.fail(failures) + + def testPrepending(self): + # prepends scheme for abbreviated domains + self.assertEqual(self.x('google.ca'), ('http://google.ca', None)) + + # prepends scheme for abbreviated domains + self.assertEqual(self.x('google.ca:8080'), + 
('http://google.ca:8080', None)) + + # does not prepend when scheme already exists + self.assertEqual(self.x('https://google.ca'), + ('https://google.ca', None)) + + y = IS_HTTP_URL(prepend_scheme='https', allowed_schemes=[None, 'https']) + self.assertEqual(y('google.ca'), ('https://google.ca', None)) # prepends https if asked + + z = IS_HTTP_URL(prepend_scheme=None) + self.assertEqual(z('google.ca:8080'), ('google.ca:8080', None)) # prepending disabled + + try: + IS_HTTP_URL(prepend_scheme='mailto') + except Exception, e: + if str(e)\ + != "prepend_scheme='mailto' is not in allowed_schemes=[None, 'http', 'https']": + self.fail('Wrong exception: ' + str(e)) + else: + self.fail("Got invalid prepend_scheme: 'mailto'") + + # Does not prepend if None type is not specified in allowed_scheme, because a scheme is required + + a = IS_HTTP_URL(allowed_schemes=['http']) + self.assertEqual(a('google.ca'), ('google.ca', 'enter a valid URL')) + self.assertEqual(a('google.ca:80'), ('google.ca:80', + 'enter a valid URL')) + +class TestUnicode(unittest.TestCase): + x = IS_URL() + y = IS_URL(allowed_schemes=['https'], prepend_scheme='https') #excludes the option for abbreviated URLs with no scheme + z = IS_URL(prepend_scheme=None) # disables prepending the scheme in the return value + + + def testUnicodeToAsciiUrl(self): + self.assertEquals(unicode_to_ascii_authority(u'www.Alliancefran\xe7aise.nu'), 'www.xn--alliancefranaise-npb.nu') + self.assertEquals(unicode_to_ascii_authority(u'www.benn.ca'), 'www.benn.ca') + self.assertRaises(UnicodeError, unicode_to_ascii_authority, u'\u4e2d'*1000) #label is too long + + + def testValidUrls(self): + self.assertEquals(self.x(u'www.Alliancefrancaise.nu'), ('http://www.Alliancefrancaise.nu', None)) + self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu'), ('http://www.xn--alliancefranaise-npb.nu', None)) + self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu:8080'), ('http://www.xn--alliancefranaise-npb.nu:8080', None)) + 
self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu'), ('http://www.xn--alliancefranaise-npb.nu', None)) + self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue', None)) + self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue#fragment', None)) + self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue?query=value#fragment', None)) + self.assertEquals(self.x(u'http://www.Alliancefran\xe7aise.nu:8080/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu:8080/parnaise/blue?query=value#fragment', None)) + self.assertEquals(self.x(u'www.Alliancefran\xe7aise.nu/parnaise/blue?query=value#fragment'), ('http://www.xn--alliancefranaise-npb.nu/parnaise/blue?query=value#fragment', None)) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com'), ('http://xn--fiq13b.com', None)) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86'), ('http://xn--fiq13b.com/%4e%86', None)) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86?query=\u4e86'), ('http://xn--fiq13b.com/%4e%86?query=%4e%86', None)) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com/\u4e86?query=\u4e86#fragment'), ('http://xn--fiq13b.com/%4e%86?query=%4e%86#fragment', None)) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.com?query=\u4e86#fragment'), ('http://xn--fiq13b.com?query=%4e%86#fragment', None)) + self.assertEquals(self.x(u'http://B\xfccher.ch'), ('http://xn--bcher-kva.ch', None)) + self.assertEquals(self.x(u'http://\xe4\xf6\xfc\xdf.com'), ('http://xn--ss-uia6e4a.com', None)) + self.assertEquals(self.x(u'http://visegr\xe1d.com'), ('http://xn--visegrd-mwa.com', None)) + self.assertEquals(self.x(u'http://h\xe1zipatika.com'), ('http://xn--hzipatika-01a.com', None)) + 
self.assertEquals(self.x(u'http://www.\xe7ukurova.com'), ('http://www.xn--ukurova-txa.com', None)) + self.assertEquals(self.x(u'http://nixier\xf6hre.nixieclock-tube.com'), ('http://xn--nixierhre-57a.nixieclock-tube.com', None)) + self.assertEquals(self.x(u'google.ca.'), ('http://google.ca.', None)) + + self.assertEquals(self.y(u'https://google.ca'), ('https://google.ca', None)) + self.assertEquals(self.y(u'https://\u4e2d\u4fd4.com'), ('https://xn--fiq13b.com', None)) + + self.assertEquals(self.z(u'google.ca'), ('google.ca', None)) + + + def testInvalidUrls(self): + self.assertEquals(self.x(u'://ABC.com'), (u'://ABC.com', 'enter a valid URL')) + self.assertEquals(self.x(u'http://\u4e2d\u4fd4.dne'), (u'http://\u4e2d\u4fd4.dne', 'enter a valid URL')) + self.assertEquals(self.x(u'https://google.dne'), (u'https://google.dne', 'enter a valid URL')) + self.assertEquals(self.x(u'https://google..ca'), (u'https://google..ca', 'enter a valid URL')) + self.assertEquals(self.x(u'google..ca'), (u'google..ca', 'enter a valid URL')) + self.assertEquals(self.x(u'http://' + u'\u4e2d'*1000 + u'.com'), (u'http://' + u'\u4e2d'*1000 + u'.com', 'enter a valid URL')) + + self.assertEquals(self.x(u'http://google.com#fragment_\u4e86'), (u'http://google.com#fragment_\u4e86', 'enter a valid URL')) + self.assertEquals(self.x(u'http\u4e86://google.com'), (u'http\u4e86://google.com', 'enter a valid URL')) + self.assertEquals(self.x(u'http\u4e86://google.com#fragment_\u4e86'), (u'http\u4e86://google.com#fragment_\u4e86', 'enter a valid URL')) + + self.assertEquals(self.y(u'http://\u4e2d\u4fd4.com/\u4e86'), (u'http://\u4e2d\u4fd4.com/\u4e86', 'enter a valid URL')) + #self.assertEquals(self.y(u'google.ca'), (u'google.ca', 'enter a valid URL')) + + self.assertEquals(self.z(u'invalid.domain..com'), (u'invalid.domain..com', 'enter a valid URL')) + self.assertEquals(self.z(u'invalid.\u4e2d\u4fd4.blargg'), (u'invalid.\u4e2d\u4fd4.blargg', 'enter a valid URL')) + +# 
############################################################################## + +if __name__ == '__main__': + unittest.main() ADDED gluon/tests/test_router.py Index: gluon/tests/test_router.py ================================================================== --- /dev/null +++ gluon/tests/test_router.py @@ -0,0 +1,872 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""Unit tests for rewrite.py routers option""" + +import sys +import os +import unittest +import tempfile +import logging + +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) # running from web2py base +else: + sys.path.append(os.path.realpath('../')) # running from gluon/tests/ + +from rewrite import load, filter_url, filter_err, get_effective_router, map_url_out +from html import URL +from fileutils import abspath +from settings import global_settings +from http import HTTP +from storage import Storage + +logger = None +oldcwd = None +root = None + +def setUpModule(): + def make_apptree(): + "build a temporary applications tree" + # applications/ + os.mkdir(abspath('applications')) + # applications/app/ + for app in ('admin', 'examples', 'welcome'): + os.mkdir(abspath('applications', app)) + # applications/app/(controllers, static) + for subdir in ('controllers', 'static'): + os.mkdir(abspath('applications', app, subdir)) + # applications/admin/controllers/*.py + for ctr in ('appadmin', 'default', 'gae', 'mercurial', 'shell', 'wizard'): + open(abspath('applications', 'admin', 'controllers', '%s.py' % ctr), 'w').close() + # applications/examples/controllers/*.py + for ctr in ('ajax_examples', 'appadmin', 'default', 'global', 'spreadsheet'): + open(abspath('applications', 'examples', 'controllers', '%s.py' % ctr), 'w').close() + # applications/welcome/controllers/*.py + for ctr in ('appadmin', 'default'): + open(abspath('applications', 'welcome', 'controllers', '%s.py' % ctr), 'w').close() + # create an app-specific routes.py for examples app + routes = 
open(abspath('applications', 'examples', 'routes.py'), 'w') + routes.write("routers=dict(examples=dict(default_function='exdef'))") + routes.close() + # create language files for examples app + for lang in ('en', 'it'): + os.mkdir(abspath('applications', 'examples', 'static', lang)) + open(abspath('applications', 'examples', 'static', lang, 'file'), 'w').close() + + global oldcwd + if oldcwd is None: # do this only once + oldcwd = os.getcwd() + if not os.path.isdir('gluon'): + os.chdir(os.path.realpath('../../')) # run from web2py base directory + import main # for initialization after chdir + global logger + logger = logging.getLogger('web2py.rewrite') + global_settings.applications_parent = tempfile.mkdtemp() + global root + root = global_settings.applications_parent + make_apptree() + +def tearDownModule(): + global oldcwd + if oldcwd is not None: + os.chdir(oldcwd) + oldcwd = None + + +class TestRouter(unittest.TestCase): + """ Tests the routers logic from gluon.rewrite """ + + def test_router_syntax(self): + """ Test router syntax error """ + level = logger.getEffectiveLevel() + logger.setLevel(logging.CRITICAL) # disable logging temporarily + self.assertRaises(SyntaxError, load, data='x:y') + self.assertRaises(SyntaxError, load, rdict=dict(BASE=dict(badkey="value"))) + self.assertRaises(SyntaxError, load, rdict=dict(BASE=dict(), app=dict(default_application="name"))) + try: + # 2.7+ only + self.assertRaisesRegexp(SyntaxError, "invalid syntax", + load, data='x:y') + self.assertRaisesRegexp(SyntaxError, "unknown key", + load, rdict=dict(BASE=dict(badkey="value"))) + self.assertRaisesRegexp(SyntaxError, "BASE-only key", + load, rdict=dict(BASE=dict(), app=dict(default_application="name"))) + except AttributeError: + pass + logger.setLevel(level) + + def test_router_null(self): + """ Tests the null router """ + load(rdict=dict()) + # app resolution + self.assertEqual(filter_url('http://domain.com/welcome', app=True), 'welcome') + 
self.assertEqual(filter_url('http://domain.com/', app=True), 'init') + # incoming + self.assertEqual(filter_url('http://domain.com/favicon.ico'), '%s/applications/init/static/favicon.ico' % root) + self.assertEqual(filter_url('http://domain.com/abc'), '/init/default/abc') + self.assertEqual(filter_url('http://domain.com/index/abc'), "/init/default/index ['abc']") + self.assertEqual(filter_url('http://domain.com/abc/def'), "/init/default/abc ['def']") + self.assertEqual(filter_url('http://domain.com/index/a%20bc'), "/init/default/index ['a bc']") + self.assertEqual(filter_url('http://domain.com/welcome/static/path/to/static'), "%s/applications/welcome/static/path/to/static" % root) + self.assertRaises(HTTP, filter_url, 'http://domain.com/welcome/static/bad/path/to/st~tic') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*invalid static file", filter_url, 'http://domain.com/welcome/static/bad/path/to/st~tic') + except AttributeError: + pass + # outgoing + self.assertEqual(filter_url('http://domain.com/init/default/index', out=True), '/') + self.assertEqual(filter_url('http://domain.com/init/default/index/arg1', out=True), '/index/arg1') + self.assertEqual(filter_url('http://domain.com/init/default/abc', out=True), '/abc') + self.assertEqual(filter_url('http://domain.com/init/static/abc', out=True), '/init/static/abc') + self.assertEqual(filter_url('http://domain.com/init/appadmin/index', out=True), '/appadmin') + self.assertEqual(filter_url('http://domain.com/init/appadmin/abc', out=True), '/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/init/admin/index', out=True), '/init/admin') + self.assertEqual(filter_url('http://domain.com/init/admin/abc', out=True), '/init/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + + def test_router_specific(self): + """ + Test app-specific routes.py + + Note that make_apptree above created applications/examples/routes.py with a default_function. 
+ """ + load(rdict=dict()) + self.assertEqual(filter_url('http://domain.com/welcome'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/examples'), '/examples/default/exdef') + + def test_router_defapp(self): + """ Test the default-application function """ + routers = dict(BASE=dict(default_application='welcome')) + load(rdict=routers) + # app resolution + self.assertEqual(filter_url('http://domain.com/welcome', app=True), 'welcome') + self.assertEqual(filter_url('http://domain.com/', app=True), 'welcome') + # incoming + self.assertEqual(filter_url('http://domain.com'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/appadmin'), '/welcome/appadmin/index') + self.assertEqual(filter_url('http://domain.com/abc'), '/welcome/default/abc') + self.assertEqual(filter_url('http://domain.com/index/abc'), "/welcome/default/index ['abc']") + self.assertEqual(filter_url('http://domain.com/abc/def'), "/welcome/default/abc ['def']") + self.assertEqual(filter_url('http://domain.com/favicon.ico'), '%s/applications/welcome/static/favicon.ico' % root) + self.assertEqual(filter_url('http://domain.com/static/abc'), '%s/applications/welcome/static/abc' % root) + self.assertEqual(filter_url('http://domain.com/static/path/to/static'), "%s/applications/welcome/static/path/to/static" % root) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/abc') + self.assertEqual(filter_url('http://domain.com/welcome/default/admin', out=True), '/default/admin') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), + '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', 
out=True), '/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + + def test_router_nodef(self): + """ Test no-default functions """ + routers = dict( + BASE=dict(default_application='welcome'), + welcome=dict(controllers=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/default') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/default/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/default/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), + '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertEqual(filter_url('http://domain.com'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/appadmin'), '/welcome/appadmin/index') + self.assertEqual(filter_url('http://domain.com/abc'), '/welcome/abc/index') + self.assertEqual(filter_url('http://domain.com/index/abc'), "/welcome/index/abc") + 
self.assertEqual(filter_url('http://domain.com/abc/def'), "/welcome/abc/def") + self.assertEqual(filter_url('http://domain.com/abc/def/ghi'), "/welcome/abc/def ['ghi']") + + routers = dict( + BASE=dict(default_application=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/welcome') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/welcome/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/welcome/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/welcome/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/welcome/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertRaises(HTTP, filter_url, 'http://domain.com') + self.assertRaises(HTTP, filter_url, 'http://domain.com/appadmin') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*invalid application", filter_url, 'http://domain.com') + self.assertRaisesRegexp(HTTP, "400.*invalid application", filter_url, 'http://domain.com/appadmin') + except AttributeError: + pass + + routers = dict( + BASE=dict(default_application='welcome', applications=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/welcome') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/welcome/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), 
'/welcome/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/welcome/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/welcome/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertEqual(filter_url('http://domain.com'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/welcome/default/index') + self.assertRaises(HTTP, filter_url, 'http://domain.com/appadmin') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*unknown application: 'appadmin'", filter_url, 'http://domain.com/appadmin') + except AttributeError: + pass + + routers = dict( + BASE=dict(default_application='welcome', applications=None), + welcome=dict(controllers=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/welcome/default') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/welcome/default/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/welcome/default/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/welcome/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/welcome/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + 
self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertEqual(filter_url('http://domain.com'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/welcome/default/index') + self.assertRaises(HTTP, filter_url, 'http://domain.com/appadmin') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*unknown application: 'appadmin'", filter_url, 'http://domain.com/appadmin') + except AttributeError: + pass + + routers = dict( + BASE=dict(default_application='welcome', applications=None), + welcome=dict(default_controller=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/welcome/default') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/welcome/default/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/welcome/default/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/welcome/appadmin') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/welcome/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertRaises(HTTP, filter_url, 'http://domain.com') + self.assertRaises(HTTP, filter_url, 'http://domain.com/appadmin') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*invalid controller", filter_url, 'http://domain.com') + self.assertRaisesRegexp(HTTP, 
"400.*unknown application: 'appadmin'", filter_url, 'http://domain.com/appadmin') + except AttributeError: + pass + + routers = dict( + BASE=dict(default_application='welcome', applications=None), + welcome=dict(controllers=None, default_function=None), + ) + load(rdict=routers) + # outgoing + self.assertEqual(filter_url('http://domain.com/welcome/default/index', out=True), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/arg1', out=True), '/welcome/default/index/arg1') + self.assertEqual(filter_url('http://domain.com/welcome/default/abc', out=True), '/welcome/default/abc') + self.assertEqual(filter_url('http://domain.com/welcome/static/abc', out=True), '/welcome/static/abc') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/index', out=True), '/welcome/appadmin/index') + self.assertEqual(filter_url('http://domain.com/welcome/appadmin/abc', out=True), '/welcome/appadmin/abc') + self.assertEqual(filter_url('http://domain.com/welcome/admin/index', out=True), '/welcome/admin/index') + self.assertEqual(filter_url('http://domain.com/welcome/admin/abc', out=True), '/welcome/admin/abc') + self.assertEqual(filter_url('http://domain.com/admin/default/abc', out=True), '/admin/abc') + # incoming + self.assertRaises(HTTP, filter_url, 'http://domain.com') + self.assertRaises(HTTP, filter_url, 'http://domain.com/appadmin') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*invalid function", filter_url, 'http://domain.com') + self.assertRaisesRegexp(HTTP, "400.*unknown application: 'appadmin'", filter_url, 'http://domain.com/appadmin') + except AttributeError: + pass + + def test_router_app(self): + """ Tests the doctest router app resolution""" + routers = dict( + BASE = dict( + domains = { + "domain1.com" : "app1", + "www.domain1.com" : "app1", + "domain2.com" : "app2", + }, + ), + app1 = dict(), + app2 = dict(), + goodapp = dict(), + ) + routers['bad!app'] = dict() + load(rdict=routers) + 
self.assertEqual(filter_url('http://domain.com/welcome', app=True), 'welcome') + self.assertEqual(filter_url('http://domain.com/welcome/', app=True), 'welcome') + self.assertEqual(filter_url('http://domain.com', app=True), 'init') + self.assertEqual(filter_url('http://domain.com/', app=True), 'init') + self.assertEqual(filter_url('http://domain.com/abc', app=True), 'init') + self.assertEqual(filter_url('http://domain1.com/abc', app=True), 'app1') + self.assertEqual(filter_url('http://www.domain1.com/abc', app=True), 'app1') + self.assertEqual(filter_url('http://domain2.com/abc', app=True), 'app2') + self.assertEqual(filter_url('http://domain2.com/admin', app=True), 'app2') + + self.assertEqual(filter_url('http://domain.com/goodapp', app=True), 'goodapp') + self.assertRaises(HTTP, filter_url, 'http://domain.com/bad!app', app=True) + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, '400.*invalid application', filter_url, 'http://domain.com/bad!app') + except AttributeError: + pass + + routers['BASE']['domains']['domain3.com'] = 'app3' + self.assertRaises(SyntaxError, load, rdict=routers) + try: + # 2.7+ only + self.assertRaisesRegexp(SyntaxError, "unknown.*app3", load, rdict=routers) + except AttributeError: + pass + + def test_router_domains(self): + ''' + Test URLs that map domains + ''' + routers = dict( + BASE = dict( + applications = ['app1', 'app2', 'app2A', 'app3', 'app4', 'app5', 'app6'], + domains = { + # two domains to the same app + "domain1.com" : "app1", + "www.domain1.com" : "app1", + # same domain, two ports, to two apps + "domain2.com" : "app2a", + "domain2.com:8080" : "app2b", + # two domains, same app, two controllers + "domain3a.com" : "app3/c3a", + "domain3b.com" : "app3/c3b", + # two domains, same app & controller, two functions + "domain4a.com" : "app4/c4/f4a", + "domain4b.com" : "app4/c4/f4b", + # http vs https + "domain6.com:80" : "app6", + "domain6.com:443" : "app6s", + }, + ), + app1 = dict( default_controller = 'c1', default_function = 
'f1', controllers = ['c1'], exclusive_domain=True, ), + app2a = dict( default_controller = 'c2a', default_function = 'f2a', controllers = ['c2a'], ), + app2b = dict( default_controller = 'c2b', default_function = 'f2b', controllers = ['c2b'], ), + app3 = dict( controllers = ['c3a', 'c3b'], ), + app4 = dict( default_controller = 'c4', controllers = ['c4']), + app5 = dict( default_controller = 'c5', controllers = ['c5'], domain = 'localhost' ), + app6 = dict( default_controller = 'c6', default_function = 'f6', controllers = ['c6'], ), + app6s = dict( default_controller = 'c6s', default_function = 'f6s', controllers = ['c6s'], ), + ) + + load(rdict=routers) + self.assertEqual(filter_url('http://domain1.com/abc'), '/app1/c1/abc') + self.assertEqual(filter_url('http://domain1.com/c1/abc'), '/app1/c1/abc') + self.assertEqual(filter_url('http://domain1.com/abc.html'), '/app1/c1/abc') + self.assertEqual(filter_url('http://domain1.com/abc.css'), '/app1/c1/abc.css') + self.assertEqual(filter_url('http://domain1.com/index/abc'), "/app1/c1/index ['abc']") + self.assertEqual(filter_url('http://domain2.com/app1'), "/app2a/c2a/app1") + + self.assertEqual(filter_url('https://domain1.com/app1/ctr/fcn', domain=('app1',None), out=True), "/ctr/fcn") + self.assertEqual(filter_url('https://www.domain1.com/app1/ctr/fcn', domain=('app1',None), out=True), "/ctr/fcn") + + self.assertEqual(filter_url('http://domain2.com/abc'), '/app2a/c2a/abc') + self.assertEqual(filter_url('http://domain2.com:8080/abc'), '/app2b/c2b/abc') + + self.assertEqual(filter_url('http://domain2.com/app2a/ctr/fcn', domain=('app2a',None), out=True), "/ctr/fcn") + self.assertEqual(filter_url('http://domain2.com/app2a/ctr/f2a', domain=('app2a',None), out=True), "/ctr") + self.assertEqual(filter_url('http://domain2.com/app2a/c2a/f2a', domain=('app2a',None), out=True), "/") + self.assertEqual(filter_url('http://domain2.com/app2a/c2a/fcn', domain=('app2a',None), out=True), "/fcn") + 
self.assertEqual(filter_url('http://domain2.com/app2a/ctr/fcn', domain=('app2b',None), out=True), "/app2a/ctr/fcn") + self.assertEqual(filter_url('http://domain2.com/app2a/ctr/f2a', domain=('app2b',None), out=True), "/app2a/ctr") + self.assertEqual(filter_url('http://domain2.com/app2a/c2a/f2a', domain=('app2b',None), out=True), "/app2a") + + self.assertEqual(filter_url('http://domain3a.com/'), '/app3/c3a/index') + self.assertEqual(filter_url('http://domain3a.com/abc'), '/app3/c3a/abc') + self.assertEqual(filter_url('http://domain3a.com/c3b'), '/app3/c3b/index') + self.assertEqual(filter_url('http://domain3b.com/abc'), '/app3/c3b/abc') + + self.assertEqual(filter_url('http://domain3a.com/app3/c3a/fcn', domain=('app3','c3a'), out=True), "/fcn") + self.assertEqual(filter_url('http://domain3a.com/app3/c3a/fcn', domain=('app3','c3b'), out=True), "/c3a/fcn") + self.assertEqual(filter_url('http://domain3a.com/app3/c3a/fcn', domain=('app1',None), out=True), "/app3/c3a/fcn") + + self.assertEqual(filter_url('http://domain4a.com/abc'), '/app4/c4/abc') + self.assertEqual(filter_url('https://domain4a.com/app4/c4/fcn', domain=('app4',None), out=True), "/fcn") + + self.assertEqual(filter_url('http://domain4a.com'), '/app4/c4/f4a') + self.assertEqual(filter_url('http://domain4b.com'), '/app4/c4/f4b') + + self.assertEqual(filter_url('http://localhost/abc'), '/app5/c5/abc') + self.assertEqual(filter_url('http:///abc'), '/app5/c5/abc') # test null host => localhost + self.assertEqual(filter_url('https://localhost/app5/c5/fcn', domain=('app5',None), out=True), "/fcn") + + self.assertEqual(filter_url('http://domain6.com'), '/app6/c6/f6') + self.assertEqual(filter_url('https://domain6.com'), '/app6s/c6s/f6s') + + self.assertEqual(filter_url('http://domain2.com/app3/c3a/f3', domain=('app2b',None), out=True), "/app3/c3a/f3") + self.assertRaises(SyntaxError, filter_url, 'http://domain1.com/app1/c1/f1', domain=('app2b',None), out=True) + try: + # 2.7+ only + 
self.assertRaisesRegexp(SyntaxError, 'cross-domain conflict', filter_url, 'http://domain1.com/app1/c1/f1', domain=('app2b',None), out=True) + except AttributeError: + pass + self.assertEqual(filter_url('http://domain1.com/app1/c1/f1', domain=('app2b',None), host='domain2.com', out=True), "/app1") + + def test_router_raise(self): + ''' + Test URLs that raise exceptions + ''' + # test non-exception variants + router_raise = dict( + init = dict( + controllers = [], + ), + welcome = dict( + map_hyphen = False, + ), + ) + load(rdict=router_raise) + self.assertEqual(filter_url('http://domain.com/ctl'), "/init/ctl/index") + self.assertEqual(filter_url('http://domain.com/default/fcn'), "/init/default/fcn") + self.assertEqual(filter_url('http://domain.com/default/fcn.ext'), "/init/default/fcn.ext") + self.assertEqual(filter_url('http://domain.com/default/fcn/arg'), "/init/default/fcn ['arg']") + # now raise-HTTP variants + self.assertRaises(HTTP, filter_url, 'http://domain.com/bad!ctl') + self.assertRaises(HTTP, filter_url, 'http://domain.com/ctl/bad!fcn') + self.assertRaises(HTTP, filter_url, 'http://domain.com/ctl/fcn.bad!ext') + self.assertRaises(HTTP, filter_url, 'http://domain.com/ctl/fcn/bad!arg') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, '400.*invalid controller', filter_url, 'http://domain.com/init/bad!ctl') + self.assertRaisesRegexp(HTTP, '400.*invalid function', filter_url, 'http://domain.com/init/ctlr/bad!fcn') + self.assertRaisesRegexp(HTTP, '400.*invalid extension', filter_url, 'http://domain.com/init/ctlr/fcn.bad!ext') + self.assertRaisesRegexp(HTTP, '400.*invalid arg', filter_url, 'http://domain.com/appc/init/fcn/bad!arg') + except AttributeError: + pass + + self.assertEqual(filter_url('http://domain.com/welcome/default/fcn_1'), "/welcome/default/fcn_1") + self.assertRaises(HTTP, filter_url, 'http://domain.com/welcome/default/fcn-1') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, '400.*invalid function', filter_url, 
'http://domain.com/welcome/default/fcn-1') + except AttributeError: + pass + + def test_router_out(self): + ''' + Test basic outgoing routing + ''' + router_out = dict( + BASE = dict(), + init = dict( controllers = ['default', 'ctr'], ), + app = dict(), + ) + load(rdict=router_out) + self.assertEqual(filter_url('https://domain.com/app/ctr/fcn', out=True), "/app/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/init/ctr/fcn', out=True), "/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/init/ctr/fcn', out=True), "/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/init/static/file', out=True), "/init/static/file") + self.assertEqual(filter_url('https://domain.com/init/static/index', out=True), "/init/static/index") + self.assertEqual(filter_url('https://domain.com/init/default/index', out=True), "/") + self.assertEqual(filter_url('https://domain.com/init/ctr/index', out=True), "/ctr") + self.assertEqual(filter_url('http://domain.com/init/default/fcn?query', out=True), "/fcn?query") + self.assertEqual(filter_url('http://domain.com/init/default/fcn#anchor', out=True), "/fcn#anchor") + self.assertEqual(filter_url('http://domain.com/init/default/fcn?query#anchor', out=True), + "/fcn?query#anchor") + + router_out['BASE']['map_static'] = True + load(rdict=router_out) + self.assertEqual(filter_url('https://domain.com/init/static/file', out=True), "/static/file") + self.assertEqual(filter_url('https://domain.com/init/static/index', out=True), "/static/index") + + router_out['init']['map_static'] = False + load(rdict=router_out) + self.assertEqual(filter_url('https://domain.com/init/static/file', out=True), "/init/static/file") + self.assertEqual(filter_url('https://domain.com/init/static/index', out=True), "/init/static/index") + + def test_router_functions(self): + ''' + Test function-omission with functions=[something] + ''' + router_functions = dict( + BASE = dict( + applications = ['init', 'app', 'app2'], + default_application = 'app', + ), 
+ init = dict( + controllers = ['default'], + ), + app = dict( + controllers = ['default', 'ctr'], + functions = dict( + default=['index', 'user', 'help'], + ctr=['ctrf1', 'ctrf2', 'ctrf3'], + ), + default_function = dict( + default='index', + ctr='ctrf1', + ), + ), + app2 = dict( + controllers = ['default', 'ctr'], + functions = ['index', 'user', 'help'], + ), + ) + load(rdict=router_functions) + + # outbound + self.assertEqual(str(URL(a='init', c='default', f='f', args=['arg1'])), "/init/f/arg1") + self.assertEqual(str(URL(a='init', c='default', f='index', args=['arg1'])), "/init/index/arg1") + + self.assertEqual(str(URL(a='app', c='default', f='index', args=['arg1'])), "/arg1") + self.assertEqual(str(URL(a='app', c='default', f='user', args=['arg1'])), "/user/arg1") + self.assertEqual(str(URL(a='app', c='default', f='user', args=['index'])), "/user/index") + self.assertEqual(str(URL(a='app', c='default', f='index', args=['index'])), "/index/index") + self.assertEqual(str(URL(a='app', c='default', f='index', args=['init'])), "/index/init") + self.assertEqual(str(URL(a='app', c='default', f='index', args=['ctr'])), "/index/ctr") + self.assertEqual(str(URL(a='app', c='ctr', f='index', args=['arg'])), "/ctr/index/arg") + self.assertEqual(str(URL(a='app', c='ctr', f='ctrf1', args=['arg'])), "/ctr/arg") + self.assertEqual(str(URL(a='app', c='ctr', f='ctrf1', args=['ctrf2'])), "/ctr/ctrf1/ctrf2") + + self.assertEqual(str(URL(a='app2', c='default', f='index', args=['arg1'])), "/app2/arg1") + self.assertEqual(str(URL(a='app2', c='default', f='user', args=['arg1'])), "/app2/user/arg1") + self.assertEqual(str(URL(a='app2', c='default', f='user', args=['index'])), "/app2/user/index") + self.assertEqual(str(URL(a='app2', c='default', f='index', args=['index'])), "/app2/index/index") + self.assertEqual(str(URL(a='app2', c='default', f='index', args=['init'])), "/app2/index/init") + self.assertEqual(str(URL(a='app2', c='default', f='index', args=['ctr'])), "/app2/index/ctr") + 
+ # inbound + self.assertEqual(filter_url('http://d.com/arg'), "/app/default/index ['arg']") + self.assertEqual(filter_url('http://d.com/user'), "/app/default/user") + self.assertEqual(filter_url('http://d.com/user/arg'), "/app/default/user ['arg']") + self.assertEqual(filter_url('http://d.com/ctr'), "/app/ctr/ctrf1") + self.assertEqual(filter_url('http://d.com/ctr/arg'), "/app/ctr/ctrf1 ['arg']") + + self.assertEqual(filter_url('http://d.com/app2/arg'), "/app2/default/index ['arg']") + self.assertEqual(filter_url('http://d.com/app2/user'), "/app2/default/user") + self.assertEqual(filter_url('http://d.com/app2/user/arg'), "/app2/default/user ['arg']") + self.assertEqual(filter_url('http://d.com/app2/ctr'), "/app2/ctr/index") + self.assertEqual(filter_url('http://d.com/app2/ctr/index/arg'), "/app2/ctr/index ['arg']") + self.assertEqual(filter_url('http://d.com/app2/ctr/arg'), "/app2/ctr/arg") + + def test_router_hyphen(self): + ''' + Test hyphen conversion + ''' + router_hyphen = dict( + BASE = dict( + applications = ['init', 'app1', 'app2'], + ), + init = dict( + controllers = ['default'], + ), + app1 = dict( + controllers = ['default'], + map_hyphen = True, + ), + app2 = dict( + controllers = ['default'], + map_hyphen = False, + ), + ) + load(rdict=router_hyphen) + self.assertEqual(filter_url('http://domain.com/init/default/fcn_1', out=True), "/fcn_1") + self.assertEqual(filter_url('http://domain.com/static/filename-with_underscore'), + "%s/applications/init/static/filename-with_underscore" % root) + self.assertEqual(filter_url('http://domain.com/init/static/filename-with_underscore', out=True), + "/init/static/filename-with_underscore") + + self.assertEqual(filter_url('http://domain.com/app2/fcn_1'), + "/app2/default/fcn_1") + self.assertEqual(filter_url('http://domain.com/app2/ctr/fcn_1', domain=('app2',None), out=True), + "/ctr/fcn_1") + self.assertEqual(filter_url('http://domain.com/app2/static/filename-with_underscore', domain=('app2',None), out=True), + 
"/app2/static/filename-with_underscore") + self.assertEqual(filter_url('http://domain.com/app2/static/filename-with_underscore'), + "%s/applications/app2/static/filename-with_underscore" % root) + + self.assertEqual(str(URL(a='init', c='default', f='a_b')), "/a_b") + self.assertEqual(str(URL(a='app1', c='default', f='a_b')), "/app1/a-b") + self.assertEqual(str(URL(a='app2', c='default', f='a_b')), "/app2/a_b") + self.assertEqual(str(URL(a='app1', c='static', f='a/b_c')), "/app1/static/a/b_c") + self.assertEqual(str(URL(a='app1', c='static/a', f='b_c')), "/app1/static/a/b_c") + self.assertEqual(str(URL(a='app2', c='static', f='a/b_c')), "/app2/static/a/b_c") + self.assertEqual(str(URL(a='app2', c='static/a', f='b_c')), "/app2/static/a/b_c") + + + def test_router_lang(self): + ''' + Test language specifications + ''' + router_lang = dict( + BASE = dict(default_application = 'admin'), + welcome = dict(), + admin = dict( + controllers = ['default', 'ctr'], + languages = ['en', 'it', 'it-it'], default_language = 'en', + ), + examples = dict( + languages = ['en', 'it', 'it-it'], default_language = 'en', + ), + ) + load(rdict=router_lang) + self.assertEqual(filter_url('http://domain.com/index/abc'), "/admin/default/index ['abc'] (en)") + self.assertEqual(filter_url('http://domain.com/en/abc/def'), "/admin/default/abc ['def'] (en)") + self.assertEqual(filter_url('http://domain.com/it/abc/def'), "/admin/default/abc ['def'] (it)") + self.assertEqual(filter_url('http://domain.com/it-it/abc/def'), "/admin/default/abc ['def'] (it-it)") + self.assertEqual(filter_url('http://domain.com/index/a%20bc'), "/admin/default/index ['a bc'] (en)") + self.assertEqual(filter_url('http://domain.com/static/file'), "%s/applications/admin/static/file" % root) + self.assertEqual(filter_url('http://domain.com/en/static/file'), "%s/applications/admin/static/file" % root) + self.assertEqual(filter_url('http://domain.com/examples/en/static/file'), "%s/applications/examples/static/en/file" % root) + 
self.assertEqual(filter_url('http://domain.com/examples/static/file'), "%s/applications/examples/static/en/file" % root) + self.assertEqual(filter_url('http://domain.com/examples/it/static/file'), "%s/applications/examples/static/it/file" % root) + self.assertEqual(filter_url('http://domain.com/examples/it-it/static/file'), "%s/applications/examples/static/file" % root) + + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='en', out=True), "/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='it', out=True), "/it/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='it-it', out=True), "/it-it/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='en', out=True), "/admin/en/static/file") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='it', out=True), "/admin/it/static/file") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='it-it', out=True), "/admin/it-it/static/file") + self.assertEqual(filter_url('https://domain.com/welcome/ctr/fcn', lang='it', out=True), "/welcome/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/welcome/ctr/fcn', lang='es', out=True), "/welcome/ctr/fcn") + + router_lang['admin']['map_static'] = True + load(rdict=router_lang) + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='en', out=True), "/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='it', out=True), "/it/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/ctr/fcn', lang='it-it', out=True), "/it-it/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='en', out=True), "/static/file") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='it', out=True), "/it/static/file") + self.assertEqual(filter_url('https://domain.com/admin/static/file', lang='it-it', out=True), "/it-it/static/file") + 
self.assertEqual(filter_url('https://domain.com/welcome/ctr/fcn', lang='it', out=True), "/welcome/ctr/fcn") + self.assertEqual(filter_url('https://domain.com/welcome/ctr/fcn', lang='es', out=True), "/welcome/ctr/fcn") + + def test_router_get_effective(self): + ''' + Test get_effective_router + ''' + router_get_effective = dict( + BASE = dict( + default_application = 'a1', + applications = ['a1', 'a2'], + ), + a1 = dict( + controllers = ['c1a', 'c1b', 'default'], + ), + a2 = dict( + default_controller = 'c2', + controllers = [], + ), + a3 = dict( + default_controller = 'c2', + controllers = ['c1'], + ), + a4 = dict( + default_function = 'f1', + functions = ['f2'], + ), + ) + load(rdict=router_get_effective) + self.assertEqual(get_effective_router('BASE').applications, set(['a1','a2'])) + self.assertEqual(get_effective_router('BASE').default_application, 'a1') + self.assertEqual(get_effective_router('BASE').domains, {}) + self.assertEqual(get_effective_router('a1').applications, None) + self.assertEqual(get_effective_router('a1').default_application, None) + self.assertEqual(get_effective_router('a1').domains, None) + self.assertEqual(get_effective_router('a1').default_controller, "default") + self.assertEqual(get_effective_router('a2').default_application, None) + self.assertEqual(get_effective_router('a2').default_controller, "c2") + self.assertEqual(get_effective_router('a1').controllers, set(['c1a', 'c1b', 'default', 'static'])) + self.assertEqual(get_effective_router('a2').controllers, set()) + self.assertEqual(get_effective_router('a3').controllers, set(['c1', 'c2', 'static'])) + self.assertEqual(get_effective_router('a4').functions, dict(default=set(['f1', 'f2']))) + self.assertEqual(get_effective_router('xx'), None) + + def test_router_error(self): + ''' + Test rewrite of HTTP errors + ''' + router_err = dict() + load(rdict=router_err) + self.assertEqual(filter_err(200), 200) + self.assertEqual(filter_err(399), 399) + self.assertEqual(filter_err(400), 400) + 
+ def test_router_args(self): + ''' + Test URL args parsing/generation + ''' + load(rdict=dict()) + self.assertEqual(filter_url('http://domain.com/init/default/f/arg1'), + "/init/default/f ['arg1']") + self.assertEqual(filter_url('http://domain.com/init/default/f/arg1/'), + "/init/default/f ['arg1']") + self.assertEqual(filter_url('http://domain.com/init/default/f/arg1//'), + "/init/default/f ['arg1', '']") + self.assertEqual(filter_url('http://domain.com/init/default/f//arg1'), + "/init/default/f ['', 'arg1']") + self.assertEqual(filter_url('http://domain.com/init/default/f/arg1/arg2'), + "/init/default/f ['arg1', 'arg2']") + + self.assertEqual(filter_url('http://domain.com/init/default/f', out=True), "/f") + self.assertEqual(map_url_out(None, None, 'init', 'default', 'f', None, None, None, None, None), "/f") + self.assertEqual(map_url_out(None, None, 'init', 'default', 'f', [], None, None, None, None), "/f") + self.assertEqual(map_url_out(None, None, 'init', 'default', 'f', ['arg1'], None, None, None, None), "/f") + self.assertEqual(map_url_out(None, None, 'init', 'default', 'f', ['arg1', ''], None, None, None, None), "/f") + self.assertEqual(str(URL(a='init', c='default', f='f', args=None)), "/f") + self.assertEqual(str(URL(a='init', c='default', f='f', args=['arg1'])), "/f/arg1") + self.assertEqual(str(URL(a='init', c='default', f='f', args=['arg1', ''])), "/f/arg1//") + self.assertEqual(str(URL(a='init', c='default', f='f', args=['arg1', '', 'arg3'])), "/f/arg1//arg3") + self.assertEqual(str(URL(a='init', c='default', f='f', args=['ar g'])), "/f/ar%20g") + self.assertEqual(str(URL(a='init', c='default', f='f', args=['årg'])), "/f/%C3%A5rg") + self.assertEqual(str(URL(a='init', c='default', f='fünc')), "/f\xc3\xbcnc") + + def test_routes_anchor(self): + ''' + Test URL with anchor + ''' + self.assertEqual(str(URL(a='a', c='c', f='f', anchor='anchor')), "/a/c/f#anchor") + load(rdict=dict()) + self.assertEqual(str(URL(a='a', c='c', f='f', anchor='anchor')), 
"/a/c/f#anchor") + args = ['a1', 'a2'] + self.assertEqual(str(URL(a='a', c='c', f='f', args=args, anchor='anchor')), + "/a/c/f/a1/a2#anchor") + vars = dict(v1=1, v2=2) + self.assertEqual(str(URL(a='a', c='c', f='f', vars=vars, anchor='anchor')), + "/a/c/f?v1=1&v2=2#anchor") + self.assertEqual(str(URL(a='a', c='c', f='f', args=args, vars=vars, anchor='anchor')), + "/a/c/f/a1/a2?v1=1&v2=2#anchor") + self.assertEqual(str(URL(a='init', c='default', f='index')), + "/") + self.assertEqual(str(URL(a='init', c='default', f='f')), + "/f") + self.assertEqual(str(URL(a='init', c='default', f='index', anchor='anchor')), + "/#anchor") + self.assertEqual(str(URL(a='init', c='default', f='f', anchor='anchor')), + "/f#anchor") + + def test_router_prefix(self): + ''' + Test path_prefix + ''' + router_path_prefix = dict( + BASE = dict( + default_application = 'a1', + applications = ['a1', 'a2'], + path_prefix = '/path/to/apps', + ), + a1 = dict( + controllers = ['c1a', 'c1b', 'default'], + ), + a2 = dict( + default_controller = 'c2', + controllers = [], + ), + ) + load(rdict=router_path_prefix) + self.assertEqual(str(URL(a='a1', c='c1a', f='f')), + "/path/to/apps/c1a/f") + self.assertEqual(str(URL(a='a2', c='c', f='f')), + "/path/to/apps/a2/c/f") + self.assertEqual(str(URL(a='a2', c='c2', f='f')), + "/path/to/apps/a2/c2/f") + self.assertEqual(filter_url('http://domain.com/a1/'), "/a1/default/index") + self.assertEqual(filter_url('http://domain.com/path/to/apps/a1/'), "/a1/default/index") + self.assertEqual(filter_url('http://domain.com/path/to/a1/'), "/a1/default/path ['to', 'a1']") + + def test_router_absolute(self): + ''' + Test absolute URL + ''' + load(rdict=dict()) + r = Storage() + r.env = Storage() + r.env.http_host = 'domain.com' + r.env.WSGI_URL_SCHEME = 'httpx' # distinguish incoming scheme + self.assertEqual(str(URL(r=r, a='a', c='c', f='f')), "/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host=True)), + "httpx://domain.com/a/c/f") + 
self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False)), + "/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='https')), + "https://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='wss')), + "wss://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, host=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='https', host=True)), + "https://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False, host=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False, host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', port=1234)), + "httpx://domain.com:1234/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, port=1234)), + "httpx://domain.com:1234/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host='host.com', port=1234)), + "httpx://host.com:1234/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='wss', host='host.com', port=1234)), + "wss://host.com:1234/a/c/f") + + def test_request_uri(self): + ''' + Test REQUEST_URI in env + ''' + load(rdict=dict()) + self.assertEqual(filter_url('http://domain.com/abc', env=True).request_uri, + '/init/default/abc') + self.assertEqual(filter_url('http://domain.com/abc?def', env=True).request_uri, + '/init/default/abc?def') + self.assertEqual(filter_url('http://domain.com/index/abc', env=True).request_uri, + "/init/default/index/abc") + self.assertEqual(filter_url('http://domain.com/abc/def', env=True).request_uri, + 
"/init/default/abc/def") + self.assertEqual(filter_url('http://domain.com/index/a%20bc', env=True).request_uri, + "/init/default/index/a%20bc") + +if __name__ == '__main__': + setUpModule() # pre-2.7 + unittest.main() + tearDownModule() ADDED gluon/tests/test_routes.py Index: gluon/tests/test_routes.py ================================================================== --- /dev/null +++ gluon/tests/test_routes.py @@ -0,0 +1,363 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +"""Unit tests for rewrite.py regex routing option""" + +import sys +import os +import unittest +import tempfile +import logging + +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) # running from web2py base +else: + sys.path.append(os.path.realpath('../')) # running from gluon/tests/ + +from rewrite import load, filter_url, filter_err, get_effective_router, regex_filter_out, regex_select +from html import URL +from fileutils import abspath +from settings import global_settings +from http import HTTP +from storage import Storage + +logger = None +oldcwd = None +root = None + +def setUpModule(): + def make_apptree(): + "build a temporary applications tree" + # applications/ + os.mkdir(abspath('applications')) + # applications/app/ + for app in ('admin', 'examples', 'welcome'): + os.mkdir(abspath('applications', app)) + # applications/app/(controllers, static) + for subdir in ('controllers', 'static'): + os.mkdir(abspath('applications', app, subdir)) + # applications/admin/controllers/*.py + for ctr in ('appadmin', 'default', 'gae', 'mercurial', 'shell', 'wizard'): + open(abspath('applications', 'admin', 'controllers', '%s.py' % ctr), 'w').close() + # applications/examples/controllers/*.py + for ctr in ('ajax_examples', 'appadmin', 'default', 'global', 'spreadsheet'): + open(abspath('applications', 'examples', 'controllers', '%s.py' % ctr), 'w').close() + # applications/welcome/controllers/*.py + for ctr in ('appadmin', 'default'): + open(abspath('applications', 
'welcome', 'controllers', '%s.py' % ctr), 'w').close() + # create an app-specific routes.py for examples app + routes = open(abspath('applications', 'examples', 'routes.py'), 'w') + routes.write("default_function='exdef'\n") + routes.close() + + global oldcwd + if oldcwd is None: # do this only once + oldcwd = os.getcwd() + if not os.path.isdir('gluon'): + os.chdir(os.path.realpath('../../')) # run from web2py base directory + import main # for initialization after chdir + global logger + logger = logging.getLogger('web2py.rewrite') + global_settings.applications_parent = tempfile.mkdtemp() + global root + root = global_settings.applications_parent + make_apptree() + +def tearDownModule(): + global oldcwd + if oldcwd is not None: + os.chdir(oldcwd) + oldcwd = None + + +class TestRoutes(unittest.TestCase): + """ Tests the regex routing logic from gluon.rewrite """ + + def test_routes_null(self): + """ Tests a null routes table """ + load(data='') + # incoming + self.assertEqual(filter_url('http://domain.com'), '/init/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/init/default/index') + self.assertEqual(filter_url('http://domain.com/abc'), '/abc/default/index') + self.assertEqual(filter_url('http://domain.com/abc/'), '/abc/default/index') + self.assertEqual(filter_url('http://domain.com/abc/def'), "/abc/def/index") + self.assertEqual(filter_url('http://domain.com/abc/def/'), "/abc/def/index") + self.assertEqual(filter_url('http://domain.com/abc/def/ghi'), "/abc/def/ghi") + self.assertEqual(filter_url('http://domain.com/abc/def/ghi/'), "/abc/def/ghi") + self.assertEqual(filter_url('http://domain.com/abc/def/ghi/jkl'), "/abc/def/ghi ['jkl']") + self.assertEqual(filter_url('http://domain.com/abc/def/ghi/j%20kl'), "/abc/def/ghi ['j_kl']") + self.assertEqual(filter_url('http://domain.com/welcome/static/path/to/static'), "%s/applications/welcome/static/path/to/static" % root) + self.assertRaises(HTTP, filter_url, 
'http://domain.com/welcome/static/bad/path/to/st~tic') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, "400.*BAD REQUEST \[invalid path\]", filter_url, 'http://domain.com/welcome/static/bad/path/to/st~tic') + except AttributeError: + pass + # outgoing + self.assertEqual(filter_url('http://domain.com/init/default/index', out=True), '/init/default/index') + self.assertEqual(filter_url('http://domain.com/init/default/index/arg1', out=True), '/init/default/index/arg1') + self.assertEqual(filter_url('http://domain.com/init/default/abc', out=True), '/init/default/abc') + + def test_routes_query(self): + """ Test query appending """ + data = r''' +routes_in = ( + ('/service/$model/create', '/app/default/call/json/create?model=$model'), +) +''' + load(data=data) + self.assertEqual(filter_url('http://localhost:8000/service/person/create'), "/app/default/call ['json', 'create'] ?model=person") + self.assertEqual(filter_url('http://localhost:8000/service/person/create?var1=val1'), "/app/default/call ['json', 'create'] ?model=person&var1=val1") + + def test_routes_specific(self): + """ + Test app-specific routes.py + + Note that make_apptree above created applications/examples/routes.py with a default_function. 
+ """ + data = r''' +routes_app = [ + (r'/(?Pwelcome|admin|examples)\b.*', r'\g'), + (r'$anything', r'welcome'), + (r'/?$anything', r'welcome'), +] +''' + load(data=data) + self.assertEqual(filter_url('http://domain.com/welcome'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/examples'), '/examples/default/exdef') + + def test_routes_defapp(self): + """ Test the default-application function """ + data = r''' +default_application = 'defapp' +''' + load(data=data) + # incoming + self.assertEqual(filter_url('http://domain.com'), '/defapp/default/index') + self.assertEqual(filter_url('http://domain.com/'), '/defapp/default/index') + self.assertEqual(filter_url('http://domain.com/welcome'), '/welcome/default/index') + self.assertEqual(filter_url('http://domain.com/app'), '/app/default/index') + self.assertEqual(filter_url('http://domain.com/welcome/default/index/abc'), "/welcome/default/index ['abc']") + self.assertEqual(filter_url('http://domain.com/welcome/static/abc'), '%s/applications/welcome/static/abc' % root) + self.assertEqual(filter_url('http://domain.com/defapp/static/path/to/static'), "%s/applications/defapp/static/path/to/static" % root) + + def test_routes_raise(self): + ''' + Test URLs that raise exceptions + ''' + # test non-exception variants + load(data='') + self.assertEqual(filter_url('http://domain.com/init'), "/init/default/index") + self.assertEqual(filter_url('http://domain.com/init/default'), "/init/default/index") + self.assertEqual(filter_url('http://domain.com/init/default/fcn.ext'), "/init/default/fcn.ext") + self.assertEqual(filter_url('http://domain.com/init/default/fcn/arg'), "/init/default/fcn ['arg']") + # now raise-HTTP variants + self.assertRaises(HTTP, filter_url, 'http://domain.com/bad!ctl') + self.assertRaises(HTTP, filter_url, 'http://domain.com/ctl/bad!fcn') + self.assertRaises(HTTP, filter_url, 'http://domain.com/ctl/fcn.bad!ext') + self.assertRaises(HTTP, filter_url, 
'http://domain.com/ctl/fcn/bad!arg') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, '400 BAD REQUEST \[invalid path\]', filter_url, 'http://domain.com/init/bad!ctl') + self.assertRaisesRegexp(HTTP, '400 BAD REQUEST \[invalid path\]', filter_url, 'http://domain.com/init/ctlr/bad!fcn') + self.assertRaisesRegexp(HTTP, '400 BAD REQUEST \[invalid path\]', filter_url, 'http://domain.com/init/ctlr/fcn.bad!ext') + self.assertRaisesRegexp(HTTP, '400 BAD REQUEST \[invalid path \(args\)\]', filter_url, 'http://domain.com/appc/init/fcn/bad!arg') + except AttributeError: + pass + + self.assertEqual(filter_url('http://domain.com/welcome/default/fcn_1'), "/welcome/default/fcn_1") + self.assertRaises(HTTP, filter_url, 'http://domain.com/welcome/default/fcn-1') + try: + # 2.7+ only + self.assertRaisesRegexp(HTTP, '400 BAD REQUEST \[invalid path\]', filter_url, 'http://domain.com/welcome/default/fcn-1') + except AttributeError: + pass + + def test_routes_error(self): + ''' + Test rewrite of HTTP errors + ''' + router_err = dict() + load(rdict=router_err) + self.assertEqual(filter_err(200), 200) + self.assertEqual(filter_err(399), 399) + self.assertEqual(filter_err(400), 400) + + def test_routes_args(self): + ''' + Test URL args parsing/generation + ''' + data = r'''routes_in = [ + ('/robots.txt', '/welcome/static/robots.txt'), + ('/favicon.ico', '/welcome/static/favicon.ico'), + ('/admin$anything', '/admin$anything'), + ('.*:https?://(.*\\.)?domain1.com:$method /', '/app1/default'), + ('.*:https?://(.*\\.)?domain1.com:$method /static/$anything', '/app1/static/$anything'), + ('.*:https?://(.*\\.)?domain1.com:$method /appadmin/$anything', '/app1/appadmin/$anything'), + ('.*:https?://(.*\\.)?domain1.com:$method /$anything', '/app1/default/$anything'), + ('.*:https?://(.*\\.)?domain2.com:$method /', '/app2/default'), + ('.*:https?://(.*\\.)?domain2.com:$method /static/$anything', '/app2/static/$anything'), + ('.*:https?://(.*\\.)?domain2.com:$method /appadmin/$anything', 
'/app2/appadmin/$anything'), + ('.*:https?://(.*\\.)?domain2.com:$method /$anything', '/app2/default/$anything'), + ('.*:https?://(.*\\.)?domain3.com:$method /', '/app3/defcon3'), + ('.*:https?://(.*\\.)?domain3.com:$method /static/$anything', '/app3/static/$anything'), + ('.*:https?://(.*\\.)?domain3.com:$method /appadmin/$anything', '/app3/appadmin/$anything'), + ('.*:https?://(.*\\.)?domain3.com:$method /$anything', '/app3/defcon3/$anything'), + ('/', '/welcome/default'), + ('/welcome/default/$anything', '/welcome/default/$anything'), + ('/welcome/$anything', '/welcome/default/$anything'), + ('/static/$anything', '/welcome/static/$anything'), + ('/appadmin/$anything', '/welcome/appadmin/$anything'), + ('/$anything', '/welcome/default/$anything'), + ] +routes_out = [ + ('/welcome/static/$anything', '/static/$anything'), + ('/welcome/appadmin/$anything', '/appadmin/$anything'), + ('/welcome/default/$anything', '/$anything'), + ('/app1/static/$anything', '/static/$anything'), + ('/app1/appadmin/$anything', '/appadmin/$anything'), + ('/app1/default/$anything', '/$anything'), + ('/app2/static/$anything', '/static/$anything'), + ('/app2/appadmin/$anything', '/appadmin/$anything'), + ('/app2/default/$anything', '/$anything'), + ('/app3/static/$anything', '/static/$anything'), + ('/app3/appadmin/$anything', '/appadmin/$anything'), + ('/app3/defcon3/$anything', '/$anything') + ] +''' + load(data=data) + self.assertEqual(filter_url('http://domain.com/welcome/default/f/arg1'), + "/welcome/default/f ['arg1']") + self.assertEqual(filter_url('http://domain.com/welcome/default/f/arg1/'), + "/welcome/default/f ['arg1']") + self.assertEqual(filter_url('http://domain.com/welcome/default/f/arg1//'), + "/welcome/default/f ['arg1', '']") + self.assertEqual(filter_url('http://domain.com/welcome/default/f//arg1'), + "/welcome/default/f ['', 'arg1']") + self.assertEqual(filter_url('http://domain.com/welcome/default/f/arg1/arg2'), + "/welcome/default/f ['arg1', 'arg2']") + + 
self.assertEqual(filter_url('http://domain.com/welcome/default/f', out=True), "/f") + self.assertEqual(regex_filter_out('/welcome/default/f'), "/f") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=None)), "/f") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=['arg1'])), "/f/arg1") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=['arg1', ''])), "/f/arg1//") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=['arg1', '', 'arg3'])), "/f/arg1//arg3") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=['ar g'])), "/f/ar%20g") + self.assertEqual(str(URL(a='welcome', c='default', f='f', args=['årg'])), "/f/%C3%A5rg") + self.assertEqual(str(URL(a='welcome', c='default', f='fünc')), "/f\xc3\xbcnc") + + def test_routes_anchor(self): + ''' + Test URL with anchor + ''' + self.assertEqual(str(URL(a='a', c='c', f='f', anchor='anchor')), "/a/c/f#anchor") + load(data='') + self.assertEqual(str(URL(a='a', c='c', f='f', anchor='anchor')), "/a/c/f#anchor") + args = ['a1', 'a2'] + self.assertEqual(str(URL(a='a', c='c', f='f', args=args, anchor='anchor')), + "/a/c/f/a1/a2#anchor") + vars = dict(v1=1, v2=2) + self.assertEqual(str(URL(a='a', c='c', f='f', vars=vars, anchor='anchor')), + "/a/c/f?v1=1&v2=2#anchor") + self.assertEqual(str(URL(a='a', c='c', f='f', args=args, vars=vars, anchor='anchor')), + "/a/c/f/a1/a2?v1=1&v2=2#anchor") + + data = r'''routes_out = [ + ('/init/default/index', '/'), + ]''' + load(data=data) + self.assertEqual(str(URL(a='init', c='default', f='index')), + "/") + self.assertEqual(str(URL(a='init', c='default', f='index', anchor='anchor')), + "/init/default/index#anchor") + + data = r'''routes_out = [ + (r'/init/default/index(?P(#.*)?)', r'/\g'), + ]''' + load(data=data) + self.assertEqual(str(URL(a='init', c='default', f='index')), + "/") + self.assertEqual(str(URL(a='init', c='default', f='index', anchor='anchor')), + "/#anchor") + + data = r'''routes_out = [ + 
(r'/init/default/index(?P([?#].*)?)', r'/\g'), + ]''' + load(data=data) + self.assertEqual(str(URL(a='init', c='default', f='index')), + "/") + self.assertEqual(str(URL(a='init', c='default', f='index', anchor='anchor')), + "/#anchor") + query = dict(var='abc') + self.assertEqual(str(URL(a='init', c='default', f='index', vars=query)), + "/?var=abc") + self.assertEqual(str(URL(a='init', c='default', f='index', vars=query, anchor='anchor')), + "/?var=abc#anchor") + + def test_routes_absolute(self): + ''' + Test absolute URL + ''' + load(data='') + r = Storage() + r.env = Storage() + r.env.http_host = 'domain.com' + r.env.WSGI_URL_SCHEME = 'httpx' # distinguish incoming scheme + self.assertEqual(str(URL(r=r, a='a', c='c', f='f')), "/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False)), + "/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='https')), + "https://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='wss')), + "wss://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, host=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='https', host=True)), + "https://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False, host=True)), + "httpx://domain.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=False, host='host.com')), + "httpx://host.com/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', port=1234)), + "httpx://domain.com:1234/a/c/f") + 
self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme=True, port=1234)), + "httpx://domain.com:1234/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', host='host.com', port=1234)), + "httpx://host.com:1234/a/c/f") + self.assertEqual(str(URL(r=r, a='a', c='c', f='f', scheme='wss', host='host.com', port=1234)), + "wss://host.com:1234/a/c/f") + + def test_request_uri(self): + ''' + Test REQUEST_URI in env + ''' + data = r'''routes_in = [ + ('/abc', '/init/default/abc'), + ('/index/$anything', '/init/default/index/$anything'), + ] +''' + load(data=data) + self.assertEqual(filter_url('http://domain.com/abc', env=True).request_uri, + '/init/default/abc') + self.assertEqual(filter_url('http://domain.com/abc?def', env=True).request_uri, + '/init/default/abc?def') + self.assertEqual(filter_url('http://domain.com/index/abc', env=True).request_uri, + "/init/default/index/abc") + self.assertEqual(filter_url('http://domain.com/index/a%20bc', env=True).request_uri, + "/init/default/index/a bc") + + +if __name__ == '__main__': + setUpModule() # pre-2.7 + unittest.main() + tearDownModule() ADDED gluon/tests/test_template.py Index: gluon/tests/test_template.py ================================================================== --- /dev/null +++ gluon/tests/test_template.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" + Unit tests for gluon.template +""" + +import sys +import os +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) +else: + sys.path.append(os.path.realpath('../')) + +import unittest +from template import render + +class TestVirtualFields(unittest.TestCase): + + def testRun(self): + self.assertEqual(render(content='{{for i in range(n):}}{{=i}}{{pass}}', + context=dict(n=3)), '012') + self.assertEqual(render(content='{{if n>2:}}ok{{pass}}', + context=dict(n=3)), 'ok') + self.assertEqual(render(content='{{try:}}{{n/0}}{{except:}}fail{{pass}}', + context=dict(n=3)), 'fail') + 
self.assertEqual(render(content='{{="<&>"}}'), '<&>') + self.assertEqual(render(content='"abc"'), '"abc"') + self.assertEqual(render(content='"a\'bc"'), '"a\'bc"') + self.assertEqual(render(content='"a\"bc"'), '"a\"bc"') + self.assertEqual(render(content=r'''"a\"bc"'''), r'"a\"bc"') + self.assertEqual(render(content=r'''"""abc\""""'''), r'"""abc\""""') + + def testEqualWrite(self): + "test generation of response.write from =" + self.assertEqual(render(content='{{="abc"}}'), 'abc') + # whitespace is stripped + self.assertEqual(render(content='{{ ="abc"}}'), 'abc') + self.assertEqual(render(content='{{ ="abc" }}'), 'abc') + self.assertEqual(render(content='{{pass\n="abc" }}'), 'abc') + # = recognized only at the beginning of a physical line + self.assertEqual(render(content='{{xyz = "xyz"\n="abc"\n="def"\n=xyz }}'), 'abcdefxyz') + # = in python blocks + self.assertEqual(render(content='{{if True:\n="abc"\npass }}'), 'abc') + self.assertEqual(render(content='{{if True:\n="abc"\npass\n="def" }}'), 'abcdef') + self.assertEqual(render(content='{{if False:\n="abc"\npass\n="def" }}'), 'def') + self.assertEqual(render(content='{{if True:\n="abc"\nelse:\n="def"\npass }}'), 'abc') + self.assertEqual(render(content='{{if False:\n="abc"\nelse:\n="def"\npass }}'), 'def') + # codeblock-leading = handles internal newlines, escaped or not + self.assertEqual(render(content='{{=list((1,2,3))}}'), '[1, 2, 3]') + self.assertEqual(render(content='{{=list((1,2,\\\n3))}}'), '[1, 2, 3]') + self.assertEqual(render(content='{{=list((1,2,\n3))}}'), '[1, 2, 3]') + # ...but that means no more = operators in the codeblock + self.assertRaises(SyntaxError, render, content='{{="abc"\n="def" }}') + # = embedded in codeblock won't handle newlines in its argument + self.assertEqual(render(content='{{pass\n=list((1,2,\\\n3))}}'), '[1, 2, 3]') + self.assertRaises(SyntaxError, render, content='{{pass\n=list((1,2,\n3))}}') + + +if __name__ == '__main__': + unittest.main() ADDED gluon/tests/test_utils.py 
Index: gluon/tests/test_utils.py ================================================================== --- /dev/null +++ gluon/tests/test_utils.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" Unit tests for utils.py """ + +import sys +import os +import unittest +if os.path.isdir('gluon'): + sys.path.append(os.path.realpath('gluon')) +else: + sys.path.append(os.path.realpath('../')) + +from utils import md5_hash + + +class TestUtils(unittest.TestCase): + """ Tests the utils.py module """ + + def test_md5_hash(self): + """ Tests the md5_hash function """ + + data = md5_hash("web2py rocks") + self.assertEqual(data, '79509f3246a2824dee64635303e99204') + +if __name__ == '__main__': + unittest.main() ADDED gluon/tools.py Index: gluon/tools.py ================================================================== --- /dev/null +++ gluon/tools.py @@ -0,0 +1,4019 @@ +#!/bin/python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +import base64 +import cPickle +import datetime +import thread +import logging +import sys +import os +import re +import time +import smtplib +import urllib +import urllib2 +import Cookie +import cStringIO +from email import MIMEBase, MIMEMultipart, MIMEText, Encoders, Header, message_from_string + +from contenttype import contenttype +from storage import Storage, StorageList, Settings, Messages +from utils import web2py_uuid +from fileutils import read_file +from gluon import * + +import serializers + +try: + import json as json_parser # try stdlib (Python 2.6) +except ImportError: + try: + import simplejson as json_parser # try external module + except: + import contrib.simplejson as json_parser # fallback to pure-Python module + +__all__ = ['Mail', 'Auth', 'Recaptcha', 'Crud', 'Service', + 'PluginManager', 'fetch', 'geocode', 'prettydate'] + +### mind there are two loggers here (logger and 
crud.settings.logger)! +logger = logging.getLogger("web2py") + +DEFAULT = lambda: None + +def callback(actions,form,tablename=None): + if actions: + if tablename and isinstance(actions,dict): + actions = actions.get(tablename, []) + if not isinstance(actions,(list, tuple)): + actions = [actions] + [action(form) for action in actions] + +def validators(*a): + b = [] + for item in a: + if isinstance(item, (list, tuple)): + b = b + list(item) + else: + b.append(item) + return b + +def call_or_redirect(f,*args): + if callable(f): + redirect(f(*args)) + else: + redirect(f) + +def replace_id(url, form): + if url and not url[0] == '/' and url[:4] != 'http': + # this is here for backward compatibility + return URL(url.replace('[id]', str(form.vars.id))) + elif url: + # this allows http://..../%(id)s/%(name)s/etc. + return url % form.vars + return url + +class Mail(object): + """ + Class for configuring and sending emails with alternative text / html + body, multiple attachments and encryption support + + Works with SMTP and Google App Engine. + """ + + class Attachment(MIMEBase.MIMEBase): + """ + Email attachment + + Arguments:: + + payload: path to file or file-like object with read() method + filename: name of the attachment stored in message; if set to + None, it will be fetched from payload path; file-like + object payload must have explicit filename specified + content_id: id of the attachment; automatically contained within + < and > + content_type: content type of the attachment; if set to None, + it will be fetched from filename using gluon.contenttype + module + encoding: encoding of all strings passed to this function (except + attachment body) + + Content ID is used to identify attachments within the html body; + in example, attached image with content ID 'photo' may be used in + html message as a source of img tag . 
+ + Examples:: + + #Create attachment from text file: + attachment = Mail.Attachment('/path/to/file.txt') + + Content-Type: text/plain + MIME-Version: 1.0 + Content-Disposition: attachment; filename="file.txt" + Content-Transfer-Encoding: base64 + + SOMEBASE64CONTENT= + + #Create attachment from image file with custom filename and cid: + attachment = Mail.Attachment('/path/to/file.png', + filename='photo.png', + content_id='photo') + + Content-Type: image/png + MIME-Version: 1.0 + Content-Disposition: attachment; filename="photo.png" + Content-Id: + Content-Transfer-Encoding: base64 + + SOMEOTHERBASE64CONTENT= + """ + + def __init__( + self, + payload, + filename=None, + content_id=None, + content_type=None, + encoding='utf-8'): + if isinstance(payload, str): + if filename is None: + filename = os.path.basename(payload) + payload = read_file(payload, 'rb') + else: + if filename is None: + raise Exception('Missing attachment name') + payload = payload.read() + filename = filename.encode(encoding) + if content_type is None: + content_type = contenttype(filename) + self.my_filename = filename + self.my_payload = payload + MIMEBase.MIMEBase.__init__(self, *content_type.split('/', 1)) + self.set_payload(payload) + self['Content-Disposition'] = 'attachment; filename="%s"' % filename + if not content_id is None: + self['Content-Id'] = '<%s>' % content_id.encode(encoding) + Encoders.encode_base64(self) + + def __init__(self, server=None, sender=None, login=None, tls=True): + """ + Main Mail object + + Arguments:: + + server: SMTP server address in address:port notation + sender: sender email address + login: sender login name and password in login:password notation + or None if no authentication is required + tls: enables/disables encryption (True by default) + + In Google App Engine use:: + + server='gae' + + For sake of backward compatibility all fields are optional and default + to None, however, to be able to send emails at least server and sender + must be specified. 
They are available under following fields: + + mail.settings.server + mail.settings.sender + mail.settings.login + + When server is 'logging', email is logged but not sent (debug mode) + + Optionally you can use PGP encryption or X509: + + mail.settings.cipher_type = None + mail.settings.sign = True + mail.settings.sign_passphrase = None + mail.settings.encrypt = True + mail.settings.x509_sign_keyfile = None + mail.settings.x509_sign_certfile = None + mail.settings.x509_crypt_certfiles = None + + cipher_type : None + gpg - need a python-pyme package and gpgme lib + x509 - smime + sign : sign the message (True or False) + sign_passphrase : passphrase for key signing + encrypt : encrypt the message + ... x509 only ... + x509_sign_keyfile : the signers private key filename (PEM format) + x509_sign_certfile: the signers certificate filename (PEM format) + x509_crypt_certfiles: the certificates file to encrypt the messages + with can be a file name or a list of + file names (PEM format) + + Examples:: + + #Create Mail object with authentication data for remote server: + mail = Mail('example.com:25', 'me@example.com', 'me:password') + """ + + settings = self.settings = Settings() + settings.server = server + settings.sender = sender + settings.login = login + settings.tls = tls + settings.ssl = False + settings.cipher_type = None + settings.sign = True + settings.sign_passphrase = None + settings.encrypt = True + settings.x509_sign_keyfile = None + settings.x509_sign_certfile = None + settings.x509_crypt_certfiles = None + settings.debug = False + settings.lock_keys = True + self.result = {} + self.error = None + + def send( + self, + to, + subject='None', + message='None', + attachments=None, + cc=None, + bcc=None, + reply_to=None, + encoding='utf-8', + ): + """ + Sends an email using data specified in constructor + + Arguments:: + + to: list or tuple of receiver addresses; will also accept single + object + subject: subject of the email + message: email body text; 
depends on type of passed object: + if 2-list or 2-tuple is passed: first element will be + source of plain text while second of html text; + otherwise: object will be the only source of plain text + and html source will be set to None; + If text or html source is: + None: content part will be ignored, + string: content part will be set to it, + file-like object: content part will be fetched from + it using it's read() method + attachments: list or tuple of Mail.Attachment objects; will also + accept single object + cc: list or tuple of carbon copy receiver addresses; will also + accept single object + bcc: list or tuple of blind carbon copy receiver addresses; will + also accept single object + reply_to: address to which reply should be composed + encoding: encoding of all strings passed to this method (including + message bodies) + + Examples:: + + #Send plain text message to single address: + mail.send('you@example.com', + 'Message subject', + 'Plain text body of the message') + + #Send html message to single address: + mail.send('you@example.com', + 'Message subject', + 'Plain text body of the message') + + #Send text and html message to three addresses (two in cc): + mail.send('you@example.com', + 'Message subject', + ('Plain text body', 'html body'), + cc=['other1@example.com', 'other2@example.com']) + + #Send html only message with image attachment available from + the message by 'photo' content id: + mail.send('you@example.com', + 'Message subject', + (None, ''), + Mail.Attachment('/path/to/photo.jpg' + content_id='photo')) + + #Send email with two attachments and no body text + mail.send('you@example.com, + 'Message subject', + None, + [Mail.Attachment('/path/to/fist.file'), + Mail.Attachment('/path/to/second.file')]) + + Returns True on success, False on failure. 
+ + Before return, method updates two object's fields: + self.result: return value of smtplib.SMTP.sendmail() or GAE's + mail.send_mail() method + self.error: Exception message or None if above was successful + """ + + def encode_header(key): + if [c for c in key if 32>ord(c) or ord(c)>127]: + return Header.Header(key.encode('utf-8'),'utf-8') + else: + return key + + if not isinstance(self.settings.server, str): + raise Exception('Server address not specified') + if not isinstance(self.settings.sender, str): + raise Exception('Sender address not specified') + payload_in = MIMEMultipart.MIMEMultipart('mixed') + if to: + if not isinstance(to, (list,tuple)): + to = [to] + else: + raise Exception('Target receiver address not specified') + if cc: + if not isinstance(cc, (list, tuple)): + cc = [cc] + if bcc: + if not isinstance(bcc, (list, tuple)): + bcc = [bcc] + if message is None: + text = html = None + elif isinstance(message, (list, tuple)): + text, html = message + elif message.strip().startswith(''): + text = self.settings.server=='gae' and message or None + html = message + else: + text = message + html = None + if not text is None or not html is None: + attachment = MIMEMultipart.MIMEMultipart('alternative') + if not text is None: + if isinstance(text, basestring): + text = text.decode(encoding).encode('utf-8') + else: + text = text.read().decode(encoding).encode('utf-8') + attachment.attach(MIMEText.MIMEText(text,_charset='utf-8')) + if not html is None: + if isinstance(html, basestring): + html = html.decode(encoding).encode('utf-8') + else: + html = html.read().decode(encoding).encode('utf-8') + attachment.attach(MIMEText.MIMEText(html, 'html',_charset='utf-8')) + payload_in.attach(attachment) + if attachments is None: + pass + elif isinstance(attachments, (list, tuple)): + for attachment in attachments: + payload_in.attach(attachment) + else: + payload_in.attach(attachments) + + + ####################################################### + # CIPHER # + 
####################################################### + cipher_type = self.settings.cipher_type + sign = self.settings.sign + sign_passphrase = self.settings.sign_passphrase + encrypt = self.settings.encrypt + ####################################################### + # GPGME # + ####################################################### + if cipher_type == 'gpg': + if not sign and not encrypt: + self.error="No sign and no encrypt is set but cipher type to gpg" + return False + + # need a python-pyme package and gpgme lib + from pyme import core, errors + from pyme.constants.sig import mode + ############################################ + # sign # + ############################################ + if sign: + import string + core.check_version(None) + pin=string.replace(payload_in.as_string(),'\n','\r\n') + plain = core.Data(pin) + sig = core.Data() + c = core.Context() + c.set_armor(1) + c.signers_clear() + # search for signing key for From: + for sigkey in c.op_keylist_all(self.settings.sender, 1): + if sigkey.can_sign: + c.signers_add(sigkey) + if not c.signers_enum(0): + self.error='No key for signing [%s]' % self.settings.sender + return False + c.set_passphrase_cb(lambda x,y,z: sign_passphrase) + try: + # make a signature + c.op_sign(plain,sig,mode.DETACH) + sig.seek(0,0) + # make it part of the email + payload=MIMEMultipart.MIMEMultipart('signed', + boundary=None, + _subparts=None, + **dict(micalg="pgp-sha1", + protocol="application/pgp-signature")) + # insert the origin payload + payload.attach(payload_in) + # insert the detached signature + p=MIMEBase.MIMEBase("application",'pgp-signature') + p.set_payload(sig.read()) + payload.attach(p) + # it's just a trick to handle the no encryption case + payload_in=payload + except errors.GPGMEError, ex: + self.error="GPG error: %s" % ex.getstring() + return False + ############################################ + # encrypt # + ############################################ + if encrypt: + core.check_version(None) + plain = 
core.Data(payload_in.as_string())
                cipher = core.Data()
                c = core.Context()
                c.set_armor(1)
                # collect the public keys for encryption: every recipient
                # (to + cc + bcc) must have a key in the keyring
                recipients=[]
                rec=to[:]
                if cc:
                    rec.extend(cc)
                if bcc:
                    rec.extend(bcc)
                for addr in rec:
                    c.op_keylist_start(addr,0)
                    r = c.op_keylist_next()
                    if r is None:
                        self.error='No key for [%s]' % addr
                        return False
                    recipients.append(r)
                try:
                    # make the encryption
                    c.op_encrypt(recipients, 1, plain, cipher)
                    cipher.seek(0,0)
                    # make it a part of the email: multipart/encrypted wrapper
                    payload=MIMEMultipart.MIMEMultipart('encrypted',
                                                        boundary=None,
                                                        _subparts=None,
                                                        **dict(protocol="application/pgp-encrypted"))
                    p=MIMEBase.MIMEBase("application",'pgp-encrypted')
                    p.set_payload("Version: 1\r\n")
                    payload.attach(p)
                    p=MIMEBase.MIMEBase("application",'octet-stream')
                    p.set_payload(cipher.read())
                    payload.attach(p)
                except errors.GPGMEError, ex:
                    self.error="GPG error: %s" % ex.getstring()
                    return False
        #######################################################
        #                       X.509                         #
        #######################################################
        elif cipher_type == 'x509':
            if not sign and not encrypt:
                self.error="No sign and no encrypt is set but cipher type to x509"
                return False
            x509_sign_keyfile=self.settings.x509_sign_keyfile
            if self.settings.x509_sign_certfile:
                x509_sign_certfile=self.settings.x509_sign_certfile
            else:
                # if there is no sign certfile we'll assume the
                # cert is in keyfile
                x509_sign_certfile=self.settings.x509_sign_keyfile
            # crypt certfiles could be a string or a list
            x509_crypt_certfiles=self.settings.x509_crypt_certfiles

            # need m2crypto
            from M2Crypto import BIO, SMIME, X509
            msg_bio = BIO.MemoryBuffer(payload_in.as_string())
            s = SMIME.SMIME()

            # SIGN
            if sign:
                #key for signing
                try:
                    s.load_key(x509_sign_keyfile, x509_sign_certfile, callback=lambda x: sign_passphrase)
                    # opaque signature when we will also encrypt,
                    # detached (clear) signature otherwise
                    if encrypt:
                        p7 = s.sign(msg_bio)
                    else:
                        p7 = s.sign(msg_bio,flags=SMIME.PKCS7_DETACHED)
                    msg_bio = 
BIO.MemoryBuffer(payload_in.as_string()) # Recreate coz sign() has consumed it. + except Exception,e: + self.error="Something went wrong on signing: <%s>" %str(e) + return False + + # ENCRYPT + if encrypt: + try: + sk = X509.X509_Stack() + if not isinstance(x509_crypt_certfiles, (list, tuple)): + x509_crypt_certfiles = [x509_crypt_certfiles] + + # make an encryption cert's stack + for x in x509_crypt_certfiles: + sk.push(X509.load_cert(x)) + s.set_x509_stack(sk) + + s.set_cipher(SMIME.Cipher('des_ede3_cbc')) + tmp_bio = BIO.MemoryBuffer() + if sign: + s.write(tmp_bio, p7) + else: + tmp_bio.write(payload_in.as_string()) + p7 = s.encrypt(tmp_bio) + except Exception,e: + self.error="Something went wrong on encrypting: <%s>" %str(e) + return False + + # Final stage in sign and encryption + out = BIO.MemoryBuffer() + if encrypt: + s.write(out, p7) + else: + if sign: + s.write(out, p7, msg_bio, SMIME.PKCS7_DETACHED) + else: + out.write('\r\n') + out.write(payload_in.as_string()) + out.close() + st=str(out.read()) + payload=message_from_string(st) + else: + # no cryptography process as usual + payload=payload_in + payload['From'] = encode_header(self.settings.sender.decode(encoding)) + origTo = to[:] + if to: + payload['To'] = encode_header(', '.join(to).decode(encoding)) + if reply_to: + payload['Reply-To'] = encode_header(reply_to.decode(encoding)) + if cc: + payload['Cc'] = encode_header(', '.join(cc).decode(encoding)) + to.extend(cc) + if bcc: + to.extend(bcc) + payload['Subject'] = encode_header(subject.decode(encoding)) + payload['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S +0000", + time.gmtime()) + result = {} + try: + if self.settings.server == 'logging': + logger.warn('email not sent\n%s\nFrom: %s\nTo: %s\nSubject: %s\n\n%s\n%s\n' % \ + ('-'*40,self.settings.sender, + subject, + ', '.join(to),text or html,'-'*40)) + elif self.settings.server == 'gae': + xcc = dict() + if cc: + xcc['cc'] = cc + if bcc: + xcc['bcc'] = bcc + from google.appengine.api import mail 

                # GAE's mail API takes filename/payload pairs, not
                # Mail.Attachment MIME objects
                attachments = attachments and [(a.my_filename,a.my_payload) for a in attachments]
                if attachments:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html,
                                            attachments=attachments, **xcc)
                elif html:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, html=html, **xcc)
                else:
                    result = mail.send_mail(sender=self.settings.sender, to=origTo,
                                            subject=subject, body=text, **xcc)
            else:
                # plain SMTP delivery; server setting is "host:port"
                smtp_args = self.settings.server.split(':')
                if self.settings.ssl:
                    server = smtplib.SMTP_SSL(*smtp_args)
                else:
                    server = smtplib.SMTP(*smtp_args)
                if self.settings.tls and not self.settings.ssl:
                    server.ehlo()
                    server.starttls()
                    server.ehlo()
                if not self.settings.login is None:
                    # login setting is "username:password"
                    server.login(*self.settings.login.split(':',1))
                result = server.sendmail(self.settings.sender, to, payload.as_string())
                server.quit()
        except Exception, e:
            logger.warn('Mail.send failure:%s' % e)
            self.result = result
            self.error = e
            return False
        self.result = result
        self.error = None
        return True


class Recaptcha(DIV):
    # DIV subclass rendering a reCAPTCHA challenge and validating its answer
    # against Google's verify service.

    API_SSL_SERVER = 'https://www.google.com/recaptcha/api'
    API_SERVER = 'http://www.google.com/recaptcha/api'
    VERIFY_SERVER = 'http://www.google.com/recaptcha/api/verify'

    def __init__(
        self,
        request,
        public_key='',
        private_key='',
        use_ssl=False,
        error=None,
        error_message='invalid',
        label = 'Verify:',
        options = ''
        ):
        self.remote_addr = request.env.remote_addr
        self.public_key = public_key
        self.private_key = private_key
        self.use_ssl = use_ssl
        self.error = error
        self.errors = Storage()
        self.error_message = error_message
        # DIV plumbing: no children/attributes of our own
        self.components = []
        self.attributes = {}
        self.label = label
        self.options = options
        self.comment = ''

    def _validate(self):

        # for local testing:

        recaptcha_challenge_field = \
            self.request_vars.recaptcha_challenge_field
        recaptcha_response_field = \
self.request_vars.recaptcha_response_field
        private_key = self.private_key
        remoteip = self.remote_addr
        if not (recaptcha_response_field and recaptcha_challenge_field
                and len(recaptcha_response_field)
                and len(recaptcha_challenge_field)):
            self.errors['captcha'] = self.error_message
            return False
        # POST the challenge/response pair to Google's verify service
        params = urllib.urlencode({
            'privatekey': private_key,
            'remoteip': remoteip,
            'challenge': recaptcha_challenge_field,
            'response': recaptcha_response_field,
            })
        request = urllib2.Request(
            url=self.VERIFY_SERVER,
            data=params,
            headers={'Content-type': 'application/x-www-form-urlencoded',
                     'User-agent': 'reCAPTCHA Python'})
        httpresp = urllib2.urlopen(request)
        # first line of the response body is 'true' or 'false'
        return_values = httpresp.read().splitlines()
        httpresp.close()
        return_code = return_values[0]
        if return_code == 'true':
            # verification succeeded: scrub the recaptcha fields so they
            # do not interfere with further form processing
            del self.request_vars.recaptcha_challenge_field
            del self.request_vars.recaptcha_response_field
            self.request_vars.captcha = ''
            return True
        self.errors['captcha'] = self.error_message
        return False

    def xml(self):
        # render the challenge widget (script + noscript fallback),
        # appending the error message when validation already failed
        public_key = self.public_key
        use_ssl = self.use_ssl
        error_param = ''
        if self.error:
            error_param = '&error=%s' % self.error
        if use_ssl:
            server = self.API_SSL_SERVER
        else:
            server = self.API_SERVER
        captcha = DIV(
            SCRIPT("var RecaptchaOptions = {%s};" % self.options),
            SCRIPT(_type="text/javascript",
                   _src="%s/challenge?k=%s%s" % (server,public_key,error_param)),
            TAG.noscript(IFRAME(_src="%s/noscript?k=%s%s" % (server,public_key,error_param),
                                _height="300",_width="500",_frameborder="0"), BR(),
                         INPUT(_type='hidden', _name='recaptcha_response_field',
                               _value='manual_challenge')), _id='recaptcha')
        if not self.errors.captcha:
            return XML(captcha).xml()
        else:
            captcha.append(DIV(self.errors['captcha'], _class='error'))
            return XML(captcha).xml()


def addrow(form, a, b, c, style, _id, position=-1):
    # Insert one label(a)/widget(b)/comment(c) row into `form`, laid out
    # according to the requested formstyle.
    if style == "divs":
        form[0].insert(position, DIV(DIV(LABEL(a),_class='w2p_fl'),
                                     DIV(b, _class='w2p_fw'),
DIV(c, _class='w2p_fc'), + _id = _id)) + elif style == "table2cols": + form[0].insert(position, TR(LABEL(a),'')) + form[0].insert(position+1, TR(b, _colspan=2, _id = _id)) + elif style == "ul": + form[0].insert(position, LI(DIV(LABEL(a),_class='w2p_fl'), + DIV(b, _class='w2p_fw'), + DIV(c, _class='w2p_fc'), + _id = _id)) + else: + form[0].insert(position, TR(LABEL(a),b,c,_id = _id)) + + +class Auth(object): + """ + Class for authentication, authorization, role based access control. + + Includes: + + - registration and profile + - login and logout + - username and password retrieval + - event logging + - role creation and assignment + - user defined group/role based permission + + Authentication Example:: + + from contrib.utils import * + mail=Mail() + mail.settings.server='smtp.gmail.com:587' + mail.settings.sender='you@somewhere.com' + mail.settings.login='username:password' + auth=Auth(db) + auth.settings.mailer=mail + # auth.settings....=... + auth.define_tables() + def authentication(): + return dict(form=auth()) + + exposes: + + - http://.../{application}/{controller}/authentication/login + - http://.../{application}/{controller}/authentication/logout + - http://.../{application}/{controller}/authentication/register + - http://.../{application}/{controller}/authentication/verify_email + - http://.../{application}/{controller}/authentication/retrieve_username + - http://.../{application}/{controller}/authentication/retrieve_password + - http://.../{application}/{controller}/authentication/reset_password + - http://.../{application}/{controller}/authentication/profile + - http://.../{application}/{controller}/authentication/change_password + + On registration a group with role=new_user.id is created + and user is given membership of this group. + + You can create a group with:: + + group_id=auth.add_group('Manager', 'can access the manage action') + auth.add_permission(group_id, 'access to manage') + + Here \"access to manage\" is just a user defined string. 
+ You can give access to a user:: + + auth.add_membership(group_id, user_id) + + If user id is omitted, the logged in user is assumed + + Then you can decorate any action:: + + @auth.requires_permission('access to manage') + def manage(): + return dict() + + You can restrict a permission to a specific table:: + + auth.add_permission(group_id, 'edit', db.sometable) + @auth.requires_permission('edit', db.sometable) + + Or to a specific record:: + + auth.add_permission(group_id, 'edit', db.sometable, 45) + @auth.requires_permission('edit', db.sometable, 45) + + If authorization is not granted calls:: + + auth.settings.on_failed_authorization + + Other options:: + + auth.settings.mailer=None + auth.settings.expiration=3600 # seconds + + ... + + ### these are messages that can be customized + ... + """ + + @staticmethod + def get_or_create_key(filename=None): + request = current.request + if not filename: + filename = os.path.join(request.folder,'private','auth.key') + if os.path.exists(filename): + key = open(filename,'r').read().strip() + else: + key = web2py_uuid() + open(filename,'w').write(key) + return key + + def url(self, f=None, args=None, vars=None): + if args is None: args=[] + if vars is None: vars={} + return URL(c=self.settings.controller, f=f, args=args, vars=vars) + + def here(self): + return URL(args=current.request.args,vars=current.request.vars) + + def __init__(self, environment=None, db=None, mailer=True, + hmac_key=None, controller='default', cas_provider=None): + """ + auth=Auth(db) + + - environment is there for legacy but unused (awful) + - db has to be the database where to create tables for authentication + - mailer=Mail(...) or None (no mailed) or True (make a mailer) + - hmac_key can be a hmac_key or hmac_key=Auth.get_or_create_key() + - controller (where is the user action?) 
+ - cas_provider (delegate authentication to the URL, CAS2) + """ + ## next two lines for backward compatibility + if not db and environment and isinstance(environment,DAL): + db = environment + self.db = db + self.environment = current + request = current.request + session = current.session + auth = session.auth + if auth and auth.last_visit and auth.last_visit + \ + datetime.timedelta(days=0, seconds=auth.expiration) > request.now: + self.user = auth.user + # this is a trick to speed up sessions + if (request.now - auth.last_visit).seconds > (auth.expiration/10): + auth.last_visit = request.now + else: + self.user = None + session.auth = None + settings = self.settings = Settings() + + # ## what happens after login? + + self.next = current.request.vars._next + if isinstance(self.next,(list,tuple)): + self.next = self.next[0] + + # ## what happens after registration? + + settings.hideerror = False + settings.password_min_length = 4 + settings.cas_domains = [request.env.http_host] + settings.cas_provider = cas_provider + settings.extra_fields = {} + settings.actions_disabled = [] + settings.reset_password_requires_verification = False + settings.registration_requires_verification = False + settings.registration_requires_approval = False + settings.login_after_registration = False + settings.alternate_requires_registration = False + settings.create_user_groups = True + + settings.controller = controller + settings.login_url = self.url('user', args='login') + settings.logged_url = self.url('user', args='profile') + settings.download_url = self.url('download') + settings.mailer = (mailer==True) and Mail() or mailer + settings.login_captcha = None + settings.register_captcha = None + settings.retrieve_username_captcha = None + settings.retrieve_password_captcha = None + settings.captcha = None + settings.expiration = 3600 # one hour + settings.long_expiration = 3600*30*24 # one month + settings.remember_me_form = True + settings.allow_basic_login = False + 
settings.allow_basic_login_only = False + settings.on_failed_authorization = \ + self.url('user',args='not_authorized') + + settings.on_failed_authentication = lambda x: redirect(x) + + settings.formstyle = 'table3cols' + settings.label_separator = ': ' + + # ## table names to be used + + settings.password_field = 'password' + settings.table_user_name = 'auth_user' + settings.table_group_name = 'auth_group' + settings.table_membership_name = 'auth_membership' + settings.table_permission_name = 'auth_permission' + settings.table_event_name = 'auth_event' + settings.table_cas_name = 'auth_cas' + + # ## if none, they will be created + + settings.table_user = None + settings.table_group = None + settings.table_membership = None + settings.table_permission = None + settings.table_event = None + settings.table_cas = None + + # ## + + settings.showid = False + + # ## these should be functions or lambdas + + settings.login_next = self.url('index') + settings.login_onvalidation = [] + settings.login_onaccept = [] + settings.login_methods = [self] + settings.login_form = self + settings.login_email_validate = True + settings.login_userfield = None + + settings.logout_next = self.url('index') + settings.logout_onlogout = None + + settings.register_next = self.url('index') + settings.register_onvalidation = [] + settings.register_onaccept = [] + settings.register_fields = None + settings.register_verify_password = True + + settings.verify_email_next = self.url('user', args='login') + settings.verify_email_onaccept = [] + + settings.profile_next = self.url('index') + settings.profile_onvalidation = [] + settings.profile_onaccept = [] + settings.profile_fields = None + settings.retrieve_username_next = self.url('index') + settings.retrieve_password_next = self.url('index') + settings.request_reset_password_next = self.url('user', args='login') + settings.reset_password_next = self.url('user', args='login') + + settings.change_password_next = self.url('index') + 
settings.change_password_onvalidation = [] + settings.change_password_onaccept = [] + + settings.retrieve_password_onvalidation = [] + settings.reset_password_onvalidation = [] + + settings.hmac_key = hmac_key + settings.lock_keys = True + + # ## these are messages that can be customized + messages = self.messages = Messages(current.T) + messages.login_button = 'Login' + messages.register_button = 'Register' + messages.password_reset_button = 'Request reset password' + messages.password_change_button = 'Change password' + messages.profile_save_button = 'Save profile' + messages.submit_button = 'Submit' + messages.verify_password = 'Verify Password' + messages.delete_label = 'Check to delete:' + messages.function_disabled = 'Function disabled' + messages.access_denied = 'Insufficient privileges' + messages.registration_verifying = 'Registration needs verification' + messages.registration_pending = 'Registration is pending approval' + messages.login_disabled = 'Login disabled by administrator' + messages.logged_in = 'Logged in' + messages.email_sent = 'Email sent' + messages.unable_to_send_email = 'Unable to send email' + messages.email_verified = 'Email verified' + messages.logged_out = 'Logged out' + messages.registration_successful = 'Registration successful' + messages.invalid_email = 'Invalid email' + messages.unable_send_email = 'Unable to send email' + messages.invalid_login = 'Invalid login' + messages.invalid_user = 'Invalid user' + messages.invalid_password = 'Invalid password' + messages.is_empty = "Cannot be empty" + messages.mismatched_password = "Password fields don't match" + messages.verify_email = \ + 'Click on the link http://' + current.request.env.http_host + \ + URL('default','user',args=['verify_email']) + \ + '/%(key)s to verify your email' + messages.verify_email_subject = 'Email verification' + messages.username_sent = 'Your username was emailed to you' + messages.new_password_sent = 'A new password was emailed to you' + 
messages.password_changed = 'Password changed' + messages.retrieve_username = 'Your username is: %(username)s' + messages.retrieve_username_subject = 'Username retrieve' + messages.retrieve_password = 'Your password is: %(password)s' + messages.retrieve_password_subject = 'Password retrieve' + messages.reset_password = \ + 'Click on the link http://' + current.request.env.http_host + \ + URL('default','user',args=['reset_password']) + \ + '/%(key)s to reset your password' + messages.reset_password_subject = 'Password reset' + messages.invalid_reset_password = 'Invalid reset password' + messages.profile_updated = 'Profile updated' + messages.new_password = 'New password' + messages.old_password = 'Old password' + messages.group_description = \ + 'Group uniquely assigned to user %(id)s' + + messages.register_log = 'User %(id)s Registered' + messages.login_log = 'User %(id)s Logged-in' + messages.login_failed_log = None + messages.logout_log = 'User %(id)s Logged-out' + messages.profile_log = 'User %(id)s Profile updated' + messages.verify_email_log = 'User %(id)s Verification email sent' + messages.retrieve_username_log = 'User %(id)s Username retrieved' + messages.retrieve_password_log = 'User %(id)s Password retrieved' + messages.reset_password_log = 'User %(id)s Password reset' + messages.change_password_log = 'User %(id)s Password changed' + messages.add_group_log = 'Group %(group_id)s created' + messages.del_group_log = 'Group %(group_id)s deleted' + messages.add_membership_log = None + messages.del_membership_log = None + messages.has_membership_log = None + messages.add_permission_log = None + messages.del_permission_log = None + messages.has_permission_log = None + messages.impersonate_log = 'User %(id)s is impersonating %(other_id)s' + + messages.label_first_name = 'First name' + messages.label_last_name = 'Last name' + messages.label_username = 'Username' + messages.label_email = 'E-mail' + messages.label_password = 'Password' + 
messages.label_registration_key = 'Registration key' + messages.label_reset_password_key = 'Reset Password key' + messages.label_registration_id = 'Registration identifier' + messages.label_role = 'Role' + messages.label_description = 'Description' + messages.label_user_id = 'User ID' + messages.label_group_id = 'Group ID' + messages.label_name = 'Name' + messages.label_table_name = 'Object or table name' + messages.label_record_id = 'Record ID' + messages.label_time_stamp = 'Timestamp' + messages.label_client_ip = 'Client IP' + messages.label_origin = 'Origin' + messages.label_remember_me = "Remember me (for 30 days)" + messages['T'] = current.T + messages.verify_password_comment = 'please input your password again' + messages.lock_keys = True + + # for "remember me" option + response = current.response + if auth and auth.remember: #when user wants to be logged in for longer + response.cookies[response.session_id_name]["expires"] = \ + auth.expiration + + def lazy_user (auth = self): return auth.user_id + reference_user = 'reference %s' % settings.table_user_name + def represent(id,record=None,s=settings): + try: + user = s.table_user(id) + return '%(first_name)s %(last_name)s' % user + except: return id + self.signature = db.Table(self.db,'auth_signature', + Field('is_active','boolean',default=True), + Field('created_on','datetime', + default=request.now, + writable=False,readable=False), + Field('created_by', + reference_user, + default=lazy_user,represent=represent, + writable=False,readable=False, + ), + Field('modified_on','datetime', + update=request.now,default=request.now, + writable=False,readable=False), + Field('modified_by', + reference_user,represent=represent, + default=lazy_user,update=lazy_user, + writable=False,readable=False)) + + + + def _get_user_id(self): + "accessor for auth.user_id" + return self.user and self.user.id or None + user_id = property(_get_user_id, doc="user.id or None") + + def _HTTP(self, *a, **b): + """ + only used in lambda: 
self._HTTP(404) + """ + + raise HTTP(*a, **b) + + def __call__(self): + """ + usage: + + def authentication(): return dict(form=auth()) + """ + + request = current.request + args = request.args + if not args: + redirect(self.url(args='login',vars=request.vars)) + elif args[0] in self.settings.actions_disabled: + raise HTTP(404) + if args[0] in ('login','logout','register','verify_email', + 'retrieve_username','retrieve_password', + 'reset_password','request_reset_password', + 'change_password','profile','groups', + 'impersonate','not_authorized'): + return getattr(self,args[0])() + elif args[0]=='cas' and not self.settings.cas_provider: + if args(1) == 'login': return self.cas_login(version=2) + if args(1) == 'validate': return self.cas_validate(version=2) + if args(1) == 'logout': + return self.logout(next=request.vars.service or DEFAULT) + else: + raise HTTP(404) + + def navbar(self, prefix='Welcome', action=None): + request = current.request + T = current.T + if isinstance(prefix,str): + prefix = T(prefix) + if not action: + action=URL(request.application,request.controller,'user') + if prefix: + prefix = prefix.strip()+' ' + if self.user_id: + logout=A(T('logout'),_href=action+'/logout') + profile=A(T('profile'),_href=action+'/profile') + password=A(T('password'),_href=action+'/change_password') + bar = SPAN(prefix,self.user.first_name,' [ ', logout, ']',_class='auth_navbar') + if not 'profile' in self.settings.actions_disabled: + bar.insert(4, ' | ') + bar.insert(5, profile) + if not 'change_password' in self.settings.actions_disabled: + bar.insert(-1, ' | ') + bar.insert(-1, password) + else: + login=A(T('login'),_href=action+'/login') + register=A(T('register'),_href=action+'/register') + retrieve_username=A(T('forgot username?'), + _href=action+'/retrieve_username') + lost_password=A(T('lost password?'), + _href=action+'/request_reset_password') + bar = SPAN(' [ ',login,' ]',_class='auth_navbar') + + if not 'register' in self.settings.actions_disabled: + 
bar.insert(2, ' | ') + bar.insert(3, register) + if 'username' in self.settings.table_user.fields() and \ + not 'retrieve_username' in self.settings.actions_disabled: + bar.insert(-1, ' | ') + bar.insert(-1, retrieve_username) + if not 'request_reset_password' in self.settings.actions_disabled: + bar.insert(-1, ' | ') + bar.insert(-1, lost_password) + return bar + + def __get_migrate(self, tablename, migrate=True): + + if type(migrate).__name__ == 'str': + return (migrate + tablename + '.table') + elif migrate == False: + return False + else: + return True + + def define_tables(self, username=False, migrate=True, fake_migrate=False): + """ + to be called unless tables are defined manually + + usages:: + + # defines all needed tables and table files + # 'myprefix_auth_user.table', ... + auth.define_tables(migrate='myprefix_') + + # defines all needed tables without migration/table files + auth.define_tables(migrate=False) + + """ + + db = self.db + settings = self.settings + if not settings.table_user_name in db.tables: + passfield = settings.password_field + if username or settings.cas_provider: + table = db.define_table( + settings.table_user_name, + Field('first_name', length=128, default='', + label=self.messages.label_first_name), + Field('last_name', length=128, default='', + label=self.messages.label_last_name), + Field('username', length=128, default='', + label=self.messages.label_username), + Field('email', length=512, default='', + label=self.messages.label_email), + Field(passfield, 'password', length=512, + readable=False, label=self.messages.label_password), + Field('registration_key', length=512, + writable=False, readable=False, default='', + label=self.messages.label_registration_key), + Field('reset_password_key', length=512, + writable=False, readable=False, default='', + label=self.messages.label_reset_password_key), + Field('registration_id', length=512, + writable=False, readable=False, default='', + label=self.messages.label_registration_id), 
+ *settings.extra_fields.get(settings.table_user_name,[]), + **dict( + migrate=self.__get_migrate(settings.table_user_name, + migrate), + fake_migrate=fake_migrate, + format='%(username)s')) + table.username.requires = (IS_MATCH('[\w\.\-]+'), + IS_NOT_IN_DB(db, table.username)) + else: + table = db.define_table( + settings.table_user_name, + Field('first_name', length=128, default='', + label=self.messages.label_first_name), + Field('last_name', length=128, default='', + label=self.messages.label_last_name), + Field('email', length=512, default='', + label=self.messages.label_email), + Field(passfield, 'password', length=512, + readable=False, label=self.messages.label_password), + Field('registration_key', length=512, + writable=False, readable=False, default='', + label=self.messages.label_registration_key), + Field('reset_password_key', length=512, + writable=False, readable=False, default='', + label=self.messages.label_reset_password_key), + *settings.extra_fields.get(settings.table_user_name,[]), + **dict( + migrate=self.__get_migrate(settings.table_user_name, + migrate), + fake_migrate=fake_migrate, + format='%(first_name)s %(last_name)s (%(id)s)')) + table.first_name.requires = \ + IS_NOT_EMPTY(error_message=self.messages.is_empty) + table.last_name.requires = \ + IS_NOT_EMPTY(error_message=self.messages.is_empty) + table[passfield].requires = [ + CRYPT(key=settings.hmac_key, + min_length=self.settings.password_min_length)] + table.email.requires = \ + [IS_EMAIL(error_message=self.messages.invalid_email), + IS_NOT_IN_DB(db, table.email)] + table.registration_key.default = '' + settings.table_user = db[settings.table_user_name] + if not settings.table_group_name in db.tables: + table = db.define_table( + settings.table_group_name, + Field('role', length=512, default='', + label=self.messages.label_role), + Field('description', 'text', + label=self.messages.label_description), + *settings.extra_fields.get(settings.table_group_name,[]), + **dict( + 
migrate=self.__get_migrate( + settings.table_group_name, migrate), + fake_migrate=fake_migrate, + format = '%(role)s (%(id)s)')) + table.role.requires = IS_NOT_IN_DB(db, '%s.role' + % settings.table_group_name) + settings.table_group = db[settings.table_group_name] + if not settings.table_membership_name in db.tables: + table = db.define_table( + settings.table_membership_name, + Field('user_id', settings.table_user, + label=self.messages.label_user_id), + Field('group_id', settings.table_group, + label=self.messages.label_group_id), + *settings.extra_fields.get(settings.table_membership_name,[]), + **dict( + migrate=self.__get_migrate( + settings.table_membership_name, migrate), + fake_migrate=fake_migrate)) + table.user_id.requires = IS_IN_DB(db, '%s.id' % + settings.table_user_name, + '%(first_name)s %(last_name)s (%(id)s)') + table.group_id.requires = IS_IN_DB(db, '%s.id' % + settings.table_group_name, + '%(role)s (%(id)s)') + settings.table_membership = db[settings.table_membership_name] + if not settings.table_permission_name in db.tables: + table = db.define_table( + settings.table_permission_name, + Field('group_id', settings.table_group, + label=self.messages.label_group_id), + Field('name', default='default', length=512, + label=self.messages.label_name), + Field('table_name', length=512, + label=self.messages.label_table_name), + Field('record_id', 'integer',default=0, + label=self.messages.label_record_id), + *settings.extra_fields.get(settings.table_permission_name,[]), + **dict( + migrate=self.__get_migrate( + settings.table_permission_name, migrate), + fake_migrate=fake_migrate)) + table.group_id.requires = IS_IN_DB(db, '%s.id' % + settings.table_group_name, + '%(role)s (%(id)s)') + table.name.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty) + #table.table_name.requires = IS_EMPTY_OR(IS_IN_SET(self.db.tables)) + table.record_id.requires = IS_INT_IN_RANGE(0, 10 ** 9) + settings.table_permission = db[settings.table_permission_name] + if 
not settings.table_event_name in db.tables: + table = db.define_table( + settings.table_event_name, + Field('time_stamp', 'datetime', + default=current.request.now, + label=self.messages.label_time_stamp), + Field('client_ip', + default=current.request.client, + label=self.messages.label_client_ip), + Field('user_id', settings.table_user, default=None, + label=self.messages.label_user_id), + Field('origin', default='auth', length=512, + label=self.messages.label_origin), + Field('description', 'text', default='', + label=self.messages.label_description), + *settings.extra_fields.get(settings.table_event_name,[]), + **dict( + migrate=self.__get_migrate( + settings.table_event_name, migrate), + fake_migrate=fake_migrate)) + table.user_id.requires = IS_IN_DB(db, '%s.id' % + settings.table_user_name, + '%(first_name)s %(last_name)s (%(id)s)') + table.origin.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty) + table.description.requires = IS_NOT_EMPTY(error_message=self.messages.is_empty) + settings.table_event = db[settings.table_event_name] + now = current.request.now + if settings.cas_domains: + if not settings.table_cas_name in db.tables: + table = db.define_table( + settings.table_cas_name, + Field('user_id', settings.table_user, default=None, + label=self.messages.label_user_id), + Field('created_on','datetime',default=now), + Field('url',requires=IS_URL()), + Field('uuid'), + *settings.extra_fields.get(settings.table_cas_name,[]), + **dict( + migrate=self.__get_migrate( + settings.table_event_name, migrate), + fake_migrate=fake_migrate)) + table.user_id.requires = IS_IN_DB(db, '%s.id' % \ + settings.table_user_name, + '%(first_name)s %(last_name)s (%(id)s)') + settings.table_cas = db[settings.table_cas_name] + if settings.cas_provider: + settings.actions_disabled = \ + ['profile','register','change_password','request_reset_password'] + from gluon.contrib.login_methods.cas_auth import CasAuth + maps = dict((name,lambda v,n=name:v.get(n,None)) for name 
in \ + settings.table_user.fields if name!='id' \ + and settings.table_user[name].readable) + maps['registration_id'] = \ + lambda v,p=settings.cas_provider:'%s/%s' % (p,v['user']) + settings.login_form = CasAuth( + casversion = 2, + urlbase = settings.cas_provider, + actions=['login','validate','logout'], + maps=maps) + + + def log_event(self, description, vars=None, origin='auth'): + """ + usage:: + + auth.log_event(description='this happened', origin='auth') + """ + if not description: + return + elif self.is_logged_in(): + user_id = self.user.id + else: + user_id = None # user unknown + vars = vars or {} + self.settings.table_event.insert(description=description % vars, + origin=origin, user_id=user_id) + + def get_or_create_user(self, keys): + """ + Used for alternate login methods: + If the user exists already then password is updated. + If the user doesn't yet exist, then they are created. + """ + table_user = self.settings.table_user + if 'registration_id' in table_user.fields() and \ + 'registration_id' in keys: + username = 'registration_id' + elif 'username' in table_user.fields(): + username = 'username' + elif 'email' in table_user.fields(): + username = 'email' + else: + raise SyntaxError, "user must have username or email" + user = self.db(table_user[username] == keys[username]).select().first() + keys['registration_key']='' + if user: + user.update_record(**table_user._filter_fields(keys)) + else: + if not 'first_name' in keys and 'first_name' in table_user.fields: + keys['first_name'] = keys[username] + user_id = table_user.insert(**table_user._filter_fields(keys)) + user = self.user = table_user[user_id] + if self.settings.create_user_groups: + group_id = self.add_group("user_%s" % user_id) + self.add_membership(group_id, user_id) + return user + + def basic(self): + if not self.settings.allow_basic_login: + return False + basic = current.request.env.http_authorization + if not basic or not basic[:6].lower() == 'basic ': + return False + 
(username, password) = base64.b64decode(basic[6:]).split(':') + return self.login_bare(username, password) + + def login_bare(self, username, password): + """ + logins user + """ + + request = current.request + session = current.session + table_user = self.settings.table_user + if self.settings.login_userfield: + userfield = self.settings.login_userfield + elif 'username' in table_user.fields: + userfield = 'username' + else: + userfield = 'email' + passfield = self.settings.password_field + user = self.db(table_user[userfield] == username).select().first() + password = table_user[passfield].validate(password)[0] + if user: + if not user.registration_key and user[passfield] == password: + user = Storage(table_user._filter_fields(user, id=True)) + session.auth = Storage(user=user, last_visit=request.now, + expiration=self.settings.expiration, + hmac_key = web2py_uuid()) + self.user = user + return user + return False + + def cas_login( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + version=2, + ): + request, session = current.request, current.session + db, table = self.db, self.settings.table_cas + session._cas_service = request.vars.service or session._cas_service + if not request.env.http_host in self.settings.cas_domains or \ + not session._cas_service: + raise HTTP(403,'not authorized') + def allow_access(): + row = table(url=session._cas_service,user_id=self.user.id) + if row: + row.update_record(created_on=request.now) + uuid = row.uuid + else: + uuid = web2py_uuid() + table.insert(url=session._cas_service, user_id=self.user.id, + uuid=uuid, created_on=request.now) + url = session._cas_service + del session._cas_service + redirect(url+"?ticket="+uuid) + if self.is_logged_in(): + allow_access() + def cas_onaccept(form, onaccept=onaccept): + if onaccept!=DEFAULT: onaccept(form) + allow_access() + return self.login(next,onvalidation,cas_onaccept,log) + + + def cas_validate(self, version=2): + request = current.request + db, 
table = self.db, self.settings.table_cas + current.response.headers['Content-Type']='text' + ticket = table(uuid=request.vars.ticket) + if ticket: # and ticket.created_on>request.now-datetime.timedelta(60): + user = self.settings.table_user(ticket.user_id) + fullname = user.first_name+' '+user.last_name + if version == 1: + raise HTTP(200,'yes\n%s:%s:%s'%(user.id,user.email,fullname)) + # assume version 2 + username = user.get('username',user.email) + raise HTTP(200,'\n'+\ + TAG['cas:serviceResponse']( + TAG['cas:authenticationSuccess']( + TAG['cas:user'](username), + *[TAG['cas:'+field.name](user[field.name]) \ + for field in self.settings.table_user \ + if field.readable]), + **{'_xmlns:cas':'http://www.yale.edu/tp/cas'}).xml()) + if version == 1: + raise HTTP(200,'no\n') + # assume version 2 + raise HTTP(200,'\n'+\ + TAG['cas:serviceResponse']( + TAG['cas:authenticationFailure']( + 'Ticket %s not recognized' % ticket, + _code='INVALID TICKET'), + **{'_xmlns:cas':'http://www.yale.edu/tp/cas'}).xml()) + + + def login( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a login form + + .. 
method:: Auth.login([next=DEFAULT [, onvalidation=DEFAULT + [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + if self.settings.login_userfield: + username = self.settings.login_userfield + elif 'username' in table_user.fields: + username = 'username' + else: + username = 'email' + if 'username' in table_user.fields or not self.settings.login_email_validate: + tmpvalidator = IS_NOT_EMPTY(error_message=self.messages.is_empty) + else: + tmpvalidator = IS_EMAIL(error_message=self.messages.invalid_email) + old_requires = table_user[username].requires + table_user[username].requires = tmpvalidator + + request = current.request + response = current.response + session = current.session + + passfield = self.settings.password_field + try: table_user[passfield].requires[-1].min_length = 0 + except: pass + + ### use session for federated login + if self.next: + session._auth_next = self.next + elif session._auth_next: + self.next = session._auth_next + ### pass + + if next == DEFAULT: + next = self.next or self.settings.login_next + if onvalidation == DEFAULT: + onvalidation = self.settings.login_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.login_onaccept + if log == DEFAULT: + log = self.messages.login_log + + user = None # default + + # do we use our own login form, or from a central source? 
+ if self.settings.login_form == self: + form = SQLFORM( + table_user, + fields=[username, passfield], + hidden = dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.login_button, + delete_label=self.messages.delete_label, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + + if self.settings.remember_me_form: + ## adds a new input checkbox "remember me for longer" + addrow(form,XML(" "), + DIV(XML(" "), + INPUT(_type='checkbox', + _class='checkbox', + _id="auth_user_remember", + _name="remember", + ), + XML("  "), + LABEL( + self.messages.label_remember_me, + _for="auth_user_remember", + )),"", + self.settings.formstyle, + 'auth_user_remember__row') + + captcha = self.settings.login_captcha or \ + (self.settings.login_captcha!=False and self.settings.captcha) + if captcha: + addrow(form, captcha.label, captcha, captcha.comment, + self.settings.formstyle,'captcha__row') + accepted_form = False + + if form.accepts(request, session, + formname='login', dbio=False, + onvalidation=onvalidation, + hideerror=self.settings.hideerror): + + accepted_form = True + # check for username in db + user = self.db(table_user[username] == form.vars[username]).select().first() + if user: + # user in db, check if registration pending or disabled + temp_user = user + if temp_user.registration_key == 'pending': + response.flash = self.messages.registration_pending + return form + elif temp_user.registration_key in ('disabled','blocked'): + response.flash = self.messages.login_disabled + return form + elif not temp_user.registration_key is None and \ + temp_user.registration_key.strip(): + response.flash = \ + self.messages.registration_verifying + return form + # try alternate logins 1st as these have the + # current version of the password + user = None + for login_method in self.settings.login_methods: + if login_method != self and \ + login_method(request.vars[username], + request.vars[passfield]): + if not self in 
self.settings.login_methods: + # do not store password in db + form.vars[passfield] = None + user = self.get_or_create_user(form.vars) + break + if not user: + # alternates have failed, maybe because service inaccessible + if self.settings.login_methods[0] == self: + # try logging in locally using cached credentials + if temp_user[passfield] == form.vars.get(passfield, ''): + # success + user = temp_user + else: + # user not in db + if not self.settings.alternate_requires_registration: + # we're allowed to auto-register users from external systems + for login_method in self.settings.login_methods: + if login_method != self and \ + login_method(request.vars[username], + request.vars[passfield]): + if not self in self.settings.login_methods: + # do not store password in db + form.vars[passfield] = None + user = self.get_or_create_user(form.vars) + break + if not user: + self.log_event(self.settings.login_failed_log, + request.post_vars) + # invalid login + session.flash = self.messages.invalid_login + redirect(self.url(args=request.args,vars=request.get_vars)) + + else: + # use a central authentication server + cas = self.settings.login_form + cas_user = cas.get_user() + + if cas_user: + cas_user[passfield] = None + user = self.get_or_create_user( + table_user._filter_fields(cas_user)) + elif hasattr(cas,'login_form'): + return cas.login_form() + else: + # we need to pass through login again before going on + next = self.url('user',args='login') + redirect(cas.login_url(next)) + + # process authenticated users + if user: + user = Storage(table_user._filter_fields(user, id=True)) + + # process authenticated users + # user wants to be logged in for longer + session.auth = Storage( + user = user, + last_visit = request.now, + expiration = self.settings.long_expiration, + remember = request.vars.has_key("remember"), + hmac_key = web2py_uuid() + ) + + self.user = user + self.log_event(log, user) + session.flash = self.messages.logged_in + + # how to continue + if 
self.settings.login_form == self: + if accepted_form: + callback(onaccept,form) + next = replace_id(next, form) + redirect(next) + table_user[username].requires = old_requires + return form + elif user: + callback(onaccept,None) + if next == session._auth_next: + del session._auth_next + redirect(next) + + def logout(self, next=DEFAULT, onlogout=DEFAULT, log=DEFAULT): + """ + logout and redirects to login + + .. method:: Auth.logout ([next=DEFAULT[, onlogout=DEFAULT[, + log=DEFAULT]]]) + + """ + + if next == DEFAULT: + next = self.settings.logout_next + if onlogout == DEFAULT: + onlogout = self.settings.logout_onlogout + if onlogout: + onlogout(self.user) + if log == DEFAULT: + log = self.messages.logout_log + if self.user: + self.log_event(log, self.user) + if self.settings.login_form != self: + cas = self.settings.login_form + cas_user = cas.get_user() + if cas_user: + next = cas.logout_url(next) + + current.session.auth = None + current.session.flash = self.messages.logged_out + redirect(next) + + def register( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a registration form + + .. 
method:: Auth.register([next=DEFAULT [, onvalidation=DEFAULT + [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + request = current.request + response = current.response + session = current.session + if self.is_logged_in(): + redirect(self.settings.logged_url) + if next == DEFAULT: + next = self.next or self.settings.register_next + if onvalidation == DEFAULT: + onvalidation = self.settings.register_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.register_onaccept + if log == DEFAULT: + log = self.messages.register_log + + passfield = self.settings.password_field + formstyle = self.settings.formstyle + form = SQLFORM(table_user, + fields = self.settings.register_fields, + hidden = dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.register_button, + delete_label=self.messages.delete_label, + formstyle=formstyle, + separator=self.settings.label_separator + ) + if self.settings.register_verify_password: + for i, row in enumerate(form[0].components): + item = row.element('input',_name=passfield) + if item: + form.custom.widget.password_two = \ + INPUT(_name="password_two", _type="password", + requires=IS_EXPR( + 'value==%s' % \ + repr(request.vars.get(passfield, None)), + error_message=self.messages.mismatched_password)) + + addrow(form, self.messages.verify_password + ':', + form.custom.widget.password_two, + self.messages.verify_password_comment, + formstyle, + '%s_%s__row' % (table_user, 'password_two'), + position=i+1) + break + captcha = self.settings.register_captcha or self.settings.captcha + if captcha: + addrow(form, captcha.label, captcha, captcha.comment,self.settings.formstyle, 'captcha__row') + + table_user.registration_key.default = key = web2py_uuid() + if form.accepts(request, session, formname='register', + onvalidation=onvalidation,hideerror=self.settings.hideerror): + description = self.messages.group_description % form.vars + if self.settings.create_user_groups: 
+ group_id = self.add_group("user_%s" % form.vars.id, description) + self.add_membership(group_id, form.vars.id) + if self.settings.registration_requires_verification: + if not self.settings.mailer or \ + not self.settings.mailer.send(to=form.vars.email, + subject=self.messages.verify_email_subject, + message=self.messages.verify_email + % dict(key=key)): + self.db.rollback() + response.flash = self.messages.unable_send_email + return form + session.flash = self.messages.email_sent + if self.settings.registration_requires_approval: + table_user[form.vars.id] = dict(registration_key='pending') + session.flash = self.messages.registration_pending + elif (not self.settings.registration_requires_verification or \ + self.settings.login_after_registration): + if not self.settings.registration_requires_verification: + table_user[form.vars.id] = dict(registration_key='') + session.flash = self.messages.registration_successful + table_user = self.settings.table_user + if 'username' in table_user.fields: + username = 'username' + else: + username = 'email' + user = self.db(table_user[username] == form.vars[username]).select().first() + user = Storage(table_user._filter_fields(user, id=True)) + session.auth = Storage(user=user, last_visit=request.now, + expiration=self.settings.expiration, + hmac_key = web2py_uuid()) + self.user = user + session.flash = self.messages.logged_in + self.log_event(log, form.vars) + callback(onaccept,form) + if not next: + next = self.url(args = request.args) + else: + next = replace_id(next, form) + redirect(next) + return form + + def is_logged_in(self): + """ + checks if the user is logged in and returns True/False. + if so user is in auth.user as well as in session.auth.user + """ + + if self.user: + return True + return False + + def verify_email( + self, + next=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + action user to verify the registration email, XXXXXXXXXXXXXXXX + + .. 
method:: Auth.verify_email([next=DEFAULT [, onvalidation=DEFAULT + [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + key = current.request.args[-1] + table_user = self.settings.table_user + user = self.db(table_user.registration_key == key).select().first() + if not user: + redirect(self.settings.login_url) + if self.settings.registration_requires_approval: + user.update_record(registration_key = 'pending') + current.session.flash = self.messages.registration_pending + else: + user.update_record(registration_key = '') + current.session.flash = self.messages.email_verified + # make sure session has same user.registrato_key as db record + if current.session.auth and current.session.auth.user: + current.session.auth.user.registration_key = user.registration_key + if log == DEFAULT: + log = self.messages.verify_email_log + if next == DEFAULT: + next = self.settings.verify_email_next + if onaccept == DEFAULT: + onaccept = self.settings.verify_email_onaccept + self.log_event(log, user) + callback(onaccept,user) + redirect(next) + + def retrieve_username( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form to retrieve the user username + (only if there is a username field) + + .. 
method:: Auth.retrieve_username([next=DEFAULT + [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + if not 'username' in table_user.fields: + raise HTTP(404) + request = current.request + response = current.response + session = current.session + captcha = self.settings.retrieve_username_captcha or \ + (self.settings.retrieve_username_captcha!=False and self.settings.captcha) + if not self.settings.mailer: + response.flash = self.messages.function_disabled + return '' + if next == DEFAULT: + next = self.next or self.settings.retrieve_username_next + if onvalidation == DEFAULT: + onvalidation = self.settings.retrieve_username_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.retrieve_username_onaccept + if log == DEFAULT: + log = self.messages.retrieve_username_log + old_requires = table_user.email.requires + table_user.email.requires = [IS_IN_DB(self.db, table_user.email, + error_message=self.messages.invalid_email)] + form = SQLFORM(table_user, + fields=['email'], + hidden = dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.submit_button, + delete_label=self.messages.delete_label, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + if captcha: + addrow(form, captcha.label, captcha, captcha.comment,self.settings.formstyle, 'captcha__row') + + if form.accepts(request, session, + formname='retrieve_username', dbio=False, + onvalidation=onvalidation,hideerror=self.settings.hideerror): + user = self.db(table_user.email == form.vars.email).select().first() + if not user: + current.session.flash = \ + self.messages.invalid_email + redirect(self.url(args=request.args)) + username = user.username + self.settings.mailer.send(to=form.vars.email, + subject=self.messages.retrieve_username_subject, + message=self.messages.retrieve_username + % dict(username=username)) + session.flash = self.messages.email_sent + self.log_event(log, user) + 
callback(onaccept,form) + if not next: + next = self.url(args = request.args) + else: + next = replace_id(next, form) + redirect(next) + table_user.email.requires = old_requires + return form + + def random_password(self): + import string + import random + password = '' + specials=r'!#$*' + for i in range(0,3): + password += random.choice(string.lowercase) + password += random.choice(string.uppercase) + password += random.choice(string.digits) + password += random.choice(specials) + return ''.join(random.sample(password,len(password))) + + def reset_password_deprecated( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form to reset the user password (deprecated) + + .. method:: Auth.reset_password_deprecated([next=DEFAULT + [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + request = current.request + response = current.response + session = current.session + if not self.settings.mailer: + response.flash = self.messages.function_disabled + return '' + if next == DEFAULT: + next = self.next or self.settings.retrieve_password_next + if onvalidation == DEFAULT: + onvalidation = self.settings.retrieve_password_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.retrieve_password_onaccept + if log == DEFAULT: + log = self.messages.retrieve_password_log + old_requires = table_user.email.requires + table_user.email.requires = [IS_IN_DB(self.db, table_user.email, + error_message=self.messages.invalid_email)] + form = SQLFORM(table_user, + fields=['email'], + hidden = dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.submit_button, + delete_label=self.messages.delete_label, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + if form.accepts(request, session, + formname='retrieve_password', dbio=False, + onvalidation=onvalidation,hideerror=self.settings.hideerror): + user = 
self.db(table_user.email == form.vars.email).select().first() + if not user: + current.session.flash = \ + self.messages.invalid_email + redirect(self.url(args=request.args)) + elif user.registration_key in ('pending','disabled','blocked'): + current.session.flash = \ + self.messages.registration_pending + redirect(self.url(args=request.args)) + password = self.random_password() + passfield = self.settings.password_field + d = {passfield: table_user[passfield].validate(password)[0], + 'registration_key': ''} + user.update_record(**d) + if self.settings.mailer and \ + self.settings.mailer.send(to=form.vars.email, + subject=self.messages.retrieve_password_subject, + message=self.messages.retrieve_password \ + % dict(password=password)): + session.flash = self.messages.email_sent + else: + session.flash = self.messages.unable_to_send_email + self.log_event(log, user) + callback(onaccept,form) + if not next: + next = self.url(args = request.args) + else: + next = replace_id(next, form) + redirect(next) + table_user.email.requires = old_requires + return form + + def reset_password( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form to reset the user password + + .. 
method:: Auth.reset_password([next=DEFAULT + [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + request = current.request + # response = current.response + session = current.session + + if next == DEFAULT: + next = self.next or self.settings.reset_password_next + try: + key = request.vars.key or request.args[-1] + t0 = int(key.split('-')[0]) + if time.time()-t0 > 60*60*24: raise Exception + user = self.db(table_user.reset_password_key == key).select().first() + if not user: raise Exception + except Exception: + session.flash = self.messages.invalid_reset_password + redirect(next) + passfield = self.settings.password_field + form = SQLFORM.factory( + Field('new_password', 'password', + label=self.messages.new_password, + requires=self.settings.table_user[passfield].requires), + Field('new_password2', 'password', + label=self.messages.verify_password, + requires=[IS_EXPR('value==%s' % repr(request.vars.new_password), + self.messages.mismatched_password)]), + submit_button=self.messages.password_reset_button, + hidden = dict(_next=next), + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + if form.accepts(request,session,hideerror=self.settings.hideerror): + user.update_record(**{passfield:form.vars.new_password, + 'registration_key':'', + 'reset_password_key':''}) + session.flash = self.messages.password_changed + redirect(next) + return form + + def request_reset_password( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form to reset the user password + + .. 
method:: Auth.reset_password([next=DEFAULT + [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + request = current.request + response = current.response + session = current.session + captcha = self.settings.retrieve_password_captcha or \ + (self.settings.retrieve_password_captcha!=False and self.settings.captcha) + + if next == DEFAULT: + next = self.next or self.settings.request_reset_password_next + if not self.settings.mailer: + response.flash = self.messages.function_disabled + return '' + if onvalidation == DEFAULT: + onvalidation = self.settings.reset_password_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.reset_password_onaccept + if log == DEFAULT: + log = self.messages.reset_password_log + table_user.email.requires = [ + IS_EMAIL(error_message=self.messages.invalid_email), + IS_IN_DB(self.db, table_user.email, + error_message=self.messages.invalid_email)] + form = SQLFORM(table_user, + fields=['email'], + hidden = dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.password_reset_button, + delete_label=self.messages.delete_label, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + if captcha: + addrow(form, captcha.label, captcha, captcha.comment, self.settings.formstyle,'captcha__row') + if form.accepts(request, session, + formname='reset_password', dbio=False, + onvalidation=onvalidation, + hideerror=self.settings.hideerror): + user = self.db(table_user.email == form.vars.email).select().first() + if not user: + session.flash = self.messages.invalid_email + redirect(self.url(args=request.args)) + elif user.registration_key in ('pending','disabled','blocked'): + session.flash = self.messages.registration_pending + redirect(self.url(args=request.args)) + reset_password_key = str(int(time.time()))+'-' + web2py_uuid() + + if self.settings.mailer.send(to=form.vars.email, + subject=self.messages.reset_password_subject, 
+ message=self.messages.reset_password % \ + dict(key=reset_password_key)): + session.flash = self.messages.email_sent + user.update_record(reset_password_key=reset_password_key) + else: + session.flash = self.messages.unable_to_send_email + self.log_event(log, user) + callback(onaccept,form) + if not next: + next = self.url(args = request.args) + else: + next = replace_id(next, form) + redirect(next) + # old_requires = table_user.email.requires + return form + + def retrieve_password( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + if self.settings.reset_password_requires_verification: + return self.request_reset_password(next,onvalidation,onaccept,log) + else: + return self.reset_password_deprecated(next,onvalidation,onaccept,log) + + def change_password( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form that lets the user change password + + .. method:: Auth.change_password([next=DEFAULT[, onvalidation=DEFAULT[, + onaccept=DEFAULT[, log=DEFAULT]]]]) + """ + + if not self.is_logged_in(): + redirect(self.settings.login_url) + db = self.db + table_user = self.settings.table_user + usern = self.settings.table_user_name + s = db(table_user.id == self.user.id) + + request = current.request + session = current.session + if next == DEFAULT: + next = self.next or self.settings.change_password_next + if onvalidation == DEFAULT: + onvalidation = self.settings.change_password_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.change_password_onaccept + if log == DEFAULT: + log = self.messages.change_password_log + passfield = self.settings.password_field + form = SQLFORM.factory( + Field('old_password', 'password', + label=self.messages.old_password, + requires=validators( + table_user[passfield].requires, + IS_IN_DB(s, '%s.%s' % (usern, passfield), + error_message=self.messages.invalid_password))), + Field('new_password', 'password', + 
label=self.messages.new_password, + requires=table_user[passfield].requires), + Field('new_password2', 'password', + label=self.messages.verify_password, + requires=[IS_EXPR('value==%s' % repr(request.vars.new_password), + self.messages.mismatched_password)]), + submit_button=self.messages.password_change_button, + hidden = dict(_next=next), + formstyle = self.settings.formstyle, + separator=self.settings.label_separator + ) + if form.accepts(request, session, + formname='change_password', + onvalidation=onvalidation, + hideerror=self.settings.hideerror): + d = {passfield: form.vars.new_password} + s.update(**d) + session.flash = self.messages.password_changed + self.log_event(log, self.user) + callback(onaccept,form) + if not next: + next = self.url(args=request.args) + else: + next = replace_id(next, form) + redirect(next) + return form + + def profile( + self, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + ): + """ + returns a form that lets the user change his/her profile + + .. 
method:: Auth.profile([next=DEFAULT [, onvalidation=DEFAULT + [, onaccept=DEFAULT [, log=DEFAULT]]]]) + + """ + + table_user = self.settings.table_user + if not self.is_logged_in(): + redirect(self.settings.login_url) + passfield = self.settings.password_field + self.settings.table_user[passfield].writable = False + request = current.request + session = current.session + if next == DEFAULT: + next = self.next or self.settings.profile_next + if onvalidation == DEFAULT: + onvalidation = self.settings.profile_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.profile_onaccept + if log == DEFAULT: + log = self.messages.profile_log + form = SQLFORM( + table_user, + self.user.id, + fields = self.settings.profile_fields, + hidden = dict(_next=next), + showid = self.settings.showid, + submit_button = self.messages.profile_save_button, + delete_label = self.messages.delete_label, + upload = self.settings.download_url, + formstyle = self.settings.formstyle, + separator=self.settings.label_separator + ) + if form.accepts(request, session, + formname='profile', + onvalidation=onvalidation, hideerror=self.settings.hideerror): + self.user.update(table_user._filter_fields(form.vars)) + session.flash = self.messages.profile_updated + self.log_event(log,self.user) + callback(onaccept,form) + if not next: + next = self.url(args=request.args) + else: + next = replace_id(next, form) + redirect(next) + return form + + def is_impersonating(self): + return current.session.auth.impersonator + + def impersonate(self, user_id=DEFAULT): + """ + usage: POST TO http://..../impersonate request.post_vars.user_id= + set request.post_vars.user_id to 0 to restore original user. 
+ + requires impersonator is logged in and + has_permission('impersonate', 'auth_user', user_id) + """ + request = current.request + session = current.session + auth = session.auth + if not self.is_logged_in(): + raise HTTP(401, "Not Authorized") + current_id = auth.user.id + requested_id = user_id + if user_id == DEFAULT: + user_id = current.request.post_vars.user_id + if user_id and user_id != self.user.id and user_id != '0': + if not self.has_permission('impersonate', + self.settings.table_user_name, + user_id): + raise HTTP(403, "Forbidden") + user = self.settings.table_user(user_id) + if not user: + raise HTTP(401, "Not Authorized") + auth.impersonator = cPickle.dumps(session) + auth.user.update( + self.settings.table_user._filter_fields(user, True)) + self.user = auth.user + if self.settings.login_onaccept: + form = Storage(dict(vars=self.user)) + self.settings.login_onaccept(form) + log = self.messages.impersonate_log + self.log_event(log,dict(id=current_id, other_id=auth.user.id)) + elif user_id in (0, '0') and self.is_impersonating(): + session.clear() + session.update(cPickle.loads(auth.impersonator)) + self.user = session.auth.user + if requested_id == DEFAULT and not request.post_vars: + return SQLFORM.factory(Field('user_id', 'integer')) + return self.user + + def groups(self): + """ + displays the groups and their roles for the logged in user + """ + + if not self.is_logged_in(): + redirect(self.settings.login_url) + memberships = self.db(self.settings.table_membership.user_id + == self.user.id).select() + table = TABLE() + for membership in memberships: + groups = self.db(self.settings.table_group.id + == membership.group_id).select() + if groups: + group = groups[0] + table.append(TR(H3(group.role, '(%s)' % group.id))) + table.append(TR(P(group.description))) + if not memberships: + return None + return table + + def not_authorized(self): + """ + you can change the view for this page to make it look as you like + """ + if current.request.ajax: + 
            raise HTTP(403,'ACCESS DENIED')
        return 'ACCESS DENIED'

    def requires(self, condition):
        """
        decorator that prevents access to action if not logged in
        """

        def decorator(action):

            def f(*a, **b):

                # when only basic-auth logins are allowed, a failed basic
                # login is an authorization failure (403 for REST clients)
                if self.settings.allow_basic_login_only and not self.basic():
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    return call_or_redirect(
                        self.settings.on_failed_authorization)
                # otherwise try basic auth first, then the session login
                if not self.basic() and not self.is_logged_in():
                    if current.request.is_restful:
                        raise HTTP(403,"Not authorized")
                    elif current.request.ajax:
                        # ajax callers get a login link instead of a redirect
                        return A('login',_href=self.settings.login_url)
                    request = current.request
                    next = self.here()
                    # preserve any pending flash message across the redirect
                    current.session.flash = current.response.flash
                    return call_or_redirect(
                        self.settings.on_failed_authentication,
                        self.settings.login_url+\
                            '?_next='+urllib.quote(next))
                # logged in but the condition (membership/permission/...)
                # did not hold
                if not condition:
                    current.session.flash = self.messages.access_denied
                    return call_or_redirect(
                        self.settings.on_failed_authorization)
                return action(*a, **b)
            # make the wrapper look like the wrapped action
            f.__doc__ = action.__doc__
            f.__name__ = action.__name__
            f.__dict__.update(action.__dict__)
            return f

        return decorator

    def requires_login(self):
        """
        decorator that prevents access to action if not logged in
        """
        return self.requires(self.is_logged_in())

    def requires_membership(self, role=None, group_id=None):
        """
        decorator that prevents access to action if not logged in or
        if user logged in is not a member of group_id.
        If role is provided instead of group_id then the
        group_id is calculated.
        """
        return self.requires(self.has_membership(group_id=group_id, role=role))

    def requires_permission(self, name, table_name='', record_id=0):
        """
        decorator that prevents access to action if not logged in or
        if user logged in is not a member of any group (role) that
        has 'name' access to 'table_name', 'record_id'.
+ """ + return self.requires(self.has_permission(name, table_name, record_id)) + + def requires_signature(self): + """ + decorator that prevents access to action if not logged in or + if user logged in is not a member of group_id. + If role is provided instead of group_id then the + group_id is calculated. + """ + return self.requires(URL.verify(current.request,user_signature=True)) + + def add_group(self, role, description=''): + """ + creates a group associated to a role + """ + + group_id = self.settings.table_group.insert( + role=role, description=description) + self.log_event(self.messages.add_group_log, + dict(group_id=group_id, role=role)) + return group_id + + def del_group(self, group_id): + """ + deletes a group + """ + + self.db(self.settings.table_group.id == group_id).delete() + self.db(self.settings.table_membership.group_id == group_id).delete() + self.db(self.settings.table_permission.group_id == group_id).delete() + self.log_event(self.messages.del_group_log,dict(group_id=group_id)) + + def id_group(self, role): + """ + returns the group_id of the group specified by the role + """ + rows = self.db(self.settings.table_group.role == role).select() + if not rows: + return None + return rows[0].id + + def user_group(self, user_id = None): + """ + returns the group_id of the group uniquely associated to this user + i.e. 
        role=user:[user_id]
        """
        if not user_id and self.user:
            # default to the currently logged in user
            user_id = self.user.id
        role = 'user_%s' % user_id
        return self.id_group(role)

    def has_membership(self, group_id=None, user_id=None, role=None):
        """
        checks if user is member of group_id or role
        """

        group_id = group_id or self.id_group(role)
        try:
            group_id = int(group_id)
        except:
            group_id = self.id_group(group_id) # interpret group_id as a role
        if not user_id and self.user:
            user_id = self.user.id
        membership = self.settings.table_membership
        # membership exists iff a (user_id, group_id) row is present
        if self.db((membership.user_id == user_id)
                    & (membership.group_id == group_id)).select():
            r = True
        else:
            r = False
        self.log_event(self.messages.has_membership_log,
                       dict(user_id=user_id,group_id=group_id, check=r))
        return r

    def add_membership(self, group_id=None, user_id=None, role=None):
        """
        gives user_id membership of group_id or role
        if user is None then user_id is that of current logged in user
        """

        group_id = group_id or self.id_group(role)
        try:
            group_id = int(group_id)
        except:
            group_id = self.id_group(group_id) # interpret group_id as a role
        if not user_id and self.user:
            user_id = self.user.id
        membership = self.settings.table_membership
        # idempotent: reuse an existing membership record if present
        record = membership(user_id = user_id,group_id = group_id)
        if record:
            return record.id
        else:
            id = membership.insert(group_id=group_id, user_id=user_id)
            self.log_event(self.messages.add_membership_log,
                           dict(user_id=user_id, group_id=group_id))
            return id

    def del_membership(self, group_id, user_id=None, role=None):
        """
        revokes membership from group_id to user_id
        if user_id is None then user_id is that of current logged in user
        """

        group_id = group_id or self.id_group(role)
        if not user_id and self.user:
            user_id = self.user.id
        membership = self.settings.table_membership
        self.log_event(self.messages.del_membership_log,
                       dict(user_id=user_id,group_id=group_id))
        return self.db(membership.user_id
                       ==
user_id)(membership.group_id + == group_id).delete() + + def has_permission( + self, + name='any', + table_name='', + record_id=0, + user_id=None, + group_id=None, + ): + """ + checks if user_id or current logged in user is member of a group + that has 'name' permission on 'table_name' and 'record_id' + if group_id is passed, it checks whether the group has the permission + """ + + if not user_id and not group_id and self.user: + user_id = self.user.id + if user_id: + membership = self.settings.table_membership + rows = self.db(membership.user_id + == user_id).select(membership.group_id) + groups = set([row.group_id for row in rows]) + if group_id and not group_id in groups: + return False + else: + groups = set([group_id]) + permission = self.settings.table_permission + rows = self.db(permission.name == name)(permission.table_name + == str(table_name))(permission.record_id + == record_id).select(permission.group_id) + groups_required = set([row.group_id for row in rows]) + if record_id: + rows = self.db(permission.name + == name)(permission.table_name + == str(table_name))(permission.record_id + == 0).select(permission.group_id) + groups_required = groups_required.union(set([row.group_id + for row in rows])) + if groups.intersection(groups_required): + r = True + else: + r = False + if user_id: + self.log_event(self.messages.has_permission_log, + dict(user_id=user_id, name=name, + table_name=table_name, record_id=record_id)) + return r + + def add_permission( + self, + group_id, + name='any', + table_name='', + record_id=0, + ): + """ + gives group_id 'name' access to 'table_name' and 'record_id' + """ + + permission = self.settings.table_permission + if group_id == 0: + group_id = self.user_group() + id = permission.insert(group_id=group_id, name=name, + table_name=str(table_name), + record_id=long(record_id)) + self.log_event(self.messages.add_permission_log, + dict(permission_id=id, group_id=group_id, + name=name, table_name=table_name, + record_id=record_id)) 
+ return id + + def del_permission( + self, + group_id, + name='any', + table_name='', + record_id=0, + ): + """ + revokes group_id 'name' access to 'table_name' and 'record_id' + """ + + permission = self.settings.table_permission + self.log_event(self.messages.del_permission_log, + dict(group_id=group_id, name=name, + table_name=table_name, record_id=record_id)) + return self.db(permission.group_id == group_id)(permission.name + == name)(permission.table_name + == str(table_name))(permission.record_id + == long(record_id)).delete() + + def accessible_query(self, name, table, user_id=None): + """ + returns a query with all accessible records for user_id or + the current logged in user + this method does not work on GAE because uses JOIN and IN + + example:: + + db(auth.accessible_query('read', db.mytable)).select(db.mytable.ALL) + + """ + if not user_id: + user_id = self.user_id + if self.has_permission(name, table, 0, user_id): + return table.id > 0 + db = self.db + membership = self.settings.table_membership + permission = self.settings.table_permission + return table.id.belongs(db(membership.user_id == user_id)\ + (membership.group_id == permission.group_id)\ + (permission.name == name)\ + (permission.table_name == table)\ + ._select(permission.record_id)) + + @staticmethod + def archive(form,archive_table=None,current_record='current_record'): + """ + If you have a table (db.mytable) that needs full revision history you can just do:: + + form=crud.update(db.mytable,myrecord,onaccept=auth.archive) + + or + + form=SQLFORM(db.mytable,myrecord).process(onaccept=auth.archive) + + crud.archive will define a new table "mytable_archive" and store the + previous record in the newly created table including a reference + to the current record. 
+ + If you want to access such table you need to define it yourself in a model:: + + db.define_table('mytable_archive', + Field('current_record',db.mytable), + db.mytable) + + Notice such table includes all fields of db.mytable plus one: current_record. + crud.archive does not timestamp the stored record unless your original table + has a fields like:: + + db.define_table(..., + Field('saved_on','datetime', + default=request.now,update=request.now,writable=False), + Field('saved_by',auth.user, + default=auth.user_id,update=auth.user_id,writable=False), + + there is nothing special about these fields since they are filled before + the record is archived. + + If you want to change the archive table name and the name of the reference field + you can do, for example:: + + db.define_table('myhistory', + Field('parent_record',db.mytable), + db.mytable) + + and use it as:: + + form=crud.update(db.mytable,myrecord, + onaccept=lambda form:crud.archive(form, + archive_table=db.myhistory, + current_record='parent_record')) + + """ + old_record = form.record + if not old_record: + return None + table = form.table + if not archive_table: + archive_table_name = '%s_archive' % table + if archive_table_name in table._db: + archive_table = table._db[archive_table_name] + else: + archive_table = table._db.define_table(archive_table_name, + Field(current_record,table), + table) + new_record = {current_record:old_record.id} + for fieldname in archive_table.fields: + if not fieldname in ['id',current_record] and fieldname in old_record: + new_record[fieldname]=old_record[fieldname] + id = archive_table.insert(**new_record) + return id + +class Crud(object): + + def url(self, f=None, args=None, vars=None): + """ + this should point to the controller that exposes + download and crud + """ + if args is None: args=[] + if vars is None: vars={} + return URL(c=self.settings.controller, f=f, args=args, vars=vars) + + def __init__(self, environment, db=None, controller='default'): + self.db = 
db + if not db and environment and isinstance(environment,DAL): + self.db = environment + elif not db: + raise SyntaxError, "must pass db as first or second argument" + self.environment = current + settings = self.settings = Settings() + settings.auth = None + settings.logger = None + + settings.create_next = None + settings.update_next = None + settings.controller = controller + settings.delete_next = self.url() + settings.download_url = self.url('download') + settings.create_onvalidation = StorageList() + settings.update_onvalidation = StorageList() + settings.delete_onvalidation = StorageList() + settings.create_onaccept = StorageList() + settings.update_onaccept = StorageList() + settings.update_ondelete = StorageList() + settings.delete_onaccept = StorageList() + settings.update_deletable = True + settings.showid = False + settings.keepvalues = False + settings.create_captcha = None + settings.update_captcha = None + settings.captcha = None + settings.formstyle = 'table3cols' + settings.label_separator = ': ' + settings.hideerror = False + settings.detect_record_change = True + settings.hmac_key = None + settings.lock_keys = True + + messages = self.messages = Messages(current.T) + messages.submit_button = 'Submit' + messages.delete_label = 'Check to delete:' + messages.record_created = 'Record Created' + messages.record_updated = 'Record Updated' + messages.record_deleted = 'Record Deleted' + + messages.update_log = 'Record %(id)s updated' + messages.create_log = 'Record %(id)s created' + messages.read_log = 'Record %(id)s read' + messages.delete_log = 'Record %(id)s deleted' + + messages.lock_keys = True + + def __call__(self): + args = current.request.args + if len(args) < 1: + raise HTTP(404) + elif args[0] == 'tables': + return self.tables() + elif len(args) > 1 and not args(1) in self.db.tables: + raise HTTP(404) + table = self.db[args(1)] + if args[0] == 'create': + return self.create(table) + elif args[0] == 'select': + return 
self.select(table,linkto=self.url(args='read')) + elif args[0] == 'search': + form, rows = self.search(table,linkto=self.url(args='read')) + return DIV(form,SQLTABLE(rows)) + elif args[0] == 'read': + return self.read(table, args(2)) + elif args[0] == 'update': + return self.update(table, args(2)) + elif args[0] == 'delete': + return self.delete(table, args(2)) + else: + raise HTTP(404) + + def log_event(self, message, vars): + if self.settings.logger: + self.settings.logger.log_event(message, vars, origin = 'crud') + + def has_permission(self, name, table, record=0): + if not self.settings.auth: + return True + try: + record_id = record.id + except: + record_id = record + return self.settings.auth.has_permission(name, str(table), record_id) + + def tables(self): + return TABLE(*[TR(A(name, + _href=self.url(args=('select',name)))) \ + for name in self.db.tables]) + + @staticmethod + def archive(form,archive_table=None,current_record='current_record'): + return Auth.archive(form,archive_table=archive_table, + current_record=current_record) + + def update( + self, + table, + record, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + ondelete=DEFAULT, + log=DEFAULT, + message=DEFAULT, + deletable=DEFAULT, + formname=DEFAULT, + ): + """ + .. 
method:: Crud.update(table, record, [next=DEFAULT + [, onvalidation=DEFAULT [, onaccept=DEFAULT [, log=DEFAULT + [, message=DEFAULT[, deletable=DEFAULT]]]]]]) + + """ + if not (isinstance(table, self.db.Table) or table in self.db.tables) \ + or (isinstance(record, str) and not str(record).isdigit()): + raise HTTP(404) + if not isinstance(table, self.db.Table): + table = self.db[table] + try: + record_id = record.id + except: + record_id = record or 0 + if record_id and not self.has_permission('update', table, record_id): + redirect(self.settings.auth.settings.on_failed_authorization) + if not record_id and not self.has_permission('create', table, record_id): + redirect(self.settings.auth.settings.on_failed_authorization) + + request = current.request + response = current.response + session = current.session + if request.extension == 'json' and request.vars.json: + request.vars.update(json_parser.loads(request.vars.json)) + if next == DEFAULT: + next = request.get_vars._next \ + or request.post_vars._next \ + or self.settings.update_next + if onvalidation == DEFAULT: + onvalidation = self.settings.update_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.update_onaccept + if ondelete == DEFAULT: + ondelete = self.settings.update_ondelete + if log == DEFAULT: + log = self.messages.update_log + if deletable == DEFAULT: + deletable = self.settings.update_deletable + if message == DEFAULT: + message = self.messages.record_updated + form = SQLFORM( + table, + record, + hidden=dict(_next=next), + showid=self.settings.showid, + submit_button=self.messages.submit_button, + delete_label=self.messages.delete_label, + deletable=deletable, + upload=self.settings.download_url, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + self.accepted = False + self.deleted = False + captcha = self.settings.update_captcha or self.settings.captcha + if record and captcha: + addrow(form, captcha.label, captcha, captcha.comment, + 
self.settings.formstyle,'captcha__row') + captcha = self.settings.create_captcha or self.settings.captcha + if not record and captcha: + addrow(form, captcha.label, captcha, captcha.comment, + self.settings.formstyle,'captcha__row') + if not request.extension in ('html','load'): + (_session, _formname) = (None, None) + else: + (_session, _formname) = (session, '%s/%s' % (table._tablename, form.record_id)) + if formname!=DEFAULT: + _formname = formname + keepvalues = self.settings.keepvalues + if request.vars.delete_this_record: + keepvalues = False + if isinstance(onvalidation,StorageList): + onvalidation=onvalidation.get(table._tablename, []) + if form.accepts(request, _session, formname=_formname, + onvalidation=onvalidation, keepvalues=keepvalues, + hideerror=self.settings.hideerror, + detect_record_change = self.settings.detect_record_change): + self.accepted = True + response.flash = message + if log: + self.log_event(log, form.vars) + if request.vars.delete_this_record: + self.deleted = True + message = self.messages.record_deleted + callback(ondelete,form,table._tablename) + response.flash = message + callback(onaccept,form,table._tablename) + if not request.extension in ('html','load'): + raise HTTP(200, 'RECORD CREATED/UPDATED') + if isinstance(next, (list, tuple)): ### fix issue with 2.6 + next = next[0] + if next: # Only redirect when explicit + next = replace_id(next, form) + session.flash = response.flash + redirect(next) + elif not request.extension in ('html','load'): + raise HTTP(401) + return form + + def create( + self, + table, + next=DEFAULT, + onvalidation=DEFAULT, + onaccept=DEFAULT, + log=DEFAULT, + message=DEFAULT, + formname=DEFAULT, + ): + """ + .. 
method:: Crud.create(table, [next=DEFAULT [, onvalidation=DEFAULT + [, onaccept=DEFAULT [, log=DEFAULT[, message=DEFAULT]]]]]) + """ + + if next == DEFAULT: + next = self.settings.create_next + if onvalidation == DEFAULT: + onvalidation = self.settings.create_onvalidation + if onaccept == DEFAULT: + onaccept = self.settings.create_onaccept + if log == DEFAULT: + log = self.messages.create_log + if message == DEFAULT: + message = self.messages.record_created + return self.update( + table, + None, + next=next, + onvalidation=onvalidation, + onaccept=onaccept, + log=log, + message=message, + deletable=False, + formname=formname, + ) + + def read(self, table, record): + if not (isinstance(table, self.db.Table) or table in self.db.tables) \ + or (isinstance(record, str) and not str(record).isdigit()): + raise HTTP(404) + if not isinstance(table, self.db.Table): + table = self.db[table] + if not self.has_permission('read', table, record): + redirect(self.settings.auth.settings.on_failed_authorization) + form = SQLFORM( + table, + record, + readonly=True, + comments=False, + upload=self.settings.download_url, + showid=self.settings.showid, + formstyle=self.settings.formstyle, + separator=self.settings.label_separator + ) + if not current.request.extension in ('html','load'): + return table._filter_fields(form.record, id=True) + return form + + def delete( + self, + table, + record_id, + next=DEFAULT, + message=DEFAULT, + ): + """ + .. 
method:: Crud.delete(table, record_id, [next=DEFAULT + [, message=DEFAULT]]) + """ + if not (isinstance(table, self.db.Table) or table in self.db.tables) \ + or not str(record_id).isdigit(): + raise HTTP(404) + if not isinstance(table, self.db.Table): + table = self.db[table] + if not self.has_permission('delete', table, record_id): + redirect(self.settings.auth.settings.on_failed_authorization) + request = current.request + session = current.session + if next == DEFAULT: + next = request.get_vars._next \ + or request.post_vars._next \ + or self.settings.delete_next + if message == DEFAULT: + message = self.messages.record_deleted + record = table[record_id] + if record: + callback(self.settings.delete_onvalidation,record) + del table[record_id] + callback(self.settings.delete_onaccept,record,table._tablename) + session.flash = message + redirect(next) + + def rows( + self, + table, + query=None, + fields=None, + orderby=None, + limitby=None, + ): + if not (isinstance(table, self.db.Table) or table in self.db.tables): + raise HTTP(404) + if not self.has_permission('select', table): + redirect(self.settings.auth.settings.on_failed_authorization) + #if record_id and not self.has_permission('select', table): + # redirect(self.settings.auth.settings.on_failed_authorization) + if not isinstance(table, self.db.Table): + table = self.db[table] + if not query: + query = table.id > 0 + if not fields: + fields = [field for field in table if field.readable] + rows = self.db(query).select(*fields,**dict(orderby=orderby, + limitby=limitby)) + return rows + + def select( + self, + table, + query=None, + fields=None, + orderby=None, + limitby=None, + headers=None, + **attr + ): + headers = headers or {} + rows = self.rows(table,query,fields,orderby,limitby) + if not rows: + return None # Nicer than an empty table. 
+ if not 'upload' in attr: + attr['upload'] = self.url('download') + if not current.request.extension in ('html','load'): + return rows.as_list() + if not headers: + if isinstance(table,str): + table = self.db[table] + headers = dict((str(k),k.label) for k in table) + return SQLTABLE(rows,headers=headers,**attr) + + def get_format(self, field): + rtable = field._db[field.type[10:]] + format = rtable.get('_format', None) + if format and isinstance(format, str): + return format[2:-2] + return field.name + + def get_query(self, field, op, value, refsearch=False): + try: + if refsearch: format = self.get_format(field) + if op == 'equals': + if not refsearch: + return field == value + else: + return lambda row: row[field.name][format] == value + elif op == 'not equal': + if not refsearch: + return field != value + else: + return lambda row: row[field.name][format] != value + elif op == 'greater than': + if not refsearch: + return field > value + else: + return lambda row: row[field.name][format] > value + elif op == 'less than': + if not refsearch: + return field < value + else: + return lambda row: row[field.name][format] < value + elif op == 'starts with': + if not refsearch: + return field.like(value+'%') + else: + return lambda row: str(row[field.name][format]).startswith(value) + elif op == 'ends with': + if not refsearch: + return field.like('%'+value) + else: + return lambda row: str(row[field.name][format]).endswith(value) + elif op == 'contains': + if not refsearch: + return field.like('%'+value+'%') + else: + return lambda row: value in row[field.name][format] + except: + return None + + def search(self, *tables, **args): + """ + Creates a search form and its results for a table + Example usage: + form, results = crud.search(db.test, + queries = ['equals', 'not equal', 'contains'], + query_labels={'equals':'Equals', + 'not equal':'Not equal'}, + fields = ['id','children'], + field_labels = {'id':'ID','children':'Children'}, + zero='Please choose', + query = 
(db.test.id > 0)&(db.test.id != 3) ) + """ + table = tables[0] + fields = args.get('fields', table.fields) + request = current.request + db = self.db + if not (isinstance(table, db.Table) or table in db.tables): + raise HTTP(404) + attributes = {} + for key in ('orderby','groupby','left','distinct','limitby','cache'): + if key in args: attributes[key]=args[key] + tbl = TABLE() + selected = []; refsearch = []; results = [] + showall = args.get('showall', False) + if showall: + selected = fields + chkall = args.get('chkall', False) + if chkall: + for f in fields: + request.vars['chk%s'%f] = 'on' + ops = args.get('queries', []) + zero = args.get('zero', '') + if not ops: + ops = ['equals', 'not equal', 'greater than', + 'less than', 'starts with', + 'ends with', 'contains'] + ops.insert(0,zero) + query_labels = args.get('query_labels', {}) + query = args.get('query',table.id > 0) + field_labels = args.get('field_labels',{}) + for field in fields: + field = table[field] + if not field.readable: continue + fieldname = field.name + chkval = request.vars.get('chk' + fieldname, None) + txtval = request.vars.get('txt' + fieldname, None) + opval = request.vars.get('op' + fieldname, None) + row = TR(TD(INPUT(_type = "checkbox", _name = "chk" + fieldname, + _disabled = (field.type == 'id'), + value = (field.type == 'id' or chkval == 'on'))), + TD(field_labels.get(fieldname,field.label)), + TD(SELECT([OPTION(query_labels.get(op,op), + _value=op) for op in ops], + _name = "op" + fieldname, + value = opval)), + TD(INPUT(_type = "text", _name = "txt" + fieldname, + _value = txtval, _id='txt' + fieldname, + _class = str(field.type)))) + tbl.append(row) + if request.post_vars and (chkval or field.type=='id'): + if txtval and opval != '': + if field.type[0:10] == 'reference ': + refsearch.append(self.get_query(field, + opval, txtval, refsearch=True)) + else: + value, error = field.validate(txtval) + if not error: + ### TODO deal with 'starts with', 'ends with', 'contains' on GAE + 
query &= self.get_query(field, opval, value) + else: + row[3].append(DIV(error,_class='error')) + selected.append(field) + form = FORM(tbl,INPUT(_type="submit")) + if selected: + try: + results = db(query).select(*selected,**attributes) + for r in refsearch: + results = results.find(r) + except: # hmmm, we should do better here + results = None + return form, results + + +urllib2.install_opener(urllib2.build_opener(urllib2.HTTPCookieProcessor())) + +def fetch(url, data=None, headers=None, + cookie=Cookie.SimpleCookie(), + user_agent='Mozilla/5.0'): + headers = headers or {} + if not data is None: + data = urllib.urlencode(data) + if user_agent: headers['User-agent'] = user_agent + headers['Cookie'] = ' '.join(['%s=%s;'%(c.key,c.value) for c in cookie.values()]) + try: + from google.appengine.api import urlfetch + except ImportError: + req = urllib2.Request(url, data, headers) + html = urllib2.urlopen(req).read() + else: + method = ((data is None) and urlfetch.GET) or urlfetch.POST + while url is not None: + response = urlfetch.fetch(url=url, payload=data, + method=method, headers=headers, + allow_truncated=False,follow_redirects=False, + deadline=10) + # next request will be a get, so no need to send the data again + data = None + method = urlfetch.GET + # load cookies from the response + cookie.load(response.headers.get('set-cookie', '')) + url = response.headers.get('location') + html = response.content + return html + +regex_geocode = \ + re.compile('\(?P[^,]*),(?P[^,]*).*?\') + + +def geocode(address): + try: + a = urllib.quote(address) + txt = fetch('http://maps.google.com/maps/geo?q=%s&output=xml' + % a) + item = regex_geocode.search(txt) + (la, lo) = (float(item.group('la')), float(item.group('lo'))) + return (la, lo) + except: + return (0.0, 0.0) + + +def universal_caller(f, *a, **b): + c = f.func_code.co_argcount + n = f.func_code.co_varnames[:c] + + defaults = f.func_defaults or [] + pos_args = n[0:-len(defaults)] + named_args = n[-len(defaults):] + + 
arg_dict = {} + + # Fill the arg_dict with name and value for the submitted, positional values + for pos_index, pos_val in enumerate(a[:c]): + arg_dict[n[pos_index]] = pos_val # n[pos_index] is the name of the argument + + # There might be pos_args left, that are sent as named_values. Gather them as well. + # If a argument already is populated with values we simply replaces them. + for arg_name in pos_args[len(arg_dict):]: + if b.has_key(arg_name): + arg_dict[arg_name] = b[arg_name] + + if len(arg_dict) >= len(pos_args): + # All the positional arguments is found. The function may now be called. + # However, we need to update the arg_dict with the values from the named arguments as well. + for arg_name in named_args: + if b.has_key(arg_name): + arg_dict[arg_name] = b[arg_name] + + return f(**arg_dict) + + # Raise an error, the function cannot be called. + raise HTTP(404, "Object does not exist") + + +class Service(object): + + def __init__(self, environment=None): + self.run_procedures = {} + self.csv_procedures = {} + self.xml_procedures = {} + self.rss_procedures = {} + self.json_procedures = {} + self.jsonrpc_procedures = {} + self.xmlrpc_procedures = {} + self.amfrpc_procedures = {} + self.amfrpc3_procedures = {} + self.soap_procedures = {} + + def run(self, f): + """ + example:: + + service = Service() + @service.run + def myfunction(a, b): + return a + b + def call(): + return service() + + Then call it with:: + + wget http://..../app/default/call/run/myfunction?a=3&b=4 + + """ + self.run_procedures[f.__name__] = f + return f + + def csv(self, f): + """ + example:: + + service = Service() + @service.csv + def myfunction(a, b): + return a + b + def call(): + return service() + + Then call it with:: + + wget http://..../app/default/call/csv/myfunction?a=3&b=4 + + """ + self.run_procedures[f.__name__] = f + return f + + def xml(self, f): + """ + example:: + + service = Service() + @service.xml + def myfunction(a, b): + return a + b + def call(): + return 
service()

        Then call it with::

            wget http://..../app/default/call/xml/myfunction?a=3&b=4

        """
        self.run_procedures[f.__name__] = f
        return f

    def rss(self, f):
        """
        example::

            service = Service()
            @service.rss
            def myfunction():
                return dict(title=..., link=..., description=...,
                    created_on=..., entries=[dict(title=..., link=...,
                    description=..., created_on=...)])
            def call():
                return service()

        Then call it with::

            wget http://..../app/default/call/rss/myfunction

        """
        self.rss_procedures[f.__name__] = f
        return f

    def json(self, f):
        """
        example::

            service = Service()
            @service.json
            def myfunction(a, b):
                return [{a: b}]
            def call():
                return service()

        Then call it with::

            wget http://..../app/default/call/json/myfunction?a=hello&b=world

        """
        self.json_procedures[f.__name__] = f
        return f

    def jsonrpc(self, f):
        """
        example::

            service = Service()
            @service.jsonrpc
            def myfunction(a, b):
                return a + b
            def call():
                return service()

        Then call it with::

            wget http://..../app/default/call/jsonrpc/myfunction?a=hello&b=world

        """
        self.jsonrpc_procedures[f.__name__] = f
        return f

    def xmlrpc(self, f):
        """
        example::

            service = Service()
            @service.xmlrpc
            def myfunction(a, b):
                return a + b
            def call():
                return service()

        Then call it with::

            wget http://..../app/default/call/xmlrpc/myfunction?a=hello&b=world

        """
        self.xmlrpc_procedures[f.__name__] = f
        return f

    def amfrpc(self, f):
        """
        example::

            service = Service()
            @service.amfrpc
            def myfunction(a, b):
                return a + b
            def call():
                return service()

        Then call it with::

            wget http://..../app/default/call/amfrpc/myfunction?a=hello&b=world

        """
        self.amfrpc_procedures[f.__name__] = f
        return f

    def amfrpc3(self, domain='default'):
        """
        example::

            service = Service()
            @service.amfrpc3('domain')
            def myfunction(a, b):
                return a + b
            def call():
                return service()

        The
call it with:: + + wget http://..../app/default/call/amfrpc3/myfunction?a=hello&b=world + + """ + if not isinstance(domain, str): + raise SyntaxError, "AMF3 requires a domain for function" + + def _amfrpc3(f): + if domain: + self.amfrpc3_procedures[domain+'.'+f.__name__] = f + else: + self.amfrpc3_procedures[f.__name__] = f + return f + return _amfrpc3 + + def soap(self, name=None, returns=None, args=None,doc=None): + """ + example:: + + service = Service() + @service.soap('MyFunction',returns={'result':int},args={'a':int,'b':int,}) + def myfunction(a, b): + return a + b + def call(): + return service() + + The call it with:: + + from gluon.contrib.pysimplesoap.client import SoapClient + client = SoapClient(wsdl="http://..../app/default/call/soap?WSDL") + response = client.MyFunction(a=1,b=2) + return response['result'] + + Exposes online generated documentation and xml example messages at: + - http://..../app/default/call/soap + """ + + def _soap(f): + self.soap_procedures[name or f.__name__] = f, returns, args, doc + return f + return _soap + + def serve_run(self, args=None): + request = current.request + if not args: + args = request.args + if args and args[0] in self.run_procedures: + return str(universal_caller(self.run_procedures[args[0]], + *args[1:], **dict(request.vars))) + self.error() + + def serve_csv(self, args=None): + request = current.request + response = current.response + response.headers['Content-Type'] = 'text/x-csv' + if not args: + args = request.args + + def none_exception(value): + if isinstance(value, unicode): + return value.encode('utf8') + if hasattr(value, 'isoformat'): + return value.isoformat()[:19].replace('T', ' ') + if value is None: + return '' + return value + if args and args[0] in self.run_procedures: + r = universal_caller(self.run_procedures[args[0]], + *args[1:], **dict(request.vars)) + s = cStringIO.StringIO() + if hasattr(r, 'export_to_csv_file'): + r.export_to_csv_file(s) + elif r and isinstance(r[0], (dict, Storage)): + 
def serve_xml(self, args=None):
    """
    Serve a /xml request: call the named @service.xml/run procedure and
    return its result serialized as XML. Rows-like results are first
    converted with as_list(). Raises 404 via self.error() when not found.
    """
    request = current.request
    current.response.headers['Content-Type'] = 'text/xml'
    args = args or request.args
    if args and args[0] in self.run_procedures:
        result = universal_caller(self.run_procedures[args[0]],
                                  *args[1:], **dict(request.vars))
        if hasattr(result, 'as_list'):
            result = result.as_list()
        return serializers.xml(result)
    self.error()


def serve_rss(self, args=None):
    """
    Serve a /rss request: call the named @service.rss procedure and
    return its feed serialized as RSS. Raises 404 via self.error()
    when the procedure is unknown.
    """
    request = current.request
    args = args or request.args
    if not (args and args[0] in self.rss_procedures):
        self.error()  # raises HTTP(404); execution stops here
    feed = universal_caller(self.rss_procedures[args[0]],
                            *args[1:], **dict(request.vars))
    current.response.headers['Content-Type'] = 'application/rss+xml'
    return serializers.rss(feed)


def serve_json(self, args=None):
    """
    Serve a /json request: call the named @service.json procedure and
    return its result serialized by response.json. Rows-like results
    are first converted with as_list(). Raises 404 when not found.
    """
    request = current.request
    response = current.response
    response.headers['Content-Type'] = 'application/json; charset=utf-8'
    args = args or request.args
    kwargs = dict(request.vars)
    if args and args[0] in self.json_procedures:
        result = universal_caller(self.json_procedures[args[0]],
                                  *args[1:], **kwargs)
        if hasattr(result, 'as_list'):
            result = result.as_list()
        return response.json(result)
    self.error()


class JsonRpcException(Exception):
    # Raised inside a JSON-RPC procedure to return a structured
    # (code, info) error to the client instead of the generic code 100.
    def __init__(self, code, info):
        self.code = code
        self.info = info
def serve_xmlrpc(self):
    """
    Serve an XML-RPC request by handing the registered @service.xmlrpc
    procedures to response.xmlrpc for dispatch.
    """
    procedures = self.xmlrpc_procedures.values()
    return current.response.xmlrpc(current.request, procedures)


def serve_amfrpc(self, version=0):
    """
    Serve an AMF remoting request (Flash/Flex clients) via pyamf.

    version: 3 selects the AMF3 procedures registered with
    @service.amfrpc3(domain); any other value uses the AMF0 table.
    Returns the encoded AMF envelope, or an error string when pyamf
    is not importable.
    """
    try:
        import pyamf
        import pyamf.remoting.gateway
    except:
        return "pyamf not installed or not in Python sys.path"
    request = current.request
    response = current.response
    if version == 3:
        gateway = pyamf.remoting.gateway.BaseGateway(self.amfrpc3_procedures)
        context = None  # AMF3 decode/encode need no explicit context
        amf_request = pyamf.remoting.decode(request.body)
    else:
        gateway = pyamf.remoting.gateway.BaseGateway(self.amfrpc_procedures)
        context = pyamf.get_context(pyamf.AMF0)
        amf_request = pyamf.remoting.decode(request.body, context)
    amf_response = pyamf.remoting.Envelope(amf_request.amfVersion)
    for name, message in amf_request:
        amf_response[name] = gateway.getProcessor(message)(message)
    response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE
    if version == 3:
        return pyamf.remoting.encode(amf_response).getvalue()
    return pyamf.remoting.encode(amf_response, context).getvalue()
contrib.pysimplesoap.server import SoapDispatcher + except: + return "pysimplesoap not installed in contrib" + request = current.request + response = current.response + procedures = self.soap_procedures + + location = "%s://%s%s" % ( + request.env.wsgi_url_scheme, + request.env.http_host, + URL(r=request,f="call/soap",vars={})) + namespace = 'namespace' in response and response.namespace or location + documentation = response.description or '' + dispatcher = SoapDispatcher( + name = response.title, + location = location, + action = location, # SOAPAction + namespace = namespace, + prefix='pys', + documentation = documentation, + ns = True) + for method, (function, returns, args, doc) in procedures.items(): + dispatcher.register_function(method, function, returns, args, doc) + if request.env.request_method == 'POST': + # Process normal Soap Operation + response.headers['Content-Type'] = 'text/xml' + return dispatcher.dispatch(request.body.read()) + elif 'WSDL' in request.vars: + # Return Web Service Description + response.headers['Content-Type'] = 'text/xml' + return dispatcher.wsdl() + elif 'op' in request.vars: + # Return method help webpage + response.headers['Content-Type'] = 'text/html' + method = request.vars['op'] + sample_req_xml, sample_res_xml, doc = dispatcher.help(method) + body = [H1("Welcome to Web2Py SOAP webservice gateway"), + A("See all webservice operations", + _href=URL(r=request,f="call/soap",vars={})), + H2(method), + P(doc), + UL(LI("Location: %s" % dispatcher.location), + LI("Namespace: %s" % dispatcher.namespace), + LI("SoapAction: %s" % dispatcher.action), + ), + H3("Sample SOAP XML Request Message:"), + CODE(sample_req_xml,language="xml"), + H3("Sample SOAP XML Response Message:"), + CODE(sample_res_xml,language="xml"), + ] + return {'body': body} + else: + # Return general help and method list webpage + response.headers['Content-Type'] = 'text/html' + body = [H1("Welcome to Web2Py SOAP webservice gateway"), + P(response.description), + 
def __call__(self):
    """
    Dispatch an incoming request to the matching serve_* handler based
    on the first request arg.

    register services with:
        service = Service()
        @service.run / @service.rss / @service.json / @service.jsonrpc
        @service.xmlrpc / @service.amfrpc / @service.amfrpc3('domain')
        @service.soap('Method', returns={'Result': int},
                      args={'a': int, 'b': int})

    expose services with:
        def call(): return service()

    call services with:
        http://..../app/default/call/run?[parameters]
        http://..../app/default/call/rss?[parameters]
        http://..../app/default/call/json?[parameters]
        http://..../app/default/call/jsonrpc
        http://..../app/default/call/xmlrpc
        http://..../app/default/call/amfrpc
        http://..../app/default/call/amfrpc3
        http://..../app/default/call/soap
    """
    request = current.request
    if len(request.args) < 1:
        raise HTTP(404, "Not Found")
    arg0 = request.args(0)
    rest = request.args[1:]
    # handlers that receive the remaining path args
    if arg0 in ('run', 'rss', 'csv', 'xml', 'json'):
        return getattr(self, 'serve_' + arg0)(rest)
    # handlers that read the request body themselves
    no_arg_handlers = {
        'jsonrpc': self.serve_jsonrpc,
        'xmlrpc': self.serve_xmlrpc,
        'amfrpc': self.serve_amfrpc,
        'soap': self.serve_soap,
    }
    if arg0 in no_arg_handlers:
        return no_arg_handlers[arg0]()
    if arg0 == 'amfrpc3':
        return self.serve_amfrpc(3)
    self.error()


def error(self):
    """Raise HTTP 404 for unknown service paths or procedures."""
    raise HTTP(404, "Object does not exist")
def prettydate(d, T=lambda x: x):
    """
    Return a human-readable relative-time string for datetime ``d``,
    e.g. '3 days ago', '1 hour ago', 'now'.

    d: a datetime.datetime (anything subtractable from datetime.now())
    T: translation callable applied to every message (defaults to identity)

    Returns '' when ``d`` cannot be subtracted from now (invalid input)
    or when ``d`` lies in the future.
    """
    try:
        dt = datetime.datetime.now() - d
    except:
        return ''
    if dt.days < 0:
        # Bug fix: a future date yields a negative timedelta (days=-1,
        # seconds>0 after normalization), which previously fell through
        # to the hour/minute branches and was misreported as a past
        # interval (e.g. now+1h -> '23 hours ago'). Treat as invalid.
        return ''
    if dt.days >= 2 * 365:
        return T('%d years ago') % int(dt.days / 365)
    elif dt.days >= 365:
        return T('1 year ago')
    elif dt.days >= 60:
        return T('%d months ago') % int(dt.days / 30)
    elif dt.days > 21:
        return T('1 month ago')
    elif dt.days >= 14:
        return T('%d weeks ago') % int(dt.days / 7)
    elif dt.days >= 7:
        return T('1 week ago')
    elif dt.days > 1:
        return T('%d days ago') % dt.days
    elif dt.days == 1:
        return T('1 day ago')
    elif dt.seconds >= 2 * 60 * 60:
        return T('%d hours ago') % int(dt.seconds / 3600)
    elif dt.seconds >= 60 * 60:
        return T('1 hour ago')
    elif dt.seconds >= 2 * 60:
        return T('%d minutes ago') % int(dt.seconds / 60)
    elif dt.seconds >= 60:
        return T('1 minute ago')
    elif dt.seconds > 1:
        return T('%d seconds ago') % dt.seconds
    elif dt.seconds == 1:
        return T('1 second ago')
    else:
        return T('now')
special. The first argument is the name of the plugin you are defining. + The named arguments are parameters needed by the plugin with default values. + If the parameters were previous defined, the old values are used. + + For example: + + ### in some general configuration file: + >>> plugins = PluginManager() + >>> plugins.me.param1=3 + + ### within the plugin model + >>> _ = PluginManager('me',param1=5,param2=6,param3=7) + + ### where the plugin is used + >>> print plugins.me.param1 + 3 + >>> print plugins.me.param2 + 6 + >>> plugins.me.param3 = 8 + >>> print plugins.me.param3 + 8 + + Here are some tests: + + >>> a=PluginManager() + >>> a.x=6 + >>> b=PluginManager('check') + >>> print b.x + 6 + >>> b=PluginManager() # reset settings + >>> print b.x + + >>> b.x=7 + >>> print a.x + 7 + >>> a.y.z=8 + >>> print b.y.z + 8 + >>> test_thread_separation() + 5 + >>> plugins=PluginManager('me',db='mydb') + >>> print plugins.me.db + mydb + >>> print 'me' in plugins + True + >>> print plugins.me.installed + True + """ + instances = {} + def __new__(cls,*a,**b): + id = thread.get_ident() + lock = thread.allocate_lock() + try: + lock.acquire() + try: + return cls.instances[id] + except KeyError: + instance = object.__new__(cls,*a,**b) + cls.instances[id] = instance + return instance + finally: + lock.release() + def __init__(self,plugin=None,**defaults): + if not plugin: + self.__dict__.clear() + settings = self.__getattr__(plugin) + settings.installed = True + [settings.update({key:value}) for key,value in defaults.items() \ + if not key in settings] + def __getattr__(self, key): + if not key in self.__dict__: + self.__dict__[key] = Storage() + return self.__dict__[key] + def keys(self): + return self.__dict__.keys() + def __contains__(self,key): + return key in self.__dict__ + +if __name__ == '__main__': + import doctest + doctest.testmod() ADDED gluon/tools.pyc Index: gluon/tools.pyc ================================================================== --- /dev/null +++ 
def md5_hash(text):
    """Return the md5 hex digest of the given text."""
    return hashlib.md5(text).hexdigest()


def simple_hash(text, digest_alg='md5'):
    """
    Return a hex digest of ``text`` with the given algorithm.

    digest_alg: either an algorithm name understood by hashlib.new
    (e.g. 'md5', 'sha256') or a hashlib-style constructor called
    directly with ``text``.

    Raises RuntimeError when digest_alg is falsy.
    """
    if not digest_alg:
        # call-form raise: the original `raise RuntimeError, "..."` is
        # Python-2-only syntax; this form works in both 2 and 3
        raise RuntimeError("simple_hash with digest_alg=None")
    if not isinstance(digest_alg, str):
        h = digest_alg(text)
    else:
        h = hashlib.new(digest_alg)
        h.update(text)
    return h.hexdigest()


def get_digest(value):
    """
    Map an algorithm name ('md5', 'sha1', 'sha224', 'sha256', 'sha384',
    'sha512', case-insensitive) to its hashlib constructor.
    Non-string values are returned unchanged (assumed to already be
    a digest constructor).

    Raises ValueError for unknown algorithm names.
    """
    if not isinstance(value, str):
        return value
    # table lookup replaces the original if/elif ladder
    algorithms = {
        'md5': hashlib.md5,
        'sha1': hashlib.sha1,
        'sha224': hashlib.sha224,
        'sha256': hashlib.sha256,
        'sha384': hashlib.sha384,
        'sha512': hashlib.sha512,
        }
    try:
        return algorithms[value.lower()]
    except KeyError:
        raise ValueError("Invalid digest algorithm")


def hmac_hash(value, key, digest_alg='md5', salt=None):
    """
    Return the HMAC hex digest of ``value`` keyed by ``key``.

    A key of the form 'alg:secret' overrides digest_alg with 'alg'.
    If ``salt`` is given, str(salt) is folded into the MAC as well.
    """
    if ':' in key:
        digest_alg, key = key.split(':')
    d = hmac.new(key, value, get_digest(digest_alg))
    if salt:
        d.update(str(salt))
    return d.hexdigest()
http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09 + + At startup web2py compute a unique ID that identifies the machine by adding + uuid.getnode() + int(time.time() * 1e3) + + This is a 48-bit number. It converts the number into 16 8-bit tokens. + It uses this value to initialize the entropy source ('/dev/urandom') and to seed random. + + If os.random() is not supported, it falls back to using random and issues a warning. + """ + node_id = uuid.getnode() + microseconds = int(time.time() * 1e6) + ctokens = [((node_id + microseconds) >> ((i%6)*8)) % 256 for i in range(16)] + random.seed(node_id + microseconds) + try: + os.urandom(1) + try: + # try to add process-specific entropy + frandom = open('/dev/urandom','wb') + try: + frandom.write(''.join(chr(t) for t in ctokens)) + finally: + frandom.close() + except IOError: + # works anyway + pass + except NotImplementedError: + logger.warning( +"""Cryptographically secure session management is not possible on your system because +your system does not provide a cryptographically secure entropy source. +This is not specific to web2py; consider deploying on a different operating system.""") + return ctokens +ctokens = initialize_urandom() + +def web2py_uuid(): + """ + This function follows from the following discussion: + http://groups.google.com/group/web2py-developers/browse_thread/thread/7fd5789a7da3f09 + + It works like uuid.uuid4 except that tries to use os.urandom() if possible + and it XORs the output with the tokens uniquely associated with this machine. 
+ """ + bytes = [random.randrange(256) for i in range(16)] + try: + ubytes = [ord(c) for c in os.urandom(16)] # use /dev/urandom if possible + bytes = [bytes[i] ^ ubytes[i] for i in range(16)] + except NotImplementedError: + pass + ## xor bytes with constant ctokens + bytes = ''.join(chr(c ^ ctokens[i]) for i,c in enumerate(bytes)) + return str(uuid.UUID(bytes=bytes, version=4)) + + + ADDED gluon/utils.pyc Index: gluon/utils.pyc ================================================================== --- /dev/null +++ gluon/utils.pyc cannot compute difference between binary files ADDED gluon/validators.py Index: gluon/validators.py ================================================================== --- /dev/null +++ gluon/validators.py @@ -0,0 +1,2971 @@ +#!/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +Thanks to ga2arch for help with IS_IN_DB and IS_NOT_IN_DB on GAE +""" + +import os +import re +import datetime +import time +import cgi +import urllib +import struct +import decimal +import unicodedata +from cStringIO import StringIO +from utils import simple_hash, hmac_hash, web2py_uuid + +__all__ = [ + 'CLEANUP', + 'CRYPT', + 'IS_ALPHANUMERIC', + 'IS_DATE_IN_RANGE', + 'IS_DATE', + 'IS_DATETIME_IN_RANGE', + 'IS_DATETIME', + 'IS_DECIMAL_IN_RANGE', + 'IS_EMAIL', + 'IS_EMPTY_OR', + 'IS_EXPR', + 'IS_FLOAT_IN_RANGE', + 'IS_IMAGE', + 'IS_IN_DB', + 'IS_IN_SET', + 'IS_INT_IN_RANGE', + 'IS_IPV4', + 'IS_LENGTH', + 'IS_LIST_OF', + 'IS_LOWER', + 'IS_MATCH', + 'IS_EQUAL_TO', + 'IS_NOT_EMPTY', + 'IS_NOT_IN_DB', + 'IS_NULL_OR', + 'IS_SLUG', + 'IS_STRONG', + 'IS_TIME', + 'IS_UPLOAD_FILENAME', + 'IS_UPPER', + 'IS_URL', + ] + +def translate(text): + if isinstance(text,(str,unicode)): + from globals import current + if hasattr(current,'T'): + return current.T(text) + return text + +def options_sorter(x,y): + return (str(x[1]).upper()>str(y[1]).upper() 
class Validator(object):
    """
    Root for all validators, mainly for documentation purposes.

    Validators are classes used to validate input fields (including
    forms generated from database tables). They are assigned through
    the ``requires`` attribute of a field, either singly or as a list,
    and are invoked in order by FORM.accepts::

        INPUT(_name='a', requires=IS_INT_IN_RANGE(0, 10))
        db.person.name.requires = [IS_NOT_EMPTY(),
                                   IS_NOT_IN_DB(db, 'person.id')]

    Built-in validators accept an optional ``error_message`` argument
    to override the default message; wrap it in T() for i18n. Default
    error messages are not translated.
    """

    def formatter(self, value):
        """
        Return a representation of ``value`` formatted to match this
        validator; the base implementation is the identity.
        """
        return value


class IS_MATCH(Validator):
    """
    Validates that the value matches a regular expression::

        INPUT(_type='text', _name='name', requires=IS_MATCH('.+'))

    >>> IS_MATCH('.+')('hello')
    ('hello', None)
    >>> IS_MATCH('hell.*', strict=False)('hello')
    ('hello', None)
    >>> IS_MATCH('hello', search=True)('shello')
    ('hello', None)
    """

    def __init__(self, expression, error_message='invalid expression',
                 strict=True, search=False):
        # strict anchors the pattern at the end; search leaves the
        # start unanchored so the pattern may match anywhere
        if strict and not expression.endswith('$'):
            expression = '(%s)$' % expression
        if not search and not expression.startswith('^'):
            expression = '^(%s)' % expression
        self.regex = re.compile(expression)
        self.error_message = error_message

    def __call__(self, value):
        found = self.regex.search(value)
        if found is None:
            return (value, translate(self.error_message))
        # return the matched portion, not the raw input
        return (found.group(), None)


class IS_EQUAL_TO(Validator):
    """
    Validates that the value equals a fixed expression — typically a
    password-confirmation field::

        INPUT(_type='text', _name='password2',
              requires=IS_EQUAL_TO(request.vars.password))

    >>> IS_EQUAL_TO('aaa')('aaa')
    ('aaa', None)
    """

    def __init__(self, expression, error_message='no match'):
        self.expression = expression
        self.error_message = error_message

    def __call__(self, value):
        if value != self.expression:
            return (value, translate(self.error_message))
        return (value, None)
of IS_EXPR must be python condition:: + + >>> IS_EXPR('int(value) < 2')('1') + ('1', None) + + >>> IS_EXPR('int(value) < 2')('2') + ('2', 'invalid expression') + """ + + def __init__(self, expression, error_message='invalid expression'): + self.expression = expression + self.error_message = error_message + + def __call__(self, value): + environment = {'value': value} + exec '__ret__=' + self.expression in environment + if environment['__ret__']: + return (value, None) + return (value, translate(self.error_message)) + + +class IS_LENGTH(Validator): + """ + Checks if length of field's value fits between given boundaries. Works + for both text and file inputs. + + Arguments: + + maxsize: maximum allowed length / size + minsize: minimum allowed length / size + + Examples:: + + #Check if text string is shorter than 33 characters: + INPUT(_type='text', _name='name', requires=IS_LENGTH(32)) + + #Check if password string is longer than 5 characters: + INPUT(_type='password', _name='name', requires=IS_LENGTH(minsize=6)) + + #Check if uploaded file has size between 1KB and 1MB: + INPUT(_type='file', _name='name', requires=IS_LENGTH(1048576, 1024)) + + >>> IS_LENGTH()('') + ('', None) + >>> IS_LENGTH()('1234567890') + ('1234567890', None) + >>> IS_LENGTH(maxsize=5, minsize=0)('1234567890') # too long + ('1234567890', 'enter from 0 to 5 characters') + >>> IS_LENGTH(maxsize=50, minsize=20)('1234567890') # too short + ('1234567890', 'enter from 20 to 50 characters') + """ + + def __init__(self, maxsize=255, minsize=0, + error_message='enter from %(min)g to %(max)g characters'): + self.maxsize = maxsize + self.minsize = minsize + self.error_message = error_message + + def __call__(self, value): + if isinstance(value, cgi.FieldStorage): + if value.file: + value.file.seek(0, os.SEEK_END) + length = value.file.tell() + value.file.seek(0, os.SEEK_SET) + else: + val = value.value + if val: + length = len(val) + else: + length = 0 + if self.minsize <= length <= self.maxsize: + return 
(value, None) + elif isinstance(value, (str, unicode, list)): + if self.minsize <= len(value) <= self.maxsize: + return (value, None) + elif self.minsize <= len(str(value)) <= self.maxsize: + try: + value.decode('utf8') + return (value, None) + except: + pass + return (value, translate(self.error_message) \ + % dict(min=self.minsize, max=self.maxsize)) + + +class IS_IN_SET(Validator): + """ + example:: + + INPUT(_type='text', _name='name', + requires=IS_IN_SET(['max', 'john'],zero='')) + + the argument of IS_IN_SET must be a list or set + + >>> IS_IN_SET(['max', 'john'])('max') + ('max', None) + >>> IS_IN_SET(['max', 'john'])('massimo') + ('massimo', 'value not allowed') + >>> IS_IN_SET(['max', 'john'], multiple=True)(('max', 'john')) + (('max', 'john'), None) + >>> IS_IN_SET(['max', 'john'], multiple=True)(('bill', 'john')) + (('bill', 'john'), 'value not allowed') + >>> IS_IN_SET(('id1','id2'), ['first label','second label'])('id1') # Traditional way + ('id1', None) + >>> IS_IN_SET({'id1':'first label', 'id2':'second label'})('id1') + ('id1', None) + >>> import itertools + >>> IS_IN_SET(itertools.chain(['1','3','5'],['2','4','6']))('1') + ('1', None) + >>> IS_IN_SET([('id1','first label'), ('id2','second label')])('id1') # Redundant way + ('id1', None) + """ + + def __init__( + self, + theset, + labels=None, + error_message='value not allowed', + multiple=False, + zero='', + sort=False, + ): + self.multiple = multiple + if isinstance(theset, dict): + self.theset = [str(item) for item in theset] + self.labels = theset.values() + elif theset and isinstance(theset, (tuple,list)) \ + and isinstance(theset[0], (tuple,list)) and len(theset[0])==2: + self.theset = [str(item) for item,label in theset] + self.labels = [str(label) for item,label in theset] + else: + self.theset = [str(item) for item in theset] + self.labels = labels + self.error_message = error_message + self.zero = zero + self.sort = sort + + def options(self,zero=True): + if not self.labels: + items = 
[(k, k) for (i, k) in enumerate(self.theset)] + else: + items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] + if self.sort: + items.sort(options_sorter) + if zero and not self.zero is None and not self.multiple: + items.insert(0,('',self.zero)) + return items + + def __call__(self, value): + if self.multiple: + ### if below was values = re.compile("[\w\-:]+").findall(str(value)) + if isinstance(value, (str,unicode)): + values = [value] + elif isinstance(value, (tuple, list)): + values = value + elif not value: + values = [] + else: + values = [value] + failures = [x for x in values if not x in self.theset] + if failures and self.theset: + if self.multiple and (value is None or value == ''): + return ([], None) + return (value, translate(self.error_message)) + if self.multiple: + if isinstance(self.multiple,(tuple,list)) and \ + not self.multiple[0]<=len(values)[^\)]+)\)s') + + +class IS_IN_DB(Validator): + """ + example:: + + INPUT(_type='text', _name='name', + requires=IS_IN_DB(db, db.mytable.myfield, zero='')) + + used for reference fields, rendered as a dropbox + """ + + def __init__( + self, + dbset, + field, + label=None, + error_message='value not in database', + orderby=None, + groupby=None, + cache=None, + multiple=False, + zero='', + sort=False, + _and=None, + ): + from dal import Table + if isinstance(field,Table): field = field._id + + if hasattr(dbset, 'define_table'): + self.dbset = dbset() + else: + self.dbset = dbset + self.field = field + (ktable, kfield) = str(self.field).split('.') + if not label: + label = '%%(%s)s' % kfield + if isinstance(label,str): + if regex1.match(str(label)): + label = '%%(%s)s' % str(label).split('.')[-1] + ks = regex2.findall(label) + if not kfield in ks: + ks += [kfield] + fields = ks + else: + ks = [kfield] + fields = 'all' + self.fields = fields + self.label = label + self.ktable = ktable + self.kfield = kfield + self.ks = ks + self.error_message = error_message + self.theset = None + self.orderby = 
orderby + self.groupby = groupby + self.cache = cache + self.multiple = multiple + self.zero = zero + self.sort = sort + self._and = _and + + def set_self_id(self, id): + if self._and: + self._and.record_id = id + + def build_set(self): + if self.fields == 'all': + fields = [f for f in self.dbset.db[self.ktable]] + else: + fields = [self.dbset.db[self.ktable][k] for k in self.fields] + if self.dbset.db._dbname != 'gae': + orderby = self.orderby or reduce(lambda a,b:a|b,fields) + groupby = self.groupby + dd = dict(orderby=orderby, groupby=groupby, cache=self.cache) + records = self.dbset.select(*fields, **dd) + else: + orderby = self.orderby or reduce(lambda a,b:a|b,(f for f in fields if not f.name=='id')) + dd = dict(orderby=orderby, cache=self.cache) + records = self.dbset.select(self.dbset.db[self.ktable].ALL, **dd) + self.theset = [str(r[self.kfield]) for r in records] + if isinstance(self.label,str): + self.labels = [self.label % dict(r) for r in records] + else: + self.labels = [self.label(r) for r in records] + + def options(self, zero=True): + self.build_set() + items = [(k, self.labels[i]) for (i, k) in enumerate(self.theset)] + if self.sort: + items.sort(options_sorter) + if zero and not self.zero is None and not self.multiple: + items.insert(0,('',self.zero)) + return items + + def __call__(self, value): + if self.multiple: + if isinstance(value,list): + values=value + elif value: + values = [value] + else: + values = [] + if isinstance(self.multiple,(tuple,list)) and \ + not self.multiple[0]<=len(values) 0: + if isinstance(self.record_id, dict): + for f in self.record_id: + if str(getattr(rows[0], f)) != str(self.record_id[f]): + return (value, translate(self.error_message)) + elif str(rows[0].id) != str(self.record_id): + return (value, translate(self.error_message)) + return (value, None) + + +class IS_INT_IN_RANGE(Validator): + """ + Determine that the argument is (or can be represented as) an int, + and that it falls within the specified range. 
The range is interpreted + in the Pythonic way, so the test is: min <= value < max. + + The minimum and maximum limits can be None, meaning no lower or upper limit, + respectively. + + example:: + + INPUT(_type='text', _name='name', requires=IS_INT_IN_RANGE(0, 10)) + + >>> IS_INT_IN_RANGE(1,5)('4') + (4, None) + >>> IS_INT_IN_RANGE(1,5)(4) + (4, None) + >>> IS_INT_IN_RANGE(1,5)(1) + (1, None) + >>> IS_INT_IN_RANGE(1,5)(5) + (5, 'enter an integer between 1 and 4') + >>> IS_INT_IN_RANGE(1,5)(5) + (5, 'enter an integer between 1 and 4') + >>> IS_INT_IN_RANGE(1,5)(3.5) + (3, 'enter an integer between 1 and 4') + >>> IS_INT_IN_RANGE(None,5)('4') + (4, None) + >>> IS_INT_IN_RANGE(None,5)('6') + (6, 'enter an integer less than or equal to 4') + >>> IS_INT_IN_RANGE(1,None)('4') + (4, None) + >>> IS_INT_IN_RANGE(1,None)('0') + (0, 'enter an integer greater than or equal to 1') + >>> IS_INT_IN_RANGE()(6) + (6, None) + >>> IS_INT_IN_RANGE()('abc') + ('abc', 'enter an integer') + """ + + def __init__( + self, + minimum=None, + maximum=None, + error_message=None, + ): + self.minimum = self.maximum = None + if minimum is None: + if maximum is None: + self.error_message = error_message or 'enter an integer' + else: + self.maximum = int(maximum) + if error_message is None: + error_message = 'enter an integer less than or equal to %(max)g' + self.error_message = translate(error_message) % dict(max=self.maximum-1) + elif maximum is None: + self.minimum = int(minimum) + if error_message is None: + error_message = 'enter an integer greater than or equal to %(min)g' + self.error_message = translate(error_message) % dict(min=self.minimum) + else: + self.minimum = int(minimum) + self.maximum = int(maximum) + if error_message is None: + error_message = 'enter an integer between %(min)g and %(max)g' + self.error_message = translate(error_message) \ + % dict(min=self.minimum, max=self.maximum-1) + + def __call__(self, value): + try: + fvalue = float(value) + value = int(value) + if value != 
fvalue: + return (value, self.error_message) + if self.minimum is None: + if self.maximum is None or value < self.maximum: + return (value, None) + elif self.maximum is None: + if value >= self.minimum: + return (value, None) + elif self.minimum <= value < self.maximum: + return (value, None) + except ValueError: + pass + return (value, self.error_message) + +def str2dec(number): + s = str(number) + if not '.' in s: s+='.00' + else: s+='0'*(2-len(s.split('.')[1])) + return s + +class IS_FLOAT_IN_RANGE(Validator): + """ + Determine that the argument is (or can be represented as) a float, + and that it falls within the specified inclusive range. + The comparison is made with native arithmetic. + + The minimum and maximum limits can be None, meaning no lower or upper limit, + respectively. + + example:: + + INPUT(_type='text', _name='name', requires=IS_FLOAT_IN_RANGE(0, 10)) + + >>> IS_FLOAT_IN_RANGE(1,5)('4') + (4.0, None) + >>> IS_FLOAT_IN_RANGE(1,5)(4) + (4.0, None) + >>> IS_FLOAT_IN_RANGE(1,5)(1) + (1.0, None) + >>> IS_FLOAT_IN_RANGE(1,5)(5.25) + (5.25, 'enter a number between 1 and 5') + >>> IS_FLOAT_IN_RANGE(1,5)(6.0) + (6.0, 'enter a number between 1 and 5') + >>> IS_FLOAT_IN_RANGE(1,5)(3.5) + (3.5, None) + >>> IS_FLOAT_IN_RANGE(1,None)(3.5) + (3.5, None) + >>> IS_FLOAT_IN_RANGE(None,5)(3.5) + (3.5, None) + >>> IS_FLOAT_IN_RANGE(1,None)(0.5) + (0.5, 'enter a number greater than or equal to 1') + >>> IS_FLOAT_IN_RANGE(None,5)(6.5) + (6.5, 'enter a number less than or equal to 5') + >>> IS_FLOAT_IN_RANGE()(6.5) + (6.5, None) + >>> IS_FLOAT_IN_RANGE()('abc') + ('abc', 'enter a number') + """ + + def __init__( + self, + minimum=None, + maximum=None, + error_message=None, + dot='.' 
+ ): + self.minimum = self.maximum = None + self.dot = dot + if minimum is None: + if maximum is None: + if error_message is None: + error_message = 'enter a number' + else: + self.maximum = float(maximum) + if error_message is None: + error_message = 'enter a number less than or equal to %(max)g' + elif maximum is None: + self.minimum = float(minimum) + if error_message is None: + error_message = 'enter a number greater than or equal to %(min)g' + else: + self.minimum = float(minimum) + self.maximum = float(maximum) + if error_message is None: + error_message = 'enter a number between %(min)g and %(max)g' + self.error_message = translate(error_message) \ + % dict(min=self.minimum, max=self.maximum) + + def __call__(self, value): + try: + if self.dot=='.': + fvalue = float(value) + else: + fvalue = float(str(value).replace(self.dot,'.')) + if self.minimum is None: + if self.maximum is None or fvalue <= self.maximum: + return (fvalue, None) + elif self.maximum is None: + if fvalue >= self.minimum: + return (fvalue, None) + elif self.minimum <= fvalue <= self.maximum: + return (fvalue, None) + except (ValueError, TypeError): + pass + return (value, self.error_message) + + def formatter(self,value): + return str2dec(value).replace('.',self.dot) + + +class IS_DECIMAL_IN_RANGE(Validator): + """ + Determine that the argument is (or can be represented as) a Python Decimal, + and that it falls within the specified inclusive range. + The comparison is made with Python Decimal arithmetic. + + The minimum and maximum limits can be None, meaning no lower or upper limit, + respectively. 
+ + example:: + + INPUT(_type='text', _name='name', requires=IS_DECIMAL_IN_RANGE(0, 10)) + + >>> IS_DECIMAL_IN_RANGE(1,5)('4') + (Decimal('4'), None) + >>> IS_DECIMAL_IN_RANGE(1,5)(4) + (Decimal('4'), None) + >>> IS_DECIMAL_IN_RANGE(1,5)(1) + (Decimal('1'), None) + >>> IS_DECIMAL_IN_RANGE(1,5)(5.25) + (5.25, 'enter a number between 1 and 5') + >>> IS_DECIMAL_IN_RANGE(5.25,6)(5.25) + (Decimal('5.25'), None) + >>> IS_DECIMAL_IN_RANGE(5.25,6)('5.25') + (Decimal('5.25'), None) + >>> IS_DECIMAL_IN_RANGE(1,5)(6.0) + (6.0, 'enter a number between 1 and 5') + >>> IS_DECIMAL_IN_RANGE(1,5)(3.5) + (Decimal('3.5'), None) + >>> IS_DECIMAL_IN_RANGE(1.5,5.5)(3.5) + (Decimal('3.5'), None) + >>> IS_DECIMAL_IN_RANGE(1.5,5.5)(6.5) + (6.5, 'enter a number between 1.5 and 5.5') + >>> IS_DECIMAL_IN_RANGE(1.5,None)(6.5) + (Decimal('6.5'), None) + >>> IS_DECIMAL_IN_RANGE(1.5,None)(0.5) + (0.5, 'enter a number greater than or equal to 1.5') + >>> IS_DECIMAL_IN_RANGE(None,5.5)(4.5) + (Decimal('4.5'), None) + >>> IS_DECIMAL_IN_RANGE(None,5.5)(6.5) + (6.5, 'enter a number less than or equal to 5.5') + >>> IS_DECIMAL_IN_RANGE()(6.5) + (Decimal('6.5'), None) + >>> IS_DECIMAL_IN_RANGE(0,99)(123.123) + (123.123, 'enter a number between 0 and 99') + >>> IS_DECIMAL_IN_RANGE(0,99)('123.123') + ('123.123', 'enter a number between 0 and 99') + >>> IS_DECIMAL_IN_RANGE(0,99)('12.34') + (Decimal('12.34'), None) + >>> IS_DECIMAL_IN_RANGE()('abc') + ('abc', 'enter a decimal number') + """ + + def __init__( + self, + minimum=None, + maximum=None, + error_message=None, + dot='.' 
+ ): + self.minimum = self.maximum = None + self.dot = dot + if minimum is None: + if maximum is None: + if error_message is None: + error_message = 'enter a decimal number' + else: + self.maximum = decimal.Decimal(str(maximum)) + if error_message is None: + error_message = 'enter a number less than or equal to %(max)g' + elif maximum is None: + self.minimum = decimal.Decimal(str(minimum)) + if error_message is None: + error_message = 'enter a number greater than or equal to %(min)g' + else: + self.minimum = decimal.Decimal(str(minimum)) + self.maximum = decimal.Decimal(str(maximum)) + if error_message is None: + error_message = 'enter a number between %(min)g and %(max)g' + self.error_message = translate(error_message) \ + % dict(min=self.minimum, max=self.maximum) + + def __call__(self, value): + try: + if isinstance(value,decimal.Decimal): + v = value + else: + v = decimal.Decimal(str(value).replace(self.dot,'.')) + if self.minimum is None: + if self.maximum is None or v <= self.maximum: + return (v, None) + elif self.maximum is None: + if v >= self.minimum: + return (v, None) + elif self.minimum <= v <= self.maximum: + return (v, None) + except (ValueError, TypeError, decimal.InvalidOperation): + pass + return (value, self.error_message) + + def formatter(self, value): + return str2dec(value).replace('.',self.dot) + +def is_empty(value, empty_regex=None): + "test empty field" + if isinstance(value, (str, unicode)): + value = value.strip() + if empty_regex is not None and empty_regex.match(value): + value = '' + if value is None or value == '' or value == []: + return (value, True) + return (value, False) + +class IS_NOT_EMPTY(Validator): + """ + example:: + + INPUT(_type='text', _name='name', requires=IS_NOT_EMPTY()) + + >>> IS_NOT_EMPTY()(1) + (1, None) + >>> IS_NOT_EMPTY()(0) + (0, None) + >>> IS_NOT_EMPTY()('x') + ('x', None) + >>> IS_NOT_EMPTY()(' x ') + ('x', None) + >>> IS_NOT_EMPTY()(None) + (None, 'enter a value') + >>> IS_NOT_EMPTY()('') + ('', 'enter 
a value') + >>> IS_NOT_EMPTY()(' ') + ('', 'enter a value') + >>> IS_NOT_EMPTY()(' \\n\\t') + ('', 'enter a value') + >>> IS_NOT_EMPTY()([]) + ([], 'enter a value') + >>> IS_NOT_EMPTY(empty_regex='def')('def') + ('', 'enter a value') + >>> IS_NOT_EMPTY(empty_regex='de[fg]')('deg') + ('', 'enter a value') + >>> IS_NOT_EMPTY(empty_regex='def')('abc') + ('abc', None) + """ + + def __init__(self, error_message='enter a value', empty_regex=None): + self.error_message = error_message + if empty_regex is not None: + self.empty_regex = re.compile(empty_regex) + else: + self.empty_regex = None + + def __call__(self, value): + value, empty = is_empty(value, empty_regex=self.empty_regex) + if empty: + return (value, translate(self.error_message)) + return (value, None) + + +class IS_ALPHANUMERIC(IS_MATCH): + """ + example:: + + INPUT(_type='text', _name='name', requires=IS_ALPHANUMERIC()) + + >>> IS_ALPHANUMERIC()('1') + ('1', None) + >>> IS_ALPHANUMERIC()('') + ('', None) + >>> IS_ALPHANUMERIC()('A_a') + ('A_a', None) + >>> IS_ALPHANUMERIC()('!') + ('!', 'enter only letters, numbers, and underscore') + """ + + def __init__(self, error_message='enter only letters, numbers, and underscore'): + IS_MATCH.__init__(self, '^[\w]*$', error_message) + + +class IS_EMAIL(Validator): + """ + Checks if field's value is a valid email address. Can be set to disallow + or force addresses from certain domain(s). + + Email regex adapted from + http://haacked.com/archive/2007/08/21/i-knew-how-to-validate-an-email-address-until-i.aspx, + generally following the RFCs, except that we disallow quoted strings + and permit underscores and leading numerics in subdomain labels + + Arguments: + + - banned: regex text for disallowed address domains + - forced: regex text for required address domains + + Both arguments can also be custom objects with a match(value) method. 
+ + Examples:: + + #Check for valid email address: + INPUT(_type='text', _name='name', + requires=IS_EMAIL()) + + #Check for valid email address that can't be from a .com domain: + INPUT(_type='text', _name='name', + requires=IS_EMAIL(banned='^.*\.com(|\..*)$')) + + #Check for valid email address that must be from a .edu domain: + INPUT(_type='text', _name='name', + requires=IS_EMAIL(forced='^.*\.edu(|\..*)$')) + + >>> IS_EMAIL()('a@b.com') + ('a@b.com', None) + >>> IS_EMAIL()('abc@def.com') + ('abc@def.com', None) + >>> IS_EMAIL()('abc@3def.com') + ('abc@3def.com', None) + >>> IS_EMAIL()('abc@def.us') + ('abc@def.us', None) + >>> IS_EMAIL()('abc@d_-f.us') + ('abc@d_-f.us', None) + >>> IS_EMAIL()('@def.com') # missing name + ('@def.com', 'enter a valid email address') + >>> IS_EMAIL()('"abc@def".com') # quoted name + ('"abc@def".com', 'enter a valid email address') + >>> IS_EMAIL()('abc+def.com') # no @ + ('abc+def.com', 'enter a valid email address') + >>> IS_EMAIL()('abc@def.x') # one-char TLD + ('abc@def.x', 'enter a valid email address') + >>> IS_EMAIL()('abc@def.12') # numeric TLD + ('abc@def.12', 'enter a valid email address') + >>> IS_EMAIL()('abc@def..com') # double-dot in domain + ('abc@def..com', 'enter a valid email address') + >>> IS_EMAIL()('abc@.def.com') # dot starts domain + ('abc@.def.com', 'enter a valid email address') + >>> IS_EMAIL()('abc@def.c_m') # underscore in TLD + ('abc@def.c_m', 'enter a valid email address') + >>> IS_EMAIL()('NotAnEmail') # missing @ + ('NotAnEmail', 'enter a valid email address') + >>> IS_EMAIL()('abc@NotAnEmail') # missing TLD + ('abc@NotAnEmail', 'enter a valid email address') + >>> IS_EMAIL()('customer/department@example.com') + ('customer/department@example.com', None) + >>> IS_EMAIL()('$A12345@example.com') + ('$A12345@example.com', None) + >>> IS_EMAIL()('!def!xyz%abc@example.com') + ('!def!xyz%abc@example.com', None) + >>> IS_EMAIL()('_Yosemite.Sam@example.com') + ('_Yosemite.Sam@example.com', None) + >>> 
IS_EMAIL()('~@example.com')
    ('~@example.com', None)
    >>> IS_EMAIL()('.wooly@example.com') # dot starts name
    ('.wooly@example.com', 'enter a valid email address')
    >>> IS_EMAIL()('wo..oly@example.com') # adjacent dots in name
    ('wo..oly@example.com', 'enter a valid email address')
    >>> IS_EMAIL()('pootietang.@example.com') # dot ends name
    ('pootietang.@example.com', 'enter a valid email address')
    >>> IS_EMAIL()('.@example.com') # name is bare dot
    ('.@example.com', 'enter a valid email address')
    >>> IS_EMAIL()('Ima.Fool@example.com')
    ('Ima.Fool@example.com', None)
    >>> IS_EMAIL()('Ima Fool@example.com') # space in name
    ('Ima Fool@example.com', 'enter a valid email address')
    >>> IS_EMAIL()('localguy@localhost') # localhost as domain
    ('localguy@localhost', None)

    """

    regex = re.compile('''
        ^(?!\.)                            # name may not begin with a dot
        (
          [-a-z0-9!\#$%&'*+/=?^_`{|}~]     # all legal characters except dot
          |
          \.(?!\.)                         # single dots only
        )+
        (?<!\.)                            # name may not end with a dot
        @
        (
          localhost
          |
          (
            [a-z0-9]      # [sub]domain begins with alphanumeric
            (
              [-\w]*      # alphanumeric, underscore, dot, hyphen
              [a-z0-9]    # ending alphanumeric
            )?
          \.              # ending dot
          )+
          [a-z]{2,}       # TLD alphabetic (2 or more chars)
        )$
    ''', re.VERBOSE | re.IGNORECASE)

    # NOTE(review): the span from '(?<' above down to the '<http://...>' URL
    # below was destroyed by angle-bracket stripping; reconstructed from the
    # upstream web2py sources -- verify against the official repository.
    def __init__(self,
                 banned=None,
                 forced=None,
                 error_message='enter a valid email address'):
        if isinstance(banned, str):
            banned = re.compile(banned)
        if isinstance(forced, str):
            forced = re.compile(forced)
        self.banned = banned
        self.forced = forced
        self.error_message = error_message

    def __call__(self, value):
        match = self.regex.match(value)
        if match:
            domain = value.split('@')[1]
            if (not self.banned or not self.banned.match(domain)) \
                    and (not self.forced or self.forced.match(domain)):
                return (value, None)
        return (value, translate(self.error_message))

# URL scheme source:
# <http://en.wikipedia.org/wiki/URI_scheme> obtained on 2008-Nov-10

official_url_schemes = [
    'aaa',
    'aaas',
    'acap',
    'cap',
    'cid',
    'crid',
    'data',
    'dav',
    'dict',
    'dns',
    'fax',
    'file',
    'ftp',
    'go',
    'gopher',
    'h323',
    'http',
    'https',
    'icap',
    'im',
    'imap',
    'info',
    'ipp',
    'iris',
    'iris.beep',
    'iris.xpc',
    'iris.xpcs',
    'iris.lws',
    'ldap',
    'mailto',
    'mid',
    'modem',
    'msrp',
    'msrps',
    'mtqp',
    'mupdate',
    'news',
    'nfs',
    'nntp',
    'opaquelocktoken',
    'pop',
    'pres',
    'prospero',
    'rtsp',
    'service',
    'shttp',
    'sip',
    'sips',
    'snmp',
    'soap.beep',
    'soap.beeps',
    'tag',
    'tel',
    'telnet',
    'tftp',
    'thismessage',
    'tip',
    'tv',
    'urn',
    'vemmi',
    'wais',
    'xmlrpc.beep',
    'xmlrpc.beep',
    'xmpp',
    'z39.50r',
    'z39.50s',
    ]
unofficial_url_schemes = [
    'about',
    'adiumxtra',
    'aim',
    'afp',
    'aw',
    'callto',
    'chrome',
    'cvs',
    'ed2k',
    'feed',
    'fish',
    'gg',
    'gizmoproject',
    'iax2',
    'irc',
    'ircs',
    'itms',
    'jar',
    'javascript',
    'keyparc',
    'lastfm',
    'ldaps',
+ 'magnet', + 'mms', + 'msnim', + 'mvn', + 'notes', + 'nsfw', + 'psyc', + 'paparazzi:http', + 'rmi', + 'rsync', + 'secondlife', + 'sgn', + 'skype', + 'ssh', + 'sftp', + 'smb', + 'sms', + 'soldat', + 'steam', + 'svn', + 'teamspeak', + 'unreal', + 'ut2004', + 'ventrilo', + 'view-source', + 'webcal', + 'wyciwyg', + 'xfire', + 'xri', + 'ymsgr', + ] +all_url_schemes = [None] + official_url_schemes + unofficial_url_schemes +http_schemes = [None, 'http', 'https'] + + +# This regex comes from RFC 2396, Appendix B. It's used to split a URL into +# its component parts +# Here are the regex groups that it extracts: +# scheme = group(2) +# authority = group(4) +# path = group(5) +# query = group(7) +# fragment = group(9) + +url_split_regex = \ + re.compile('^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?') + +# Defined in RFC 3490, Section 3.1, Requirement #1 +# Use this regex to split the authority component of a unicode URL into +# its component labels +label_split_regex = re.compile(u'[\u002e\u3002\uff0e\uff61]') + + +def escape_unicode(string): + ''' + Converts a unicode string into US-ASCII, using a simple conversion scheme. + Each unicode character that does not have a US-ASCII equivalent is + converted into a URL escaped form based on its hexadecimal value. + For example, the unicode character '\u4e86' will become the string '%4e%86' + + :param string: unicode string, the unicode string to convert into an + escaped US-ASCII form + :returns: the US-ASCII escaped form of the inputted string + :rtype: string + + @author: Jonathan Benn + ''' + returnValue = StringIO() + + for character in string: + code = ord(character) + if code > 0x7F: + hexCode = hex(code) + returnValue.write('%' + hexCode[2:4] + '%' + hexCode[4:6]) + else: + returnValue.write(character) + + return returnValue.getvalue() + + +def unicode_to_ascii_authority(authority): + ''' + Follows the steps in RFC 3490, Section 4 to convert a unicode authority + string into its ASCII equivalent. 
+ For example, u'www.Alliancefran\xe7aise.nu' will be converted into + 'www.xn--alliancefranaise-npb.nu' + + :param authority: unicode string, the URL authority component to convert, + e.g. u'www.Alliancefran\xe7aise.nu' + :returns: the US-ASCII character equivalent to the inputed authority, + e.g. 'www.xn--alliancefranaise-npb.nu' + :rtype: string + :raises Exception: if the function is not able to convert the inputed + authority + + @author: Jonathan Benn + ''' + #RFC 3490, Section 4, Step 1 + #The encodings.idna Python module assumes that AllowUnassigned == True + + #RFC 3490, Section 4, Step 2 + labels = label_split_regex.split(authority) + + #RFC 3490, Section 4, Step 3 + #The encodings.idna Python module assumes that UseSTD3ASCIIRules == False + + #RFC 3490, Section 4, Step 4 + #We use the ToASCII operation because we are about to put the authority + #into an IDN-unaware slot + asciiLabels = [] + try: + import encodings.idna + for label in labels: + if label: + asciiLabels.append(encodings.idna.ToASCII(label)) + else: + #encodings.idna.ToASCII does not accept an empty string, but + #it is necessary for us to allow for empty labels so that we + #don't modify the URL + asciiLabels.append('') + except: + asciiLabels=[str(label) for label in labels] + #RFC 3490, Section 4, Step 5 + return str(reduce(lambda x, y: x + unichr(0x002E) + y, asciiLabels)) + + +def unicode_to_ascii_url(url, prepend_scheme): + ''' + Converts the inputed unicode url into a US-ASCII equivalent. This function + goes a little beyond RFC 3490, which is limited in scope to the domain name + (authority) only. Here, the functionality is expanded to what was observed + on Wikipedia on 2009-Jan-22: + + Component Can Use Unicode? + --------- ---------------- + scheme No + authority Yes + path Yes + query Yes + fragment No + + The authority component gets converted to punycode, but occurrences of + unicode in other components get converted into a pair of URI escapes (we + assume 4-byte unicode). 
E.g. the unicode character U+4E2D will be + converted into '%4E%2D'. Testing with Firefox v3.0.5 has shown that it can + understand this kind of URI encoding. + + :param url: unicode string, the URL to convert from unicode into US-ASCII + :param prepend_scheme: string, a protocol scheme to prepend to the URL if + we're having trouble parsing it. + e.g. "http". Input None to disable this functionality + :returns: a US-ASCII equivalent of the inputed url + :rtype: string + + @author: Jonathan Benn + ''' + #convert the authority component of the URL into an ASCII punycode string, + #but encode the rest using the regular URI character encoding + + groups = url_split_regex.match(url).groups() + #If no authority was found + if not groups[3]: + #Try appending a scheme to see if that fixes the problem + scheme_to_prepend = prepend_scheme or 'http' + groups = url_split_regex.match( + unicode(scheme_to_prepend) + u'://' + url).groups() + #if we still can't find the authority + if not groups[3]: + raise Exception('No authority component found, '+ \ + 'could not decode unicode to US-ASCII') + + #We're here if we found an authority, let's rebuild the URL + scheme = groups[1] + authority = groups[3] + path = groups[4] or '' + query = groups[5] or '' + fragment = groups[7] or '' + + if prepend_scheme: + scheme = str(scheme) + '://' + else: + scheme = '' + return scheme + unicode_to_ascii_authority(authority) +\ + escape_unicode(path) + escape_unicode(query) + str(fragment) + + +class IS_GENERIC_URL(Validator): + """ + Rejects a URL string if any of the following is true: + * The string is empty or None + * The string uses characters that are not allowed in a URL + * The URL scheme specified (if one is specified) is not valid + + Based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html + + This function only checks the URL's syntax. It does not check that the URL + points to a real document, for example, or that it otherwise makes sense + semantically. 
This function does automatically prepend 'http://' in front + of a URL if and only if that's necessary to successfully parse the URL. + Please note that a scheme will be prepended only for rare cases + (e.g. 'google.ca:80') + + The list of allowed schemes is customizable with the allowed_schemes + parameter. If you exclude None from the list, then abbreviated URLs + (lacking a scheme such as 'http') will be rejected. + + The default prepended scheme is customizable with the prepend_scheme + parameter. If you set prepend_scheme to None then prepending will be + disabled. URLs that require prepending to parse will still be accepted, + but the return value will not be modified. + + @author: Jonathan Benn + + >>> IS_GENERIC_URL()('http://user@abc.com') + ('http://user@abc.com', None) + + """ + + def __init__( + self, + error_message='enter a valid URL', + allowed_schemes=None, + prepend_scheme=None, + ): + """ + :param error_message: a string, the error message to give the end user + if the URL does not validate + :param allowed_schemes: a list containing strings or None. Each element + is a scheme the inputed URL is allowed to use + :param prepend_scheme: a string, this scheme is prepended if it's + necessary to make the URL valid + """ + + self.error_message = error_message + if allowed_schemes is None: + self.allowed_schemes = all_url_schemes + else: + self.allowed_schemes = allowed_schemes + self.prepend_scheme = prepend_scheme + if self.prepend_scheme not in self.allowed_schemes: + raise SyntaxError, \ + "prepend_scheme='%s' is not in allowed_schemes=%s" \ + % (self.prepend_scheme, self.allowed_schemes) + + def __call__(self, value): + """ + :param value: a string, the URL to validate + :returns: a tuple, where tuple[0] is the inputed value (possible + prepended with prepend_scheme), and tuple[1] is either + None (success!) 
or the string error_message + """ + try: + # if the URL does not misuse the '%' character + if not re.compile( + r"%[^0-9A-Fa-f]{2}|%[^0-9A-Fa-f][0-9A-Fa-f]|%[0-9A-Fa-f][^0-9A-Fa-f]|%$|%[0-9A-Fa-f]$|%[^0-9A-Fa-f]$" + ).search(value): + # if the URL is only composed of valid characters + if re.compile( + r"[A-Za-z0-9;/?:@&=+$,\-_\.!~*'\(\)%#]+$").match(value): + # Then split up the URL into its components and check on + # the scheme + scheme = url_split_regex.match(value).group(2) + # Clean up the scheme before we check it + if not scheme is None: + scheme = urllib.unquote(scheme).lower() + # If the scheme really exists + if scheme in self.allowed_schemes: + # Then the URL is valid + return (value, None) + else: + # else, for the possible case of abbreviated URLs with + # ports, check to see if adding a valid scheme fixes + # the problem (but only do this if it doesn't have + # one already!) + if not re.compile('://').search(value) and None\ + in self.allowed_schemes: + schemeToUse = self.prepend_scheme or 'http' + prependTest = self.__call__(schemeToUse + + '://' + value) + # if the prepend test succeeded + if prependTest[1] is None: + # if prepending in the output is enabled + if self.prepend_scheme: + return prependTest + else: + # else return the original, + # non-prepended value + return (value, None) + except: + pass + # else the URL is not valid + return (value, translate(self.error_message)) + +# Sources (obtained 2008-Nov-11): +# http://en.wikipedia.org/wiki/Top-level_domain +# http://www.iana.org/domains/root/db/ + +official_top_level_domains = [ + 'ac', + 'ad', + 'ae', + 'aero', + 'af', + 'ag', + 'ai', + 'al', + 'am', + 'an', + 'ao', + 'aq', + 'ar', + 'arpa', + 'as', + 'asia', + 'at', + 'au', + 'aw', + 'ax', + 'az', + 'ba', + 'bb', + 'bd', + 'be', + 'bf', + 'bg', + 'bh', + 'bi', + 'biz', + 'bj', + 'bl', + 'bm', + 'bn', + 'bo', + 'br', + 'bs', + 'bt', + 'bv', + 'bw', + 'by', + 'bz', + 'ca', + 'cat', + 'cc', + 'cd', + 'cf', + 'cg', + 'ch', + 'ci', + 'ck', + 
'cl', + 'cm', + 'cn', + 'co', + 'com', + 'coop', + 'cr', + 'cu', + 'cv', + 'cx', + 'cy', + 'cz', + 'de', + 'dj', + 'dk', + 'dm', + 'do', + 'dz', + 'ec', + 'edu', + 'ee', + 'eg', + 'eh', + 'er', + 'es', + 'et', + 'eu', + 'example', + 'fi', + 'fj', + 'fk', + 'fm', + 'fo', + 'fr', + 'ga', + 'gb', + 'gd', + 'ge', + 'gf', + 'gg', + 'gh', + 'gi', + 'gl', + 'gm', + 'gn', + 'gov', + 'gp', + 'gq', + 'gr', + 'gs', + 'gt', + 'gu', + 'gw', + 'gy', + 'hk', + 'hm', + 'hn', + 'hr', + 'ht', + 'hu', + 'id', + 'ie', + 'il', + 'im', + 'in', + 'info', + 'int', + 'invalid', + 'io', + 'iq', + 'ir', + 'is', + 'it', + 'je', + 'jm', + 'jo', + 'jobs', + 'jp', + 'ke', + 'kg', + 'kh', + 'ki', + 'km', + 'kn', + 'kp', + 'kr', + 'kw', + 'ky', + 'kz', + 'la', + 'lb', + 'lc', + 'li', + 'lk', + 'localhost', + 'lr', + 'ls', + 'lt', + 'lu', + 'lv', + 'ly', + 'ma', + 'mc', + 'md', + 'me', + 'mf', + 'mg', + 'mh', + 'mil', + 'mk', + 'ml', + 'mm', + 'mn', + 'mo', + 'mobi', + 'mp', + 'mq', + 'mr', + 'ms', + 'mt', + 'mu', + 'museum', + 'mv', + 'mw', + 'mx', + 'my', + 'mz', + 'na', + 'name', + 'nc', + 'ne', + 'net', + 'nf', + 'ng', + 'ni', + 'nl', + 'no', + 'np', + 'nr', + 'nu', + 'nz', + 'om', + 'org', + 'pa', + 'pe', + 'pf', + 'pg', + 'ph', + 'pk', + 'pl', + 'pm', + 'pn', + 'pr', + 'pro', + 'ps', + 'pt', + 'pw', + 'py', + 'qa', + 're', + 'ro', + 'rs', + 'ru', + 'rw', + 'sa', + 'sb', + 'sc', + 'sd', + 'se', + 'sg', + 'sh', + 'si', + 'sj', + 'sk', + 'sl', + 'sm', + 'sn', + 'so', + 'sr', + 'st', + 'su', + 'sv', + 'sy', + 'sz', + 'tc', + 'td', + 'tel', + 'test', + 'tf', + 'tg', + 'th', + 'tj', + 'tk', + 'tl', + 'tm', + 'tn', + 'to', + 'tp', + 'tr', + 'travel', + 'tt', + 'tv', + 'tw', + 'tz', + 'ua', + 'ug', + 'uk', + 'um', + 'us', + 'uy', + 'uz', + 'va', + 'vc', + 've', + 'vg', + 'vi', + 'vn', + 'vu', + 'wf', + 'ws', + 'xn--0zwm56d', + 'xn--11b5bs3a9aj6g', + 'xn--80akhbyknj4f', + 'xn--9t4b11yi5a', + 'xn--deba0ad', + 'xn--g6w251d', + 'xn--hgbk6aj7f53bba', + 'xn--hlcj6aya9esc7a', + 'xn--jxalpdlp', + 
'xn--kgbechtv', + 'xn--zckzah', + 'ye', + 'yt', + 'yu', + 'za', + 'zm', + 'zw', + ] + + +class IS_HTTP_URL(Validator): + """ + Rejects a URL string if any of the following is true: + * The string is empty or None + * The string uses characters that are not allowed in a URL + * The string breaks any of the HTTP syntactic rules + * The URL scheme specified (if one is specified) is not 'http' or 'https' + * The top-level domain (if a host name is specified) does not exist + + Based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html + + This function only checks the URL's syntax. It does not check that the URL + points to a real document, for example, or that it otherwise makes sense + semantically. This function does automatically prepend 'http://' in front + of a URL in the case of an abbreviated URL (e.g. 'google.ca'). + + The list of allowed schemes is customizable with the allowed_schemes + parameter. If you exclude None from the list, then abbreviated URLs + (lacking a scheme such as 'http') will be rejected. + + The default prepended scheme is customizable with the prepend_scheme + parameter. If you set prepend_scheme to None then prepending will be + disabled. URLs that require prepending to parse will still be accepted, + but the return value will not be modified. 
+ + @author: Jonathan Benn + + >>> IS_HTTP_URL()('http://1.2.3.4') + ('http://1.2.3.4', None) + >>> IS_HTTP_URL()('http://abc.com') + ('http://abc.com', None) + >>> IS_HTTP_URL()('https://abc.com') + ('https://abc.com', None) + >>> IS_HTTP_URL()('httpx://abc.com') + ('httpx://abc.com', 'enter a valid URL') + >>> IS_HTTP_URL()('http://abc.com:80') + ('http://abc.com:80', None) + >>> IS_HTTP_URL()('http://user@abc.com') + ('http://user@abc.com', None) + >>> IS_HTTP_URL()('http://user@1.2.3.4') + ('http://user@1.2.3.4', None) + + """ + + def __init__( + self, + error_message='enter a valid URL', + allowed_schemes=None, + prepend_scheme='http', + ): + """ + :param error_message: a string, the error message to give the end user + if the URL does not validate + :param allowed_schemes: a list containing strings or None. Each element + is a scheme the inputed URL is allowed to use + :param prepend_scheme: a string, this scheme is prepended if it's + necessary to make the URL valid + """ + + self.error_message = error_message + if allowed_schemes is None: + self.allowed_schemes = http_schemes + else: + self.allowed_schemes = allowed_schemes + self.prepend_scheme = prepend_scheme + + for i in self.allowed_schemes: + if i not in http_schemes: + raise SyntaxError, \ + "allowed_scheme value '%s' is not in %s" % \ + (i, http_schemes) + + if self.prepend_scheme not in self.allowed_schemes: + raise SyntaxError, \ + "prepend_scheme='%s' is not in allowed_schemes=%s" % \ + (self.prepend_scheme, self.allowed_schemes) + + def __call__(self, value): + """ + :param value: a string, the URL to validate + :returns: a tuple, where tuple[0] is the inputed value + (possible prepended with prepend_scheme), and tuple[1] is either + None (success!) 
or the string error_message + """ + + try: + # if the URL passes generic validation + x = IS_GENERIC_URL(error_message=self.error_message, + allowed_schemes=self.allowed_schemes, + prepend_scheme=self.prepend_scheme) + if x(value)[1] is None: + componentsMatch = url_split_regex.match(value) + authority = componentsMatch.group(4) + # if there is an authority component + if authority: + # if authority is a valid IP address + if re.compile( + "([\w.!~*'|;:&=+$,-]+@)?\d+\.\d+\.\d+\.\d+(:\d*)*$").match(authority): + # Then this HTTP URL is valid + return (value, None) + else: + # else if authority is a valid domain name + domainMatch = \ + re.compile( + "([\w.!~*'|;:&=+$,-]+@)?(([A-Za-z0-9]+[A-Za-z0-9\-]*[A-Za-z0-9]+\.)*([A-Za-z0-9]+\.)*)*([A-Za-z]+[A-Za-z0-9\-]*[A-Za-z0-9]+)\.?(:\d*)*$" + ).match(authority) + if domainMatch: + # if the top-level domain really exists + if domainMatch.group(5).lower()\ + in official_top_level_domains: + # Then this HTTP URL is valid + return (value, None) + else: + # else this is a relative/abbreviated URL, which will parse + # into the URL's path component + path = componentsMatch.group(5) + # relative case: if this is a valid path (if it starts with + # a slash) + if re.compile('/').match(path): + # Then this HTTP URL is valid + return (value, None) + else: + # abbreviated case: if we haven't already, prepend a + # scheme and see if it fixes the problem + if not re.compile('://').search(value): + schemeToUse = self.prepend_scheme or 'http' + prependTest = self.__call__(schemeToUse + + '://' + value) + # if the prepend test succeeded + if prependTest[1] is None: + # if prepending in the output is enabled + if self.prepend_scheme: + return prependTest + else: + # else return the original, non-prepended + # value + return (value, None) + except: + pass + # else the HTTP URL is not valid + return (value, translate(self.error_message)) + + +class IS_URL(Validator): + """ + Rejects a URL string if any of the following is true: + * The string 
is empty or None + * The string uses characters that are not allowed in a URL + * The string breaks any of the HTTP syntactic rules + * The URL scheme specified (if one is specified) is not 'http' or 'https' + * The top-level domain (if a host name is specified) does not exist + + (These rules are based on RFC 2616: http://www.faqs.org/rfcs/rfc2616.html) + + This function only checks the URL's syntax. It does not check that the URL + points to a real document, for example, or that it otherwise makes sense + semantically. This function does automatically prepend 'http://' in front + of a URL in the case of an abbreviated URL (e.g. 'google.ca'). + + If the parameter mode='generic' is used, then this function's behavior + changes. It then rejects a URL string if any of the following is true: + * The string is empty or None + * The string uses characters that are not allowed in a URL + * The URL scheme specified (if one is specified) is not valid + + (These rules are based on RFC 2396: http://www.faqs.org/rfcs/rfc2396.html) + + The list of allowed schemes is customizable with the allowed_schemes + parameter. If you exclude None from the list, then abbreviated URLs + (lacking a scheme such as 'http') will be rejected. + + The default prepended scheme is customizable with the prepend_scheme + parameter. If you set prepend_scheme to None then prepending will be + disabled. URLs that require prepending to parse will still be accepted, + but the return value will not be modified. + + IS_URL is compatible with the Internationalized Domain Name (IDN) standard + specified in RFC 3490 (http://tools.ietf.org/html/rfc3490). As a result, + URLs can be regular strings or unicode strings. + If the URL's domain component (e.g. google.ca) contains non-US-ASCII + letters, then the domain will be converted into Punycode (defined in + RFC 3492, http://tools.ietf.org/html/rfc3492). 
IS_URL goes a bit beyond + the standards, and allows non-US-ASCII characters to be present in the path + and query components of the URL as well. These non-US-ASCII characters will + be escaped using the standard '%20' type syntax. e.g. the unicode + character with hex code 0x4e86 will become '%4e%86' + + Code Examples:: + + INPUT(_type='text', _name='name', requires=IS_URL()) + >>> IS_URL()('abc.com') + ('http://abc.com', None) + + INPUT(_type='text', _name='name', requires=IS_URL(mode='generic')) + >>> IS_URL(mode='generic')('abc.com') + ('abc.com', None) + + INPUT(_type='text', _name='name', + requires=IS_URL(allowed_schemes=['https'], prepend_scheme='https')) + >>> IS_URL(allowed_schemes=['https'], prepend_scheme='https')('https://abc.com') + ('https://abc.com', None) + + INPUT(_type='text', _name='name', + requires=IS_URL(prepend_scheme='https')) + >>> IS_URL(prepend_scheme='https')('abc.com') + ('https://abc.com', None) + + INPUT(_type='text', _name='name', + requires=IS_URL(mode='generic', allowed_schemes=['ftps', 'https'], + prepend_scheme='https')) + >>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https'], prepend_scheme='https')('https://abc.com') + ('https://abc.com', None) + >>> IS_URL(mode='generic', allowed_schemes=['ftps', 'https', None], prepend_scheme='https')('abc.com') + ('abc.com', None) + + @author: Jonathan Benn + """ + + def __init__( + self, + error_message='enter a valid URL', + mode='http', + allowed_schemes=None, + prepend_scheme='http', + ): + """ + :param error_message: a string, the error message to give the end user + if the URL does not validate + :param allowed_schemes: a list containing strings or None. 
Each element + is a scheme the inputed URL is allowed to use + :param prepend_scheme: a string, this scheme is prepended if it's + necessary to make the URL valid + """ + + self.error_message = error_message + self.mode = mode.lower() + if not self.mode in ['generic', 'http']: + raise SyntaxError, "invalid mode '%s' in IS_URL" % self.mode + self.allowed_schemes = allowed_schemes + + if self.allowed_schemes: + if prepend_scheme not in self.allowed_schemes: + raise SyntaxError, \ + "prepend_scheme='%s' is not in allowed_schemes=%s" \ + % (prepend_scheme, self.allowed_schemes) + + # if allowed_schemes is None, then we will defer testing + # prepend_scheme's validity to a sub-method + + self.prepend_scheme = prepend_scheme + + def __call__(self, value): + """ + :param value: a unicode or regular string, the URL to validate + :returns: a (string, string) tuple, where tuple[0] is the modified + input value and tuple[1] is either None (success!) or the + string error_message. The input value will never be modified in the + case of an error. However, if there is success then the input URL + may be modified to (1) prepend a scheme, and/or (2) convert a + non-compliant unicode URL into a compliant US-ASCII version. 
+ """ + + if self.mode == 'generic': + subMethod = IS_GENERIC_URL(error_message=self.error_message, + allowed_schemes=self.allowed_schemes, + prepend_scheme=self.prepend_scheme) + elif self.mode == 'http': + subMethod = IS_HTTP_URL(error_message=self.error_message, + allowed_schemes=self.allowed_schemes, + prepend_scheme=self.prepend_scheme) + else: + raise SyntaxError, "invalid mode '%s' in IS_URL" % self.mode + + if type(value) != unicode: + return subMethod(value) + else: + try: + asciiValue = unicode_to_ascii_url(value, self.prepend_scheme) + except Exception: + #If we are not able to convert the unicode url into a + # US-ASCII URL, then the URL is not valid + return (value, translate(self.error_message)) + + methodResult = subMethod(asciiValue) + #if the validation of the US-ASCII version of the value failed + if not methodResult[1] is None: + # then return the original input value, not the US-ASCII version + return (value, methodResult[1]) + else: + return methodResult + + +regex_time = re.compile( + '((?P[0-9]+))([^0-9 ]+(?P[0-9 ]+))?([^0-9ap ]+(?P[0-9]*))?((?P[ap]m))?') + + +class IS_TIME(Validator): + """ + example:: + + INPUT(_type='text', _name='name', requires=IS_TIME()) + + understands the following formats + hh:mm:ss [am/pm] + hh:mm [am/pm] + hh [am/pm] + + [am/pm] is optional, ':' can be replaced by any other non-space non-digit + + >>> IS_TIME()('21:30') + (datetime.time(21, 30), None) + >>> IS_TIME()('21-30') + (datetime.time(21, 30), None) + >>> IS_TIME()('21.30') + (datetime.time(21, 30), None) + >>> IS_TIME()('21:30:59') + (datetime.time(21, 30, 59), None) + >>> IS_TIME()('5:30') + (datetime.time(5, 30), None) + >>> IS_TIME()('5:30 am') + (datetime.time(5, 30), None) + >>> IS_TIME()('5:30 pm') + (datetime.time(17, 30), None) + >>> IS_TIME()('5:30 whatever') + ('5:30 whatever', 'enter time as hh:mm:ss (seconds, am, pm optional)') + >>> IS_TIME()('5:30 20') + ('5:30 20', 'enter time as hh:mm:ss (seconds, am, pm optional)') + >>> IS_TIME()('24:30') 
    ('24:30', 'enter time as hh:mm:ss (seconds, am, pm optional)')
    >>> IS_TIME()('21:60')
    ('21:60', 'enter time as hh:mm:ss (seconds, am, pm optional)')
    >>> IS_TIME()('21:30::')
    ('21:30::', 'enter time as hh:mm:ss (seconds, am, pm optional)')
    >>> IS_TIME()('')
    ('', 'enter time as hh:mm:ss (seconds, am, pm optional)')
    """

    def __init__(self, error_message='enter time as hh:mm:ss (seconds, am, pm optional)'):
        # error_message: message returned when the value cannot be parsed
        self.error_message = error_message

    def __call__(self, value):
        # Parse with regex_time; any failure (no match -> AttributeError on
        # .group, non-integer or out-of-range component -> ValueError)
        # falls through to the error return at the bottom.
        try:
            ivalue = value
            value = regex_time.match(value.lower())
            # the hour is mandatory; minutes and seconds default to 0
            (h, m, s) = (int(value.group('h')), 0, 0)
            if not value.group('m') is None:
                m = int(value.group('m'))
            if not value.group('s') is None:
                s = int(value.group('s'))
            # 12-hour clock: 'pm' shifts 1..11 to 13..23; 12pm stays 12.
            # NOTE(review): '12 am' is returned as 12:00, not 00:00 --
            # confirm that is intended.
            if value.group('d') == 'pm' and 0 < h < 12:
                h = h + 12
            if not (h in range(24) and m in range(60) and s
                    in range(60)):
                raise ValueError\
                    ('Hours or minutes or seconds are outside of allowed range')
            value = datetime.time(h, m, s)
            return (value, None)
        except AttributeError:
            # regex_time.match returned None
            pass
        except ValueError:
            # bad or out-of-range component
            pass
        return (ivalue, translate(self.error_message))


class IS_DATE(Validator):
    """
    example::

        INPUT(_type='text', _name='name', requires=IS_DATE())

    date has to be in the ISO8960 format YYYY-MM-DD
    """

    def __init__(self, format='%Y-%m-%d',
                 error_message='enter date as %(format)s'):
        # format: time.strptime format used for parsing; %(format)s in
        # error_message is filled with a readable sample from IS_DATETIME.nice
        self.format = str(format)
        self.error_message = str(error_message)

    def __call__(self, value):
        # a date (or datetime) instance is accepted unchanged
        if isinstance(value,datetime.date):
            return (value,None)
        try:
            (y, m, d, hh, mm, ss, t0, t1, t2) = \
                time.strptime(value, str(self.format))
            value = datetime.date(y, m, d)
            return (value, None)
        except:
            # any parse failure yields the formatted error message
            return (value, translate(self.error_message) % IS_DATETIME.nice(self.format))

    def formatter(self, value):
        # strftime cannot handle years < 1900 on some platforms, so the
        # year is substituted textually and a post-1900 stand-in date is
        # used for the remaining directives
        format = self.format
        year = value.year
        y = '%.4i' % year
        format = format.replace('%y',y[-2:])
        format = format.replace('%Y',y)
        if year<1900:
            year = 2000
        d = datetime.date(year,value.month,value.day)
return d.strftime(format) + + +class IS_DATETIME(Validator): + """ + example:: + + INPUT(_type='text', _name='name', requires=IS_DATETIME()) + + datetime has to be in the ISO8960 format YYYY-MM-DD hh:mm:ss + """ + + isodatetime = '%Y-%m-%d %H:%M:%S' + + @staticmethod + def nice(format): + code=(('%Y','1963'), + ('%y','63'), + ('%d','28'), + ('%m','08'), + ('%b','Aug'), + ('%b','August'), + ('%H','14'), + ('%I','02'), + ('%p','PM'), + ('%M','30'), + ('%S','59')) + for (a,b) in code: + format=format.replace(a,b) + return dict(format=format) + + def __init__(self, format='%Y-%m-%d %H:%M:%S', + error_message='enter date and time as %(format)s'): + self.format = str(format) + self.error_message = str(error_message) + + def __call__(self, value): + if isinstance(value,datetime.datetime): + return (value,None) + try: + (y, m, d, hh, mm, ss, t0, t1, t2) = \ + time.strptime(value, str(self.format)) + value = datetime.datetime(y, m, d, hh, mm, ss) + return (value, None) + except: + return (value, translate(self.error_message) % IS_DATETIME.nice(self.format)) + + def formatter(self, value): + format = self.format + year = value.year + y = '%.4i' % year + format = format.replace('%y',y[-2:]) + format = format.replace('%Y',y) + if year<1900: + year = 2000 + d = datetime.datetime(year,value.month,value.day,value.hour,value.minute,value.second) + return d.strftime(format) + +class IS_DATE_IN_RANGE(IS_DATE): + """ + example:: + + >>> v = IS_DATE_IN_RANGE(minimum=datetime.date(2008,1,1), \ + maximum=datetime.date(2009,12,31), \ + format="%m/%d/%Y",error_message="oops") + + >>> v('03/03/2008') + (datetime.date(2008, 3, 3), None) + + >>> v('03/03/2010') + (datetime.date(2010, 3, 3), 'oops') + + >>> v(datetime.date(2008,3,3)) + (datetime.date(2008, 3, 3), None) + + >>> v(datetime.date(2010,3,3)) + (datetime.date(2010, 3, 3), 'oops') + + """ + def __init__(self, + minimum = None, + maximum = None, + format='%Y-%m-%d', + error_message = None): + self.minimum = minimum + self.maximum = 
maximum + if error_message is None: + if minimum is None: + error_message = "enter date on or before %(max)s" + elif maximum is None: + error_message = "enter date on or after %(min)s" + else: + error_message = "enter date in range %(min)s %(max)s" + d = dict(min=minimum, max=maximum) + IS_DATE.__init__(self, + format = format, + error_message = error_message % d) + + def __call__(self, value): + (value, msg) = IS_DATE.__call__(self,value) + if msg is not None: + return (value, msg) + if self.minimum and self.minimum > value: + return (value, translate(self.error_message)) + if self.maximum and value > self.maximum: + return (value, translate(self.error_message)) + return (value, None) + + +class IS_DATETIME_IN_RANGE(IS_DATETIME): + """ + example:: + + >>> v = IS_DATETIME_IN_RANGE(\ + minimum=datetime.datetime(2008,1,1,12,20), \ + maximum=datetime.datetime(2009,12,31,12,20), \ + format="%m/%d/%Y %H:%M",error_message="oops") + >>> v('03/03/2008 12:40') + (datetime.datetime(2008, 3, 3, 12, 40), None) + + >>> v('03/03/2010 10:34') + (datetime.datetime(2010, 3, 3, 10, 34), 'oops') + + >>> v(datetime.datetime(2008,3,3,0,0)) + (datetime.datetime(2008, 3, 3, 0, 0), None) + + >>> v(datetime.datetime(2010,3,3,0,0)) + (datetime.datetime(2010, 3, 3, 0, 0), 'oops') + """ + def __init__(self, + minimum = None, + maximum = None, + format = '%Y-%m-%d %H:%M:%S', + error_message = None): + self.minimum = minimum + self.maximum = maximum + if error_message is None: + if minimum is None: + error_message = "enter date and time on or before %(max)s" + elif maximum is None: + error_message = "enter date and time on or after %(min)s" + else: + error_message = "enter date and time in range %(min)s %(max)s" + d = dict(min = minimum, max = maximum) + IS_DATETIME.__init__(self, + format = format, + error_message = error_message % d) + + def __call__(self, value): + (value, msg) = IS_DATETIME.__call__(self, value) + if msg is not None: + return (value, msg) + if self.minimum and self.minimum 
> value:
            # (tail of IS_DATETIME_IN_RANGE.__call__: value below minimum)
            return (value, translate(self.error_message))
        if self.maximum and value > self.maximum:
            return (value, translate(self.error_message))
        return (value, None)


class IS_LIST_OF(Validator):
    # Applies another validator to every element of a list value; a
    # non-list value is treated as a one-element list.  On the first
    # failing element the ORIGINAL value is returned with that error;
    # on success a new list of the converted elements is returned.

    def __init__(self, other):
        # other: per-item validator obeying the
        # (value) -> (new_value, error_or_None) contract
        self.other = other

    def __call__(self, value):
        ivalue = value
        if not isinstance(value, list):
            ivalue = [ivalue]
        new_value = []
        for item in ivalue:
            (v, e) = self.other(item)
            if e:
                return (value, e)
            else:
                new_value.append(v)
        return (new_value, None)


class IS_LOWER(Validator):
    """
    convert to lower case

    >>> IS_LOWER()('ABC')
    ('abc', None)
    >>> IS_LOWER()('Ñ')
    ('\\xc3\\xb1', None)
    """

    def __call__(self, value):
        # decode as UTF-8 so non-ASCII letters lowercase correctly, then
        # re-encode; this validator never reports an error
        return (value.decode('utf8').lower().encode('utf8'), None)


class IS_UPPER(Validator):
    """
    convert to upper case

    >>> IS_UPPER()('abc')
    ('ABC', None)
    >>> IS_UPPER()('ñ')
    ('\\xc3\\x91', None)
    """

    def __call__(self, value):
        # round-trip through unicode for correct casing of non-ASCII
        # letters; never reports an error
        return (value.decode('utf8').upper().encode('utf8'), None)


def urlify(value, maxlen=80, keep_underscores=False):
    """
    Convert incoming string to a simplified ASCII subset.
+ if (keep_underscores): underscores are retained in the string + else: underscores are translated to hyphens (default) + """ + s = value.lower() # to lowercase + s = s.decode('utf-8') # to utf-8 + s = unicodedata.normalize('NFKD', s) # normalize eg è => e, ñ => n + s = s.encode('ASCII', 'ignore') # encode as ASCII + s = re.sub('&\w+;', '', s) # strip html entities + if keep_underscores: + s = re.sub('\s+', '-', s) # whitespace to hyphens + s = re.sub('[^\w\-]', '', s) # strip all but alphanumeric/underscore/hyphen + else: + s = re.sub('[\s_]+', '-', s) # whitespace & underscores to hyphens + s = re.sub('[^a-z0-9\-]', '', s) # strip all but alphanumeric/hyphen + s = re.sub('[-_][-_]+', '-', s) # collapse strings of hyphens + s = s.strip('-') # remove leading and trailing hyphens + return s[:maxlen] # enforce maximum length + + +class IS_SLUG(Validator): + """ + convert arbitrary text string to a slug + + >>> IS_SLUG()('abc123') + ('abc123', None) + >>> IS_SLUG()('ABC123') + ('abc123', None) + >>> IS_SLUG()('abc-123') + ('abc-123', None) + >>> IS_SLUG()('abc--123') + ('abc-123', None) + >>> IS_SLUG()('abc 123') + ('abc-123', None) + >>> IS_SLUG()('abc\t_123') + ('abc-123', None) + >>> IS_SLUG()('-abc-') + ('abc', None) + >>> IS_SLUG()('--a--b--_ -c--') + ('a-b-c', None) + >>> IS_SLUG()('abc&123') + ('abc123', None) + >>> IS_SLUG()('abc&123&def') + ('abc123def', None) + >>> IS_SLUG()('ñ') + ('n', None) + >>> IS_SLUG(maxlen=4)('abc123') + ('abc1', None) + >>> IS_SLUG()('abc_123') + ('abc-123', None) + >>> IS_SLUG(keep_underscores=False)('abc_123') + ('abc-123', None) + >>> IS_SLUG(keep_underscores=True)('abc_123') + ('abc_123', None) + >>> IS_SLUG(check=False)('abc') + ('abc', None) + >>> IS_SLUG(check=True)('abc') + ('abc', None) + >>> IS_SLUG(check=False)('a bc') + ('a-bc', None) + >>> IS_SLUG(check=True)('a bc') + ('a bc', 'must be slug') + """ + + @staticmethod + def urlify(value, maxlen=80, keep_underscores=False): + return urlify(value, maxlen, keep_underscores) 

    def __init__(self, maxlen=80, check=False, error_message='must be slug', keep_underscores=False):
        # check=False (default): the value is converted with urlify();
        # check=True: the value must ALREADY be a valid slug
        self.maxlen = maxlen
        self.check = check
        self.error_message = error_message
        self.keep_underscores = keep_underscores

    def __call__(self, value):
        if self.check and value != urlify(value, self.maxlen, self.keep_underscores):
            return (value, translate(self.error_message))
        return (urlify(value,self.maxlen, self.keep_underscores), None)

class IS_EMPTY_OR(Validator):
    """
    converts an 'empty' value to null; otherwise applies the wrapped
    validator(s)

    >>> IS_EMPTY_OR(IS_EMAIL())('abc@def.com')
    ('abc@def.com', None)
    >>> IS_EMPTY_OR(IS_EMAIL())(' ')
    (None, None)
    >>> IS_EMPTY_OR(IS_EMAIL(), null='abc')(' ')
    ('abc', None)
    >>> IS_EMPTY_OR(IS_EMAIL(), null='abc', empty_regex='def')('def')
    ('abc', None)
    >>> IS_EMPTY_OR(IS_EMAIL())('abc')
    ('abc', 'enter a valid email address')
    >>> IS_EMPTY_OR(IS_EMAIL())(' abc ')
    ('abc', 'enter a valid email address')
    """

    def __init__(self, other, null=None, empty_regex=None):
        # other: a validator, or a list/tuple of validators, applied when
        #   the value is not empty
        # null: the value returned when the input is empty
        # empty_regex: optional regex; a match also counts as empty
        (self.other, self.null) = (other, null)
        if empty_regex is not None:
            self.empty_regex = re.compile(empty_regex)
        else:
            self.empty_regex = None
        # mirror attributes of the wrapped validator so form widgets
        # (dropdowns, multi-selects) keep working through the wrapper
        if hasattr(other, 'multiple'):
            self.multiple = other.multiple
        if hasattr(other, 'options'):
            self.options=self._options

    def _options(self):
        # prepend a blank choice so the 'empty' value is selectable
        options = self.other.options()
        if (not options or options[0][0]!='') and not self.multiple:
            options.insert(0,('',''))
        return options

    def set_self_id(self, id):
        # forward the current record id to the wrapped validator(s)
        if isinstance(self.other, (list, tuple)):
            for item in self.other:
                if hasattr(item, 'set_self_id'):
                    item.set_self_id(id)
        else:
            if hasattr(self.other, 'set_self_id'):
                self.other.set_self_id(id)

    def __call__(self, value):
        value, empty = is_empty(value, empty_regex=self.empty_regex)
        if empty:
            return (self.null, None)
        if isinstance(self.other, (list, tuple)):
            # chain the validators, stopping at the first error
            # NOTE(review): if self.other is an EMPTY list/tuple, 'error'
            # is unbound here -- confirm callers never pass one
            for item in self.other:
                value, error = item(value)
                if error: break
            return value, error
        else:
            return self.other(value)

    def formatter(self, value):
        # delegate display formatting to the wrapped validator, if any
        if hasattr(self.other, 'formatter'):
            return self.other.formatter(value)
        return value

IS_NULL_OR = IS_EMPTY_OR # for backward compatibility


class CLEANUP(Validator):
    """
    example::

        INPUT(_type='text', _name='name', requires=CLEANUP())

    removes special characters on validation
    """

    def __init__(self, regex='[^\x09\x0a\x0d\x20-\x7e]'):
        # the default regex strips everything outside tab, CR/LF and
        # printable ASCII
        self.regex = re.compile(regex)

    def __call__(self, value):
        v = self.regex.sub('',str(value).strip())
        return (v, None)


class CRYPT(object):
    """
    example::

        INPUT(_type='text', _name='name', requires=CRYPT())

    encodes the value on validation with a digest.

    If no arguments are provided CRYPT uses the MD5 algorithm.
    If the key argument is provided the HMAC+MD5 algorithm is used.
    If the digest_alg is specified this is used to replace the
    MD5 with, for example, SHA512. The digest_alg can be
    the name of a hashlib algorithm as a string or the algorithm itself.

    min_length is the minimal password length (default 4) - IS_STRONG for serious security
    error_message is the message if password is too short

    Notice that an empty password is accepted but invalid. It will not allow login back.
    Stores junk as hashed password.
+ """ + + def __init__(self, key=None, digest_alg='md5', min_length=0, error_message='too short'): + self.key = key + self.digest_alg = digest_alg + self.min_length = min_length + self.error_message = error_message + + def __call__(self, value): + if not value and self.min_length>0: + value = web2py_uuid() + elif len(value),.:;{}[]|', + invalid=' "', error_message=None): + self.min = min + self.max = max + self.upper = upper + self.lower = lower + self.number = number + self.special = special + self.specials = specials + self.invalid = invalid + self.error_message = error_message + + def __call__(self, value): + failures = [] + if type(self.min) == int and self.min > 0: + if not len(value) >= self.min: + failures.append("Minimum length is %s" % self.min) + if type(self.max) == int and self.max > 0: + if not len(value) <= self.max: + failures.append("Maximum length is %s" % self.max) + if type(self.special) == int: + all_special = [ch in value for ch in self.specials] + if self.special > 0: + if not all_special.count(True) >= self.special: + failures.append("Must include at least %s of the following : %s" % (self.special, self.specials)) + if self.invalid: + all_invalid = [ch in value for ch in self.invalid] + if all_invalid.count(True) > 0: + failures.append("May not contain any of the following: %s" \ + % self.invalid) + if type(self.upper) == int: + all_upper = re.findall("[A-Z]", value) + if self.upper > 0: + if not len(all_upper) >= self.upper: + failures.append("Must include at least %s upper case" \ + % str(self.upper)) + else: + if len(all_upper) > 0: + failures.append("May not include any upper case letters") + if type(self.lower) == int: + all_lower = re.findall("[a-z]", value) + if self.lower > 0: + if not len(all_lower) >= self.lower: + failures.append("Must include at least %s lower case" \ + % str(self.lower)) + else: + if len(all_lower) > 0: + failures.append("May not include any lower case letters") + if type(self.number) == int: + all_number = 
re.findall("[0-9]", value) + if self.number > 0: + numbers = "number" + if self.number > 1: + numbers = "numbers" + if not len(all_number) >= self.number: + failures.append("Must include at least %s %s" \ + % (str(self.number), numbers)) + else: + if len(all_number) > 0: + failures.append("May not include any numbers") + if len(failures) == 0: + return (value, None) + if not translate(self.error_message): + from html import XML + return (value, XML('
    '.join(failures))) + else: + return (value, translate(self.error_message)) + + +class IS_IN_SUBSET(IS_IN_SET): + + def __init__(self, *a, **b): + IS_IN_SET.__init__(self, *a, **b) + + def __call__(self, value): + values = re.compile("\w+").findall(str(value)) + failures = [x for x in values if IS_IN_SET.__call__(self, x)[1]] + if failures: + return (value, translate(self.error_message)) + return (value, None) + + +class IS_IMAGE(Validator): + """ + Checks if file uploaded through file input was saved in one of selected + image formats and has dimensions (width and height) within given boundaries. + + Does *not* check for maximum file size (use IS_LENGTH for that). Returns + validation failure if no data was uploaded. + + Supported file formats: BMP, GIF, JPEG, PNG. + + Code parts taken from + http://mail.python.org/pipermail/python-list/2007-June/617126.html + + Arguments: + + extensions: iterable containing allowed *lowercase* image file extensions + ('jpg' extension of uploaded file counts as 'jpeg') + maxsize: iterable containing maximum width and height of the image + minsize: iterable containing minimum width and height of the image + + Use (-1, -1) as minsize to pass image size check. 
+ + Examples:: + + #Check if uploaded file is in any of supported image formats: + INPUT(_type='file', _name='name', requires=IS_IMAGE()) + + #Check if uploaded file is either JPEG or PNG: + INPUT(_type='file', _name='name', + requires=IS_IMAGE(extensions=('jpeg', 'png'))) + + #Check if uploaded file is PNG with maximum size of 200x200 pixels: + INPUT(_type='file', _name='name', + requires=IS_IMAGE(extensions=('png'), maxsize=(200, 200))) + """ + + def __init__(self, + extensions=('bmp', 'gif', 'jpeg', 'png'), + maxsize=(10000, 10000), + minsize=(0, 0), + error_message='invalid image'): + + self.extensions = extensions + self.maxsize = maxsize + self.minsize = minsize + self.error_message = error_message + + def __call__(self, value): + try: + extension = value.filename.rfind('.') + assert extension >= 0 + extension = value.filename[extension + 1:].lower() + if extension == 'jpg': + extension = 'jpeg' + assert extension in self.extensions + if extension == 'bmp': + width, height = self.__bmp(value.file) + elif extension == 'gif': + width, height = self.__gif(value.file) + elif extension == 'jpeg': + width, height = self.__jpeg(value.file) + elif extension == 'png': + width, height = self.__png(value.file) + else: + width = -1 + height = -1 + assert self.minsize[0] <= width <= self.maxsize[0] \ + and self.minsize[1] <= height <= self.maxsize[1] + value.file.seek(0) + return (value, None) + except: + return (value, translate(self.error_message)) + + def __bmp(self, stream): + if stream.read(2) == 'BM': + stream.read(16) + return struct.unpack("= 0xC0 and code <= 0xC3: + return tuple(reversed( + struct.unpack("!xHH", stream.read(5)))) + else: + stream.read(length - 2) + return (-1, -1) + + def __png(self, stream): + if stream.read(8) == '\211PNG\r\n\032\n': + stream.read(4) + if stream.read(4) == "IHDR": + return struct.unpack("!LL", stream.read(8)) + return (-1, -1) + + +class IS_UPLOAD_FILENAME(Validator): + """ + Checks if name and extension of file uploaded 
through file input matches + given criteria. + + Does *not* ensure the file type in any way. Returns validation failure + if no data was uploaded. + + Arguments:: + + filename: filename (before dot) regex + extension: extension (after dot) regex + lastdot: which dot should be used as a filename / extension separator: + True means last dot, eg. file.png -> file / png + False means first dot, eg. file.tar.gz -> file / tar.gz + case: 0 - keep the case, 1 - transform the string into lowercase (default), + 2 - transform the string into uppercase + + If there is no dot present, extension checks will be done against empty + string and filename checks against whole value. + + Examples:: + + #Check if file has a pdf extension (case insensitive): + INPUT(_type='file', _name='name', + requires=IS_UPLOAD_FILENAME(extension='pdf')) + + #Check if file has a tar.gz extension and name starting with backup: + INPUT(_type='file', _name='name', + requires=IS_UPLOAD_FILENAME(filename='backup.*', + extension='tar.gz', lastdot=False)) + + #Check if file has no extension and name matching README + #(case sensitive): + INPUT(_type='file', _name='name', + requires=IS_UPLOAD_FILENAME(filename='^README$', + extension='^$', case=0)) + """ + + def __init__(self, filename=None, extension=None, lastdot=True, case=1, + error_message='enter valid filename'): + if isinstance(filename, str): + filename = re.compile(filename) + if isinstance(extension, str): + extension = re.compile(extension) + self.filename = filename + self.extension = extension + self.lastdot = lastdot + self.case = case + self.error_message = error_message + + def __call__(self, value): + try: + string = value.filename + except: + return (value, translate(self.error_message)) + if self.case == 1: + string = string.lower() + elif self.case == 2: + string = string.upper() + if self.lastdot: + dot = string.rfind('.') + else: + dot = string.find('.') + if dot == -1: + dot = len(string) + if self.filename and not 
self.filename.match(string[:dot]): + return (value, translate(self.error_message)) + elif self.extension and not self.extension.match(string[dot + 1:]): + return (value, translate(self.error_message)) + else: + return (value, None) + + +class IS_IPV4(Validator): + """ + Checks if field's value is an IP version 4 address in decimal form. Can + be set to force addresses from certain range. + + IPv4 regex taken from: http://regexlib.com/REDetails.aspx?regexp_id=1411 + + Arguments: + + minip: lowest allowed address; accepts: + str, eg. 192.168.0.1 + list or tuple of octets, eg. [192, 168, 0, 1] + maxip: highest allowed address; same as above + invert: True to allow addresses only from outside of given range; note + that range boundaries are not matched this way + is_localhost: localhost address treatment: + None (default): indifferent + True (enforce): query address must match localhost address + (127.0.0.1) + False (forbid): query address must not match localhost + address + is_private: same as above, except that query address is checked against + two address ranges: 172.16.0.0 - 172.31.255.255 and + 192.168.0.0 - 192.168.255.255 + is_automatic: same as above, except that query address is checked against + one address range: 169.254.0.0 - 169.254.255.255 + + Minip and maxip may also be lists or tuples of addresses in all above + forms (str, int, list / tuple), allowing setup of multiple address ranges: + + minip = (minip1, minip2, ... minipN) + | | | + | | | + maxip = (maxip1, maxip2, ... maxipN) + + Longer iterable will be truncated to match length of shorter one. 
+ + Examples:: + + #Check for valid IPv4 address: + INPUT(_type='text', _name='name', requires=IS_IPV4()) + + #Check for valid IPv4 address belonging to specific range: + INPUT(_type='text', _name='name', + requires=IS_IPV4(minip='100.200.0.0', maxip='100.200.255.255')) + + #Check for valid IPv4 address belonging to either 100.110.0.0 - + #100.110.255.255 or 200.50.0.0 - 200.50.0.255 address range: + INPUT(_type='text', _name='name', + requires=IS_IPV4(minip=('100.110.0.0', '200.50.0.0'), + maxip=('100.110.255.255', '200.50.0.255'))) + + #Check for valid IPv4 address belonging to private address space: + INPUT(_type='text', _name='name', requires=IS_IPV4(is_private=True)) + + #Check for valid IPv4 address that is not a localhost address: + INPUT(_type='text', _name='name', requires=IS_IPV4(is_localhost=False)) + + >>> IS_IPV4()('1.2.3.4') + ('1.2.3.4', None) + >>> IS_IPV4()('255.255.255.255') + ('255.255.255.255', None) + >>> IS_IPV4()('1.2.3.4 ') + ('1.2.3.4 ', 'enter valid IPv4 address') + >>> IS_IPV4()('1.2.3.4.5') + ('1.2.3.4.5', 'enter valid IPv4 address') + >>> IS_IPV4()('123.123') + ('123.123', 'enter valid IPv4 address') + >>> IS_IPV4()('1111.2.3.4') + ('1111.2.3.4', 'enter valid IPv4 address') + >>> IS_IPV4()('0111.2.3.4') + ('0111.2.3.4', 'enter valid IPv4 address') + >>> IS_IPV4()('256.2.3.4') + ('256.2.3.4', 'enter valid IPv4 address') + >>> IS_IPV4()('300.2.3.4') + ('300.2.3.4', 'enter valid IPv4 address') + >>> IS_IPV4(minip='1.2.3.4', maxip='1.2.3.4')('1.2.3.4') + ('1.2.3.4', None) + >>> IS_IPV4(minip='1.2.3.5', maxip='1.2.3.9', error_message='bad ip')('1.2.3.4') + ('1.2.3.4', 'bad ip') + >>> IS_IPV4(maxip='1.2.3.4', invert=True)('127.0.0.1') + ('127.0.0.1', None) + >>> IS_IPV4(maxip='1.2.3.4', invert=True)('1.2.3.4') + ('1.2.3.4', 'enter valid IPv4 address') + >>> IS_IPV4(is_localhost=True)('127.0.0.1') + ('127.0.0.1', None) + >>> IS_IPV4(is_localhost=True)('1.2.3.4') + ('1.2.3.4', 'enter valid IPv4 address') + >>> 
IS_IPV4(is_localhost=False)('127.0.0.1') + ('127.0.0.1', 'enter valid IPv4 address') + >>> IS_IPV4(maxip='100.0.0.0', is_localhost=True)('127.0.0.1') + ('127.0.0.1', 'enter valid IPv4 address') + """ + + regex = re.compile( + '^(([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d\d|2[0-4]\d|25[0-5])$') + numbers = (16777216, 65536, 256, 1) + localhost = 2130706433 + private = ((2886729728L, 2886795263L), (3232235520L, 3232301055L)) + automatic = (2851995648L, 2852061183L) + + def __init__( + self, + minip='0.0.0.0', + maxip='255.255.255.255', + invert=False, + is_localhost=None, + is_private=None, + is_automatic=None, + error_message='enter valid IPv4 address'): + for n, value in enumerate((minip, maxip)): + temp = [] + if isinstance(value, str): + temp.append(value.split('.')) + elif isinstance(value, (list, tuple)): + if len(value) == len(filter(lambda item: isinstance(item, int), value)) == 4: + temp.append(value) + else: + for item in value: + if isinstance(item, str): + temp.append(item.split('.')) + elif isinstance(item, (list, tuple)): + temp.append(item) + numbers = [] + for item in temp: + number = 0 + for i, j in zip(self.numbers, item): + number += i * int(j) + numbers.append(number) + if n == 0: + self.minip = numbers + else: + self.maxip = numbers + self.invert = invert + self.is_localhost = is_localhost + self.is_private = is_private + self.is_automatic = is_automatic + self.error_message = error_message + + def __call__(self, value): + if self.regex.match(value): + number = 0 + for i, j in zip(self.numbers, value.split('.')): + number += i * int(j) + ok = False + for bottom, top in zip(self.minip, self.maxip): + if self.invert != (bottom <= number <= top): + ok = True + if not (self.is_localhost is None or self.is_localhost == \ + (number == self.localhost)): + ok = False + if not (self.is_private is None or self.is_private == \ + (sum([number[0] <= number <= number[1] for number in self.private]) > 0)): + ok = False + if not (self.is_automatic is 
None or self.is_automatic == \ + (self.automatic[0] <= number <= self.automatic[1])): + ok = False + if ok: + return (value, None) + return (value, translate(self.error_message)) + +if __name__ == '__main__': + import doctest + doctest.testmod() + + + ADDED gluon/validators.pyc Index: gluon/validators.pyc ================================================================== --- /dev/null +++ gluon/validators.pyc cannot compute difference between binary files ADDED gluon/widget.py Index: gluon/widget.py ================================================================== --- /dev/null +++ gluon/widget.py @@ -0,0 +1,977 @@ +#!/-usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +The widget is called from web2py. +""" + +import sys +import cStringIO +import time +import thread +import re +import os +import socket +import signal +import math +import logging +import newcron +import main + +from fileutils import w2p_pack, read_file, write_file +from shell import run, test +from settings import global_settings + +try: + import Tkinter, tkMessageBox + import contrib.taskbar_widget + from winservice import web2py_windows_service_handler +except: + pass + + +try: + BaseException +except NameError: + BaseException = Exception + +ProgramName = 'web2py Web Framework' +ProgramAuthor = 'Created by Massimo Di Pierro, Copyright 2007-2011' +ProgramVersion = read_file('VERSION').strip() + +ProgramInfo = '''%s + %s + %s''' % (ProgramName, ProgramAuthor, ProgramVersion) + +if not sys.version[:3] in ['2.4', '2.5', '2.6', '2.7']: + msg = 'Warning: web2py requires Python 2.4, 2.5 (recommended), 2.6 or 2.7 but you are running:\n%s' + msg = msg % sys.version + sys.stderr.write(msg) + +logger = logging.getLogger("web2py") + +class IO(object): + """ """ + + def __init__(self): + """ """ + + self.buffer = cStringIO.StringIO() + + def write(self, data): + """ 
""" + + sys.__stdout__.write(data) + if hasattr(self, 'callback'): + self.callback(data) + else: + self.buffer.write(data) + + +def try_start_browser(url): + """ Try to start the default browser """ + + try: + import webbrowser + webbrowser.open(url) + except: + print 'warning: unable to detect your browser' + + +def start_browser(ip, port): + """ Starts the default browser """ + print 'please visit:' + print '\thttp://%s:%s' % (ip, port) + print 'starting browser...' + try_start_browser('http://%s:%s' % (ip, port)) + + +def presentation(root): + """ Draw the splash screen """ + + root.withdraw() + + dx = root.winfo_screenwidth() + dy = root.winfo_screenheight() + + dialog = Tkinter.Toplevel(root, bg='white') + dialog.geometry('%ix%i+%i+%i' % (500, 300, dx / 2 - 200, dy / 2 - 150)) + + dialog.overrideredirect(1) + dialog.focus_force() + + canvas = Tkinter.Canvas(dialog, + background='white', + width=500, + height=300) + canvas.pack() + root.update() + + logo = 'splashlogo.gif' + if os.path.exists(logo): + img = Tkinter.PhotoImage(file=logo) + pnl = Tkinter.Label(canvas, image=img, background='white', bd=0) + pnl.pack(side='top', fill='both', expand='yes') + # Prevent garbage collection of img + pnl.image=img + + def add_label(text='Change Me', font_size=12, foreground='#195866', height=1): + return Tkinter.Label( + master=canvas, + width=250, + height=height, + text=text, + font=('Helvetica', font_size), + anchor=Tkinter.CENTER, + foreground=foreground, + background='white' + ) + + add_label('Welcome to...').pack(side='top') + add_label(ProgramName, 18, '#FF5C1F', 2).pack() + add_label(ProgramAuthor).pack() + add_label(ProgramVersion).pack() + + root.update() + time.sleep(5) + dialog.destroy() + return + + +class web2pyDialog(object): + """ Main window dialog """ + + def __init__(self, root, options): + """ web2pyDialog constructor """ + + root.title('web2py server') + self.root = Tkinter.Toplevel(root) + self.options = options + self.menu = Tkinter.Menu(self.root) 
+ servermenu = Tkinter.Menu(self.menu, tearoff=0) + httplog = os.path.join(self.options.folder, 'httpserver.log') + + # Building the Menu + item = lambda: try_start_browser(httplog) + servermenu.add_command(label='View httpserver.log', + command=item) + + servermenu.add_command(label='Quit (pid:%i)' % os.getpid(), + command=self.quit) + + self.menu.add_cascade(label='Server', menu=servermenu) + + self.pagesmenu = Tkinter.Menu(self.menu, tearoff=0) + self.menu.add_cascade(label='Pages', menu=self.pagesmenu) + + helpmenu = Tkinter.Menu(self.menu, tearoff=0) + + # Home Page + item = lambda: try_start_browser('http://www.web2py.com') + helpmenu.add_command(label='Home Page', + command=item) + + # About + item = lambda: tkMessageBox.showinfo('About web2py', ProgramInfo) + helpmenu.add_command(label='About', + command=item) + + self.menu.add_cascade(label='Info', menu=helpmenu) + + self.root.config(menu=self.menu) + + if options.taskbar: + self.root.protocol('WM_DELETE_WINDOW', + lambda: self.quit(True)) + else: + self.root.protocol('WM_DELETE_WINDOW', self.quit) + + sticky = Tkinter.NW + + # IP + Tkinter.Label(self.root, + text='Server IP:', + justify=Tkinter.LEFT).grid(row=0, + column=0, + sticky=sticky) + self.ip = Tkinter.Entry(self.root) + self.ip.insert(Tkinter.END, self.options.ip) + self.ip.grid(row=0, column=1, sticky=sticky) + + # Port + Tkinter.Label(self.root, + text='Server Port:', + justify=Tkinter.LEFT).grid(row=1, + column=0, + sticky=sticky) + + self.port_number = Tkinter.Entry(self.root) + self.port_number.insert(Tkinter.END, self.options.port) + self.port_number.grid(row=1, column=1, sticky=sticky) + + # Password + Tkinter.Label(self.root, + text='Choose Password:', + justify=Tkinter.LEFT).grid(row=2, + column=0, + sticky=sticky) + + self.password = Tkinter.Entry(self.root, show='*') + self.password.bind('', lambda e: self.start()) + self.password.focus_force() + self.password.grid(row=2, column=1, sticky=sticky) + + # Prepare the canvas + self.canvas 
= Tkinter.Canvas(self.root, + width=300, + height=100, + bg='black') + self.canvas.grid(row=3, column=0, columnspan=2) + self.canvas.after(1000, self.update_canvas) + + # Prepare the frame + frame = Tkinter.Frame(self.root) + frame.grid(row=4, column=0, columnspan=2) + + # Start button + self.button_start = Tkinter.Button(frame, + text='start server', + command=self.start) + + self.button_start.grid(row=0, column=0) + + # Stop button + self.button_stop = Tkinter.Button(frame, + text='stop server', + command=self.stop) + + self.button_stop.grid(row=0, column=1) + self.button_stop.configure(state='disabled') + + if options.taskbar: + self.tb = contrib.taskbar_widget.TaskBarIcon() + self.checkTaskBar() + + if options.password != '': + self.password.insert(0, options.password) + self.start() + self.root.withdraw() + else: + self.tb = None + + def checkTaskBar(self): + """ Check taskbar status """ + + if self.tb.status: + if self.tb.status[0] == self.tb.EnumStatus.QUIT: + self.quit() + elif self.tb.status[0] == self.tb.EnumStatus.TOGGLE: + if self.root.state() == 'withdrawn': + self.root.deiconify() + else: + self.root.withdraw() + elif self.tb.status[0] == self.tb.EnumStatus.STOP: + self.stop() + elif self.tb.status[0] == self.tb.EnumStatus.START: + self.start() + elif self.tb.status[0] == self.tb.EnumStatus.RESTART: + self.stop() + self.start() + del self.tb.status[0] + + self.root.after(1000, self.checkTaskBar) + + def update(self, text): + """ Update app text """ + + try: + self.text.configure(state='normal') + self.text.insert('end', text) + self.text.configure(state='disabled') + except: + pass # ## this should only happen in case app is destroyed + + def connect_pages(self): + """ Connect pages """ + + for arq in os.listdir('applications/'): + if os.path.exists('applications/%s/__init__.py' % arq): + url = self.url + '/' + arq + start_browser = lambda u = url: try_start_browser(u) + self.pagesmenu.add_command(label=url, + command=start_browser) + + def quit(self, 
justHide=False): + """ Finish the program execution """ + + if justHide: + self.root.withdraw() + else: + try: + self.server.stop() + except: + pass + + try: + self.tb.Destroy() + except: + pass + + self.root.destroy() + sys.exit(0) + + def error(self, message): + """ Show error message """ + + tkMessageBox.showerror('web2py start server', message) + + def start(self): + """ Start web2py server """ + + password = self.password.get() + + if not password: + self.error('no password, no web admin interface') + + ip = self.ip.get() + + regexp = '\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}' + if ip and not re.compile(regexp).match(ip): + return self.error('invalid host ip address') + + try: + port = int(self.port_number.get()) + except: + return self.error('invalid port number') + + self.url = 'http://%s:%s' % (ip, port) + self.connect_pages() + self.button_start.configure(state='disabled') + + try: + options = self.options + req_queue_size = options.request_queue_size + self.server = main.HttpServer( + ip, + port, + password, + pid_filename=options.pid_filename, + log_filename=options.log_filename, + profiler_filename=options.profiler_filename, + ssl_certificate=options.ssl_certificate, + ssl_private_key=options.ssl_private_key, + min_threads=options.minthreads, + max_threads=options.maxthreads, + server_name=options.server_name, + request_queue_size=req_queue_size, + timeout=options.timeout, + shutdown_timeout=options.shutdown_timeout, + path=options.folder, + interfaces=options.interfaces) + + thread.start_new_thread(self.server.start, ()) + except Exception, e: + self.button_start.configure(state='normal') + return self.error(str(e)) + + self.button_stop.configure(state='normal') + + if not options.taskbar: + thread.start_new_thread(start_browser, (ip, port)) + + self.password.configure(state='readonly') + self.ip.configure(state='readonly') + self.port_number.configure(state='readonly') + + if self.tb: + self.tb.SetServerRunning() + + def stop(self): + """ Stop web2py server 
""" + + self.button_start.configure(state='normal') + self.button_stop.configure(state='disabled') + self.password.configure(state='normal') + self.ip.configure(state='normal') + self.port_number.configure(state='normal') + self.server.stop() + + if self.tb: + self.tb.SetServerStopped() + + def update_canvas(self): + """ Update canvas """ + + try: + t1 = os.path.getsize('httpserver.log') + except: + self.canvas.after(1000, self.update_canvas) + return + + try: + fp = open('httpserver.log', 'r') + fp.seek(self.t0) + data = fp.read(t1 - self.t0) + fp.close() + value = self.p0[1:] + [10 + 90.0 / math.sqrt(1 + data.count('\n'))] + self.p0 = value + + for i in xrange(len(self.p0) - 1): + c = self.canvas.coords(self.q0[i]) + self.canvas.coords(self.q0[i], + (c[0], + self.p0[i], + c[2], + self.p0[i + 1])) + self.t0 = t1 + except BaseException: + self.t0 = time.time() + self.t0 = t1 + self.p0 = [100] * 300 + self.q0 = [self.canvas.create_line(i, 100, i + 1, 100, + fill='green') for i in xrange(len(self.p0) - 1)] + + self.canvas.after(1000, self.update_canvas) + + +def console(): + """ Defines the behavior of the console web2py execution """ + import optparse + import textwrap + + usage = "python web2py.py" + + description = """\ + web2py Web Framework startup script. + ATTENTION: unless a password is specified (-a 'passwd') web2py will + attempt to run a GUI. 
In this case command line options are ignored.""" + + description = textwrap.dedent(description) + + parser = optparse.OptionParser(usage, None, optparse.Option, ProgramVersion) + + parser.description = description + + parser.add_option('-i', + '--ip', + default='127.0.0.1', + dest='ip', + help='ip address of the server (127.0.0.1)') + + parser.add_option('-p', + '--port', + default='8000', + dest='port', + type='int', + help='port of server (8000)') + + msg = 'password to be used for administration' + msg += ' (use -a "" to reuse the last password))' + parser.add_option('-a', + '--password', + default='', + dest='password', + help=msg) + + parser.add_option('-c', + '--ssl_certificate', + default='', + dest='ssl_certificate', + help='file that contains ssl certificate') + + parser.add_option('-k', + '--ssl_private_key', + default='', + dest='ssl_private_key', + help='file that contains ssl private key') + + parser.add_option('--ca-cert', + action='store', + dest='ssl_ca_certificate', + default=None, + help='Use this file containing the CA certificate to validate X509 certificates from clients') + + parser.add_option('-d', + '--pid_filename', + default='httpserver.pid', + dest='pid_filename', + help='file to store the pid of the server') + + parser.add_option('-l', + '--log_filename', + default='httpserver.log', + dest='log_filename', + help='file to log connections') + + parser.add_option('-n', + '--numthreads', + default=None, + type='int', + dest='numthreads', + help='number of threads (deprecated)') + + parser.add_option('--minthreads', + default=None, + type='int', + dest='minthreads', + help='minimum number of server threads') + + parser.add_option('--maxthreads', + default=None, + type='int', + dest='maxthreads', + help='maximum number of server threads') + + parser.add_option('-s', + '--server_name', + default=socket.gethostname(), + dest='server_name', + help='server name for the web server') + + msg = 'max number of queued requests when server unavailable' 
+ parser.add_option('-q', + '--request_queue_size', + default='5', + type='int', + dest='request_queue_size', + help=msg) + + parser.add_option('-o', + '--timeout', + default='10', + type='int', + dest='timeout', + help='timeout for individual request (10 seconds)') + + parser.add_option('-z', + '--shutdown_timeout', + default='5', + type='int', + dest='shutdown_timeout', + help='timeout on shutdown of server (5 seconds)') + parser.add_option('-f', + '--folder', + default=os.getcwd(), + dest='folder', + help='folder from which to run web2py') + + parser.add_option('-v', + '--verbose', + action='store_true', + dest='verbose', + default=False, + help='increase --test verbosity') + + parser.add_option('-Q', + '--quiet', + action='store_true', + dest='quiet', + default=False, + help='disable all output') + + msg = 'set debug output level (0-100, 0 means all, 100 means none;' + msg += ' default is 30)' + parser.add_option('-D', + '--debug', + dest='debuglevel', + default=30, + type='int', + help=msg) + + msg = 'run web2py in interactive shell or IPython (if installed) with' + msg += ' specified appname (if app does not exist it will be created).' + msg += ' APPNAME like a/c/f (c,f optional)' + parser.add_option('-S', + '--shell', + dest='shell', + metavar='APPNAME', + help=msg) + + msg = 'run web2py in interactive shell or bpython (if installed) with' + msg += ' specified appname (if app does not exist it will be created).' 
+ msg += '\n Use combined with --shell' + parser.add_option('-B', + '--bpython', + action='store_true', + default=False, + dest='bpython', + help=msg) + + msg = 'only use plain python shell; should be used with --shell option' + parser.add_option('-P', + '--plain', + action='store_true', + default=False, + dest='plain', + help=msg) + + msg = 'auto import model files; default is False; should be used' + msg += ' with --shell option' + parser.add_option('-M', + '--import_models', + action='store_true', + default=False, + dest='import_models', + help=msg) + + msg = 'run PYTHON_FILE in web2py environment;' + msg += ' should be used with --shell option' + parser.add_option('-R', + '--run', + dest='run', + metavar='PYTHON_FILE', + default='', + help=msg) + + msg = 'run scheduled tasks for the specified apps' + msg += '-K app1,app2,app3' + msg += 'requires a scheduler defined in the models' + parser.add_option('-K', + '--scheduler', + dest='scheduler', + default=None, + help=msg) + + msg = 'run doctests in web2py environment; ' +\ + 'TEST_PATH like a/c/f (c,f optional)' + parser.add_option('-T', + '--test', + dest='test', + metavar='TEST_PATH', + default=None, + help=msg) + + parser.add_option('-W', + '--winservice', + dest='winservice', + default='', + help='-W install|start|stop as Windows service') + + msg = 'trigger a cron run manually; usually invoked from a system crontab' + parser.add_option('-C', + '--cron', + action='store_true', + dest='extcron', + default=False, + help=msg) + + msg = 'triggers the use of softcron' + parser.add_option('--softcron', + action='store_true', + dest='softcron', + default=False, + help=msg) + + parser.add_option('-N', + '--no-cron', + action='store_true', + dest='nocron', + default=False, + help='do not start cron automatically') + + parser.add_option('-J', + '--cronjob', + action='store_true', + dest='cronjob', + default=False, + help='identify cron-initiated command') + + parser.add_option('-L', + '--config', + dest='config', + 
default='', + help='config file') + + parser.add_option('-F', + '--profiler', + dest='profiler_filename', + default=None, + help='profiler filename') + + parser.add_option('-t', + '--taskbar', + action='store_true', + dest='taskbar', + default=False, + help='use web2py gui and run in taskbar (system tray)') + + parser.add_option('', + '--nogui', + action='store_true', + default=False, + dest='nogui', + help='text-only, no GUI') + + parser.add_option('-A', + '--args', + action='store', + dest='args', + default=None, + help='should be followed by a list of arguments to be passed to script, to be used with -S, -A must be the last option') + + parser.add_option('--no-banner', + action='store_true', + default=False, + dest='nobanner', + help='Do not print header banner') + + + msg = 'listen on multiple addresses: "ip:port:cert:key:ca_cert;ip2:port2:cert2:key2:ca_cert2;..." (:cert:key optional; no spaces)' + parser.add_option('--interfaces', + action='store', + dest='interfaces', + default=None, + help=msg) + + if '-A' in sys.argv: k = sys.argv.index('-A') + elif '--args' in sys.argv: k = sys.argv.index('--args') + else: k=len(sys.argv) + sys.argv, other_args = sys.argv[:k], sys.argv[k+1:] + (options, args) = parser.parse_args() + options.args = [options.run] + other_args + global_settings.cmd_options = options + global_settings.cmd_args = args + + if options.quiet: + capture = cStringIO.StringIO() + sys.stdout = capture + logger.setLevel(logging.CRITICAL + 1) + else: + logger.setLevel(options.debuglevel) + + if options.config[-3:] == '.py': + options.config = options.config[:-3] + + if options.cronjob: + global_settings.cronjob = True # tell the world + options.nocron = True # don't start cron jobs + options.plain = True # cronjobs use a plain shell + + options.folder = os.path.abspath(options.folder) + + # accept --interfaces in the form + # "ip:port:cert:key;ip2:port2;ip3:port3:cert3:key3" + # (no spaces; optional cert:key indicate SSL) + if 
isinstance(options.interfaces, str): + options.interfaces = [ + interface.split(':') for interface in options.interfaces.split(';')] + for interface in options.interfaces: + interface[1] = int(interface[1]) # numeric port + options.interfaces = [ + tuple(interface) for interface in options.interfaces] + + if options.numthreads is not None and options.minthreads is None: + options.minthreads = options.numthreads # legacy + + if not options.cronjob: + # If we have the applications package or if we should upgrade + if not os.path.exists('applications/__init__.py'): + write_file('applications/__init__.py', '') + + if not os.path.exists('welcome.w2p') or os.path.exists('NEWINSTALL'): + try: + w2p_pack('welcome.w2p','applications/welcome') + os.unlink('NEWINSTALL') + except: + msg = "New installation: unable to create welcome.w2p file" + sys.stderr.write(msg) + + return (options, args) + +def start_schedulers(options): + apps = [app.strip() for app in options.scheduler.split(',')] + try: + from multiprocessing import Process + except: + sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n') + return + processes = [] + code = "from gluon import current; current._scheduler.loop()" + for app in apps: + print 'starting scheduler for "%s"...' 
% app + args = (app,True,True,None,False,code) + logging.getLogger().setLevel(logging.DEBUG) + p = Process(target=run, args=args) + processes.append(p) + print "Currently running %s scheduler processes" % (len(processes)) + p.start() + print "Processes started" + for p in processes: + try: + p.join() + except KeyboardInterrupt: + p.terminate() + p.join() + + +def start(cron=True): + """ Start server """ + + # ## get command line arguments + + (options, args) = console() + + if not options.nobanner: + print ProgramName + print ProgramAuthor + print ProgramVersion + + from dal import drivers + if not options.nobanner: + print 'Database drivers available: %s' % ', '.join(drivers) + + + # ## if -L load options from options.config file + if options.config: + try: + options2 = __import__(options.config, {}, {}, '') + except Exception: + try: + # Jython doesn't like the extra stuff + options2 = __import__(options.config) + except Exception: + print 'Cannot import config file [%s]' % options.config + sys.exit(1) + for key in dir(options2): + if hasattr(options,key): + setattr(options,key,getattr(options2,key)) + + # ## if -T run doctests (no cron) + if hasattr(options,'test') and options.test: + test(options.test, verbose=options.verbose) + return + + # ## if -K + if options.scheduler: + try: + start_schedulers(options) + except KeyboardInterrupt: + pass + return + + # ## if -S start interactive shell (also no cron) + if options.shell: + if not options.args is None: + sys.argv[:] = options.args + run(options.shell, plain=options.plain, bpython=options.bpython, + import_models=options.import_models, startfile=options.run) + return + + # ## if -C start cron run (extcron) and exit + # ## if -N or not cron disable cron in this *process* + # ## if --softcron use softcron + # ## use hardcron in all other cases + if options.extcron: + print 'Starting extcron...' 
+ global_settings.web2py_crontype = 'external' + extcron = newcron.extcron(options.folder) + extcron.start() + extcron.join() + return + elif cron and not options.nocron and options.softcron: + print 'Using softcron (but this is not very efficient)' + global_settings.web2py_crontype = 'soft' + elif cron and not options.nocron: + print 'Starting hardcron...' + global_settings.web2py_crontype = 'hard' + newcron.hardcron(options.folder).start() + + # ## if -W install/start/stop web2py as service + if options.winservice: + if os.name == 'nt': + web2py_windows_service_handler(['', options.winservice], + options.config) + else: + print 'Error: Windows services not supported on this platform' + sys.exit(1) + return + + # ## if no password provided and havetk start Tk interface + # ## or start interface if we want to put in taskbar (system tray) + + try: + options.taskbar + except: + options.taskbar = False + + if options.taskbar and os.name != 'nt': + print 'Error: taskbar not supported on this platform' + sys.exit(1) + + root = None + + if not options.nogui: + try: + import Tkinter + havetk = True + except ImportError: + logger.warn('GUI not available because Tk library is not installed') + havetk = False + + if options.password == '' and havetk or options.taskbar and havetk: + try: + root = Tkinter.Tk() + except: + pass + + if root: + root.focus_force() + if not options.quiet: + presentation(root) + master = web2pyDialog(root, options) + signal.signal(signal.SIGTERM, lambda a, b: master.quit()) + + try: + root.mainloop() + except: + master.quit() + + sys.exit() + + # ## if no tk and no password, ask for a password + + if not root and options.password == '': + options.password = raw_input('choose a password:') + + if not options.password and not options.nobanner: + print 'no password, no admin interface' + + # ## start server + + (ip, port) = (options.ip, int(options.port)) + + if not options.nobanner: + print 'please visit:' + print '\thttp://%s:%s' % (ip, port) + print 
'use "kill -SIGTERM %i" to shutdown the web2py server' % os.getpid() + + server = main.HttpServer(ip=ip, + port=port, + password=options.password, + pid_filename=options.pid_filename, + log_filename=options.log_filename, + profiler_filename=options.profiler_filename, + ssl_certificate=options.ssl_certificate, + ssl_private_key=options.ssl_private_key, + ssl_ca_certificate=options.ssl_ca_certificate, + min_threads=options.minthreads, + max_threads=options.maxthreads, + server_name=options.server_name, + request_queue_size=options.request_queue_size, + timeout=options.timeout, + shutdown_timeout=options.shutdown_timeout, + path=options.folder, + interfaces=options.interfaces) + + try: + server.start() + except KeyboardInterrupt: + server.stop() + logging.shutdown() + + + ADDED gluon/widget.pyc Index: gluon/widget.pyc ================================================================== --- /dev/null +++ gluon/widget.pyc cannot compute difference between binary files ADDED gluon/winservice.py Index: gluon/winservice.py ================================================================== --- /dev/null +++ gluon/winservice.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +This file is part of the web2py Web Framework +Developed by Massimo Di Pierro and +Limodou . +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +This makes uses of the pywin32 package +(http://sourceforge.net/projects/pywin32/). +You do not need to install this package to use web2py. 
+ + +""" + +import time +import os +import sys +import traceback +try: + import win32serviceutil + import win32service + import win32event +except: + if os.name == 'nt': + print "Warning, winservice is unable to install the Mark Hammond Win32 extensions" +import servicemanager +import _winreg +from fileutils import up + +__all__ = ['web2py_windows_service_handler'] + +class Service(win32serviceutil.ServiceFramework): + + _svc_name_ = '_unNamed' + _svc_display_name_ = '_Service Template' + + def __init__(self, *args): + win32serviceutil.ServiceFramework.__init__(self, *args) + self.stop_event = win32event.CreateEvent(None, 0, 0, None) + + def log(self, msg): + servicemanager.LogInfoMsg(str(msg)) + + def SvcDoRun(self): + self.ReportServiceStatus(win32service.SERVICE_START_PENDING) + try: + self.ReportServiceStatus(win32service.SERVICE_RUNNING) + self.start() + win32event.WaitForSingleObject(self.stop_event, + win32event.INFINITE) + except: + self.log(traceback.format_exc(sys.exc_info)) + self.SvcStop() + self.ReportServiceStatus(win32service.SERVICE_STOPPED) + + def SvcStop(self): + self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING) + try: + self.stop() + except: + self.log(traceback.format_exc(sys.exc_info)) + win32event.SetEvent(self.stop_event) + self.ReportServiceStatus(win32service.SERVICE_STOPPED) + + # to be overridden + + def start(self): + pass + + # to be overridden + + def stop(self): + pass + + +class Web2pyService(Service): + + _svc_name_ = 'web2py' + _svc_display_name_ = 'web2py Service' + _exe_args_ = 'options' + server = None + + def chdir(self): + try: + h = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, + r'SYSTEM\CurrentControlSet\Services\%s' + % self._svc_name_) + try: + cls = _winreg.QueryValue(h, 'PythonClass') + finally: + _winreg.CloseKey(h) + dir = os.path.dirname(cls) + os.chdir(dir) + return True + except: + self.log("Can't change to web2py working path; server is stopped") + return False + + def start(self): + self.log('web2py 
server starting') + if not self.chdir(): + return + if len(sys.argv) == 2: + opt_mod = sys.argv[1] + else: + opt_mod = self._exe_args_ + options = __import__(opt_mod, [], [], '') + if True: # legacy support for old options files, which have only (deprecated) numthreads + if hasattr(options, 'numthreads') and not hasattr(options, 'minthreads'): + options.minthreads = options.numthreads + if not hasattr(options, 'minthreads'): options.minthreads = None + if not hasattr(options, 'maxthreads'): options.maxthreads = None + import main + self.server = main.HttpServer( + ip=options.ip, + port=options.port, + password=options.password, + pid_filename=options.pid_filename, + log_filename=options.log_filename, + profiler_filename=options.profiler_filename, + ssl_certificate=options.ssl_certificate, + ssl_private_key=options.ssl_private_key, + min_threads=options.minthreads, + max_threads=options.maxthreads, + server_name=options.server_name, + request_queue_size=options.request_queue_size, + timeout=options.timeout, + shutdown_timeout=options.shutdown_timeout, + path=options.folder + ) + try: + self.server.start() + except: + + # self.server.stop() + + self.server = None + raise + + def stop(self): + self.log('web2py server stopping') + if not self.chdir(): + return + if self.server: + self.server.stop() + time.sleep(1) + + +def web2py_windows_service_handler(argv=None, opt_file='options'): + path = os.path.dirname(__file__) + classstring = os.path.normpath(os.path.join(up(path), + 'gluon.winservice.Web2pyService')) + if opt_file: + Web2pyService._exe_args_ = opt_file + win32serviceutil.HandleCommandLine(Web2pyService, + serviceClassString=classstring, argv=['', 'install']) + win32serviceutil.HandleCommandLine(Web2pyService, + serviceClassString=classstring, argv=argv) + + +if __name__ == '__main__': + web2py_windows_service_handler() + + + ADDED gluon/xmlrpc.py Index: gluon/xmlrpc.py ================================================================== --- /dev/null +++ 
gluon/xmlrpc.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +""" + +from SimpleXMLRPCServer import SimpleXMLRPCDispatcher + + +def handler(request, response, methods): + response.session_id = None # no sessions for xmlrpc + dispatcher = SimpleXMLRPCDispatcher(allow_none=True, encoding=None) + for method in methods: + dispatcher.register_function(method) + dispatcher.register_introspection_functions() + response.headers['Content-Type'] = 'text/xml' + dispatch = getattr(dispatcher, '_dispatch', None) + return dispatcher._marshaled_dispatch(request.body.read(), dispatch) + + + ADDED gluon/xmlrpc.pyc Index: gluon/xmlrpc.pyc ================================================================== --- /dev/null +++ gluon/xmlrpc.pyc cannot compute difference between binary files ADDED isapiwsgihandler.py Index: isapiwsgihandler.py ================================================================== --- /dev/null +++ isapiwsgihandler.py @@ -0,0 +1,36 @@ +""" +web2py handler for isapi-wsgi for IIS. Requires: +http://code.google.com/p/isapi-wsgi/ +""" +# The entry point for the ISAPI extension. 
+def __ExtensionFactory__(): + import os + import sys + path = os.path.dirname(os.path.abspath(__file__)) + os.chdir(path) + sys.path = [path]+[p for p in sys.path if not p==path] + import gluon.main + import isapi_wsgi + application=gluon.main.wsgibase + return isapi_wsgi.ISAPIThreadPoolHandler(application) + +# ISAPI installation: +if __name__=='__main__': + import sys + if len(sys.argv)<2: + print "USAGE: python isapiwsgihandler.py install --server=Sitename" + sys.exit(0) + from isapi.install import ISAPIParameters + from isapi.install import ScriptMapParams + from isapi.install import VirtualDirParameters + from isapi.install import HandleCommandLine + + params = ISAPIParameters() + sm = [ ScriptMapParams(Extension="*", Flags=0) ] + vd = VirtualDirParameters(Name="appname", + Description = "Web2py in Python", + ScriptMaps = sm, + ScriptMapUpdate = "replace") + params.VirtualDirs = [vd] + HandleCommandLine(params) + ADDED logging.example.conf Index: logging.example.conf ================================================================== --- /dev/null +++ logging.example.conf @@ -0,0 +1,96 @@ +[loggers] +keys=root,rocket,markdown,web2py,rewrite,app,welcome + +# the default configuration is console-based (stdout) for backward compatibility +# +# note that file-based handlers are thread-safe but not mp-safe; +# for mp-safe logging, configure the appropriate syslog handler + +[handlers] +keys=consoleHandler +#keys=consoleHandler,rotatingFileHandler +#keys=osxSysLogHandler + +[formatters] +keys=simpleFormatter + +[logger_root] +level=WARNING +handlers=consoleHandler + +[logger_web2py] +level=WARNING +handlers=consoleHandler +qualname=web2py +propagate=0 + +[logger_rewrite] +level=WARNING +qualname=web2py.rewrite +handlers=consoleHandler +propagate=0 + +# generic app handler +[logger_app] +level=WARNING +qualname=web2py.app +handlers=consoleHandler +propagate=0 + +# welcome app handler +[logger_welcome] +level=WARNING +qualname=web2py.app.welcome 
+handlers=consoleHandler +propagate=0 + +# loggers for legacy getLogger calls: Rocket and markdown +[logger_rocket] +level=WARNING +handlers=consoleHandler +qualname=Rocket +propagate=0 + +[logger_markdown] +level=WARNING +handlers=consoleHandler +qualname=markdown +propagate=0 + +[handler_consoleHandler] +class=StreamHandler +level=WARNING +formatter=simpleFormatter +args=(sys.stdout,) + +# Rotating file handler +# mkdir logs in the web2py base directory if not already present +# args: (filename[, mode[, maxBytes[, backupCount[, encoding[, delay]]]]]) +# +[handler_rotatingFileHandler] +class=handlers.RotatingFileHandler +level=INFO +formatter=simpleFormatter +args=("logs/web2py.log", "a", 1000000, 5) + +[handler_osxSysLogHandler] +class=handlers.SysLogHandler +level=WARNING +formatter=simpleFormatter +args=("/var/run/syslog", handlers.SysLogHandler.LOG_DAEMON) + +[handler_linuxSysLogHandler] +class=handlers.SysLogHandler +level=WARNING +formatter=simpleFormatter +args=("/dev/log", handlers.SysLogHandler.LOG_DAEMON) + +[handler_remoteSysLogHandler] +class=handlers.SysLogHandler +level=WARNING +formatter=simpleFormatter +args=(('sysloghost.domain.com', handlers.SYSLOG_UDP_PORT), handlers.SysLogHandler.LOG_DAEMON) + +[formatter_simpleFormatter] +format=%(asctime)s - %(name)s - %(levelname)s - %(message)s +datefmt= ADDED modpythonhandler.py Index: modpythonhandler.py ================================================================== --- /dev/null +++ modpythonhandler.py @@ -0,0 +1,225 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +This file is part of the web2py Web Framework +Copyrighted by Massimo Di Pierro +License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) + +WSGI wrapper for mod_python. Requires Python 2.2 or greater. 
+Part of CherryPy mut modified by Massimo Di Pierro (2008) for web2py + + + SetHandler python-program + PythonHandler modpythonhandler + PythonPath \"['/path/to/web2py/'] + sys.path\" + PythonOption SCRIPT_NAME /myapp + + +Some WSGI implementations assume that the SCRIPT_NAME environ variable will +always be equal to 'the root URL of the app'; Apache probably won't act as +you expect in that case. You can add another PythonOption directive to tell +modpython_gateway to force that behavior: + + PythonOption SCRIPT_NAME /mcontrol + +The module.function will be called with no arguments on server shutdown, +once for each child process or thread. +""" + +import traceback +import sys +import os +from mod_python import apache + +path = os.path.dirname(os.path.abspath(__file__)) +os.chdir(path) +sys.path = [path]+[p for p in sys.path if not p==path] + +import gluon.main + + +class InputWrapper(object): + """ Input wrapper for the wsgi handler """ + + def __init__(self, req): + """ InputWrapper constructor """ + + self.req = req + + def close(self): + """ """ + + pass + + def read(self, size=-1): + """ Wrapper for req.read """ + + return self.req.read(size) + + def readline(self, size=-1): + """ Wrapper for req.readline """ + + return self.req.readline(size) + + def readlines(self, hint=-1): + """ Wrapper for req.readlines """ + + return self.req.readlines(hint) + + def __iter__(self): + """ Defines a generator with the req data """ + + line = self.readline() + while line: + yield line + + # Notice this won't prefetch the next line; it only + # gets called if the generator is resumed. 
+ line = self.readline() + + +class ErrorWrapper(object): + """ Error wrapper for the wsgi handler """ + + def __init__(self, req): + """ ErrorWrapper constructor """ + + self.req = req + + def flush(self): + """ """ + + pass + + def write(self, msg): + """ Logs the given msg in the log file """ + + self.req.log_error(msg) + + def writelines(self, seq): + """ Writes various lines in the log file """ + + self.write(''.join(seq)) + + +bad_value = "You must provide a PythonOption '%s', either 'on' or 'off', when running a version of mod_python < 3.1" + + +class Handler: + """ Defines the handler """ + + def __init__(self, req): + """ Handler constructor """ + + self.started = False + options = req.get_options() + + # Threading and forking + try: + q = apache.mpm_query + threaded = q(apache.AP_MPMQ_IS_THREADED) + forked = q(apache.AP_MPMQ_IS_FORKED) + except AttributeError: + threaded = options.get('multithread', '').lower() + + if threaded == 'on': + threaded = True + elif threaded == 'off': + threaded = False + else: + raise ValueError(bad_value % 'multithread') + + forked = options.get('multiprocess', '').lower() + + if forked == 'on': + forked = True + elif forked == 'off': + forked = False + else: + raise ValueError(bad_value % 'multiprocess') + + env = self.environ = dict(apache.build_cgi_env(req)) + + if 'SCRIPT_NAME' in options: + # Override SCRIPT_NAME and PATH_INFO if requested. 
+ env['SCRIPT_NAME'] = options['SCRIPT_NAME'] + env['PATH_INFO'] = req.uri[len(options['SCRIPT_NAME']):] + + env['wsgi.input'] = InputWrapper(req) + env['wsgi.errors'] = ErrorWrapper(req) + env['wsgi.version'] = (1, 0) + env['wsgi.run_once'] = False + + if env.get('HTTPS') in ('yes', 'on', '1'): + env['wsgi.url_scheme'] = 'https' + else: + env['wsgi.url_scheme'] = 'http' + + env['wsgi.multithread'] = threaded + env['wsgi.multiprocess'] = forked + + self.request = req + + def run(self, application): + """ Run the application """ + + try: + result = application(self.environ, self.start_response) + + for data in result: + self.write(data) + + if not self.started: + self.request.set_content_length(0) + + if hasattr(result, 'close'): + result.close() + except: + traceback.print_exc(None, self.environ['wsgi.errors']) + + if not self.started: + self.request.status = 500 + self.request.content_type = 'text/plain' + data = 'A server error occurred. Please contact the ' + \ + 'administrator.' + self.request.set_content_length(len(data)) + self.request.write(data) + + def start_response(self, status, headers, exc_info=None): + """ Defines the request data """ + + if exc_info: + try: + if self.started: + raise exc_info[0], exc_info[1], exc_info[2] + finally: + exc_info = None + + self.request.status = int(status[:3]) + + for (key, val) in headers: + if key.lower() == 'content-length': + self.request.set_content_length(int(val)) + elif key.lower() == 'content-type': + self.request.content_type = val + else: + self.request.headers_out.add(key, val) + + return self.write + + def write(self, data): + """ Write the request data """ + + if not self.started: + self.started = True + + self.request.write(data) + + +def handler(req): + """ Execute the gluon app """ + + Handler(req).run(gluon.main.wsgibase) + return apache.OK + ADDED options_std.py Index: options_std.py ================================================================== --- /dev/null +++ options_std.py @@ -0,0 +1,34 @@ 
+#!/usr/bin/python +# -*- coding: utf-8 -*- + +# when web2py is run as a windows service (web2py.exe -W) +# it does not load the command line options but it +# expects to find conifguration settings in a file called +# +# web2py/options.py +# +# this file is an example for options.py + +import socket +import os + +ip = '0.0.0.0' +port = 80 +interfaces=[('0.0.0.0',80),('0.0.0.0',443,'ssl_private_key.pem','ssl_certificate.pem')] +password = '' # ## means use the previous password +pid_filename = 'httpserver.pid' +log_filename = 'httpserver.log' +profiler_filename = None +#ssl_certificate = 'ssl_certificate.pem' # ## path to certificate file +#ssl_private_key = 'ssl_private_key.pem' # ## path to private key file +#numthreads = 50 # ## deprecated; remove +minthreads = None +maxthreads = None +server_name = socket.gethostname() +request_queue_size = 5 +timeout = 30 +shutdown_timeout = 5 +folder = os.getcwd() +extcron = None +nocron = None + ADDED queue.example.yaml Index: queue.example.yaml ================================================================== --- /dev/null +++ queue.example.yaml @@ -0,0 +1,8 @@ +# To configure Google App Engine task queues, copy this file to queue.yaml +# and edit as required +# See http://code.google.com/appengine/docs/python/config/queue.html + +queue: +- name: default + rate: 20/m + bucket_size: 1 ADDED router.example.py Index: router.example.py ================================================================== --- /dev/null +++ router.example.py @@ -0,0 +1,202 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# routers are dictionaries of URL routing parameters. 
+# +# For each request, the effective router is: +# the built-in default base router (shown below), +# updated by the BASE router in routes.py routers, +# updated by the app-specific router in routes.py routers (if any), +# updated by the app-specific router from applications/app/routes.py routers (if any) +# +# +# Router members: +# +# default_application: default application name +# applications: list of all recognized applications, or 'ALL' to use all currently installed applications +# Names in applications are always treated as an application names when they appear first in an incoming URL. +# Set applications=None to disable the removal of application names from outgoing URLs. +# domains: optional dict mapping domain names to application names +# The domain name can include a port number: domain.com:8080 +# The application name can include a controller: appx/ctlrx +# or a controller and a function: appx/ctlrx/fcnx +# Example: +# domains = { "domain.com" : "app", +# "x.domain.com" : "appx", +# }, +# path_prefix: a path fragment that is prefixed to all outgoing URLs and stripped from all incoming URLs +# +# Note: default_application, applications, domains & path_prefix are permitted only in the BASE router, +# and domain makes sense only in an application-specific router. +# The remaining members can appear in the BASE router (as defaults for all applications) +# or in application-specific routers. +# +# default_controller: name of default controller +# default_function: name of default function (in all controllers) or dictionary of default functions +# by controller +# controllers: list of valid controllers in selected app +# or "DEFAULT" to use all controllers in the selected app plus 'static' +# or None to disable controller-name removal. +# Names in controllers are always treated as controller names when they appear in an incoming URL after +# the (optional) application and language names. 
+# functions: list of valid functions in the default controller (default None) or dictionary of valid +# functions by controller. +# If present, the default function name will be omitted when the controller is the default controller +# and the first arg does not create an ambiguity. +# languages: list of all supported languages +# Names in languages are always treated as language names when they appear in an incoming URL after +# the (optional) application name. +# default_language +# The language code (for example: en, it-it) optionally appears in the URL following +# the application (which may be omitted). For incoming URLs, the code is copied to +# request.language; for outgoing URLs it is taken from request.language. +# If languages=None, language support is disabled. +# The default_language, if any, is omitted from the URL. +# root_static: list of static files accessed from root (by default, favicon.ico & robots.txt) +# (mapped to the default application's static/ directory) +# Each default (including domain-mapped) application has its own root-static files. +# domain: the domain that maps to this application (alternative to using domains in the BASE router) +# exclusive_domain: If True (default is False), an exception is raised if an attempt is made to generate +# an outgoing URL with a different application without providing an explicit host. +# map_hyphen: If True (default is False), hyphens in incoming /a/c/f fields are converted +# to underscores, and back to hyphens in outgoing URLs. +# Language, args and the query string are not affected. +# map_static: By default, the default application is not stripped from static URLs. +# Set map_static=True to override this policy. +# acfe_match: regex for valid application, controller, function, extension /a/c/f.e +# file_match: regex for valid file (used for static file names) +# args_match: regex for valid args +# This validation provides a measure of security. 
+# If it is changed, the application perform its own validation. +# +# +# The built-in default router supplies default values (undefined members are None): +# +# default_router = dict( +# default_application = 'init', +# applications = 'ALL', +# default_controller = 'default', +# controllers = 'DEFAULT', +# default_function = 'index', +# functions = None, +# default_language = None, +# languages = None, +# root_static = ['favicon.ico', 'robots.txt'], +# domains = None, +# map_hyphen = False, +# acfe_match = r'\w+$', # legal app/ctlr/fcn/ext +# file_match = r'(\w+[-=./]?)+$', # legal file (path) name +# args_match = r'([\w@ -]+[=.]?)+$', # legal arg in args +# ) +# +# See rewrite.map_url_in() and rewrite.map_url_out() for implementation details. + + +# This simple router set overrides only the default application name, +# but provides full rewrite functionality. + +routers = dict( + + # base router + BASE = dict( + default_application = 'welcome', + ), +) + + +# Error-handling redirects all HTTP errors (status codes >= 400) to a specified +# path. If you wish to use error-handling redirects, uncomment the tuple +# below. You can customize responses by adding a tuple entry with the first +# value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are +# routed. ) and the value as a path to redirect the user to. You may also use +# '*' as a wildcard. +# +# The error handling page is also passed the error code and ticket as +# variables. Traceback information will be stored in the ticket. +# +# routes_onerror = [ +# (r'init/400', r'/init/default/login') +# ,(r'init/*', r'/init/static/fail.html') +# ,(r'*/404', r'/init/static/cantfind.html') +# ,(r'*/*', r'/init/error/index') +# ] + +# specify action in charge of error handling +# +# error_handler = dict(application='error', +# controller='default', +# function='index') + +# In the event that the error-handling page itself returns an error, web2py will +# fall back to its old static responses. 
You can customize them here. +# ErrorMessageTicket takes a string format dictionary containing (only) the +# "ticket" key. + +# error_message = '

    %s

    ' +# error_message_ticket = '

    Internal error

    Ticket issued: %(ticket)s' + +def __routes_doctest(): + ''' + Dummy function for doctesting routes.py. + + Use filter_url() to test incoming or outgoing routes; + filter_err() for error redirection. + + filter_url() accepts overrides for method and remote host: + filter_url(url, method='get', remote='0.0.0.0', out=False) + + filter_err() accepts overrides for application and ticket: + filter_err(status, application='app', ticket='tkt') + + >>> import os + >>> import gluon.main + >>> from gluon.rewrite import load, filter_url, filter_err, get_effective_router + >>> load(routes=os.path.basename(__file__)) + + >>> filter_url('http://domain.com/abc', app=True) + 'welcome' + >>> filter_url('http://domain.com/welcome', app=True) + 'welcome' + >>> os.path.relpath(filter_url('http://domain.com/favicon.ico')) + 'applications/welcome/static/favicon.ico' + >>> filter_url('http://domain.com/abc') + '/welcome/default/abc' + >>> filter_url('http://domain.com/index/abc') + "/welcome/default/index ['abc']" + >>> filter_url('http://domain.com/default/abc.css') + '/welcome/default/abc.css' + >>> filter_url('http://domain.com/default/index/abc') + "/welcome/default/index ['abc']" + >>> filter_url('http://domain.com/default/index/a bc') + "/welcome/default/index ['a bc']" + + >>> filter_url('https://domain.com/app/ctr/fcn', out=True) + '/app/ctr/fcn' + >>> filter_url('https://domain.com/welcome/ctr/fcn', out=True) + '/ctr/fcn' + >>> filter_url('https://domain.com/welcome/default/fcn', out=True) + '/fcn' + >>> filter_url('https://domain.com/welcome/default/index', out=True) + '/' + >>> filter_url('https://domain.com/welcome/appadmin/index', out=True) + '/appadmin' + >>> filter_url('http://domain.com/welcome/default/fcn?query', out=True) + '/fcn?query' + >>> filter_url('http://domain.com/welcome/default/fcn#anchor', out=True) + '/fcn#anchor' + >>> filter_url('http://domain.com/welcome/default/fcn?query#anchor', out=True) + '/fcn?query#anchor' + + >>> filter_err(200) + 200 + >>> 
filter_err(399) + 399 + >>> filter_err(400) + 400 + ''' + pass + +if __name__ == '__main__': + import doctest + doctest.testmod() + ADDED routes.example.py Index: routes.example.py ================================================================== --- /dev/null +++ routes.example.py @@ -0,0 +1,166 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# default_application, default_controller, default_function +# are used when the respective element is missing from the +# (possibly rewritten) incoming URL +# +default_application = 'init' # ordinarily set in base routes.py +default_controller = 'default' # ordinarily set in app-specific routes.py +default_function = 'index' # ordinarily set in app-specific routes.py + +# routes_app is a tuple of tuples. The first item in each is a regexp that will +# be used to match the incoming request URL. The second item in the tuple is +# an applicationname. This mechanism allows you to specify the use of an +# app-specific routes.py. This entry is meaningful only in the base routes.py. +# +# Example: support welcome, admin, app and myapp, with myapp the default: + + +routes_app = ((r'/(?Pwelcome|admin|app)\b.*', r'\g'), + (r'(.*)', r'myapp'), + (r'/?(.*)', r'myapp')) + +# routes_in is a tuple of tuples. The first item in each is a regexp that will +# be used to match the incoming request URL. The second item in the tuple is +# what it will be replaced with. This mechanism allows you to redirect incoming +# routes to different web2py locations +# +# Example: If you wish for your entire website to use init's static directory: +# +# routes_in=( (r'/static/(?P[\w./-]+)', r'/init/static/\g') ) +# + +routes_in = ((r'.*:/favicon.ico', r'/examples/static/favicon.ico'), + (r'.*:/robots.txt', r'/examples/static/robots.txt'), + ((r'.*http://otherdomain.com.* (?P.*)', r'/app/ctr\g'))) + +# routes_out, like routes_in translates URL paths created with the web2py URL() +# function in the same manner that route_in translates inbound URL paths. 
+# + +routes_out = ((r'.*http://otherdomain.com.* /app/ctr(?P.*)', r'\g'), + (r'/app(?P.*)', r'\g')) + +# Error-handling redirects all HTTP errors (status codes >= 400) to a specified +# path. If you wish to use error-handling redirects, uncomment the tuple +# below. You can customize responses by adding a tuple entry with the first +# value in 'appName/HTTPstatusCode' format. ( Only HTTP codes >= 400 are +# routed. ) and the value as a path to redirect the user to. You may also use +# '*' as a wildcard. +# +# The error handling page is also passed the error code and ticket as +# variables. Traceback information will be stored in the ticket. +# +# routes_onerror = [ +# (r'init/400', r'/init/default/login') +# ,(r'init/*', r'/init/static/fail.html') +# ,(r'*/404', r'/init/static/cantfind.html') +# ,(r'*/*', r'/init/error/index') +# ] + +# specify action in charge of error handling +# +# error_handler = dict(application='error', +# controller='default', +# function='index') + +# In the event that the error-handling page itself returns an error, web2py will +# fall back to its old static responses. You can customize them here. +# ErrorMessageTicket takes a string format dictionary containing (only) the +# "ticket" key. + +# error_message = '

    %s

    ' +# error_message_ticket = '

    Internal error

    Ticket issued: %(ticket)s' + +# specify a list of apps that bypass args-checking and use request.raw_args +# +#routes_apps_raw=['myapp'] +#routes_apps_raw=['myapp', 'myotherapp'] + +def __routes_doctest(): + ''' + Dummy function for doctesting routes.py. + + Use filter_url() to test incoming or outgoing routes; + filter_err() for error redirection. + + filter_url() accepts overrides for method and remote host: + filter_url(url, method='get', remote='0.0.0.0', out=False) + + filter_err() accepts overrides for application and ticket: + filter_err(status, application='app', ticket='tkt') + + >>> import os + >>> import gluon.main + >>> from gluon.rewrite import regex_select, load, filter_url, regex_filter_out, filter_err, compile_regex + >>> regex_select() + >>> load(routes=os.path.basename(__file__)) + + >>> os.path.relpath(filter_url('http://domain.com/favicon.ico')) + 'applications/examples/static/favicon.ico' + >>> os.path.relpath(filter_url('http://domain.com/robots.txt')) + 'applications/examples/static/robots.txt' + >>> filter_url('http://domain.com') + '/init/default/index' + >>> filter_url('http://domain.com/') + '/init/default/index' + >>> filter_url('http://domain.com/init/default/fcn') + '/init/default/fcn' + >>> filter_url('http://domain.com/init/default/fcn/') + '/init/default/fcn' + >>> filter_url('http://domain.com/app/ctr/fcn') + '/app/ctr/fcn' + >>> filter_url('http://domain.com/app/ctr/fcn/arg1') + "/app/ctr/fcn ['arg1']" + >>> filter_url('http://domain.com/app/ctr/fcn/arg1/') + "/app/ctr/fcn ['arg1']" + >>> filter_url('http://domain.com/app/ctr/fcn/arg1//') + "/app/ctr/fcn ['arg1', '']" + >>> filter_url('http://domain.com/app/ctr/fcn//arg1') + "/app/ctr/fcn ['', 'arg1']" + >>> filter_url('HTTP://DOMAIN.COM/app/ctr/fcn') + '/app/ctr/fcn' + >>> filter_url('http://domain.com/app/ctr/fcn?query') + '/app/ctr/fcn ?query' + >>> filter_url('http://otherdomain.com/fcn') + '/app/ctr/fcn' + >>> regex_filter_out('/app/ctr/fcn') + '/ctr/fcn' + >>> 
filter_url('https://otherdomain.com/app/ctr/fcn', out=True) + '/ctr/fcn' + >>> filter_url('https://otherdomain.com/app/ctr/fcn/arg1//', out=True) + '/ctr/fcn/arg1//' + >>> filter_url('http://otherdomain.com/app/ctr/fcn', out=True) + '/fcn' + >>> filter_url('http://otherdomain.com/app/ctr/fcn?query', out=True) + '/fcn?query' + >>> filter_url('http://otherdomain.com/app/ctr/fcn#anchor', out=True) + '/fcn#anchor' + >>> filter_err(200) + 200 + >>> filter_err(399) + 399 + >>> filter_err(400) + 400 + >>> filter_url('http://domain.com/welcome', app=True) + 'welcome' + >>> filter_url('http://domain.com/', app=True) + 'myapp' + >>> filter_url('http://domain.com', app=True) + 'myapp' + >>> compile_regex('.*http://otherdomain.com.* (?P.*)', '/app/ctr\g')[0].pattern + '^.*http://otherdomain.com.* (?P.*)$' + >>> compile_regex('.*http://otherdomain.com.* (?P.*)', '/app/ctr\g')[1] + '/app/ctr\\\\g' + >>> compile_regex('/$c/$f', '/init/$c/$f')[0].pattern + '^.*?:https?://[^:/]+:[a-z]+ /(?P\\\\w+)/(?P\\\\w+)$' + >>> compile_regex('/$c/$f', '/init/$c/$f')[1] + '/init/\\\\g/\\\\g' + ''' + pass + +if __name__ == '__main__': + import doctest + doctest.testmod() + ADDED scgihandler.py Index: scgihandler.py ================================================================== --- /dev/null +++ scgihandler.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +scgihandler.py - handler for SCGI protocol + +Modified by Michele Comitini +from fcgihandler.py to support SCGI + +fcgihandler has the following copyright: +" This file is part of the web2py Web Framework + Copyrighted by Massimo Di Pierro + License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) +" + +This is a handler for lighttpd+scgi +This file has to be in the PYTHONPATH +Put something like this in the lighttpd.conf file: + +server.document-root="/var/www/web2py/" +# for >= linux-2.6 +server.event-handler = "linux-sysepoll" + +url.rewrite-once = ( + "^(/.+?/static/.+)$" => "/applications$1", + "(^|/.*)$" => 
"/handler_web2py.scgi$1", +) +scgi.server = ( "/handler_web2py.scgi" => + ("handler_web2py" => + ( "host" => "127.0.0.1", + "port" => "4000", + "check-local" => "disable", # don't forget to set "disable"! + ) + ) +) + + + + +""" + +LOGGING = False +SOFTCRON = False + +import sys +import os + +path = os.path.dirname(os.path.abspath(__file__)) +os.chdir(path) +sys.path = [path]+[p for p in sys.path if not p==path] + +import gluon.main + +# uncomment one of the two imports below depending on the SCGIWSGI server installed +#import paste.util.scgiserver as scgi +from wsgitools.scgi.forkpool import SCGIServer + +if LOGGING: + application = gluon.main.appfactory(wsgiapp=gluon.main.wsgibase, + logfilename='httpserver.log', + profilerfilename=None) +else: + application = gluon.main.wsgibase + +if SOFTCRON: + from gluon.settings import global_settings + global_settings.web2py_crontype = 'soft' + +# uncomment one of the two rows below depending on the SCGIWSGI server installed +#scgi.serve_application(application, '', 4000).run() +SCGIServer(application, port=4000).run() + ADDED scripts/autoroutes.py Index: scripts/autoroutes.py ================================================================== --- /dev/null +++ scripts/autoroutes.py @@ -0,0 +1,141 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' +autoroutes writes routes for you based on a simpler routing +configuration file called routes.conf. Example: + +----- BEGIN routes.conf------- +127.0.0.1 /examples/default +domain1.com /app1/default +domain2.com /app2/default +domain3.com /app3/default +----- END ---------- + +It maps a domain (the left-hand side) to an app (one app per domain), +and shortens the URLs for the app by removing the listed path prefix. 
That means: + +http://domain1.com/index is mapped to /app1/default/index +http://domain2.com/index is mapped to /app2/default/index + +It preserves admin, appadmin, static files, favicon.ico and robots.txt: + +http://domain1.com/favicon.ico /welcome/static/favicon.ico +http://domain1.com/robots.txt /welcome/static/robots.txt +http://domain1.com/admin/... /admin/... +http://domain1.com/appadmin/... /app1/appadmin/... +http://domain1.com/static/... /app1/static/... + +and vice-versa. + +To use, cp scripts/autoroutes.py routes.py + +and either edit the config string below, or set config = "" and edit routes.conf +''' + +config = ''' +127.0.0.1 /examples/default +domain1.com /app1/default +domain2.com /app2/default +domain3.com /app3/defcon3 +''' +if not config.strip(): + try: + config_file = open('routes.conf','r') + try: + config = config_file.read() + finally: + config_file.close() + except: + config='' + +def auto_in(apps): + routes = [ + ('/robots.txt','/welcome/static/robots.txt'), + ('/favicon.ico','/welcome/static/favicon.ico'), + ('/admin$anything','/admin$anything'), + ] + for domain,path in [x.strip().split() for x in apps.split('\n') if x.strip() and not x.strip().startswith('#')]: + if not path.startswith('/'): path = '/'+path + if path.endswith('/'): path = path[:-1] + app = path.split('/')[1] + routes += [ + ('.*:https?://(.*\.)?%s:$method /' % domain,'%s' % path), + ('.*:https?://(.*\.)?%s:$method /static/$anything' % domain,'/%s/static/$anything' % app), + ('.*:https?://(.*\.)?%s:$method /appadmin/$anything' % domain,'/%s/appadmin/$anything' % app), + ('.*:https?://(.*\.)?%s:$method /$anything' % domain,'%s/$anything' % path), + ] + return routes + +def auto_out(apps): + routes = [] + for domain,path in [x.strip().split() for x in apps.split('\n') if x.strip() and not x.strip().startswith('#')]: + if not path.startswith('/'): path = '/'+path + if path.endswith('/'): path = path[:-1] + app = path.split('/')[1] + routes += [ + ('/%s/static/$anything' % 
app,'/static/$anything'), + ('/%s/appadmin/$anything' % app, '/appadmin/$anything'), + ('%s/$anything' % path, '/$anything'), + ] + return routes + +routes_in = auto_in(config) +routes_out = auto_out(config) + +def __routes_doctest(): + ''' + Dummy function for doctesting autoroutes.py. + + Use filter_url() to test incoming or outgoing routes; + filter_err() for error redirection. + + filter_url() accepts overrides for method and remote host: + filter_url(url, method='get', remote='0.0.0.0', out=False) + + filter_err() accepts overrides for application and ticket: + filter_err(status, application='app', ticket='tkt') + + >>> filter_url('http://domain1.com/favicon.ico') + 'http://domain1.com/welcome/static/favicon.ico' + >>> filter_url('https://domain2.com/robots.txt') + 'https://domain2.com/welcome/static/robots.txt' + >>> filter_url('http://domain3.com/fcn') + 'http://domain3.com/app3/defcon3/fcn' + >>> filter_url('http://127.0.0.1/fcn') + 'http://127.0.0.1/examples/default/fcn' + >>> filter_url('HTTP://DOMAIN.COM/app/ctr/fcn') + 'http://domain.com/app/ctr/fcn' + >>> filter_url('http://domain.com/app/ctr/fcn?query') + 'http://domain.com/app/ctr/fcn?query' + >>> filter_url('http://otherdomain.com/fcn') + 'http://otherdomain.com/fcn' + >>> regex_filter_out('/app/ctr/fcn') + '/app/ctr/fcn' + >>> regex_filter_out('/app1/ctr/fcn') + '/app1/ctr/fcn' + >>> filter_url('https://otherdomain.com/app1/default/fcn', out=True) + '/fcn' + >>> filter_url('http://otherdomain.com/app2/ctr/fcn', out=True) + '/app2/ctr/fcn' + >>> filter_url('http://domain1.com/app1/default/fcn?query', out=True) + '/fcn?query' + >>> filter_url('http://domain2.com/app3/defcon3/fcn#anchor', out=True) + '/fcn#anchor' + ''' + pass + +if __name__ == '__main__': + try: + import gluon.main + except ImportError: + import sys, os + os.chdir(os.path.dirname(os.path.dirname(__file__))) + sys.path.append(os.path.dirname(os.path.dirname(__file__))) + import gluon.main + from gluon.rewrite import regex_select, 
load, filter_url, regex_filter_out + regex_select() # use base routing parameters + load(routes=__file__) # load this file + + import doctest + doctest.testmod() + ADDED scripts/cleancss.py Index: scripts/cleancss.py ================================================================== --- /dev/null +++ scripts/cleancss.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import sys +import re + +filename = sys.argv[1] + +datafile = open(filename, 'r') +try: + data = datafile.read() +finally: + datafile.close() +data = re.compile('\s*{\s*').sub(' { ', data) +data = re.compile('\s*;\s*').sub('; ', data) +data = re.compile('\s*}\s*').sub(' }\n', data) +data = re.compile('[ ]+').sub(' ', data) + +print data + ADDED scripts/cleanhtml.py Index: scripts/cleanhtml.py ================================================================== --- /dev/null +++ scripts/cleanhtml.py @@ -0,0 +1,61 @@ +import sys +import re + +def cleancss(text): + text=re.compile('\s+').sub(' ', text) + text=re.compile('\s*(?P,|:)\s*').sub('\g ', text) + text=re.compile('\s*;\s*').sub(';\n ', text) + text=re.compile('\s*\{\s*').sub(' {\n ', text) + text=re.compile('\s*\}\s*').sub('\n}\n\n', text) + return text + +def cleanhtml(text): + text=text.lower() + r=re.compile('\', re.DOTALL) + scripts=r.findall(text) + text=r.sub(' +
    +' >/etc/uwsgi-python/apps-available/web2py.xml +ln -s /etc/uwsgi-python/apps-available/web2py.xml /etc/uwsgi-python/apps-enabled/web2py.xml + +# Install Web2py +apt-get -y install unzip +cd /home +mkdir www-data +cd www-data +wget http://web2py.com/examples/static/web2py_src.zip +unzip web2py_src.zip +rm web2py_src.zip +chown -R www-data:www-data web2py +cd /home/www-data/web2py +sudo -u www-data python -c "from gluon.main import save_password; save_password('$PW',443)" +/etc/init.d/uwsgi-python restart +/etc/init.d/nginx restart ADDED scripts/setup-web2py-ubuntu.sh Index: scripts/setup-web2py-ubuntu.sh ================================================================== --- /dev/null +++ scripts/setup-web2py-ubuntu.sh @@ -0,0 +1,169 @@ +echo "This script will: +1) install all modules need to run web2py on Ubuntu/Debian +2) install web2py in /home/www-data/ +3) create a self signed sll certificate +4) setup web2py with mod_wsgi +5) overwrite /etc/apache2/sites-available/default +6) restart apache. + +You may want to read this cript before running it. 
+ +Press a key to continue...[ctrl+C to abort]" + +read CONFIRM + +#!/bin/bash +# optional +# dpkg-reconfigure console-setup +# dpkg-reconfigure timezoneconf +# nano /etc/hostname +# nano /etc/network/interfaces +# nano /etc/resolv.conf +# reboot now +# ifconfig eth0 + +echo "installing useful packages" +echo "==========================" +apt-get update +apt-get -y install ssh +apt-get -y install zip unzip +apt-get -y install tar +apt-get -y install openssh-server +apt-get -y install build-essential +apt-get -y install python2.5 +apt-get -y install ipython +apt-get -y install python-dev +apt-get -y install postgresql +apt-get -y install apache2 +apt-get -y install libapache2-mod-wsgi +apt-get -y install python2.5-psycopg2 +apt-get -y install postfix +apt-get -y install wget +apt-get -y install python-matplotlib +apt-get -y install python-reportlab +apt-get -y install mercurial +/etc/init.d/postgresql restart + +# optional, uncomment for emacs +# apt-get -y install emacs + +# optional, uncomment for backups using samba +# apt-get -y install samba +# apt-get -y install smbfs + +echo "downloading, installing and starting web2py" +echo "===========================================" +cd /home +mkdir www-data +cd www-data +rm web2py_src.zip* +wget http://web2py.com/examples/static/web2py_src.zip +unzip web2py_src.zip +chown -R www-data:www-data web2py + +echo "setting up apache modules" +echo "=========================" +a2enmod ssl +a2enmod proxy +a2enmod proxy_http +a2enmod headers +a2enmod expires +mkdir /etc/apache2/ssl + +echo "creating a self signed certificate" +echo "==================================" +openssl genrsa 1024 > /etc/apache2/ssl/self_signed.key +chmod 400 /etc/apache2/ssl/self_signed.key +openssl req -new -x509 -nodes -sha1 -days 365 -key /etc/apache2/ssl/self_signed.key > /etc/apache2/ssl/self_signed.cert +openssl x509 -noout -fingerprint -text < /etc/apache2/ssl/self_signed.cert > /etc/apache2/ssl/self_signed.info + +echo "rewriting your apache 
config file to use mod_wsgi" +echo "=================================================" +echo ' +NameVirtualHost *:80 +NameVirtualHost *:443 + + + WSGIDaemonProcess web2py user=www-data group=www-data + WSGIProcessGroup web2py + WSGIScriptAlias / /home/www-data/web2py/wsgihandler.py + + + AllowOverride None + Order Allow,Deny + Deny from all + + Allow from all + + + + AliasMatch ^/([^/]+)/static/(.*) \ + /home/www-data/web2py/applications/$1/static/$2 + + Options -Indexes + Order Allow,Deny + Allow from all + + + + Deny from all + + + + Deny from all + + + CustomLog /var/log/apache2/access.log common + ErrorLog /var/log/apache2/error.log + + + + SSLEngine on + SSLCertificateFile /etc/apache2/ssl/self_signed.cert + SSLCertificateKeyFile /etc/apache2/ssl/self_signed.key + + WSGIProcessGroup web2py + + WSGIScriptAlias / /home/www-data/web2py/wsgihandler.py + + + AllowOverride None + Order Allow,Deny + Deny from all + + Allow from all + + + + AliasMatch ^/([^/]+)/static/(.*) \ + /home/www-data/web2py/applications/$1/static/$2 + + + Options -Indexes + ExpiresActive On + ExpiresDefault "access plus 1 hour" + Order Allow,Deny + Allow from all + + + CustomLog /var/log/apache2/access.log common + ErrorLog /var/log/apache2/error.log + +' > /etc/apache2/sites-available/default + +# echo "setting up PAM" +# echo "================" +# sudo apt-get install pwauth +# sudo ln -s /etc/apache2/mods-available/authnz_external.load /etc/apache2/mods-enabled +# ln -s /etc/pam.d/apache2 /etc/pam.d/httpd +# usermod -a -G shadow www-data + +echo "restarting apage" +echo "================" + +/etc/init.d/apache2 restart +cd /home/www-data/web2py +sudo -u www-data python -c "from gluon.widget import console; console();" +sudo -u www-data python -c "from gluon.main import save_password; save_password(raw_input('admin password: '),443)" +echo "done!" 
+ ADDED scripts/standalone_exe_cxfreeze.py Index: scripts/standalone_exe_cxfreeze.py ================================================================== --- /dev/null +++ scripts/standalone_exe_cxfreeze.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Usage: + Install cx_Freeze: http://cx-freeze.sourceforge.net/ + Copy script to the web2py directory + c:\Python27\python standalone_exe_cxfreeze.py build_exe +""" +from cx_Freeze import setup, Executable +from gluon.import_all import base_modules, contributed_modules +from gluon.fileutils import readlines_file +from glob import glob +import fnmatch +import os +import shutil +import sys +import re + +#read web2py version from VERSION file +web2py_version_line = readlines_file('VERSION')[0] +#use regular expression to get just the version number +v_re = re.compile('[0-9]+\.[0-9]+\.[0-9]+') +web2py_version = v_re.search(web2py_version_line).group(0) + +base = None + +if sys.platform == 'win32': + base = "Win32GUI" + +base_modules.remove('macpath') +buildOptions = dict( + compressed = True, + excludes = ["macpath","PyQt4"], + includes = base_modules, + include_files=[ + 'applications', + 'ABOUT', + 'LICENSE', + 'VERSION', + 'logging.example.conf', + 'options_std.py', + 'app.example.yaml', + 'queue.example.yaml', + ], + # append any extra module by extending the list below - + # "contributed_modules+["lxml"]" + packages = contributed_modules, + ) + +setup( + name = "Web2py", + version=web2py_version, + author="Massimo DiPierro", + description="web2py web framework", + license = "LGPL v3", + options = dict(build_exe = buildOptions), + executables = [Executable("web2py.py", + base=base, + compress = True, + icon = "web2py.ico", + targetName="web2py.exe", + copyDependentFiles = True)], + ) ADDED scripts/sync_languages.py Index: scripts/sync_languages.py ================================================================== --- /dev/null +++ scripts/sync_languages.py @@ -0,0 +1,47 @@ +#!/usr/bin/env 
python +# -*- coding: utf-8 -*- + +# TODO: Comment this code + +import sys +import shutil +import os + +from gluon.languages import findT + +sys.path.insert(0, '.') + +file = sys.argv[1] +apps = sys.argv[2:] + +d = {} +for app in apps: + path = 'applications/%s/' % app + findT(path, file) + langfile = open(os.path.join(path, 'languages', '%s.py' % file)) + try: + data = eval(langfile.read()) + finally: + langfile.close() + d.update(data) + +path = 'applications/%s/' % apps[-1] +file1 = os.path.join(path, 'languages', '%s.py' % file) + +f = open(file1, 'w') +try: + f.write('{\n') + keys = d.keys() + keys.sort() + for key in keys: + f.write('%s:%s,\n' % (repr(key), repr(str(d[key])))) + f.write('}\n') +finally: + f.close() + +oapps = reversed(apps[:-1]) +for app in oapps: + path2 = 'applications/%s/' % app + file2 = os.path.join(path2, 'languages', '%s.py' % file) + shutil.copyfile(file1, file2) + ADDED scripts/tickets2db.py Index: scripts/tickets2db.py ================================================================== --- /dev/null +++ scripts/tickets2db.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +import sys +import os +import time +import stat +import datetime + +from gluon.utils import md5_hash +from gluon.restricted import RestrictedError + +SLEEP_MINUTES = 5 +DB_URI = 'sqlite://tickets.db' +ALLOW_DUPLICATES = True + +path = os.path.join(request.folder, 'errors') + +db = SQLDB(DB_URI) +db.define_table('ticket', SQLField('app'), SQLField('name'), + SQLField('date_saved', 'datetime'), SQLField('layer'), + SQLField('traceback', 'text'), SQLField('code', 'text')) + +hashes = {} + +while 1: + for file in os.listdir(path): + filename = os.path.join(path, file) + + if not ALLOW_DUPLICATES: + fileobj = open(filename, 'r') + try: + file_data = fileobj.read() + finally: + fileobj.close() + key = md5_hash(file_data) + + if key in hashes: + continue + + hashes[key] = 1 + + error = RestrictedError() + error.load(request, request.application, 
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Poll an application's errors/ folder and e-mail each new ticket to the
administrator, deleting the ticket file afterwards.

Run inside a web2py shell environment, e.g.:
    python web2py.py -S <app> -M -R scripts/tickets2email.py
(`request` below is provided by that environment.)
"""

import os
import time

from gluon.utils import md5_hash
from gluon.restricted import RestrictedError
from gluon.tools import Mail

path = os.path.join(request.folder, 'errors')
hashes = {}  # md5 of ticket contents already seen (used when deduplicating)
mail = Mail()

### CONFIGURE HERE
SLEEP_MINUTES = 5
ALLOW_DUPLICATES = True
mail.settings.server = 'localhost:25'
mail.settings.sender = 'you@localhost'
administrator_email = 'you@localhost'
### END CONFIGURATION

while True:
    for ticket_name in os.listdir(path):
        filename = os.path.join(path, ticket_name)

        if not ALLOW_DUPLICATES:
            # Skip tickets whose content we have already mailed.
            # NOTE(review): duplicates are skipped but never unlinked, so
            # they are re-scanned every cycle — preserved from the
            # original; confirm whether duplicates should be deleted.
            with open(filename, 'r') as ticket_file:
                key = md5_hash(ticket_file.read())
            if key in hashes:
                continue
            hashes[key] = 1

        error = RestrictedError()
        error.load(request, request.application, filename)

        mail.send(to=administrator_email,
                  subject='new web2py ticket',
                  message=error.traceback)

        os.unlink(filename)
    time.sleep(SLEEP_MINUTES * 60)
#!/bin/bash
#
# update-web2py.sh
#
# Install in web2py/.. or web2py/ or web2py/scripts as update-web2py.sh
# and make executable: chmod +x update-web2py.sh
#
# Saves a snapshot of the current web2py/ as web2py/../web2py-<version>.zip,
# downloads the current stable web2py source zip, and unzips it over web2py/.
#
TARGET=web2py

if [ ! -d "$TARGET" ]; then
    # in case we're in web2py/
    if [ -f "../$TARGET/VERSION" ]; then
        cd ..
    # in case we're in web2py/scripts
    elif [ -f "../../$TARGET/VERSION" ]; then
        cd ../..
    fi
fi
# VERSION looks like: "Version 1.99.2 (2011-09-26 ...)" -> second word
read -r a VERSION c < "$TARGET/VERSION"
SAVE="$TARGET-$VERSION"
URL=http://www.web2py.com/examples/static/web2py_src.zip

ZIP=$(basename "$URL")
SAVED=""

# Save a zip archive of the current version,
# but don't overwrite a previous save of the same version.
if [ -f "$SAVE.zip" ]; then
    echo "Remove or rename $SAVE.zip first" >&2
    exit 1
fi
if [ -d "$TARGET" ]; then
    echo -n ">>Save old version: " >&2
    cat "$TARGET/VERSION" >&2
    zip -q -r "$SAVE.zip" "$TARGET"
    SAVED="$SAVE.zip"
fi
#
# Download the new version; abort on failure instead of unzipping
# a missing/stale archive (the original did not check).
#
echo ">>Download latest web2py release:" >&2
curl -O "$URL" || { echo "Download failed" >&2; exit 1; }
#
# Unzip into web2py/
#
unzip -q -o "$ZIP"
rm "$ZIP"
echo -n ">>New version: " >&2
cat "$TARGET/VERSION" >&2
if [ -n "$SAVED" ]; then
    echo ">>Old version saved as $SAVED"
fi
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Unattended web2py upgrade script, meant for cron:

    crontab -e
    * 3 * * * root path/to/this/file [nightly]

Downloads the latest (stable, or nightly when passed 'nightly') web2py
source zip, compares its VERSION with the installed one, and if newer
unzips it over the existing tree and restarts apache.
"""

import os
import re
import sys
import urllib
import zipfile

USER = 'www-data'
TMPFILENAME = 'web2py_src_update.zip'


def parse_version(version_line):
    """Extract the numeric version from a line like
    'Version 1.99.2 (2011-09-26 ...)' as a comparable int tuple.

    Returns () when no x.y.z number is found, so any real version
    compares greater than an unparseable/empty one.
    """
    match = re.search(r'(\d+)\.(\d+)\.(\d+)', version_line)
    if not match:
        return ()
    return tuple(int(part) for part in match.groups())


def main():
    if len(sys.argv) > 1 and sys.argv[1] == 'nightly':
        url = 'http://web2py.com/examples/static/nightly/web2py_src.zip'
    else:
        url = 'http://web2py.com/examples/static/web2py_src.zip'

    # This script lives in web2py/scripts/; work from the parent of the
    # web2py directory so 'web2py/VERSION' and the unzip target resolve.
    realpath = os.path.realpath(__file__)
    os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(realpath))))

    try:
        with open('web2py/VERSION', 'r') as version_file:
            old_version = version_file.read().strip()
    except IOError:
        old_version = ''

    with open(TMPFILENAME, 'wb') as tmp:
        tmp.write(urllib.urlopen(url).read())
    new_version = zipfile.ZipFile(TMPFILENAME).read('web2py/VERSION').strip()

    # Compare numeric tuples: the original compared raw strings, which
    # orders '1.9.x' AFTER '1.10.x' and so misses upgrades.
    if parse_version(new_version) > parse_version(old_version):
        os.system('sudo -u %s unzip -o %s' % (USER, TMPFILENAME))
        # '||' (shell OR), not '|': the original piped apachectl's
        # output into apache2ctl instead of falling back to it.
        os.system('apachectl restart || apache2ctl restart')


if __name__ == '__main__':
    main()
#!/bin/bash
# Start/stop helper for web2py on Arch Linux.
# Must be run from the WEB2PY root directory.

prog=$(basename "$0")

cd "$(pwd)"
chmod +x "$prog"

web2py_start() {
    nohup ./"$prog" -a "" 2>/dev/null &
    pid=$(pgrep "$prog" | tail -1)
    if [ "$pid" -ne $$ ]; then
        echo "WEB2PY has been started."
    fi
}

web2py_stop() {
    kill -15 $(pgrep "$prog" | grep -v $$) 2>/dev/null
    pid=$(pgrep "$prog" | head -1)
    if [ "$pid" -ne $$ ]; then
        echo "WEB2PY has been stopped."
    fi
}

case "$1" in
    start)
        web2py_start
        ;;
    stop)
        web2py_stop
        ;;
    restart)
        web2py_stop
        web2py_start
        ;;
    *)
        echo "Usage: $prog [start|stop|restart]"
        ;;
esac

exit 0
#!/bin/bash
#
# /etc/rc.d/init.d/web2pyd
#
# Starts the Web2py Daemon on Fedora (Red Hat Linux)
#
# To execute automatically at startup:
#   sudo chkconfig --add web2pyd
#
# chkconfig: 2345 90 10
# description: Web2py Daemon
# processname: web2pyd
# pidfile: /var/lock/subsys/web2pyd

source /etc/rc.d/init.d/functions

RETVAL=0
NAME=web2pyd
DESC="Web2py Daemon"
DAEMON_DIR="/usr/lib/web2py"
ADMINPASS="admin"
PIDFILE=/var/run/$NAME.pid
PORT=8001
PYTHON=python

cd "$DAEMON_DIR"

start() {
    echo -n $"Starting $DESC ($NAME): "
    daemon --check $NAME $PYTHON $DAEMON_DIR/web2py.py -Q --nogui -a $ADMINPASS -d $PIDFILE -p $PORT &
    RETVAL=$?
    if [ $RETVAL -eq 0 ]; then
        touch /var/lock/subsys/$NAME
    fi
    echo
    return $RETVAL
}

stop() {
    echo -n $"Shutting down $DESC ($NAME): "
    killproc -p "$PIDFILE" -d 3 "$NAME"
    # Fixed: the original never captured killproc's status, so the
    # cleanup below keyed off a stale RETVAL.
    RETVAL=$?
    echo
    if [ $RETVAL -eq 0 ]; then
        rm -f /var/lock/subsys/$NAME
        rm -f $PIDFILE
    fi
    return $RETVAL
}

restart() {
    stop
    start
}

status() {
    if [ -r "$PIDFILE" ]; then
        pid=$(cat "$PIDFILE")
    fi
    # Quoted -n test: the original's unquoted [ $pid ] errors when the
    # pidfile is missing or empty.
    if [ -n "$pid" ]; then
        echo "$NAME (pid $pid) is running..."
    else
        echo "$NAME is stopped"
    fi
}

case "$1" in
    start) start ;;
    stop) stop ;;
    status) status ;;
    restart) restart ;;
    condrestart)
        [ -e /var/lock/subsys/$NAME ] && restart
        RETVAL=$?
        ;;
    *)
        echo $"Usage: $0 {start|stop|restart|condrestart|status}"
        RETVAL=1
        ;;
esac

exit $RETVAL
+ start-stop-daemon --stop --test --quiet --pidfile $PIDFILE \ + && return 1 + + start-stop-daemon --start --quiet --pidfile $PIDFILE \ + ${DAEMON_USER:+--chuid $DAEMON_USER} --chdir $DAEMON_DIR \ + --background --exec $DAEMON -- $DAEMON_ARGS \ + || return 2 + + return 0; +} + +# +# Function that stops the daemon/service +# +do_stop() +{ + # Return + # 0 if daemon has been stopped + # 1 if daemon was already stopped + # 2 if daemon could not be stopped + # other if a failure occurred + + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL=$? + # Many daemons don't delete their pidfiles when they exit. + rm -f $PIDFILE + return "$RETVAL" +} + +# +# Function that restarts the daemon/service +# +do_restart() +{ + # Return + # 0 if daemon was (re-)started + # 1 if daemon was not strated or re-started + + do_stop + case "$?" in + 0|1) + do_start + case "$?" in + 0) RETVAL=0 ;; + 1) RETVAL=1 ;; # Old process is still running + *) RETVAL=1 ;; # Failed to start + esac + ;; + *) RETVAL=1 ;; # Failed to stop + esac + + return "$RETVAL" +} + +# +# Function that sends a SIGHUP to the daemon/service +# +do_reload() { + # + # If the daemon can reload its configuration without + # restarting (for example, when it is sent a SIGHUP), + # then implement that here. + # + start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE + return 0 +} + +# +# Function that queries the status of the daemon/service +# +do_status() +{ + # Return + # 0 if daemon is responding and OK + # 1 if daemon is not responding, but PIDFILE exists + # 2 if daemon is not responding, but LOCKFILE exists + # 3 if deamon is not running + # 4 if daemon status is unknown + + # Check to see if the daemon is already running. + start-stop-daemon --stop --test --quiet --pidfile $PIDFILE \ + && return 0 + [ -f $PIDFILE ] && return 1 + return 3 +} + +case "$1" in + start) + [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC" "$NAME" + do_start + RETVAL=$? 
+ [ "$VERBOSE" != no ] && + case "$RETVAL" in + 0|1) log_end_msg 0 ;; + *) log_end_msg 1 ;; + esac + exit "$RETVAL" + ;; + stop) + [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + RETVAL=$? + [ "$VERBOSE" != no ] && + case "$RETVAL" in + 0|1) log_end_msg 0 ;; + *) log_end_msg 1 ;; + esac + exit "$RETVAL" + ;; + #reload|force-reload) + # + # If do_reload() is not implemented then leave this commented out + # and leave 'force-reload' as an alias for 'restart'. + # + #[ "$VERBOSE" != no ] && log_daemon_msg "Reloading $DESC" "$NAME" + #do_reload + #RETVAL=$? + #[ "$VERBOSE" != no ] && log_end_msg $? + #exit "$RETVAL" + #;; + restart|force-reload) + # + # If the "reload" option is implemented then remove the + # 'force-reload' alias + # + [ "$VERBOSE" != no ] && log_daemon_msg "Restarting $DESC" "$NAME" + do_restart + RETVAL=$? + [ "$VERBOSE" != no ] && log_end_msg "$RETVAL" + exit "$RETVAL" + ;; + status) + do_status + RETVAL=$? + [ "$VERBOSE" != no ] && + case "$RETVAL" in + 0) log_success_msg "$NAME is running" ;; + *) log_failure_msg "$NAME is not running" ;; + esac + exit "$RETVAL" + ;; + *) + echo "Usage: $SCRIPTNAME {start|stop|restart|force-reload|status}" >&2 + exit 3 + ;; +esac + +: + +# This was based off /etc/init.d/skeleton from the Ubuntu 8.04 Hardy release. +# (md5sum: da0162012b6a916bdbd4e2580282af78). If we notice that changes, we +# should re-examine things. + +# The configuration at the very top seems to be documented as part of the +# Linux Standard Base (LSB) Specification. See section 20.6 Facility Names +# in particular. This is also where I got the spec for the status parm. 
#!/usr/bin/env python

"""
Distutils setup script for web2py.

Running 'python setup.py sdist' first packs applications/, VERSION and
splashlogo.gif into gluon/env.tar, which ships as package data.
"""

from distutils.core import setup
from gluon.fileutils import tar, read_file
import sys


def start():
    """Build gluon/env.tar when making an sdist, then run setup().

    The previous revision shadowed gluon.fileutils.tar with a broken
    local copy (it referenced an undefined `listdir`, never imported
    `os`, and papered over the NameError with a bare except); the
    imported helper is used directly instead.
    """
    if 'sdist' in sys.argv:
        tar('gluon/env.tar', ['applications', 'VERSION', 'splashlogo.gif'])

    setup(name='web2py',
          # VERSION looks like "Version x.y.z (date)"; take the number.
          version=read_file("VERSION").split()[1],
          description="""full-stack framework for rapid development and prototyping
        of secure database-driven web-based applications, written and
        programmable in Python.""",
          long_description="""
    Everything in one package with no dependencies. Development, deployment,
    debugging, testing, database administration and maintenance of applications can
    be done via the provided web interface. web2py has no configuration files,
    requires no installation, can run off a USB drive. web2py uses Python for the
    Model, the Views and the Controllers, has a built-in ticketing system to manage
    errors, an internationalization engine, works with SQLite, PostgreSQL, MySQL,
    MSSQL, FireBird, Oracle, IBM DB2, Informix, Ingres, sybase and Google App Engine via a
    Database Abstraction Layer. web2py includes libraries to handle
    HTML/XML, RSS, ATOM, CSV, RTF, JSON, AJAX, XMLRPC, WIKI markup. Production
    ready, capable of upload/download streaming of very large files, and always
    backward compatible.
    """,
          author='Massimo Di Pierro',
          author_email='mdipierro@cs.depaul.edu',
          license='http://web2py.com/examples/default/license',
          classifiers=["Development Status :: 5 - Production/Stable"],
          url='http://web2py.com',
          platforms='Windows, Linux, Mac, Unix,Windows Mobile',
          packages=['gluon',
                    'gluon/contrib',
                    'gluon/contrib/gateways',
                    'gluon/contrib/login_methods',
                    'gluon/contrib/markdown',
                    'gluon/contrib/markmin',
                    'gluon/contrib/memcache',
                    'gluon/contrib/pyfpdf',
                    'gluon/contrib/pymysql',
                    'gluon/contrib/pyrtf',
                    'gluon/contrib/pysimplesoap',
                    'gluon/contrib/simplejson',
                    'gluon/tests',
                    ],
          package_data={'gluon': ['env.tar']},
          scripts=['w2p_apps', 'w2p_run', 'w2p_clone'],
          )


if __name__ == '__main__':
    # web2py does not require installation; this is only for users who
    # explicitly want a system-wide install.
    start()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
py2app setup script (generated by py2applet, then adapted).

Usage:
    python setup_app.py py2app
"""

import fnmatch
import os


def reglob(directory, pattern="*"):
    """Recursively yield paths under *directory* whose basename matches
    *pattern* (fnmatch-style).

    Entries whose name starts with '.' or '#' or ends with '~' are
    skipped.  Matching directories are yielded as well as files (as the
    original did), and symlinked directories are not descended into.

    Replaces the original pseudo-iterator class that abused the legacy
    __getitem__ protocol: it ignored its index argument and terminated
    only via an accidental IndexError from stack.pop().
    """
    stack = [directory]
    while stack:
        current = stack.pop()
        for name in os.listdir(current):
            fullname = os.path.join(current, name)
            if os.path.isdir(fullname) and not os.path.islink(fullname):
                stack.append(fullname)
            if name.startswith('.') or name.startswith('#') or name.endswith('~'):
                continue
            if fnmatch.fnmatch(name, pattern):
                yield fullname


def main():
    # Imported here so the module (and reglob) is importable without
    # setuptools/gluon, and so setup() no longer runs on import.
    from setuptools import setup
    from gluon.import_all import base_modules, contributed_modules

    setup(app=['web2py.py'],
          data_files=[
              'NEWINSTALL',
              'ABOUT',
              'LICENSE',
              'VERSION',
          ]
          + list(reglob('applications/examples'))
          + list(reglob('applications/welcome'))
          + list(reglob('applications/admin')),
          options={'py2app': {
              'argv_emulation': True,
              'includes': base_modules,
              'packages': contributed_modules,
          }},
          setup_requires=['py2app'])


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Build a standalone Windows web2py binary with py2exe, driven by
preferences in setup_exe.conf.

Usage:
    Install py2exe: http://sourceforge.net/projects/py2exe/files/
    Copy this script and setup_exe.conf into the web2py directory
    c:\bin\python26\python setup_exe.py py2exe

Adapted from
http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py
"""

import os
import re
import shutil
import sys
import zipfile
from glob import glob


def copy_folders(source, destination):
    """Copy files & folders from source to dist/<destination>,
    replacing any previous copy."""
    target = os.path.join('dist', destination)
    if os.path.exists(target):
        shutil.rmtree(target)
    shutil.copytree(os.path.join(source), target)


# borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder=""):
    """Recursively add *directory*'s contents to the open ZipFile.

    Archive names are built with '/' and relative to *directory*: the
    original used `folder + os.sep + item`, which produced a spurious
    leading separator on every entry and backslash-separated names on
    Windows (invalid per the zip spec).
    """
    for item in os.listdir(directory):
        path = os.path.join(directory, item)
        arcname = item if not folder else folder + '/' + item
        if os.path.isfile(path):
            zipf.write(path, arcname)
        elif os.path.isdir(path):
            recursive_zip(zipf, path, arcname)


def main():
    # Build-only imports kept local so the helpers above are importable
    # (and testable) without py2exe/gluon installed.
    import ConfigParser
    import py2exe  # noqa: registers the py2exe distutils command
    from distutils.core import setup
    from gluon.import_all import base_modules, contributed_modules
    from gluon.fileutils import readlines_file

    # read web2py version from VERSION file
    web2py_version_line = readlines_file('VERSION')[0]
    # use a regular expression to get just the version number
    v_re = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
    web2py_version = v_re.search(web2py_version_line).group(0)

    # pull in preferences from the config file
    Config = ConfigParser.ConfigParser()
    Config.read('setup_exe.conf')
    remove_msft_dlls = Config.getboolean("Setup", "remove_microsoft_dlls")
    copy_apps = Config.getboolean("Setup", "copy_apps")
    copy_site_packages = Config.getboolean("Setup", "copy_site_packages")
    copy_scripts = Config.getboolean("Setup", "copy_scripts")
    make_zip = Config.getboolean("Setup", "make_zip")
    zip_filename = Config.get("Setup", "zip_filename")
    remove_build_files = Config.getboolean("Setup", "remove_build_files")

    # Python base version
    python_version = sys.version[:3]

    # Modules deprecated in python2.6 that are in base_modules
    py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter']
    if python_version == '2.6':
        base_modules += ['json', 'multiprocessing']
        base_modules = list(set(base_modules).difference(set(py26_deprecated)))

        # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52
        try:
            shutil.copytree('C:\\Bin\\Microsoft.VC90.CRT', 'dist/')
        except Exception:
            print("You MUST copy Microsoft.VC90.CRT folder into the dist directory")

    setup(
        console=['web2py.py'],
        windows=[{'script': 'web2py.py',
                  # MUST NOT be just 'web2py': it would override the
                  # standard web2py.exe
                  'dest_base': 'web2py_no_console'
                  }],
        name="web2py",
        version=web2py_version,
        description="web2py web framework",
        author="Massimo DiPierro",
        license="LGPL v3",
        data_files=[
            'ABOUT',
            'LICENSE',
            'VERSION',
            'splashlogo.gif',
            'logging.example.conf',
            'options_std.py',
            'app.example.yaml',
            'queue.example.yaml'
        ],
        options={'py2exe': {
            'packages': contributed_modules,
            'includes': base_modules,
        }},
    )

    print("web2py binary successfully built")

    # remove Windows OS dlls the user is unlikely to be able to distribute
    if remove_msft_dlls:
        print("Deleted Microsoft files not licensed for open source distribution")
        print("You are still responsible for making sure you have the rights to distribute any other included files!")
        # delete the API-MS-Win-Core DLLs
        for f in glob('dist/API-MS-Win-*.dll'):
            os.unlink(f)
        # then delete some other files belonging to Microsoft
        other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll']
        for f in other_ms_files:
            try:
                os.unlink(os.path.join('dist', f))
            except OSError:
                print("unable to delete dist/" + f)
                sys.exit(1)

    # should we include applications?
    if copy_apps:
        copy_folders('applications', 'applications')
        print("Your application(s) have been added")
    else:
        # only copy web2py's default applications
        copy_folders('applications/admin', 'applications/admin')
        copy_folders('applications/welcome', 'applications/welcome')
        copy_folders('applications/examples', 'applications/examples')
        print("Only web2py's admin, examples & welcome applications have been added")

    # should we copy the project's site-packages into dist/site-packages?
    if copy_site_packages:
        copy_folders('site-packages', 'site-packages')
    else:
        # no worries, web2py will create the (empty) folder on first run
        print("Skipping site-packages")

    # should we copy the project's scripts into dist/scripts?
    if copy_scripts:
        copy_folders('scripts', 'scripts')
    else:
        print("Skipping scripts")

    # should we create a zip file of the build?
    if make_zip:
        # to keep consistent with how the official web2py windows zip is
        # laid out, create a web2py folder & copy dist's files into it
        shutil.copytree('dist', 'zip_temp/web2py')
        zipf = zipfile.ZipFile(zip_filename + ".zip", "w",
                               compression=zipfile.ZIP_DEFLATED)
        recursive_zip(zipf, 'zip_temp')  # zip_temp is the archive root
        zipf.close()
        shutil.rmtree('zip_temp')
        print("Your Windows binary version of web2py can be found in " + zip_filename + ".zip")
        print("You may extract the archive anywhere and then run web2py/web2py.exe")

    # should py2exe build files be removed?
    if remove_build_files:
        shutil.rmtree('build')
        shutil.rmtree('deposit')
        shutil.rmtree('dist')
        print("py2exe build files removed")

    # final info
    if not make_zip and not remove_build_files:
        print("Your Windows binary & associated files can also be found in /dist")

    print("Finished!")
    print("Enjoy web2py " + web2py_version_line)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Interactive py2exe build script for web2py under Python 2.6.

Usage:
    Install py2exe: http://sourceforge.net/projects/py2exe/files/
    Copy this script to the web2py directory
    c:\bin\python26\python setup_exe_2.6.py py2exe

Adapted from http://bazaar.launchpad.net/~flavour/sahana-eden/trunk/view/head:/static/scripts/tools/standalone_exe.py
"""

from distutils.core import setup
import py2exe  # noqa: registers the py2exe distutils command
from gluon.import_all import base_modules, contributed_modules
from glob import glob
import os
import re
import shutil
import sys
import zipfile

# Python base version
python_version = sys.version[:3]

# List of modules deprecated in python2.6 that are in base_modules
py26_deprecated = ['mhlib', 'multifile', 'mimify', 'sets', 'MimeWriter']

if python_version == '2.6':
    base_modules += ['json', 'multiprocessing']
    base_modules = list(set(base_modules).difference(set(py26_deprecated)))

    # Python26 compatibility: http://www.py2exe.org/index.cgi/Tutorial#Step52
    try:
        shutil.copytree('C:\\Bin\\Microsoft.VC90.CRT', 'dist/')
    except Exception:
        print("You MUST copy Microsoft.VC90.CRT folder into the dist directory")

# read web2py version from the VERSION file -- directly: the previous
# revision called readlines_file() without importing it (NameError)
version_file = open('VERSION', 'r')
try:
    web2py_version_line = version_file.readline()
finally:
    version_file.close()
# use a regular expression to get just the version number
v_re = re.compile(r'[0-9]+\.[0-9]+\.[0-9]+')
web2py_version = v_re.search(web2py_version_line).group(0)

setup(
    console=['web2py.py'],
    windows=[{'script': 'web2py.py',
              # MUST NOT be just 'web2py': it would override the
              # standard web2py.exe
              'dest_base': 'web2py_no_console'
              }],
    name="web2py",
    version=web2py_version,
    description="web2py web framework",
    author="Massimo DiPierro",
    license="LGPL v3",
    data_files=[
        'ABOUT',
        'LICENSE',
        'VERSION',
        'splashlogo.gif',
        'logging.example.conf',
        'options_std.py',
        'app.example.yaml',
        'queue.example.yaml'
    ],
    options={'py2exe': {
        'packages': contributed_modules,
        'includes': base_modules,
    }},
)

print("web2py binary successfully built")

# offer to remove Windows OS dlls the user is unlikely to be able to distribute
print("The process of building a windows executable often includes copying files that belong to Windows.")
delete_ms_files = raw_input("Delete API-MS-Win-* files that are probably unsafe for distribution? (Y/n) ")
if delete_ms_files.lower().startswith("y"):
    print("Deleted Microsoft files not licensed for open source distribution")
    print("You are still responsible for making sure you have the rights to distribute any other included files!")
    # delete the API-MS-Win-Core DLLs
    for f in glob('dist/API-MS-Win-*.dll'):
        os.unlink(f)
    # then delete some other files belonging to Microsoft
    other_ms_files = ['KERNELBASE.dll', 'MPR.dll', 'MSWSOCK.dll', 'POWRPROF.dll']
    for f in other_ms_files:
        try:
            os.unlink(os.path.join('dist', f))
        except OSError:
            print("unable to delete dist/" + f)
            sys.exit(1)

# Offer to include applications
copy_apps = raw_input("Include your web2py application(s)? (Y/n) ")
if os.path.exists('dist/applications'):
    shutil.rmtree('dist/applications')
if copy_apps.lower().startswith("y"):
    shutil.copytree('applications', 'dist/applications')
    print("Your application(s) have been added")
else:
    shutil.copytree('applications/admin', 'dist/applications/admin')
    shutil.copytree('applications/welcome', 'dist/applications/welcome')
    shutil.copytree('applications/examples', 'dist/applications/examples')
    print("Only web2py's admin/welcome/examples applications have been added")
print("")

# Offer to copy the project's site-packages & scripts into dist/
copy_extras = raw_input("Include your web2py site-packages & scripts folders? (Y/n) ")
if copy_extras.lower().startswith("y"):
    # copy site-packages (fixed: this 'if' was missing its colon -> SyntaxError)
    if os.path.exists('dist/site-packages'):
        shutil.rmtree('dist/site-packages')
    shutil.copytree('site-packages', 'dist/site-packages')
    # copy scripts
    if os.path.exists('dist/scripts'):
        shutil.rmtree('dist/scripts')
    shutil.copytree('scripts', 'dist/scripts')
else:
    # no worries, web2py will create the (empty) folders on first run
    print("Skipping site-packages & scripts")

print("")


# borrowed from http://bytes.com/topic/python/answers/851018-how-zip-directory-python-using-zipfile
def recursive_zip(zipf, directory, folder=""):
    """Recursively add directory's contents to the open ZipFile,
    using '/'-separated archive names relative to directory."""
    for item in os.listdir(directory):
        path = os.path.join(directory, item)
        arcname = item if not folder else folder + '/' + item
        if os.path.isfile(path):
            zipf.write(path, arcname)
        elif os.path.isdir(path):
            recursive_zip(zipf, path, arcname)


create_zip = raw_input("Create a zip file of web2py for Windows (Y/n)? ")
if create_zip.lower().startswith("y"):
    # to keep consistent with how the official web2py windows zip is laid
    # out, create a web2py folder & copy dist's files into it
    shutil.copytree('dist', 'zip_temp/web2py')
    zipf = zipfile.ZipFile("web2py_win.zip", "w", compression=zipfile.ZIP_DEFLATED)
    recursive_zip(zipf, 'zip_temp')  # zip_temp is the archive root
    zipf.close()
    shutil.rmtree('zip_temp')
    print("Your Windows binary version of web2py can be found in web2py_win.zip")
    print("You may extract the archive anywhere and then run web2py/web2py.exe")

    # offer to clean up
    print("Since you created a zip file you likely do not need the build, deposit and dist folders used while building binary.")
    clean_up_files = raw_input("Delete these un-necessary folders/files? (Y/n) ")
    if clean_up_files.lower().startswith("y"):
        shutil.rmtree('build')
        shutil.rmtree('deposit')
        shutil.rmtree('dist')
    else:
        print("Your Windows binary & associated files can also be found in /dist")
else:
    # Didn't want a zip file created
    print("")
    print("Creation of web2py Windows binary completed.")
    print("You should copy the /dist directory and its contents.")
    print("To run use web2py.exe")
print("Finished!")
print("Enjoy web2py " + web2py_version_line)
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)


This is a WSGI handler for Apache
Requires apache+mod_wsgi.

In httpd.conf put something like:

    LoadModule wsgi_module modules/mod_wsgi.so
    WSGIScriptAlias / /path/to/wsgihandler.py

"""

# change these parameters as required
LOGGING = False
SOFTCRON = False

import os
import sys

_here = os.path.dirname(os.path.abspath(__file__))
os.chdir(_here)
# Ensure the web2py directory is first on sys.path, exactly once.
sys.path = [_here] + [p for p in sys.path if p != _here]

# mod_wsgi does not allow writes to stdout; route them to stderr.
sys.stdout = sys.stderr

import gluon.main

if LOGGING:
    application = gluon.main.appfactory(
        wsgiapp=gluon.main.wsgibase,
        logfilename='httpserver.log',
        profilerfilename=None)
else:
    application = gluon.main.wsgibase

if SOFTCRON:
    from gluon.settings import global_settings
    global_settings.web2py_crontype = 'soft'