Source Code for Module web2py.gluon.dal

#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu>
License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)

Thanks to
    * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support
    * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support
    * Denes
    * Chris Clark
    * clach05
    * Denes Lengyel
    * and many others who have contributed to current and previous versions

This file contains the DAL support for many relational databases,
including:
- SQLite
- MySQL
- Postgres
- Oracle
- MS SQL
- DB2
- Interbase
- Ingres
- SapDB (experimental)
- Cubrid (experimental)
- CouchDB (experimental)
- MongoDB (in progress)
- Google:nosql
- Google:sql

Example of usage:

>>> # from dal import DAL, Field

### create DAL connection (and create DB if it does not exist)
>>> db = DAL(('mysql://a:b@localhost/x', 'sqlite://storage.sqlite'), folder=None)

### define a table 'person' (create/alter as necessary)
>>> person = db.define_table('person', Field('name', 'string'))

### insert a record
>>> id = person.insert(name='James')

### retrieve it by id
>>> james = person(id)

### retrieve it by name
>>> james = person(name='James')

### retrieve it by arbitrary query
>>> query = (person.name == 'James') & (person.name.startswith('J'))
>>> james = db(query).select(person.ALL)[0]

### update one record
>>> james.update_record(name='Jim')

### update multiple records by query
>>> db(person.name.like('J%')).update(name='James')
1

### delete records by query
>>> db(person.name.lower() == 'jim').delete()
0

### retrieve multiple records (rows)
>>> people = db(person).select(orderby=person.name, groupby=person.name, limitby=(0, 100))

### further filter them
>>> james = people.find(lambda row: row.name == 'James').first()
>>> print james.id, james.name
1 James

### check aggregates
>>> counter = person.id.count()
>>> print db(person).select(counter).first()(counter)
1

### delete one record
>>> james.delete_record()
1

### delete (drop) entire database table
>>> person.drop()

Supported field types:
id string text boolean integer double decimal password upload blob time date datetime

Supported DAL URI strings:
'sqlite://test.db'
'sqlite:memory'
'jdbc:sqlite://test.db'
'mysql://root:none@localhost/test'
'postgres://mdipierro:none@localhost/test'
'jdbc:postgres://mdipierro:none@localhost/test'
'mssql://web2py:none@A64X2/web2py_test'
'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
'oracle://username:password@database'
'firebird://user:password@server:3050/database'
'db2://DSN=dsn;UID=user;PWD=pass'
'firebird://username:password@hostname/database'
'firebird_embedded://username:password@c://path'
'informix://user:password@server:3050/database'
'informixu://user:password@server:3050/database' # unicode informix
'google:datastore' # for google app engine datastore
'google:sql' # for google app engine with sql (mysql compatible)
'teradata://DSN=dsn;UID=user;PWD=pass' # experimental

For more info:
help(DAL)
help(Field)
"""
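
# A minimal standalone sketch (added; not part of the original source): the
# module can be used outside web2py, assuming dal.py is on sys.path:
#
#     from dal import DAL, Field
#     db = DAL('sqlite://storage.sqlite', folder='/tmp', pool_size=5)
#     db.define_table('thing', Field('name'))
#     db.thing.insert(name='chair')
#     db.commit()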

###################################################################################
# this file only exposes DAL and Field
###################################################################################

__all__ = ['DAL', 'Field']
MAXCHARLENGTH = 512
INFINITY = 2**15 # not quite infinity, but a reasonable default maximum char length

import re
import sys
import locale
import os
import types
import cPickle
import datetime
import threading
import time
import cStringIO
import csv
import copy
import socket
import logging
import copy_reg
import base64
import shutil
import marshal
import decimal
import struct
import urllib
import hashlib
import uuid
import glob

CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

###################################################################################
# the following checks allow the dal to run without web2py, as a standalone module
###################################################################################
try:
    from utils import web2py_uuid
except ImportError:
    import uuid
    def web2py_uuid(): return str(uuid.uuid4())

try:
    import portalocker
    have_portalocker = True
except ImportError:
    have_portalocker = False

try:
    import serializers
    have_serializers = True
except ImportError:
    have_serializers = False

try:
    import validators
    have_validators = True
except ImportError:
    have_validators = False

logger = logging.getLogger("web2py.dal")
DEFAULT = lambda:0

sql_locker = threading.RLock()
thread = threading.local()

# internal representation of tables with fields
# <table>.<field>, table and field names may only contain [a-zA-Z0-9_]

regex_dbname = re.compile('^(\w+)(\:\w+)*')
table_field = re.compile('^[\w_]+\.[\w_]+$')
regex_content = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$')
regex_cleanup_fn = re.compile('[\'"\s;]+')
string_unpack = re.compile('(?<!\|)\|(?!\|)')
regex_python_keywords = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')


# list of drivers will be built on the fly
# and lists only what is available
drivers = []

try:
    from new import classobj
    from google.appengine.ext import db as gae
    from google.appengine.api import namespace_manager, rdbms
    from google.appengine.api.datastore_types import Key  ### needed for belongs on ID
    from google.appengine.ext.db.polymodel import PolyModel
    drivers.append('google')
except ImportError:
    pass

if not 'google' in drivers:

    try:
        from pysqlite2 import dbapi2 as sqlite3
        drivers.append('pysqlite2')
    except ImportError:
        try:
            from sqlite3 import dbapi2 as sqlite3
            drivers.append('SQLite3')
        except ImportError:
            logger.debug('no sqlite3 or pysqlite2.dbapi2 driver')

    try:
        import contrib.pymysql as pymysql
        drivers.append('pymysql')
    except ImportError:
        logger.debug('no pymysql driver')

    try:
        import psycopg2
        drivers.append('PostgreSQL')
    except ImportError:
        logger.debug('no psycopg2 driver')

    try:
        import cx_Oracle
        drivers.append('Oracle')
    except ImportError:
        logger.debug('no cx_Oracle driver')

    try:
        import pyodbc
        drivers.append('MSSQL/DB2')
    except ImportError:
        logger.debug('no MSSQL/DB2 driver')

    try:
        import kinterbasdb
        drivers.append('Interbase')
    except ImportError:
        logger.debug('no kinterbasdb driver')

    try:
        import firebirdsql
        drivers.append('Firebird')
    except ImportError:
        logger.debug('no Firebird driver')

    try:
        import informixdb
        drivers.append('Informix')
        logger.warning('Informix support is experimental')
    except ImportError:
        logger.debug('no informixdb driver')

    try:
        import sapdb
        drivers.append('SAPDB')
        logger.warning('SAPDB support is experimental')
    except ImportError:
        logger.debug('no sapdb driver')

    try:
        import cubriddb
        drivers.append('Cubrid')
        logger.warning('Cubrid support is experimental')
    except ImportError:
        logger.debug('no cubriddb driver')

    try:
        from com.ziclix.python.sql import zxJDBC
        import java.sql
        # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
        from org.sqlite import JDBC  # required by java.sql; ensure we have it
        drivers.append('zxJDBC')
        logger.warning('zxJDBC support is experimental')
        is_jdbc = True
    except ImportError:
        logger.debug('no zxJDBC driver')
        is_jdbc = False

    try:
        import ingresdbi
        drivers.append('Ingres')
    except ImportError:
        logger.debug('no Ingres driver')
    # NOTE could try JDBC.......

    try:
        import couchdb
        drivers.append('CouchDB')
    except ImportError:
        logger.debug('no couchdb driver')

    try:
        import pymongo
        drivers.append('mongoDB')
    except:
        logger.debug('no mongoDB driver')


if 'google' in drivers:

    is_jdbc = False

    class GAEDecimalProperty(gae.Property):
        """
        GAE decimal implementation
        """
        data_type = decimal.Decimal

        def __init__(self, precision, scale, **kwargs):
            super(GAEDecimalProperty, self).__init__(**kwargs)
            d = '1.'
            for x in range(scale):
                d += '0'
            self.round = decimal.Decimal(d)

        def get_value_for_datastore(self, model_instance):
            value = super(GAEDecimalProperty, self).get_value_for_datastore(model_instance)
            if value:
                return str(value)
            else:
                return None

        def make_value_from_datastore(self, value):
            if value:
                return decimal.Decimal(value).quantize(self.round)
            else:
                return None

        def validate(self, value):
            value = super(GAEDecimalProperty, self).validate(value)
            if value is None or isinstance(value, decimal.Decimal):
                return value
            elif isinstance(value, basestring):
                return decimal.Decimal(value)
            raise gae.BadValueError("Property %s must be a Decimal or string." % self.name)

###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):

    pools = {}

    @staticmethod
    def set_folder(folder):
        thread.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        if not hasattr(thread, 'instances'):
            return
        while thread.instances:
            instance = thread.instances.pop()
            getattr(instance, action)()
            # ## if you want pools, recycle this connection
            really = True
            if instance.pool_size:
                sql_locker.acquire()
                pool = ConnectionPool.pools[instance.uri]
                if len(pool) < instance.pool_size:
                    pool.append(instance.connection)
                    really = False
                sql_locker.release()
            if really:
                getattr(instance, 'close')()
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        if hasattr(thread, 'folder'):
            self.folder = thread.folder
        else:
            self.folder = thread.folder = ''

        # Creating the folder if it does not exist
        if False and self.folder and not os.path.exists(self.folder):
            os.mkdir(self.folder)

    def pool_connection(self, f):
        if not self.pool_size:
            self.connection = f()
        else:
            uri = self.uri
            sql_locker.acquire()
            if not uri in ConnectionPool.pools:
                ConnectionPool.pools[uri] = []
            if ConnectionPool.pools[uri]:
                self.connection = ConnectionPool.pools[uri].pop()
                sql_locker.release()
            else:
                sql_locker.release()
                self.connection = f()
        if not hasattr(thread, 'instances'):
            thread.instances = []
        thread.instances.append(self)
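
# Rough usage sketch (added; not part of the original source): with
# pool_size > 0 an adapter asks pool_connection for a cached connection before
# opening a new one, and close_all_instances returns it to the pool:
#
#     db = DAL('postgres://user:pass@localhost/test', pool_size=10)
#     ...                                           # work with db
#     ConnectionPool.close_all_instances('commit')  # commit, then recycle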

###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):

    driver = None
    maxcharlength = INFINITY
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        }

    def integrity_error(self):
        return self.driver.IntegrityError

    def file_exists(self, filename):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        return os.path.exists(filename)

    def file_open(self, filename, mode='rb', lock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        fileobj = open(filename, mode)
        if have_portalocker and lock:
            if mode in ('r', 'rb'):
                portalocker.lock(fileobj, portalocker.LOCK_SH)
            elif mode in ('w', 'wb', 'a'):
                portalocker.lock(fileobj, portalocker.LOCK_EX)
            else:
                fileobj.close()
                raise RuntimeError, "Unsupported file_open mode"
        return fileobj

    def file_close(self, fileobj, unlock=True):
        """
        to be used ONLY for files that on GAE may not be on filesystem
        """
        if fileobj:
            if have_portalocker and unlock:
                portalocker.unlock(fileobj)
            fileobj.close()

    def file_delete(self, filename):
        os.unlink(filename)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        class Dummy(object):
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()

    def sequence_name(self, tablename):
        return '%s_sequence' % tablename

    def trigger_name(self, tablename):
        return '%s_sequence' % tablename

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        fields = []
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}
        tablename = table._tablename
        sortable = 0
        for field in table:
            sortable += 1
            k = field.name
            if isinstance(field.type, SQLCustomType):
                ftype = field.type.native or field.type.type
            elif field.type.startswith('reference'):
                referenced = field.type[10:].strip()
                constraint_name = self.constraint_name(tablename, field.name)
                if hasattr(table, '_primarykey'):
                    rtablename, rfieldname = referenced.split('.')
                    rtable = table._db[rtablename]
                    rfield = rtable[rfieldname]
                    # must be PK reference or unique
                    if rfieldname in rtable._primarykey or rfield.unique:
                        ftype = self.types[rfield.type[:9]] % dict(length=rfield.length)
                        # multicolumn primary key reference?
                        if not rfield.unique and len(rtable._primarykey) > 1:
                            # then it has to be a table level FK
                            if rtablename not in TFK:
                                TFK[rtablename] = {}
                            TFK[rtablename][rfieldname] = field.name
                        else:
                            ftype = ftype + \
                                self.types['reference FK'] % dict(
                                    constraint_name=constraint_name,
                                    table_name=tablename,
                                    field_name=field.name,
                                    foreign_key='%s (%s)' % (rtablename, rfieldname),
                                    on_delete_action=field.ondelete)
                else:
                    # make a guess here for circular references
                    id_fieldname = referenced in table._db and table._db[referenced]._id.name or 'id'
                    ftype = self.types[field.type[:9]] \
                        % dict(table_name=tablename,
                               field_name=field.name,
                               constraint_name=constraint_name,
                               foreign_key=referenced + ('(%s)' % id_fieldname),
                               on_delete_action=field.ondelete)
            elif field.type.startswith('list:reference'):
                ftype = self.types[field.type[:14]]
            elif field.type.startswith('decimal'):
                precision, scale = map(int, field.type[8:-1].split(','))
                ftype = self.types[field.type[:7]] % \
                    dict(precision=precision, scale=scale)
            elif not field.type in self.types:
                raise SyntaxError, 'Field: unknown field type: %s for %s' % \
                    (field.type, field.name)
            else:
                ftype = self.types[field.type] \
                    % dict(length=field.length)
            if not field.type.startswith('id') and not field.type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'

            # add to list of fields
            sql_fields[field.name] = dict(sortable=sortable,
                                          type=str(field.type),
                                          sql=ftype)

            if isinstance(field.default, (str, int, float)):
                # caveat: sql_fields and sql_fields_aux differ for default values
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create table;
                # the reason is that we do not want to trigger a migration simply
                # because a default value changes
                not_null = self.NOT_NULL(field.default, field.type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field.name] = dict(sql=ftype)

            fields.append('%s %s' % (field.name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = table._db[rtablename]._primarykey
            fkeys = [rfields[k] for k in pkeys]
            fields = fields + ',\n    ' + \
                self.types['reference TFK'] % \
                dict(table_name=tablename,
                     field_name=', '.join(fkeys),
                     foreign_table=rtablename,
                     foreign_key=', '.join(pkeys),
                     on_delete_action=field.ondelete)

        if hasattr(table, '_primarykey'):
            query = '''CREATE TABLE %s(\n    %s,\n    %s) %s''' % \
                (tablename, fields, self.PRIMARY_KEY(', '.join(table._primarykey)), other)
        else:
            query = '''CREATE TABLE %s(\n    %s\n)%s''' % \
                (tablename, fields, other)

        if self.uri.startswith('sqlite:///'):
            path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')].decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = os.path.join(dbpath, migrate)
        else:
            table._dbt = os.path.join(dbpath, '%s_%s.table' \
                % (table._db._uri_hash, tablename))
        if table._dbt:
            table._loggername = os.path.join(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query, table)
                table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = cPickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError, 'File %s appears corrupted' % table._dbt
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
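
    # Illustrative sketch (added; not part of the original source): for
    #     db.define_table('person', Field('name', 'string'))
    # on SQLite, create_table composes DDL along the lines of:
    #     CREATE TABLE person(
    #         id INTEGER PRIMARY KEY AUTOINCREMENT,
    #         name CHAR(512)
    #     );
    # and pickles the field metadata into a <uri_hash>_person.table file so
    # that later schema changes can be detected and migrated.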

    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        tablename = table._tablename
        def fix(item):
            k, v = item
            if not isinstance(v, dict):
                v = dict(type='unknown', sql=v)
            return k.lower(), v
        ### make sure all field names are lower case to avoid conflicts
        sql_fields = dict(map(fix, sql_fields.items()))
        sql_fields_old = dict(map(fix, sql_fields_old.items()))
        sql_fields_aux = dict(map(fix, sql_fields_aux.items()))

        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        if self.dbengine == 'mssql':
            new_add = '; ALTER TABLE %s ADD ' % tablename
        else:
            new_add = ', ADD '

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                sql_fields_current[key] = sql_fields[key]
                query = ['ALTER TABLE %s ADD %s %s;' % \
                         (tablename, key,
                          sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine == 'sqlite':
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                del sql_fields_current[key]
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;' % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                    and not isinstance(table[key].type, SQLCustomType) \
                    and not (table[key].type.startswith('reference') and \
                             sql_fields[key]['sql'].startswith('INT,') and \
                             sql_fields_old[key]['sql'].startswith('INT NOT NULL,')):
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                table._db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # caveat: mysql, oracle and firebird do not allow multiple
                        # ALTER TABLE statements in one transaction, so we must
                        # commit partial transactions and update table._dbt
                        # after each alter table.
                        if table._db._adapter.commit_on_alter_table:
                            table._db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            cPickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                        logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                cPickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
                not (query and self.dbengine in ('mysql', 'oracle', 'firebird')):
            table._db.commit()
            tfile = self.file_open(table._dbt, 'w')
            cPickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
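
    # Illustrative sketch (added; not part of the original source): adding
    # Field('age', 'integer') to an existing table triggers roughly
    #     ALTER TABLE person ADD age INTEGER;
    # while a changed column type copies data through a temporary column:
    #     ADD age__tmp, UPDATE, DROP COLUMN age, ADD age, UPDATE, DROP age__tmp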

    def LOWER(self, first):
        return 'LOWER(%s)' % self.expand(first)

    def UPPER(self, first):
        return 'UPPER(%s)' % self.expand(first)

    def EXTRACT(self, first, what):
        return "EXTRACT(%s FROM %s)" % (what, self.expand(first))

    def AGGREGATE(self, first, what):
        return "%s(%s)" % (what, self.expand(first))

    def JOIN(self):
        return 'JOIN'

    def LEFT_JOIN(self):
        return 'LEFT JOIN'

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'NOT NULL DEFAULT %s' % self.represent(default, field_type)

    def COALESCE_ZERO(self, first):
        return 'COALESCE(%s,0)' % self.expand(first)

    def ALLOW_NULL(self):
        return ''

    def SUBSTRING(self, field, parameters):
        return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self, key):
        return 'PRIMARY KEY(%s)' % key

    def _drop(self, table, mode):
        return ['DROP TABLE %s;' % table]

    def drop(self, table, mode=''):
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        table._db.commit()
        del table._db[table._tablename]
        del table._db.tables[table._db.tables.index(table._tablename)]
        table._db._update_referenced_by(table._tablename)
        if table._dbt:
            self.file_delete(table._dbt)
            logfile.write('success!\n')

    def _insert(self, table, fields):
        keys = ','.join(f.name for f, v in fields)
        values = ','.join(self.expand(v, f.type) for f, v in fields)
        return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values)

    def insert(self, table, fields):
        query = self._insert(table, fields)
        try:
            self.execute(query)
        except Exception, e:
            if isinstance(e, self.integrity_error_class()):
                return None
            raise e
        if hasattr(table, '_primarykey'):
            return dict([(k[0].name, k[1]) for k in fields \
                         if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id, int):
            return id
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid

    def bulk_insert(self, table, items):
        return [self.insert(table, item) for item in items]

    def NOT(self, first):
        return '(NOT %s)' % self.expand(first)

    def AND(self, first, second):
        return '(%s AND %s)' % (self.expand(first), self.expand(second))

    def OR(self, first, second):
        return '(%s OR %s)' % (self.expand(first), self.expand(second))

    def BELONGS(self, first, second):
        if isinstance(second, str):
            return '(%s IN (%s))' % (self.expand(first), second[:-1])
        elif second == [] or second == ():
            return '(0)'
        items = ','.join(self.expand(item, first.type) for item in second)
        return '(%s IN (%s))' % (self.expand(first), items)

    def LIKE(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first), self.expand(second, 'string'))

    def STARTSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first), self.expand(second + '%', 'string'))

    def ENDSWITH(self, first, second):
        return '(%s LIKE %s)' % (self.expand(first), self.expand('%' + second, 'string'))

    def CONTAINS(self, first, second):
        if first.type in ('string', 'text'):
            key = '%' + str(second).replace('%', '%%') + '%'
        elif first.type.startswith('list:'):
            key = '%|' + str(second).replace('|', '||').replace('%', '%%') + '|%'
        return '(%s LIKE %s)' % (self.expand(first), self.expand(key, 'string'))
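
    # Note (added; not part of the original source): for list:* fields
    # CONTAINS relies on the bar encoding used by bar_encode/bar_decode_*:
    # values are stored as '|v1|v2|...|', so LIKE '%|value|%' matches one
    # exact list element, e.g. db(db.post.tags.contains('web2py')) becomes
    #     ... WHERE (post.tags LIKE '%|web2py|%')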

    def EQ(self, first, second=None):
        if second is None:
            return '(%s IS NULL)' % self.expand(first)
        return '(%s = %s)' % (self.expand(first), self.expand(second, first.type))

    def NE(self, first, second=None):
        if second is None:
            return '(%s IS NOT NULL)' % self.expand(first)
        return '(%s <> %s)' % (self.expand(first), self.expand(second, first.type))

    def LT(self, first, second=None):
        return '(%s < %s)' % (self.expand(first), self.expand(second, first.type))

    def LE(self, first, second=None):
        return '(%s <= %s)' % (self.expand(first), self.expand(second, first.type))

    def GT(self, first, second=None):
        return '(%s > %s)' % (self.expand(first), self.expand(second, first.type))

    def GE(self, first, second=None):
        return '(%s >= %s)' % (self.expand(first), self.expand(second, first.type))

    def ADD(self, first, second):
        return '(%s + %s)' % (self.expand(first), self.expand(second, first.type))

    def SUB(self, first, second):
        return '(%s - %s)' % (self.expand(first), self.expand(second, first.type))

    def MUL(self, first, second):
        return '(%s * %s)' % (self.expand(first), self.expand(second, first.type))

    def DIV(self, first, second):
        return '(%s / %s)' % (self.expand(first), self.expand(second, first.type))

    def MOD(self, first, second):
        return '(%s %% %s)' % (self.expand(first), self.expand(second, first.type))

    def AS(self, first, second):
        return '%s AS %s' % (self.expand(first), second)

    def ON(self, first, second):
        return '%s ON %s' % (self.expand(first), self.expand(second))

    def INVERT(self, first):
        return '%s DESC' % self.expand(first)

    def COMMA(self, first, second):
        return '%s, %s' % (self.expand(first), self.expand(second))

    def expand(self, expression, field_type=None):
        if isinstance(expression, Field):
            return str(expression)
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression, field_type)
        elif isinstance(expression, (list, tuple)):
            return ','.join([self.represent(item, field_type) for item in expression])
        else:
            return str(expression)
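
    # Illustrative sketch (added; not part of the original source): expand
    # serializes DAL expression trees recursively; db.person.name == 'James'
    # builds a Query whose op is EQ, and expanding it yields the SQL fragment
    #     (person.name = 'James')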

    def alias(self, table, alias):
        """
        given a table object, makes a new table object
        with alias name.
        """
        other = copy.copy(table)
        other['_ot'] = other._tablename
        other['ALL'] = SQLALL(other)
        other['_tablename'] = alias
        for fieldname in other.fields:
            other[fieldname] = copy.copy(other[fieldname])
            other[fieldname]._tablename = alias
            other[fieldname].tablename = alias
            other[fieldname].table = other
        table._db[alias] = other
        return other

    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]

    def truncate(self, table, mode=' '):
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            class Logfile(object):
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()

        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            logfile.close()

    def _update(self, tablename, query, fields):
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_v = ','.join(['%s=%s' % (field.name, self.expand(value, field.type)) for (field, value) in fields])
        return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)

    def update(self, tablename, query, fields):
        sql = self._update(tablename, query, fields)
        self.execute(sql)
        try:
            return self.cursor.rowcount
        except:
            return None

    def _delete(self, tablename, query):
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        return 'DELETE FROM %s%s;' % (tablename, sql_w)

    def delete(self, tablename, query):
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite
        db = self.db
        table = db[tablename]
        if self.dbengine == 'sqlite' and table._referenced_by:
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite
        if self.dbengine == 'sqlite' and counter:
            for tablename, fieldname in table._referenced_by:
                f = db[tablename][fieldname]
                if f.type == 'reference ' + table._tablename and f.ondelete == 'CASCADE':
                    db(db[tablename][fieldname].belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite
        return counter

    def get_table(self, query):
        tablenames = self.tables(query)
        if len(tablenames) == 1:
            return tablenames[0]
        elif len(tablenames) < 1:
            raise RuntimeError, "No table selected"
        else:
            raise RuntimeError, "Too many tables selected"

    def _select(self, query, fields, attributes):
        for key in set(attributes.keys()) - set(('orderby', 'groupby', 'limitby',
                                                 'required', 'cache', 'left',
                                                 'distinct', 'having', 'join')):
            raise SyntaxError, 'invalid select attribute: %s' % key
        # ## if no fields are specified, take them all from the requested tables
        new_fields = []
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item.table
            else:
                new_fields.append(item)
        fields = new_fields
        tablenames = self.tables(query)
        query = self.filter_tenant(query, tablenames)
        if not fields:
            for table in tablenames:
                for field in self.db[table]:
                    fields.append(field)
        else:
            for field in fields:
                if isinstance(field, basestring) and table_field.match(field):
                    tn, fn = field.split('.')
                    field = self.db[tn][fn]
                for tablename in self.tables(field):
                    if not tablename in tablenames:
                        tablenames.append(tablename)
        if len(tablenames) < 1:
            raise SyntaxError, 'Set: no tables selected'
        sql_f = ', '.join(map(self.expand, fields))
        self._colnames = [c.strip() for c in sql_f.split(', ')]
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_o = ''
        sql_s = ''
        left = attributes.get('left', False)
        inner_join = attributes.get('join', False)
        distinct = attributes.get('distinct', False)
        groupby = attributes.get('groupby', False)
        orderby = attributes.get('orderby', False)
        having = attributes.get('having', False)
        limitby = attributes.get('limitby', False)
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            ijoint = [t._tablename for t in inner_join if not isinstance(t, Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            ijoinont = [t.first._tablename for t in ijoinon]
            iexcluded = [t for t in tablenames if not t in ijoint + ijoinont]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            # patch for join+left (solves problem with ordering in left joins)
            tables_to_merge = {}
            [tables_to_merge.update(dict.fromkeys(self.tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames if not t in important_tablenames]
        def alias(t):
            return str(self.db[t])
        if inner_join and not left:
            sql_t = ', '.join(alias(t) for t in iexcluded)
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            sql_t = ','.join([alias(t) for t in excluded + \
                              tables_to_merge.keys() if t in iexcluded])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            if not orderby and tablenames:
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s' % (t, x) for t in tablenames for x in ((hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey) or [self.db[t]._id.name])])
        # oracle does not support limitby
        return self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
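
    # Note (added; not part of the original source): limitby=(10, 30) renders
    # as LIMIT 20 OFFSET 10; adapters for engines without LIMIT/OFFSET
    # (e.g. Oracle, MSSQL) override select_limitby accordingly.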

    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, even if it may be empty
        """
        def response(sql):
            self.execute(sql)
            return self.cursor.fetchall()
        sql = self._select(query, fields, attributes)
        if attributes.get('cache', None):
            (cache_model, time_expire) = attributes['cache']
            del attributes['cache']
            key = self.uri + '/' + sql
            key = (len(key) <= 200) and key or hashlib.md5(key).hexdigest()
            rows = cache_model(key, lambda: response(sql), time_expire)
        else:
            rows = response(sql)
        if isinstance(rows, tuple):
            rows = list(rows)
        limitby = attributes.get('limitby', None) or (0,)
        rows = self.rowslice(rows, limitby[0], None)
        return self.parse(rows, self._colnames)

    def _count(self, query, distinct=None):
        tablenames = self.tables(query)
        if query:
            sql_w = ' WHERE ' + self.expand(query)
        else:
            sql_w = ''
        sql_t = ','.join(tablenames)
        if distinct:
            if isinstance(distinct, (list, tuple)):
                distinct = xorify(distinct)
            sql_d = self.expand(distinct)
            return 'SELECT count(DISTINCT %s) FROM %s%s' % (sql_d, sql_t, sql_w)
        return 'SELECT count(*) FROM %s%s' % (sql_t, sql_w)

    def count(self, query, distinct=None):
        self.execute(self._count(query, distinct))
        return self.cursor.fetchone()[0]

    def tables(self, query):
        tables = set()
        if isinstance(query, Field):
            tables.add(query.tablename)
        elif isinstance(query, (Expression, Query)):
            if query.first != None:
                tables = tables.union(self.tables(query.first))
            if query.second != None:
                tables = tables.union(self.tables(query.second))
        return list(tables)

    def commit(self):
        return self.connection.commit()

    def rollback(self):
        return self.connection.rollback()

    def close(self):
        return self.connection.close()

    def distributed_transaction_begin(self, key):
        return

    def prepare(self, key):
        self.connection.prepare()

    def commit_prepared(self, key):
        self.connection.commit()

    def rollback_prepared(self, key):
        self.connection.rollback()

    def concat_add(self, table):
        return ', ADD '

    def constraint_name(self, table, fieldname):
        return '%s_%s__constraint' % (table, fieldname)

    def create_sequence_and_triggers(self, query, table, **args):
        self.execute(query)

    def log_execute(self, *a, **b):
        self.db._lastsql = a[0]
        t0 = time.time()
        ret = self.cursor.execute(*a, **b)
        self.db._timings.append((a[0], time.time() - t0))
        return ret

    def execute(self, *a, **b):
        return self.log_execute(*a, **b)

    def represent(self, obj, fieldtype):
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            return str(obj)
        if fieldtype.startswith('list:'):
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        if isinstance(obj, (list, tuple)):
            obj = bar_encode(obj)
        if obj is None:
            return 'NULL'
        if obj == '' and not fieldtype[:2] in ['st', 'te', 'pa', 'up']:
            return 'NULL'
        r = self.represent_exceptions(obj, fieldtype)
        if r != None:
            return r
        if fieldtype == 'boolean':
            if obj and not str(obj)[:1].upper() in ['F', '0']:
                return "'T'"
            else:
                return "'F'"
        if fieldtype == 'id' or fieldtype == 'integer':
            return str(int(obj))
        if fieldtype.startswith('decimal'):
            return str(obj)
        elif fieldtype.startswith('reference'):  # reference
            if fieldtype.find('.') > 0:
                return repr(obj)
            elif isinstance(obj, (Row, Reference)):
                return str(obj['id'])
            return str(int(obj))
        elif fieldtype == 'double':
            return repr(float(obj))
        if isinstance(obj, unicode):
            obj = obj.encode(self.db_codec)
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', ' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + ' 00:00:00'
            else:
                obj = str(obj)
        elif fieldtype == 'time':
            if isinstance(obj, datetime.time):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
        if not isinstance(obj, str):
            obj = str(obj)
        try:
            obj.decode(self.db_codec)
        except:
            obj = obj.decode('latin1').encode(self.db_codec)
        return "'%s'" % obj.replace("'", "''")
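
    # Illustrative sketch (added; not part of the original source):
    #     represent('hello', 'string')                    -> "'hello'"
    #     represent(True, 'boolean')                      -> "'T'"
    #     represent(None, 'integer')                      -> 'NULL'
    #     represent(datetime.date(2011, 12, 31), 'date')  -> "'2011-12-31'"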

    def represent_exceptions(self, obj, fieldtype):
        return None

    def lastrowid(self, table):
        return None

    def integrity_error_class(self):
        return type(None)

    def rowslice(self, rows, minimum=0, maximum=None):
        """ by default this function does nothing; overload when the db does not do slicing """
        return rows

    def parse(self, rows, colnames, blob_decode=True):
        db = self.db
        virtualtables = []
        new_rows = []
        for (i, row) in enumerate(rows):
            new_row = Row()
            for j, colname in enumerate(colnames):
                value = row[j]
                if not table_field.match(colnames[j]):
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colnames[j]] = value
                    select_as_parser = re.compile("\s+AS\s+(\S+)")
                    new_column_name = select_as_parser.search(colnames[j])
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row, column_name[0], value)
                    continue
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                field_type = field.type
                if field.type != 'blob' and isinstance(value, str):
                    try:
                        value = value.decode(db._db_codec)
                    except Exception:
                        pass
                if isinstance(value, unicode):
                    value = value.encode('utf-8')
                if not tablename in new_row:
                    colset = new_row[tablename] = Row()
                    if tablename not in virtualtables:
                        virtualtables.append(tablename)
                else:
                    colset = new_row[tablename]

                if isinstance(field_type, SQLCustomType):
                    colset[fieldname] = field_type.decoder(value)
                    # field_type = field_type.type
                elif not isinstance(field_type, str) or value is None:
                    colset[fieldname] = value
                elif isinstance(field_type, str) and \
                        field_type.startswith('reference'):
                    referee = field_type[10:].strip()
                    if not '.' in referee:
                        colset[fieldname] = rid = Reference(value)
                        (rid._table, rid._record) = (db[referee], None)
                    else:  ### reference not by id
                        colset[fieldname] = value
                elif field_type == 'boolean':
                    if value == True or str(value)[:1].lower() == 't':
                        colset[fieldname] = True
                    else:
                        colset[fieldname] = False
                elif field_type == 'date' \
                        and (not isinstance(value, datetime.date) \
                             or isinstance(value, datetime.datetime)):
                    (y, m, d) = map(int, str(value)[:10].strip().split('-'))
                    colset[fieldname] = datetime.date(y, m, d)
                elif field_type == 'time' \
                        and not isinstance(value, datetime.time):
                    time_items = map(int, str(value)[:8].strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.time(h, mi, s)
                elif field_type == 'datetime' \
                        and not isinstance(value, datetime.datetime):
                    (y, m, d) = map(int, str(value)[:10].strip().split('-'))
                    time_items = map(int, str(value)[11:19].strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    colset[fieldname] = datetime.datetime(y, m, d, h, mi, s)
                elif field_type == 'blob' and blob_decode:
                    colset[fieldname] = base64.b64decode(str(value))
                elif field_type.startswith('decimal'):
                    decimals = int(field_type[8:-1].split(',')[-1])
                    if self.dbengine == 'sqlite':
                        value = ('%.' + str(decimals) + 'f') % value
                    if not isinstance(value, decimal.Decimal):
                        value = decimal.Decimal(str(value))
                    colset[fieldname] = value
                elif field_type.startswith('list:integer'):
                    if not self.dbengine == 'google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:reference'):
                    if not self.dbengine == 'google:datastore':
                        colset[fieldname] = bar_decode_integer(value)
                    else:
                        colset[fieldname] = value
                elif field_type.startswith('list:string'):
                    if not self.dbengine == 'google:datastore':
                        colset[fieldname] = bar_decode_string(value)
                    else:
                        colset[fieldname] = value
                else:
                    colset[fieldname] = value
                if field_type == 'id':
                    id = colset[field.name]
                    colset.update_record = lambda _=(colset, table, id), **a: update_record(_, a)
                    colset.delete_record = lambda t=table, i=id: t._db(t._id == i).delete()
                    for (referee_table, referee_name) in \
                            table._referenced_by:
                        s = db[referee_table][referee_name]
                        if not referee_table in colset:
                            # for backward compatibility
                            colset[referee_table] = Set(db, s == id)
                        ### add new feature?
                        ### colset[referee_table+'_by_'+referee_name] = Set(db, s == id)
                    colset['id'] = id
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
        for tablename in virtualtables:
            for item in db[tablename].virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename: item})
                except KeyError:
                    # to avoid breaking virtualfields when doing a partial select
                    pass
        return rowsobj

    def filter_tenant(self, query, tablenames):
        fieldname = self.db._request_tenant
        for tablename in tablenames:
            table = self.db[tablename]
            if fieldname in table:
                default = table[fieldname].default
                if default != None:
                    query = query & (table[fieldname] == default)
        return query
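
    # Note (added; not part of the original source): filter_tenant implements
    # optional multi-tenancy; when a table has a field named after
    # db._request_tenant and that field has a default, every query on the
    # table is implicitly AND-ed with (table[fieldname] == default).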

###################################################################################
# List of all the available adapters; they all extend BaseAdapter
###################################################################################

class SQLiteAdapter(BaseAdapter):

    driver = globals().get('sqlite3', None)

    def EXTRACT(self, field, what):
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            (i, j) = table[lookup]
            return int(s[i:j])
        except:
            return None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        def connect(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)

    def _truncate(self, table, mode=''):
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid
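
# Illustrative sketch (added; not part of the original source): SQLite has no
# EXTRACT, so web2py_extract is registered as a UDF and slices ISO strings:
#     SQLiteAdapter.web2py_extract('year', '2011-12-31 10:20:30')    # -> 2011
#     SQLiteAdapter.web2py_extract('minute', '2011-12-31 10:20:30')  # -> 20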

class JDBCSQLiteAdapter(SQLiteAdapter):

    driver = globals().get('zxJDBC', None)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://')[1]
            if dbpath[0] != '/':
                dbpath = os.path.join(self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connect(dbpath=dbpath, driver_args=driver_args):
            return self.driver.connect(java.sql.DriverManager.getConnection('jdbc:sqlite:' + dbpath), **driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        # self.connection.create_function('web2py_extract', 2, SQLiteAdapter.web2py_extract)

    def execute(self, a):
        return self.log_execute(a[:-1])

class MySQLAdapter(BaseAdapter):

    driver = globals().get('pymysql', None)
    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(field_name)s__idx (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        }

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self, field, parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def _drop(self, table, mode):
        # breaks db integrity but without this mysql does not drop the table
        return ['SET FOREIGN_KEY_CHECKS=0;', 'DROP TABLE %s;' % table, 'SET FOREIGN_KEY_CHECKS=1;']

    def distributed_transaction_begin(self, key):
        self.execute('XA START;')

    def prepare(self, key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self, key):
        self.execute("XA COMMIT;")

    def rollback_prepared(self, key):
        self.execute("XA ROLLBACK;")

    def concat_add(self, table):
        return '; ALTER TABLE %s ADD ' % table

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=lambda x:x, driver_args={},
                 adapter_args={}):
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self.find_or_make_work_folder()
        uri = uri.split('://')[1]
        m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
        if not m:
            raise SyntaxError, \
                "Invalid URI string in DAL: %s" % self.uri
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError, 'User required'
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError, 'Host name required'
        db = m.group('db')
        if not db:
            raise SyntaxError, 'Database name required'
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        driver_args.update(dict(db=db,
                                user=credential_decoder(user),
                                passwd=credential_decoder(password),
                                host=host,
                                port=port,
                                charset=charset))
        def connect(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.pool_connection(connect)
        self.cursor = self.connection.cursor()
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self, table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
1672 -class PostgreSQLAdapter(BaseAdapter):
1673 1674 driver = globals().get('psycopg2',None) 1675 1676 support_distributed_transaction = True 1677 types = { 1678 'boolean': 'CHAR(1)', 1679 'string': 'VARCHAR(%(length)s)', 1680 'text': 'TEXT', 1681 'password': 'VARCHAR(%(length)s)', 1682 'blob': 'BYTEA', 1683 'upload': 'VARCHAR(%(length)s)', 1684 'integer': 'INTEGER', 1685 'double': 'FLOAT8', 1686 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', 1687 'date': 'DATE', 1688 'time': 'TIME', 1689 'datetime': 'TIMESTAMP', 1690 'id': 'SERIAL PRIMARY KEY', 1691 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 1692 'list:integer': 'TEXT', 1693 'list:string': 'TEXT', 1694 'list:reference': 'TEXT', 1695 } 1696
1697 - def sequence_name(self,table):
1698 return '%s_id_Seq' % table
1699
1700 - def RANDOM(self):
1701 return 'RANDOM()'
1702
1703 - def distributed_transaction_begin(self,key):
1704 return
1705
1706 - def prepare(self,key):
1707 self.execute("PREPARE TRANSACTION '%s';" % key)
1708
1709 - def commit_prepared(self,key):
1710 self.execute("COMMIT PREPARED '%s';" % key)
1711
1712 - def rollback_prepared(self,key):
1713 self.execute("ROLLBACK PREPARED '%s';" % key)
1714
1715 - def create_sequence_and_triggers(self, query, table, **args):
1716     # following lines should only be executed if table._sequence_name does not exist
1717     # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
1718     # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
1719     #     % (table._tablename, table._fieldname, table._sequence_name))
1720     self.execute(query)
1721
1722 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
1723              credential_decoder=lambda x:x, driver_args={},
1724              adapter_args={}):
1725     self.db = db
1726     self.dbengine = "postgres"
1727     self.uri = uri
1728     self.pool_size = pool_size
1729     self.folder = folder
1730     self.db_codec = db_codec
1731     self.find_or_make_work_folder()
1732     uri = uri.split('://')[1]
1733     m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$').match(uri)
1734     if not m:
1735         raise SyntaxError, "Invalid URI string in DAL"
1736     user = credential_decoder(m.group('user'))
1737     if not user:
1738         raise SyntaxError, 'User required'
1739     password = credential_decoder(m.group('password'))
1740     if not password:
1741         password = ''
1742     host = m.group('host')
1743     if not host:
1744         raise SyntaxError, 'Host name required'
1745     db = m.group('db')
1746     if not db:
1747         raise SyntaxError, 'Database name required'
1748     port = m.group('port') or '5432'
1749     sslmode = m.group('sslmode')
1750     if sslmode:
1751         msg = ("dbname='%s' user='%s' host='%s' "  # trailing space needed: adjacent strings concatenate
1752                "port=%s password='%s' sslmode='%s'") \
1753                % (db, user, host, port, password, sslmode)
1754     else:
1755         msg = ("dbname='%s' user='%s' host='%s' "
1756                "port=%s password='%s'") \
1757                % (db, user, host, port, password)
1758     def connect(msg=msg,driver_args=driver_args):
1759         return self.driver.connect(msg,**driver_args)
1760     self.pool_connection(connect)
1761     self.connection.set_client_encoding('UTF8')
1762     self.cursor = self.connection.cursor()
1763     self.execute('BEGIN;')
1764     self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
1765     self.execute("SET standard_conforming_strings=on;")
1766
1767 - def lastrowid(self,table):
1768     self.execute("select currval('%s')" % table._sequence_name)
1769     return int(self.cursor.fetchone()[0])
1770
1771 - def LIKE(self,first,second):
1772 return '(%s ILIKE %s)' % (self.expand(first),self.expand(second,'string'))
1773
1774 - def STARTSWITH(self,first,second):
1775 return '(%s ILIKE %s)' % (self.expand(first),self.expand(second+'%','string'))
1776
1777 - def ENDSWITH(self,first,second):
1778 return '(%s ILIKE %s)' % (self.expand(first),self.expand('%'+second,'string'))
1779
1780 - def CONTAINS(self,first,second):
1781     if first.type in ('string','text'):
1782         key = '%'+str(second).replace('%','%%')+'%'
1783     elif first.type.startswith('list:'):
1784         key = '%|'+str(second).replace('|','||').replace('%','%%')+'|%'
1785     return '(%s ILIKE %s)' % (self.expand(first),self.expand(key,'string'))
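All four operators above compile to ILIKE, so string matching on Postgres is case-insensitive, and CONTAINS on a list: field wraps the needle in '|' so it only matches whole items in the pipe-delimited storage format. Roughly, for a hypothetical table (output approximate):

>>> db.define_table('tag', Field('name'), Field('refs', 'list:integer'))
>>> print db(db.tag.name.contains('ABC'))._select(db.tag.id)
SELECT tag.id FROM tag WHERE (tag.name ILIKE '%ABC%');
>>> print db(db.tag.refs.contains(3))._select(db.tag.id)
SELECT tag.id FROM tag WHERE (tag.refs ILIKE '%|3|%');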
1786
1787 -class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
1788
1789 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
1790              credential_decoder=lambda x:x, driver_args={},
1791              adapter_args={}):
1792     self.db = db
1793     self.dbengine = "postgres"
1794     self.uri = uri
1795     self.pool_size = pool_size
1796     self.folder = folder
1797     self.db_codec = db_codec
1798     self.find_or_make_work_folder()
1799     uri = uri.split('://')[1]
1800     m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
1801     if not m:
1802         raise SyntaxError, "Invalid URI string in DAL"
1803     user = credential_decoder(m.group('user'))
1804     if not user:
1805         raise SyntaxError, 'User required'
1806     password = credential_decoder(m.group('password'))
1807     if not password:
1808         password = ''
1809     host = m.group('host')
1810     if not host:
1811         raise SyntaxError, 'Host name required'
1812     db = m.group('db')
1813     if not db:
1814         raise SyntaxError, 'Database name required'
1815     port = m.group('port') or '5432'
1816     msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
1817     def connect(msg=msg,driver_args=driver_args):
1818         return self.driver.connect(*msg,**driver_args)
1819     self.pool_connection(connect)
1820     self.connection.set_client_encoding('UTF8')
1821     self.cursor = self.connection.cursor()
1822     self.execute('BEGIN;')
1823     self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
1824
1825
1826 -class OracleAdapter(BaseAdapter):
1827
1828     driver = globals().get('cx_Oracle',None)
1829
1830     commit_on_alter_table = False
1831     types = {
1832         'boolean': 'CHAR(1)',
1833         'string': 'VARCHAR2(%(length)s)',
1834         'text': 'CLOB',
1835         'password': 'VARCHAR2(%(length)s)',
1836         'blob': 'CLOB',
1837         'upload': 'VARCHAR2(%(length)s)',
1838         'integer': 'INT',
1839         'double': 'FLOAT',
1840         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
1841         'date': 'DATE',
1842         'time': 'CHAR(8)',
1843         'datetime': 'DATE',
1844         'id': 'NUMBER PRIMARY KEY',
1845         'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
1846         'list:integer': 'CLOB',
1847         'list:string': 'CLOB',
1848         'list:reference': 'CLOB',
1849         }
1850
1851 - def sequence_name(self,tablename):
1852 return '%s_sequence' % tablename
1853
1854 - def trigger_name(self,tablename):
1855 return '%s_trigger' % tablename
1856
1857 - def LEFT_JOIN(self):
1858 return 'LEFT OUTER JOIN'
1859
1860 - def RANDOM(self):
1861 return 'dbms_random.value'
1862
1863 - def NOT_NULL(self,default,field_type):
1864 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
1865
1866 - def _drop(self,table,mode):
1867     sequence_name = table._sequence_name
1868     return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]
1869
1870 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1871     if limitby:
1872         (lmin, lmax) = limitby
1873         if len(sql_w) > 1:
1874             sql_w_row = sql_w + ' AND w_row > %i' % lmin
1875         else:
1876             sql_w_row = 'WHERE w_row > %i' % lmin
1877         return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
1878     return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
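Oracle lacks LIMIT/OFFSET, so limitby is emulated by nesting the query twice under ROWNUM: the inner wrapper caps the scan at lmax rows and the outer one skips the first lmin. With limitby=(10, 20) and no WHERE clause, the template above expands to roughly:

SELECT fields FROM (
    SELECT w_tmp.*, ROWNUM w_row FROM (
        SELECT fields FROM tables ORDER BY ...
    ) w_tmp WHERE ROWNUM<=20
) tables WHERE w_row > 10;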
1879
1880 - def constraint_name(self, tablename, fieldname):
1881     constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
1882     if len(constraint_name)>30:
1883         constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
1884     return constraint_name
1885
1886 - def represent_exceptions(self, obj, fieldtype):
1887     if fieldtype == 'blob':
1888         obj = base64.b64encode(str(obj))
1889         return ":CLOB('%s')" % obj
1890     elif fieldtype == 'date':
1891         if isinstance(obj, (datetime.date, datetime.datetime)):
1892             obj = obj.isoformat()[:10]
1893         else:
1894             obj = str(obj)
1895         return "to_date('%s','yyyy-mm-dd')" % obj
1896     elif fieldtype == 'datetime':
1897         if isinstance(obj, datetime.datetime):
1898             obj = obj.isoformat()[:19].replace('T',' ')
1899         elif isinstance(obj, datetime.date):
1900             obj = obj.isoformat()[:10]+' 00:00:00'
1901         else:
1902             obj = str(obj)
1903         return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
1904     return None
1905
1906 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
1907              credential_decoder=lambda x:x, driver_args={},
1908              adapter_args={}):
1909     self.db = db
1910     self.dbengine = "oracle"
1911     self.uri = uri
1912     self.pool_size = pool_size
1913     self.folder = folder
1914     self.db_codec = db_codec
1915     self.find_or_make_work_folder()
1916     uri = uri.split('://')[1]
1917     if not 'threaded' in driver_args:
1918         driver_args['threaded']=True
1919     def connect(uri=uri,driver_args=driver_args):
1920         return self.driver.connect(uri,**driver_args)
1921     self.pool_connection(connect)
1922     self.cursor = self.connection.cursor()
1923     self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
1924     self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
1925     oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
1926
1927 - def execute(self, command):
1928     args = []
1929     i = 1
1930     while True:
1931         m = self.oracle_fix.match(command)
1932         if not m:
1933             break
1934         command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
1935         args.append(m.group('clob')[6:-2].replace("''", "'"))
1936         i += 1
1937     return self.log_execute(command[:-1], args)
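This execute() turns the :CLOB('...') pseudo-literals emitted by represent_exceptions above into numbered bind variables, passing the payloads out-of-band: the [6:-2] slice strips the CLOB(' prefix and ') suffix, doubled quotes are unescaped, and command[:-1] drops the trailing semicolon. Schematically:

# before: INSERT INTO t(b) VALUES (:CLOB('abc'));
# after:  INSERT INTO t(b) VALUES (:1)   -- executed with args = ['abc']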
1938
1939 - def create_sequence_and_triggers(self, query, table, **args):
1940     tablename = table._tablename
1941     sequence_name = table._sequence_name
1942     trigger_name = table._trigger_name
1943     self.execute(query)
1944     self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE;' % sequence_name)
1945     self.execute('CREATE OR REPLACE TRIGGER %s BEFORE INSERT ON %s FOR EACH ROW BEGIN SELECT %s.nextval INTO :NEW.id FROM DUAL; END;\n' % (trigger_name, tablename, sequence_name))
1946
1947 - def lastrowid(self,table):
1948     sequence_name = table._sequence_name
1949     self.execute('SELECT %s.currval FROM dual;' % sequence_name)
1950     return int(self.cursor.fetchone()[0])
1951
1952
1953 -class MSSQLAdapter(BaseAdapter):
1954
1955     driver = globals().get('pyodbc',None)
1956
1957     types = {
1958         'boolean': 'BIT',
1959         'string': 'VARCHAR(%(length)s)',
1960         'text': 'TEXT',
1961         'password': 'VARCHAR(%(length)s)',
1962         'blob': 'IMAGE',
1963         'upload': 'VARCHAR(%(length)s)',
1964         'integer': 'INT',
1965         'double': 'FLOAT',
1966         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
1967         'date': 'DATETIME',
1968         'time': 'CHAR(8)',
1969         'datetime': 'DATETIME',
1970         'id': 'INT IDENTITY PRIMARY KEY',
1971         'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
1972         'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
1973         'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
1974         'list:integer': 'TEXT',
1975         'list:string': 'TEXT',
1976         'list:reference': 'TEXT',
1977         }
1978
1979 - def EXTRACT(self,field,what):
1980 return "DATEPART(%s,%s)" % (what, self.expand(field))
1981
1982 - def LEFT_JOIN(self):
1983 return 'LEFT OUTER JOIN'
1984
1985 - def RANDOM(self):
1986 return 'NEWID()'
1987
1988 - def ALLOW_NULL(self):
1989 return ' NULL'
1990
1991 - def SUBSTRING(self,field,parameters):
1992 return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1993
1994 - def PRIMARY_KEY(self,key):
1995 return 'PRIMARY KEY CLUSTERED (%s)' % key
1996
1997 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1998     if limitby:
1999         (lmin, lmax) = limitby
2000         sql_s += ' TOP %i' % lmax
2001     return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2002
2003 - def represent_exceptions(self, obj, fieldtype):
2004     if fieldtype == 'boolean':
2005         if obj and not str(obj)[0].upper() == 'F':
2006             return '1'
2007         else:
2008             return '0'
2009     return None
2010
2011 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2012              credential_decoder=lambda x:x, driver_args={},
2013              adapter_args={}, fake_connect=False):
2014     self.db = db
2015     self.dbengine = "mssql"
2016     self.uri = uri
2017     self.pool_size = pool_size
2018     self.folder = folder
2019     self.db_codec = db_codec
2020     self.find_or_make_work_folder()
2021     # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
2022     uri = uri.split('://')[1]
2023     if '@' not in uri:
2024         try:
2025             m = re.compile('^(?P<dsn>.+)$').match(uri)
2026             if not m:
2027                 raise SyntaxError, \
2028                     'Parsing uri string(%s) has no result' % self.uri
2029             dsn = m.group('dsn')
2030             if not dsn:
2031                 raise SyntaxError, 'DSN required'
2032         except SyntaxError, e:
2033             logger.error('NdGpatch error')
2034             raise e
2035         cnxn = 'DSN=%s' % dsn
2036     else:
2037         m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$').match(uri)
2038         if not m:
2039             raise SyntaxError, \
2040                 "Invalid URI string in DAL: %s" % uri
2041         user = credential_decoder(m.group('user'))
2042         if not user:
2043             raise SyntaxError, 'User required'
2044         password = credential_decoder(m.group('password'))
2045         if not password:
2046             password = ''
2047         host = m.group('host')
2048         if not host:
2049             raise SyntaxError, 'Host name required'
2050         db = m.group('db')
2051         if not db:
2052             raise SyntaxError, 'Database name required'
2053         port = m.group('port') or '1433'
2054         # Parse the optional url name-value arg pairs after the '?'
2055         # (in the form of arg1=value1&arg2=value2&...)
2056         # Default values (drivers like FreeTDS insist on uppercase parameter keys)
2057         argsdict = { 'DRIVER':'{SQL Server}' }
2058         urlargs = m.group('urlargs') or ''
2059         argpattern = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
2060         for argmatch in argpattern.finditer(urlargs):
2061             argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
2062         urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.items()])
2063         cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
2064             % (host, port, db, user, password, urlargs)
2065     def connect(cnxn=cnxn,driver_args=driver_args):
2066         return self.driver.connect(cnxn,**driver_args)
2067     if not fake_connect:
2068         self.pool_connection(connect)
2069         self.cursor = self.connection.cursor()
2070
2071 - def lastrowid(self,table):
2072     #self.execute('SELECT @@IDENTITY;')
2073     self.execute('SELECT SCOPE_IDENTITY();')
2074     return int(self.cursor.fetchone()[0])
2075
2076 - def integrity_error_class(self):
2077 return pyodbc.IntegrityError
2078
2079 - def rowslice(self,rows,minimum=0,maximum=None):
2080     if maximum is None:
2081         return rows[minimum:]
2082     return rows[minimum:maximum]
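T-SQL's TOP can only cap the total row count, so select_limitby asks the server for the first lmax rows and the lmin offset is applied client-side: the generic select path calls rowslice() on the fetched rows. Sketch with a hypothetical table:

>>> rows = db(db.thing).select(limitby=(10, 20))
# server runs roughly: SELECT TOP 20 thing.id, ... FROM thing;
# rowslice() then trims the first 10 rows in Python, keeping rows 10..19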
2083
2084
2085 -class MSSQL2Adapter(MSSQLAdapter):
2086     types = {
2087         'boolean': 'CHAR(1)',
2088         'string': 'NVARCHAR(%(length)s)',
2089         'text': 'NTEXT',
2090         'password': 'NVARCHAR(%(length)s)',
2091         'blob': 'IMAGE',
2092         'upload': 'NVARCHAR(%(length)s)',
2093         'integer': 'INT',
2094         'double': 'FLOAT',
2095         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
2096         'date': 'DATETIME',
2097         'time': 'CHAR(8)',
2098         'datetime': 'DATETIME',
2099         'id': 'INT IDENTITY PRIMARY KEY',
2100         'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2101         'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2102         'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
2103         'list:integer': 'NTEXT',
2104         'list:string': 'NTEXT',
2105         'list:reference': 'NTEXT',
2106         }
2107
2108 - def represent(self, obj, fieldtype):
2109     value = BaseAdapter.represent(self, obj, fieldtype)
2110     if (fieldtype == 'string' or fieldtype == 'text') and value[:1]=="'":
2111         value = 'N'+value
2112     return value
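Because this adapter maps string types to NVARCHAR/NTEXT, represent() prefixes quoted literals with N so SQL Server parses them as Unicode. For instance (adapter being a hypothetical MSSQL2Adapter instance):

>>> adapter.represent('abc', 'string')
"N'abc'"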
2113
2114 - def execute(self,a):
2115 return self.log_execute(a.decode('utf8'))
2116
2117
2118 -class FireBirdAdapter(BaseAdapter):
2119
2120     driver = globals().get('pyodbc',None)
2121
2122     commit_on_alter_table = False
2123     support_distributed_transaction = True
2124     types = {
2125         'boolean': 'CHAR(1)',
2126         'string': 'VARCHAR(%(length)s)',
2127         'text': 'BLOB SUB_TYPE 1',
2128         'password': 'VARCHAR(%(length)s)',
2129         'blob': 'BLOB SUB_TYPE 0',
2130         'upload': 'VARCHAR(%(length)s)',
2131         'integer': 'INTEGER',
2132         'double': 'DOUBLE PRECISION',
2133         'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
2134         'date': 'DATE',
2135         'time': 'TIME',
2136         'datetime': 'TIMESTAMP',
2137         'id': 'INTEGER PRIMARY KEY',
2138         'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2139         'list:integer': 'BLOB SUB_TYPE 1',
2140         'list:string': 'BLOB SUB_TYPE 1',
2141         'list:reference': 'BLOB SUB_TYPE 1',
2142         }
2143
2144 - def sequence_name(self,tablename):
2145 return 'genid_%s' % tablename
2146
2147 - def trigger_name(self,tablename):
2148 return 'trg_id_%s' % tablename
2149
2150 - def RANDOM(self):
2151 return 'RAND()'
2152
2153 - def NOT_NULL(self,default,field_type):
2154 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
2155
2156 - def SUBSTRING(self,field,parameters):
2157 return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
2158
2159 - def _drop(self,table,mode):
2160     sequence_name = table._sequence_name
2161     return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]
2162
2163 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2164     if limitby:
2165         (lmin, lmax) = limitby
2166         sql_s += ' FIRST %i SKIP %i' % (lmax - lmin, lmin)
2167     return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2168
2169 - def _truncate(self,table,mode = ''):
2170     return ['DELETE FROM %s;' % table._tablename,
2171             'SET GENERATOR %s TO 0;' % table._sequence_name]
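Firebird has no TRUNCATE statement, so truncation is emulated with a DELETE plus a generator reset so that new ids restart from 1. For a table named person this issues roughly:

# DELETE FROM person;
# SET GENERATOR genid_person TO 0;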
2172
2173 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2174              credential_decoder=lambda x:x, driver_args={},
2175              adapter_args={}):
2176     self.db = db
2177     self.dbengine = "firebird"
2178     self.uri = uri
2179     self.pool_size = pool_size
2180     self.folder = folder
2181     self.db_codec = db_codec
2182     self.find_or_make_work_folder()
2183     uri = uri.split('://')[1]
2184     m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
2185     if not m:
2186         raise SyntaxError, "Invalid URI string in DAL: %s" % uri
2187     user = credential_decoder(m.group('user'))
2188     if not user:
2189         raise SyntaxError, 'User required'
2190     password = credential_decoder(m.group('password'))
2191     if not password:
2192         password = ''
2193     host = m.group('host')
2194     if not host:
2195         raise SyntaxError, 'Host name required'
2196     port = int(m.group('port') or 3050)
2197     db = m.group('db')
2198     if not db:
2199         raise SyntaxError, 'Database name required'
2200     charset = m.group('charset') or 'UTF8'
2201     driver_args.update(dict(dsn='%s/%s:%s' % (host,port,db),
2202                             user = user,         # already passed through
2203                             password = password, # credential_decoder above
2204                             charset = charset))
2205     if adapter_args.has_key('driver_name'):
2206         if adapter_args['driver_name'] == 'kinterbasdb':
2207             self.driver = kinterbasdb
2208         elif adapter_args['driver_name'] == 'firebirdsql':
2209             self.driver = firebirdsql
2210     else:
2211         self.driver = kinterbasdb
2212     def connect(driver_args=driver_args):
2213         return self.driver.connect(**driver_args)
2214     self.pool_connection(connect)
2215     self.cursor = self.connection.cursor()
2216
2217 - def create_sequence_and_triggers(self, query, table, **args):
2218     tablename = table._tablename
2219     sequence_name = table._sequence_name
2220     trigger_name = table._trigger_name
2221     self.execute(query)
2222     self.execute('create generator %s;' % sequence_name)
2223     self.execute('set generator %s to 0;' % sequence_name)
2224     self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
2225
2226 - def lastrowid(self,table):
2227     sequence_name = table._sequence_name
2228     self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
2229     return int(self.cursor.fetchone()[0])
2230
2231
2232 -class FireBirdEmbeddedAdapter(FireBirdAdapter):
2233
2234 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2235              credential_decoder=lambda x:x, driver_args={},
2236              adapter_args={}):
2237     self.db = db
2238     self.dbengine = "firebird"
2239     self.uri = uri
2240     self.pool_size = pool_size
2241     self.folder = folder
2242     self.db_codec = db_codec
2243     self.find_or_make_work_folder()
2244     uri = uri.split('://')[1]
2245     m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$').match(uri)
2246     if not m:
2247         raise SyntaxError, \
2248             "Invalid URI string in DAL: %s" % self.uri
2249     user = credential_decoder(m.group('user'))
2250     if not user:
2251         raise SyntaxError, 'User required'
2252     password = credential_decoder(m.group('password'))
2253     if not password:
2254         password = ''
2255     pathdb = m.group('path')
2256     if not pathdb:
2257         raise SyntaxError, 'Path required'
2258     charset = m.group('charset')
2259     if not charset:
2260         charset = 'UTF8'
2261     host = ''
2262     driver_args.update(dict(host=host,
2263                             database=pathdb,
2264                             user=user,         # already passed through
2265                             password=password, # credential_decoder above
2266                             charset=charset))
2267     #def connect(driver_args=driver_args):
2268     #    return kinterbasdb.connect(**driver_args)
2269     if adapter_args.has_key('driver_name'):
2270         if adapter_args['driver_name'] == 'kinterbasdb':
2271             self.driver = kinterbasdb
2272         elif adapter_args['driver_name'] == 'firebirdsql':
2273             self.driver = firebirdsql
2274     else:
2275         self.driver = kinterbasdb
2276     def connect(driver_args=driver_args):
2277         return self.driver.connect(**driver_args)
2278     self.pool_connection(connect)
2279     self.cursor = self.connection.cursor()
2280
2281
2282 -class InformixAdapter(BaseAdapter):
2283
2284     driver = globals().get('informixdb',None)
2285
2286     types = {
2287         'boolean': 'CHAR(1)',
2288         'string': 'VARCHAR(%(length)s)',
2289         'text': 'BLOB SUB_TYPE 1',
2290         'password': 'VARCHAR(%(length)s)',
2291         'blob': 'BLOB SUB_TYPE 0',
2292         'upload': 'VARCHAR(%(length)s)',
2293         'integer': 'INTEGER',
2294         'double': 'FLOAT',
2295         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
2296         'date': 'DATE',
2297         'time': 'CHAR(8)',
2298         'datetime': 'DATETIME',
2299         'id': 'SERIAL',
2300         'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2301         'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
2302         'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
2303         'list:integer': 'BLOB SUB_TYPE 1',
2304         'list:string': 'BLOB SUB_TYPE 1',
2305         'list:reference': 'BLOB SUB_TYPE 1',
2306         }
2307
2308 - def RANDOM(self):
2309 return 'Random()'
2310
2311 - def NOT_NULL(self,default,field_type):
2312 return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
2313
2314 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2315     if limitby:
2316         (lmin, lmax) = limitby
2317         fetch_amt = lmax - lmin
2318         dbms_version = int(self.connection.dbms_version.split('.')[0])
2319         if lmin and (dbms_version >= 10):
2320             # Requires Informix 10.0+
2321             sql_s += ' SKIP %d' % (lmin, )
2322         if fetch_amt and (dbms_version >= 9):
2323             # Requires Informix 9.0+
2324             sql_s += ' FIRST %d' % (fetch_amt, )
2325     return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2326
2327 - def represent_exceptions(self, obj, fieldtype):
2328     if fieldtype == 'date':
2329         if isinstance(obj, (datetime.date, datetime.datetime)):
2330             obj = obj.isoformat()[:10]
2331         else:
2332             obj = str(obj)
2333         return "to_date('%s','yyyy-mm-dd')" % obj
2334     elif fieldtype == 'datetime':
2335         if isinstance(obj, datetime.datetime):
2336             obj = obj.isoformat()[:19].replace('T',' ')
2337         elif isinstance(obj, datetime.date):
2338             obj = obj.isoformat()[:10]+' 00:00:00'
2339         else:
2340             obj = str(obj)
2341         return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
2342     return None
2343
2344 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2345              credential_decoder=lambda x:x, driver_args={},
2346              adapter_args={}):
2347     self.db = db
2348     self.dbengine = "informix"
2349     self.uri = uri
2350     self.pool_size = pool_size
2351     self.folder = folder
2352     self.db_codec = db_codec
2353     self.find_or_make_work_folder()
2354     uri = uri.split('://')[1]
2355     m = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$').match(uri)
2356     if not m:
2357         raise SyntaxError, \
2358             "Invalid URI string in DAL: %s" % self.uri
2359     user = credential_decoder(m.group('user'))
2360     if not user:
2361         raise SyntaxError, 'User required'
2362     password = credential_decoder(m.group('password'))
2363     if not password:
2364         password = ''
2365     host = m.group('host')
2366     if not host:
2367         raise SyntaxError, 'Host name required'
2368     db = m.group('db')
2369     if not db:
2370         raise SyntaxError, 'Database name required'
2371     # user and password were already passed through
2372     # credential_decoder above
2373     dsn = '%s@%s' % (db,host)
2374     driver_args.update(dict(user=user,password=password,autocommit=True))
2375     def connect(dsn=dsn,driver_args=driver_args):
2376         return self.driver.connect(dsn,**driver_args)
2377     self.pool_connection(connect)
2378     self.cursor = self.connection.cursor()
2379
2380 - def execute(self,command):
2381     if command[-1:]==';':
2382         command = command[:-1]
2383     return self.log_execute(command)
2384
2385 - def lastrowid(self,table):
2386 return self.cursor.sqlerrd[1]
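Informix needs no query for the last insert id: the informixdb cursor exposes the SQLCA diagnostic area as sqlerrd, whose second element (index 1, per informixdb's DB-API extension) holds the SERIAL value assigned by the last INSERT. So after a hypothetical insert:

>>> id = db.person.insert(name='James')   # on Informix
# lastrowid(person) just reads cursor.sqlerrd[1], no extra round trip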
2387
2388 - def integrity_error_class(self):
2389 return informixdb.IntegrityError
2390
2391
2392 -class DB2Adapter(BaseAdapter):
2393
2394     driver = globals().get('pyodbc',None)
2395
2396     types = {
2397         'boolean': 'CHAR(1)',
2398         'string': 'VARCHAR(%(length)s)',
2399         'text': 'CLOB',
2400         'password': 'VARCHAR(%(length)s)',
2401         'blob': 'BLOB',
2402         'upload': 'VARCHAR(%(length)s)',
2403         'integer': 'INT',
2404         'double': 'DOUBLE',
2405         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
2406         'date': 'DATE',
2407         'time': 'TIME',
2408         'datetime': 'TIMESTAMP',
2409         'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
2410         'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2411         'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2412         'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
2413         'list:integer': 'CLOB',
2414         'list:string': 'CLOB',
2415         'list:reference': 'CLOB',
2416         }
2417
2418 - def LEFT_JOIN(self):
2419 return 'LEFT OUTER JOIN'
2420
2421 - def RANDOM(self):
2422 return 'RAND()'
2423
2424 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2425     if limitby:
2426         (lmin, lmax) = limitby
2427         sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
2428     return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
2429
2430 - def represent_exceptions(self, obj, fieldtype):
2431     if fieldtype == 'blob':
2432         obj = base64.b64encode(str(obj))
2433         return "BLOB('%s')" % obj
2434     elif fieldtype == 'datetime':
2435         if isinstance(obj, datetime.datetime):
2436             obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
2437         elif isinstance(obj, datetime.date):
2438             obj = obj.isoformat()[:10]+'-00.00.00'
2439         return "'%s'" % obj
2440     return None
2441
2442 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2443              credential_decoder=lambda x:x, driver_args={},
2444              adapter_args={}):
2445     self.db = db
2446     self.dbengine = "db2"
2447     self.uri = uri
2448     self.pool_size = pool_size
2449     self.folder = folder
2450     self.db_codec = db_codec
2451     self.find_or_make_work_folder()
2452     cnxn = uri.split('://', 1)[1]
2453     def connect(cnxn=cnxn,driver_args=driver_args):
2454         return self.driver.connect(cnxn,**driver_args)
2455     self.pool_connection(connect)
2456     self.cursor = self.connection.cursor()
2457
2458 - def execute(self,command):
2459     if command[-1:]==';':
2460         command = command[:-1]
2461     return self.log_execute(command)
2462
2463 - def lastrowid(self,table):
2464     self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
2465     return int(self.cursor.fetchone()[0])
2466
2467 - def rowslice(self,rows,minimum=0,maximum=None):
2468     if maximum is None:
2469         return rows[minimum:]
2470     return rows[minimum:maximum]
2471
2472
2473 -class TeradataAdapter(DB2Adapter):
2474
2475     driver = globals().get('pyodbc',None)
2476
2477     types = {
2478         'boolean': 'CHAR(1)',
2479         'string': 'VARCHAR(%(length)s)',
2480         'text': 'CLOB',
2481         'password': 'VARCHAR(%(length)s)',
2482         'blob': 'BLOB',
2483         'upload': 'VARCHAR(%(length)s)',
2484         'integer': 'INT',
2485         'double': 'DOUBLE',
2486         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
2487         'date': 'DATE',
2488         'time': 'TIME',
2489         'datetime': 'TIMESTAMP',
2490         'id': 'INTEGER GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
2491         'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2492         'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2493         'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
2494         'list:integer': 'CLOB',
2495         'list:string': 'CLOB',
2496         'list:reference': 'CLOB',
2497         }
2498
2499
2500 - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
2501              credential_decoder=lambda x:x, driver_args={},
2502              adapter_args={}):
2503     self.db = db
2504     self.dbengine = "teradata"
2505     self.uri = uri
2506     self.pool_size = pool_size
2507     self.folder = folder
2508     self.db_codec = db_codec
2509     self.find_or_make_work_folder()
2510     cnxn = uri.split('://', 1)[1]
2511     def connect(cnxn=cnxn,driver_args=driver_args):
2512         return self.driver.connect(cnxn,**driver_args)
2513     self.pool_connection(connect)
2514     self.cursor = self.connection.cursor()
2515
2516
2517 INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
2518                                        # (ANSI-SQL wants this form of name
2519                                        # to be a delimited identifier)
2520
2521 -class IngresAdapter(BaseAdapter):
2522
2523     driver = globals().get('ingresdbi',None)
2524
2525     types = {
2526         'boolean': 'CHAR(1)',
2527         'string': 'VARCHAR(%(length)s)',
2528         'text': 'CLOB',
2529         'password': 'VARCHAR(%(length)s)',  ## Not sure what this contains utf8 or nvarchar. Or even bytes?
2530         'blob': 'BLOB',
2531         'upload': 'VARCHAR(%(length)s)',  ## FIXME utf8 or nvarchar... or blob? what is this type?
2532         'integer': 'INTEGER4', # or int8...
2533         'double': 'FLOAT8',
2534         'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
2535         'date': 'ANSIDATE',
2536         'time': 'TIME WITHOUT TIME ZONE',
2537         'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
2538         'id': 'integer4 not null unique with default next value for %s' % INGRES_SEQNAME,
2539         'reference': 'integer4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2540         'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
2541         'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
2542         'list:integer': 'CLOB',
2543         'list:string': 'CLOB',
2544         'list:reference': 'CLOB',
2545         }
2546
2547 - def LEFT_JOIN(self):
2548 return 'LEFT OUTER JOIN'
2549
2550 - def RANDOM(self):
2551 return 'RANDOM()'
2552
2553 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
2554     if limitby:
2555         (lmin, lmax) = limitby
2556         fetch_amt = lmax - lmin
2557         if fetch_amt:
2558             sql_s += ' FIRST %d ' % (fetch_amt, )
2559         if lmin:
2560             # Requires Ingres 9.2+
2561             sql_o += ' OFFSET %d' % (lmin, )
2562     return 'SELECT %s %s FROM %s%s%s;' % (sql_s,