author     Brian Jarrett <celttechie@gmail.com>      2014-07-13 21:07:06 -0600
committer  Brian Jarrett <celttechie@gmail.com>      2014-07-13 21:07:06 -0600
commit     d81a256a5c6912f6b2a0de85c29079d8c329e275 (patch)
tree       b745227d0154def49c44eb89d8b7de7a4bd51fbc
parent     551680d06e7a0913690414c78d6dfdb590f1588f (diff)
download   sqlalchemy-pr/111.tar.gz
Manual fixes for style E501 errors, etc. in dialects package (branch pr/111)
34 files changed, 834 insertions, 639 deletions
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index c774358b2..36229a105 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -427,7 +427,7 @@ class FBDialect(default.DefaultDialect):
             sqltypes.DateTime: sqltypes.DATE
         }

-        self.implicit_returning = self._version_two and \
+        self.implicit_returning = self._version_two and \
             self.__dict__.get('implicit_returning', True)

     def normalize_name(self, name):
diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py
index aca5f7e41..ddffc80f5 100644
--- a/lib/sqlalchemy/dialects/firebird/fdb.py
+++ b/lib/sqlalchemy/dialects/firebird/fdb.py
@@ -9,7 +9,8 @@
 .. dialect:: firebird+fdb
     :name: fdb
     :dbapi: pyodbc
-    :connectstring: firebird+fdb://user:password@host:port/path/to/db[?key=value&key=value...]
+    :connectstring: firebird+fdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
     :url: http://pypi.python.org/pypi/fdb/

 fdb is a kinterbasdb compatible DBAPI for Firebird.
@@ -23,8 +24,9 @@
 Arguments
 ----------

-The ``fdb`` dialect is based on the :mod:`sqlalchemy.dialects.firebird.kinterbasdb`
-dialect, however does not accept every argument that Kinterbasdb does.
+The ``fdb`` dialect is based on the
+:mod:`sqlalchemy.dialects.firebird.kinterbasdb` dialect, however does not
+accept every argument that Kinterbasdb does.

 * ``enable_rowcount`` - True by default, setting this to False disables
   the usage of "cursor.rowcount" with the
@@ -61,8 +63,8 @@ dialect, however does not accept every argument that Kinterbasdb does.

 .. seealso::

-    http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions - information
-    on the "retaining" flag.
+    http://pythonhosted.org/fdb/usage-guide.html#retaining-transactions
+    - information on the "retaining" flag.

 """
diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
index 256b902c6..6bd7887f7 100644
--- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
@@ -9,15 +9,16 @@
 .. dialect:: firebird+kinterbasdb
     :name: kinterbasdb
     :dbapi: kinterbasdb
-    :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db[?key=value&key=value...]
+    :connectstring: firebird+kinterbasdb://user:password@host:port/path/to/db\
+[?key=value&key=value...]
     :url: http://firebirdsql.org/index.php?op=devel&sub=python

 Arguments
 ----------

 The Kinterbasdb backend accepts the ``enable_rowcount`` and ``retaining``
-arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect. In addition, it
-also accepts the following:
+arguments accepted by the :mod:`sqlalchemy.dialects.firebird.fdb` dialect.
+In addition, it also accepts the following:

 * ``type_conv`` - select the kind of mapping done on the types: by default
   SQLAlchemy uses 200 with Unicode, datetime and decimal support. See
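(Editor's aside, not part of the patch.) The fdb and kinterbasdb hunks above document the ``enable_rowcount`` and ``retaining`` dialect arguments but show no call. A minimal sketch of passing them through ``create_engine()``, assuming a reachable Firebird server; host, port, database path and credentials are placeholders::

    from sqlalchemy import create_engine

    # Both keyword arguments are the dialect-level flags described in the
    # hunks above; they are forwarded by create_engine() to the dialect.
    engine = create_engine(
        "firebird+fdb://user:password@localhost:3050//var/db/test.fdb",
        enable_rowcount=False,  # skip "cursor.rowcount" support
        retaining=False,        # plain COMMIT/ROLLBACK, no "retaining" flag
    )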
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 9ba427458..f3628d62a 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -13,9 +13,9 @@
 Auto Increment Behavior
 -----------------------

-SQL Server provides so-called "auto incrementing" behavior using the ``IDENTITY``
-construct, which can be placed on an integer primary key.  SQLAlchemy
-considers ``IDENTITY`` within its default "autoincrement" behavior,
+SQL Server provides so-called "auto incrementing" behavior using the
+``IDENTITY`` construct, which can be placed on an integer primary key.
+SQLAlchemy considers ``IDENTITY`` within its default "autoincrement" behavior,
 described at :paramref:`.Column.autoincrement`; this means that by default,
 the first integer primary key column in a :class:`.Table` will be considered
 to be the identity column and will generate DDL as such::
@@ -52,24 +52,25 @@ specify ``autoincrement=False`` on all integer primary key columns::
   An INSERT statement which refers to an explicit value for such
   a column is prohibited by SQL Server, however SQLAlchemy will detect this
   and modify the ``IDENTITY_INSERT`` flag accordingly at statement execution
-  time. As this is not a high performing process, care should be taken to set
-  the ``autoincrement`` flag appropriately for columns that will not actually
-  require IDENTITY behavior.
+  time. As this is not a high performing process, care should be taken to
+  set the ``autoincrement`` flag appropriately for columns that will not
+  actually require IDENTITY behavior.

 Controlling "Start" and "Increment"
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

 Specific control over the parameters of the ``IDENTITY`` value is supported
-using the :class:`.schema.Sequence` object. While this object normally represents
-an explicit "sequence" for supporting backends, on SQL Server it is re-purposed
-to specify behavior regarding the identity column, including support
-of the "start" and "increment" values::
+using the :class:`.schema.Sequence` object. While this object normally
+represents an explicit "sequence" for supporting backends, on SQL Server it is
+re-purposed to specify behavior regarding the identity column, including
+support of the "start" and "increment" values::

     from sqlalchemy import Table, Integer, Sequence, Column

     Table('test', metadata,
            Column('id', Integer,
-                  Sequence('blah', start=100, increment=10), primary_key=True),
+                  Sequence('blah', start=100, increment=10),
+                  primary_key=True),
            Column('name', String(20))
          ).create(some_engine)

@@ -88,10 +89,10 @@ optional and will default to 1,1.
 INSERT behavior
 ^^^^^^^^^^^^^^^^

-Handling of the ``IDENTITY`` column at INSERT time involves two key techniques.
-The most common is being able to fetch the "last inserted value" for a given
-``IDENTITY`` column, a process which SQLAlchemy performs implicitly in many
-cases, most importantly within the ORM.
+Handling of the ``IDENTITY`` column at INSERT time involves two key
+techniques. The most common is being able to fetch the "last inserted value"
+for a given ``IDENTITY`` column, a process which SQLAlchemy performs
+implicitly in many cases, most importantly within the ORM.

 The process for fetching this value has several variants:

@@ -106,9 +107,9 @@ The process for fetching this value has several variants:
   ``implicit_returning=False``, either the ``scope_identity()`` function or
   the ``@@identity`` variable is used; behavior varies by backend:

-  * when using PyODBC, the phrase ``; select scope_identity()`` will be appended
-    to the end of the INSERT statement; a second result set will be fetched
-    in order to receive the value. Given a table as::
+  * when using PyODBC, the phrase ``; select scope_identity()`` will be
+    appended to the end of the INSERT statement; a second result set will be
+    fetched in order to receive the value. Given a table as::

        t = Table('t', m, Column('id', Integer, primary_key=True),
                Column('x', Integer),
                implicit_returning=False)
@@ -121,17 +122,18 @@ The process for fetching this value has several variants:

     INSERT INTO t (x) VALUES (?); select scope_identity()

-  * Other dialects such as pymssql will call upon
-    ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT statement.
-    If the flag ``use_scope_identity=False`` is passed to :func:`.create_engine`,
-    the statement ``SELECT @@identity AS lastrowid`` is used instead.
+  * Other dialects such as pymssql will call upon
+    ``SELECT scope_identity() AS lastrowid`` subsequent to an INSERT
+    statement. If the flag ``use_scope_identity=False`` is passed to
+    :func:`.create_engine`, the statement ``SELECT @@identity AS lastrowid``
+    is used instead.

 A table that contains an ``IDENTITY`` column will prohibit an INSERT statement
 that refers to the identity column explicitly.  The SQLAlchemy dialect will
 detect when an INSERT construct, created using a core :func:`.insert`
 construct (not a plain string SQL), refers to the identity column, and
-in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert statement
-proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the execution.
-Given this example::
+in this case will emit ``SET IDENTITY_INSERT ON`` prior to the insert
+statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
+execution. Given this example::

     m = MetaData()
     t = Table('t', m, Column('id', Integer, primary_key=True),
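(Editor's aside, not part of the patch.) The hunks above describe how the dialect fetches ``IDENTITY`` values and toggles ``IDENTITY_INSERT``. A sketch of both paths against the 0.9-era API, assuming ``engine`` is an already-configured SQL Server engine::

    from sqlalchemy import MetaData, Table, Column, Integer

    m = MetaData()
    t = Table('t', m,
              Column('id', Integer, primary_key=True),  # IDENTITY by default
              Column('x', Integer))
    m.create_all(engine)  # 'engine' is a placeholder mssql engine

    # Ordinary INSERT: the dialect fetches scope_identity() behind the scenes.
    result = engine.execute(t.insert(), x=5)
    print(result.inserted_primary_key)

    # Explicit id: the dialect wraps this in SET IDENTITY_INSERT t ON / OFF.
    engine.execute(t.insert(), id=100, x=6)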
@@ -250,7 +252,8 @@ To generate a clustered primary key use::

 which will render the table, for example, as::

-  CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL, PRIMARY KEY CLUSTERED (x, y))
+  CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
+                         PRIMARY KEY CLUSTERED (x, y))

 Similarly, we can generate a clustered unique constraint using::

@@ -272,7 +275,8 @@ for :class:`.Index`.

 INCLUDE
 ^^^^^^^

-The ``mssql_include`` option renders INCLUDE(colname) for the given string names::
+The ``mssql_include`` option renders INCLUDE(colname) for the given string
+names::

     Index("my_index", table.c.x, mssql_include=['y'])

@@ -763,11 +767,12 @@ class MSExecutionContext(default.DefaultExecutionContext):
                 not self.executemany

         if self._enable_identity_insert:
-            self.root_connection._cursor_execute(self.cursor,
-                "SET IDENTITY_INSERT %s ON" %
-                self.dialect.identifier_preparer.format_table(
-                    tbl),
-                (), self)
+            self.root_connection._cursor_execute(
+                self.cursor,
+                "SET IDENTITY_INSERT %s ON" %
+                self.dialect.identifier_preparer.format_table(tbl),
+                (),
+                self)

     def post_exec(self):
         """Disable IDENTITY_INSERT if enabled."""
@@ -775,11 +780,14 @@ class MSExecutionContext(default.DefaultExecutionContext):
         conn = self.root_connection
         if self._select_lastrowid:
             if self.dialect.use_scope_identity:
-                conn._cursor_execute(self.cursor,
-                    "SELECT scope_identity() AS lastrowid", (), self)
+                conn._cursor_execute(
+                    self.cursor,
+                    "SELECT scope_identity() AS lastrowid", (), self)
             else:
                 conn._cursor_execute(self.cursor,
-                    "SELECT @@identity AS lastrowid", (), self)
+                                     "SELECT @@identity AS lastrowid",
+                                     (),
+                                     self)
             # fetchall() ensures the cursor is consumed without closing it
             row = self.cursor.fetchall()[0]
             self._lastrowid = int(row[0])
@@ -963,7 +971,8 @@ class MSSQLCompiler(compiler.SQLCompiler):
             (field, self.process(extract.expr, **kw))

     def visit_savepoint(self, savepoint_stmt):
-        return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)
+        return "SAVE TRANSACTION %s" % \
+            self.preparer.format_savepoint(savepoint_stmt)

     def visit_rollback_to_savepoint(self, savepoint_stmt):
         return ("ROLLBACK TRANSACTION %s"
@@ -971,7 +980,8 @@ class MSSQLCompiler(compiler.SQLCompiler):

     def visit_column(self, column, add_to_result_map=None, **kwargs):
         if column.table is not None and \
-            (not self.isupdate and not self.isdelete) or self.is_subquery():
+                (not self.isupdate and not self.isdelete) or \
+                self.is_subquery():
             # translate for schema-qualified table aliases
             t = self._schema_aliased_table(column.table)
             if t is not None:
@@ -1169,7 +1179,8 @@ class MSDDLCompiler(compiler.DDLCompiler):
                 preparer.format_table(index.table),
                 ', '.join(
                     self.sql_compiler.process(expr,
-                        include_table=False, literal_binds=True) for
+                                              include_table=False,
+                                              literal_binds=True) for
                     expr in index.expressions)
             )

@@ -1177,7 +1188,9 @@ class MSDDLCompiler(compiler.DDLCompiler):
         if index.dialect_options['mssql']['include']:
             inclusions = [index.table.c[col]
                           if isinstance(col, util.string_types) else col
-                          for col in index.dialect_options['mssql']['include']]
+                          for col in
+                          index.dialect_options['mssql']['include']
+                          ]

             text += " INCLUDE (%s)" \
                 % ', '.join([preparer.quote(c.name)
@@ -1482,7 +1495,8 @@ class MSDialect(default.DefaultDialect):

     @reflection.cache
     @_db_plus_owner
-    def get_view_definition(self, connection, viewname, dbname, owner, schema, **kw):
+    def get_view_definition(self, connection, viewname,
+                            dbname, owner, schema, **kw):
         rp = connection.execute(
             sql.text(
                 "select definition from sys.sql_modules as mod, "
@@ -1607,13 +1621,16 @@ class MSDialect(default.DefaultDialect):

     @reflection.cache
     @_db_plus_owner
-    def get_pk_constraint(self, connection, tablename, dbname, owner, schema, **kw):
+    def get_pk_constraint(self, connection, tablename,
+                          dbname, owner, schema, **kw):
         pkeys = []
         TC = ischema.constraints
         C = ischema.key_constraints.alias('C')

         # Primary key constraints
-        s = sql.select([C.c.column_name, TC.c.constraint_type, C.c.constraint_name],
+        s = sql.select([C.c.column_name,
+                        TC.c.constraint_type,
+                        C.c.constraint_name],
                        sql.and_(TC.c.constraint_name == C.c.constraint_name,
                                 TC.c.table_schema == C.c.table_schema,
                                 C.c.table_name == tablename,
@@ -1630,7 +1647,8 @@ class MSDialect(default.DefaultDialect):

     @reflection.cache
     @_db_plus_owner
-    def get_foreign_keys(self, connection, tablename, dbname, owner, schema, **kw):
+    def get_foreign_keys(self, connection, tablename,
+                         dbname, owner, schema, **kw):
         RR = ischema.ref_constraints
         C = ischema.key_constraints.alias('C')
         R = ischema.key_constraints.alias('R')
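(Editor's aside, not part of the patch.) The reflection hunks above rewrap ``get_pk_constraint()`` and ``get_foreign_keys()``; application code normally reaches these through the inspection API rather than calling them directly. A sketch, reusing the hypothetical ``engine`` and table ``t`` from the earlier aside::

    from sqlalchemy import inspect

    insp = inspect(engine)
    # Dispatches to the dialect methods reworked above.
    print(insp.get_pk_constraint('t'))  # e.g. {'constrained_columns': ['id'], ...}
    print(insp.get_foreign_keys('t'))   # [] for this example table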
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py
index 19d59387d..371a1edcc 100644
--- a/lib/sqlalchemy/dialects/mssql/information_schema.py
+++ b/lib/sqlalchemy/dialects/mssql/information_schema.py
@@ -54,7 +54,8 @@ tables = Table("TABLES", ischema,
     Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
     Column("TABLE_NAME", CoerceUnicode, key="table_name"),
     Column(
-        "TABLE_TYPE", String(convert_unicode=True), key="table_type"),
+        "TABLE_TYPE", String(convert_unicode=True),
+        key="table_type"),
     schema="INFORMATION_SCHEMA")

 columns = Table("COLUMNS", ischema,
@@ -75,42 +76,43 @@ columns = Table("COLUMNS", ischema,
 constraints = Table("TABLE_CONSTRAINTS", ischema,
     Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
     Column("TABLE_NAME", CoerceUnicode, key="table_name"),
-    Column(
-        "CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
+    Column("CONSTRAINT_NAME", CoerceUnicode,
+           key="constraint_name"),
     Column("CONSTRAINT_TYPE", String(
         convert_unicode=True), key="constraint_type"),
     schema="INFORMATION_SCHEMA")

 column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema,
-                           Column(
-                               "TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
-                           Column(
-                               "TABLE_NAME", CoerceUnicode, key="table_name"),
-                           Column(
-                               "COLUMN_NAME", CoerceUnicode, key="column_name"),
-                           Column(
-                               "CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
+                           Column("TABLE_SCHEMA", CoerceUnicode,
+                                  key="table_schema"),
+                           Column("TABLE_NAME", CoerceUnicode,
+                                  key="table_name"),
+                           Column("COLUMN_NAME", CoerceUnicode,
+                                  key="column_name"),
+                           Column("CONSTRAINT_NAME", CoerceUnicode,
+                                  key="constraint_name"),
                            schema="INFORMATION_SCHEMA")

 key_constraints = Table("KEY_COLUMN_USAGE", ischema,
-                        Column(
-                            "TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
-                        Column("TABLE_NAME", CoerceUnicode, key="table_name"),
-                        Column(
-                            "COLUMN_NAME", CoerceUnicode, key="column_name"),
-                        Column(
-                            "CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
-                        Column(
-                            "ORDINAL_POSITION", Integer, key="ordinal_position"),
+                        Column("TABLE_SCHEMA", CoerceUnicode,
+                               key="table_schema"),
+                        Column("TABLE_NAME", CoerceUnicode,
+                               key="table_name"),
+                        Column("COLUMN_NAME", CoerceUnicode,
+                               key="column_name"),
+                        Column("CONSTRAINT_NAME", CoerceUnicode,
+                               key="constraint_name"),
+                        Column("ORDINAL_POSITION", Integer,
+                               key="ordinal_position"),
                         schema="INFORMATION_SCHEMA")

 ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema,
-                        Column(
-                            "CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
-                        Column(
-                            "CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
-                        Column(
-                            "CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
+                        Column("CONSTRAINT_CATALOG", CoerceUnicode,
+                               key="constraint_catalog"),
+                        Column("CONSTRAINT_SCHEMA", CoerceUnicode,
+                               key="constraint_schema"),
+                        Column("CONSTRAINT_NAME", CoerceUnicode,
+                               key="constraint_name"),
                         # TODO: is CATLOG misspelled ?
                         Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode,
                                key="unique_constraint_catalog"),
diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py
index b6749aa2a..ffe38d8dd 100644
--- a/lib/sqlalchemy/dialects/mssql/mxodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py
@@ -91,7 +91,7 @@ class MSDialect_mxodbc(MxODBCConnector, MSDialect):

     # this is only needed if "native ODBC" mode is used,
     # which is now disabled by default.
-    #statement_compiler = MSSQLStrictCompiler
+    # statement_compiler = MSSQLStrictCompiler

     execution_ctx_cls = MSExecutionContext_mxodbc

diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index 5e50b96ac..8f76336ae 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -9,7 +9,8 @@
 .. dialect:: mssql+pymssql
     :name: pymssql
     :dbapi: pymssql
-    :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?charset=utf8
+    :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>?\
+charset=utf8
     :url: http://pymssql.org/

 pymssql is a Python module that provides a Python DBAPI interface around
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
index 86d896f8b..2e05eada4 100644
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -55,9 +55,9 @@ Examples of pyodbc connection string URLs:

     DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123

-* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection
-  string that includes a custom
-  ODBC driver name.  This will create the following connection string::
+* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a
+  connection string that includes a custom ODBC driver name. This will create
+  the following connection string::

     DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass

@@ -85,14 +85,14 @@ Unicode Binds
 -------------

 The current state of PyODBC on a unix backend with FreeTDS and/or
-EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
-versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
-alter how strings are received.  The PyODBC dialect attempts to use all the information
-it knows to determine whether or not a Python unicode literal can be
-passed directly to the PyODBC driver or not; while SQLAlchemy can encode
-these to bytestrings first, some users have reported that PyODBC mis-handles
-bytestrings for certain encodings and requires a Python unicode object,
-while the author has observed widespread cases where a Python unicode
+EasySoft is poor regarding unicode; different OS platforms and versions of
+UnixODBC versus IODBC versus FreeTDS/EasySoft versus PyODBC itself
+dramatically alter how strings are received. The PyODBC dialect attempts to
+use all the information it knows to determine whether or not a Python unicode
+literal can be passed directly to the PyODBC driver or not; while SQLAlchemy
+can encode these to bytestrings first, some users have reported that PyODBC
+mis-handles bytestrings for certain encodings and requires a Python unicode
+object, while the author has observed widespread cases where a Python unicode
 is completely misinterpreted by PyODBC, particularly when dealing with
 the information schema tables used in table reflection, and the value must
 first be encoded to a bytestring.
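(Editor's aside, not part of the patch.) The pyodbc hunk above documents the ``driver=`` query parameter in URL form only. The corresponding ``create_engine()`` call would look roughly like this; credentials, host, database and driver name are placeholders::

    from sqlalchemy import create_engine

    # 'driver' selects the ODBC driver by name, per the hunk above.
    engine = create_engine("mssql+pyodbc://user:pass@host/db?driver=FreeTDS")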
diff --git a/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
index c14ebb70a..b23a010e7 100644
--- a/lib/sqlalchemy/dialects/mssql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -9,7 +9,8 @@
 .. dialect:: mssql+zxjdbc
     :name: zxJDBC for Jython
     :dbapi: zxjdbc
-    :connectstring: mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]
+    :connectstring: mssql+zxjdbc://user:pass@host:port/dbname\
+[?key=value&key=value...]
     :driverurl: http://jtds.sourceforge.net/

diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py
index 32699abbd..498603cf7 100644
--- a/lib/sqlalchemy/dialects/mysql/__init__.py
+++ b/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -22,8 +22,10 @@ from .base import \
     VARBINARY, VARCHAR, YEAR, dialect

 __all__ = (
-    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE',
-    'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT',
-    'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
-    'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect'
+    'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
+    'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
+    'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
+    'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+    'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
+    'YEAR', 'dialect'
 )
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index e2d411957..0c00cf530 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -26,9 +26,9 @@ supported in any given server release.
 Connection Timeouts
 -------------------

-MySQL features an automatic connection close behavior, for connections that have
-been idle for eight hours or more. To circumvent having this issue, use the
-``pool_recycle`` option which controls the maximum age of any connection::
+MySQL features an automatic connection close behavior, for connections that
+have been idle for eight hours or more. To circumvent having this issue, use
+the ``pool_recycle`` option which controls the maximum age of any connection::

     engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)

@@ -38,10 +38,12 @@ CREATE TABLE arguments including Storage Engines
 ------------------------------------------------

 MySQL's CREATE TABLE syntax includes a wide array of special options,
-including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``, ``INSERT_METHOD``, and many more.
+including ``ENGINE``, ``CHARSET``, ``MAX_ROWS``, ``ROW_FORMAT``,
+``INSERT_METHOD``, and many more.
 To accommodate the rendering of these arguments, specify the form
 ``mysql_argument_name="value"``.  For example, to specify a table with
-``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE`` of ``1024``::
+``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8``, and ``KEY_BLOCK_SIZE``
+of ``1024``::

   Table('mytable', metadata,
         Column('data', String(32)),
@@ -50,26 +52,28 @@ To accommodate the rendering of these arguments, specify the form
         mysql_key_block_size="1024"
        )

-The MySQL dialect will normally transfer any keyword specified as ``mysql_keyword_name``
-to be rendered as ``KEYWORD_NAME`` in the ``CREATE TABLE`` statement.  A handful
A handful -of these names will render with a space instead of an underscore; to support this, -the MySQL dialect has awareness of these particular names, which include -``DATA DIRECTORY`` (e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g. -``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g. ``mysql_index_directory``). - -The most common argument is ``mysql_engine``, which refers to the storage engine -for the table. Historically, MySQL server installations would default +The MySQL dialect will normally transfer any keyword specified as +``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the +``CREATE TABLE`` statement. A handful of these names will render with a space +instead of an underscore; to support this, the MySQL dialect has awareness of +these particular names, which include ``DATA DIRECTORY`` +(e.g. ``mysql_data_directory``), ``CHARACTER SET`` (e.g. +``mysql_character_set``) and ``INDEX DIRECTORY`` (e.g. +``mysql_index_directory``). + +The most common argument is ``mysql_engine``, which refers to the storage +engine for the table. Historically, MySQL server installations would default to ``MyISAM`` for this value, although newer versions may be defaulting to ``InnoDB``. The ``InnoDB`` engine is typically preferred for its support of transactions and foreign keys. A :class:`.Table` that is created in a MySQL database with a storage engine -of ``MyISAM`` will be essentially non-transactional, meaning any INSERT/UPDATE/DELETE -statement referring to this table will be invoked as autocommit. It also will have no -support for foreign key constraints; while the ``CREATE TABLE`` statement -accepts foreign key options, when using the ``MyISAM`` storage engine these -arguments are discarded. Reflecting such a table will also produce no -foreign key constraint information. +of ``MyISAM`` will be essentially non-transactional, meaning any +INSERT/UPDATE/DELETE statement referring to this table will be invoked as +autocommit. It also will have no support for foreign key constraints; while +the ``CREATE TABLE`` statement accepts foreign key options, when using the +``MyISAM`` storage engine these arguments are discarded. Reflecting such a +table will also produce no foreign key constraint information. For fully atomic transactions as well as support for foreign key constraints, all participating ``CREATE TABLE`` statements must specify a @@ -118,7 +122,8 @@ AUTO_INCREMENT Behavior ----------------------- When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on -the first :class:`.Integer` primary key column which is not marked as a foreign key:: +the first :class:`.Integer` primary key column which is not marked as a +foreign key:: >>> t = Table('mytable', metadata, ... Column('mytable_id', Integer, primary_key=True) @@ -129,10 +134,10 @@ the first :class:`.Integer` primary key column which is not marked as a foreign PRIMARY KEY (id) ) -You can disable this behavior by passing ``False`` to the :paramref:`~.Column.autoincrement` -argument of :class:`.Column`. This flag can also be used to enable -auto-increment on a secondary column in a multi-column key for some storage -engines:: +You can disable this behavior by passing ``False`` to the +:paramref:`~.Column.autoincrement` argument of :class:`.Column`. 
+can also be used to enable auto-increment on a secondary column in a
+multi-column key for some storage engines::

     Table('mytable', metadata,
           Column('gid', Integer, primary_key=True, autoincrement=False),
@@ -184,8 +189,8 @@ usual definition of "number of rows matched by an UPDATE or DELETE" statement.
 This is in contradiction to the default setting on most MySQL DBAPI drivers,
 which is "number of rows actually modified/deleted".  For this reason, the
 SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
-or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
-is overridden using DBAPI-specific options
+or whatever is equivalent for the DBAPI in use, on connect, unless the flag
+value is overridden using DBAPI-specific options
 (such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
 OurSQL driver).

 See also:

@@ -197,14 +202,14 @@ See also:

 CAST Support
 ------------

-MySQL documents the CAST operator as available in version 4.0.2.  When using the
-SQLAlchemy :func:`.cast` function, SQLAlchemy
-will not render the CAST token on MySQL before this version, based on server version
-detection, instead rendering the internal expression directly.
+MySQL documents the CAST operator as available in version 4.0.2.  When using
+the SQLAlchemy :func:`.cast` function, SQLAlchemy
+will not render the CAST token on MySQL before this version, based on server
+version detection, instead rendering the internal expression directly.

-CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't
-add all datatype support until 4.1.1.   If your application falls into this
-narrow area, the behavior of CAST can be controlled using the
+CAST may still not be desirable on an early MySQL version post-4.0.2, as it
+didn't add all datatype support until 4.1.1.   If your application falls into
+this narrow area, the behavior of CAST can be controlled using the
 :ref:`sqlalchemy.ext.compiler_toplevel` system, as per the recipe below::

     from sqlalchemy.sql.expression import Cast
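(Editor's aside, not part of the patch.) The CAST hunk above is cut off after the recipe's first import line. For orientation only, the recipe this documentation points at reads roughly as follows in SQLAlchemy's compiler-extension docs; this is a reconstruction, not text from the commit::

    from sqlalchemy.sql.expression import Cast
    from sqlalchemy.ext.compiler import compiles

    @compiles(Cast, 'mysql')
    def _check_mysql_version(element, compiler, **kw):
        # Suppress the CAST keyword on servers that lack full datatype
        # support for it; otherwise fall through to the default rendering.
        if compiler.dialect.server_version_info < (4, 1, 0):
            return compiler.process(element.clause, **kw)
        else:
            return compiler.visit_cast(element, **kw)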
@@ -241,7 +246,8 @@ become part of the index. SQLAlchemy provides this feature via the

     Index('my_index', my_table.c.data, mysql_length=10)

-    Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4, 'b': 9})
+    Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
+                                                               'b': 9})

 Prefix lengths are given in characters for nonbinary string types and in bytes
 for binary string types. The value passed to the keyword argument *must* be
@@ -289,10 +295,10 @@ Foreign Key Arguments to Avoid

 MySQL does not support the foreign key arguments "DEFERRABLE", "INITIALLY",
 or "MATCH".  Using the ``deferrable`` or ``initially`` keyword argument with
-:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of these keywords being
-rendered in a DDL expression, which will then raise an error on MySQL.
-In order to use these keywords on a foreign key while having them ignored
-on a MySQL backend, use a custom compile rule::
+:class:`.ForeignKeyConstraint` or :class:`.ForeignKey` will have the effect of
+these keywords being rendered in a DDL expression, which will then raise an
+error on MySQL. In order to use these keywords on a foreign key while having
+them ignored on a MySQL backend, use a custom compile rule::

     from sqlalchemy.ext.compiler import compiles
     from sqlalchemy.schema import ForeignKeyConstraint

@@ -303,19 +309,20 @@ on a MySQL backend, use a custom compile rule::
         return compiler.visit_foreign_key_constraint(element, **kw)

 .. versionchanged:: 0.9.0 - the MySQL backend no longer silently ignores
-   the ``deferrable`` or ``initially`` keyword arguments of :class:`.ForeignKeyConstraint`
-   and :class:`.ForeignKey`.
+   the ``deferrable`` or ``initially`` keyword arguments of
+   :class:`.ForeignKeyConstraint` and :class:`.ForeignKey`.

 The "MATCH" keyword is in fact more insidious, and is explicitly disallowed
-by SQLAlchemy in conjunction with the MySQL backend. This argument is silently
-ignored by MySQL, but in addition has the effect of ON UPDATE and ON DELETE options
-also being ignored by the backend. Therefore MATCH should never be used with the
-MySQL backend; as is the case with DEFERRABLE and INITIALLY, custom compilation
-rules can be used to correct a MySQL ForeignKeyConstraint at DDL definition time.
+by SQLAlchemy in conjunction with the MySQL backend. This argument is
+silently ignored by MySQL, but in addition has the effect of ON UPDATE and ON
+DELETE options also being ignored by the backend. Therefore MATCH should
+never be used with the MySQL backend; as is the case with DEFERRABLE and
+INITIALLY, custom compilation rules can be used to correct a MySQL
+ForeignKeyConstraint at DDL definition time.

-.. versionadded:: 0.9.0 - the MySQL backend will raise a :class:`.CompileError`
-   when the ``match`` keyword is used with :class:`.ForeignKeyConstraint`
-   or :class:`.ForeignKey`.
+.. versionadded:: 0.9.0 - the MySQL backend will raise a
+   :class:`.CompileError` when the ``match`` keyword is used with
+   :class:`.ForeignKeyConstraint` or :class:`.ForeignKey`.

 Reflection of Foreign Key Constraints
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -364,15 +371,15 @@ RESERVED_WORDS = set(
     'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop',
     'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists',
     'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8',
-    'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having',
-    'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if',
-    'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive',
-    'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer',
-    'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill',
-    'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load',
-    'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext',
-    'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match',
-    'mediumblob', 'mediumint', 'mediumtext', 'middleint',
+    'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group',
+    'having', 'high_priority', 'hour_microsecond', 'hour_minute',
+    'hour_second', 'if', 'ignore', 'in', 'index', 'infile', 'inner', 'inout',
+    'insensitive', 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8',
+    'integer', 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys',
+    'kill', 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines',
+    'load', 'localtime', 'localtimestamp', 'lock', 'long', 'longblob',
+    'longtext', 'loop', 'low_priority', 'master_ssl_verify_server_cert',
+    'match', 'mediumblob', 'mediumint', 'mediumtext', 'middleint',
     'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural',
     'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize',
     'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile',
@@ -445,8 +452,9 @@ class _FloatType(_NumericType, sqltypes.Float):
         self.scale = scale

     def __repr__(self):
-        return util.generic_repr(self,
-            to_inspect=[_FloatType, _NumericType, sqltypes.Float])
+        return util.generic_repr(self, to_inspect=[_FloatType,
+                                                   _NumericType,
+                                                   sqltypes.Float])


 class _IntegerType(_NumericType, sqltypes.Integer):
@@ -455,8 +463,9 @@ class _IntegerType(_NumericType, sqltypes.Integer):
         super(_IntegerType, self).__init__(**kw)

     def __repr__(self):
-        return util.generic_repr(self,
-            to_inspect=[_IntegerType, _NumericType, sqltypes.Integer])
+        return util.generic_repr(self, to_inspect=[_IntegerType,
+                                                   _NumericType,
+                                                   sqltypes.Integer])


 class _StringType(sqltypes.String):
@@ -542,10 +551,10 @@ class DOUBLE(_FloatType):
     .. note::

         The :class:`.DOUBLE` type by default converts from float
-        to Decimal, using a truncation that defaults to 10 digits. Specify
-        either ``scale=n`` or ``decimal_return_scale=n`` in order to change
-        this scale, or ``asdecimal=False`` to return values directly as
-        Python floating points.
+        to Decimal, using a truncation that defaults to 10 digits.
+        Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+        to change this scale, or ``asdecimal=False`` to return values
+        directly as Python floating points.

     :param precision: Total digits in this number.  If scale and precision
       are both None, values are stored to limits allowed by the server.
@@ -575,10 +584,10 @@ class REAL(_FloatType, sqltypes.REAL):
     .. note::

         The :class:`.REAL` type by default converts from float
-        to Decimal, using a truncation that defaults to 10 digits. Specify
-        either ``scale=n`` or ``decimal_return_scale=n`` in order to change
-        this scale, or ``asdecimal=False`` to return values directly as
-        Python floating points.
+        to Decimal, using a truncation that defaults to 10 digits.
+        Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+        to change this scale, or ``asdecimal=False`` to return values
+        directly as Python floating points.

     :param precision: Total digits in this number.  If scale and precision
       are both None, values are stored to limits allowed by the server.
@@ -733,9 +742,9 @@ class SMALLINT(_IntegerType, sqltypes.SMALLINT):
 class BIT(sqltypes.TypeEngine):
     """MySQL BIT type.

-    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for
-    MyISAM, MEMORY, InnoDB and BDB.  For older versions, use a MSTinyInteger()
-    type.
+    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
+    for MyISAM, MEMORY, InnoDB and BDB.  For older versions, use a
+    MSTinyInteger() type.

     """
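(Editor's aside, not part of the patch.) The :class:`.DOUBLE` and :class:`.REAL` docstring hunks above describe the float-to-Decimal conversion and the ``asdecimal`` switch. A brief illustration; the table and column names are invented for the example::

    from sqlalchemy import Table, Column, Integer, MetaData
    from sqlalchemy.dialects.mysql import DOUBLE

    metadata = MetaData()
    measurements = Table(
        'measurements', metadata,
        Column('id', Integer, primary_key=True),
        # Return plain Python floats instead of Decimals truncated to the
        # default 10-digit scale described in the docstring above.
        Column('value', DOUBLE(asdecimal=False)),
    )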
@@ -1265,8 +1274,8 @@ class ENUM(sqltypes.Enum, _EnumeratedValues):
         sqltypes.Enum.__init__(self, *values)

     def __repr__(self):
-        return util.generic_repr(self,
-            to_inspect=[ENUM, _StringType, sqltypes.Enum])
+        return util.generic_repr(
+            self, to_inspect=[ENUM, _StringType, sqltypes.Enum])

     def bind_processor(self, dialect):
         super_convert = super(ENUM, self).bind_processor(dialect)
@@ -1368,7 +1377,8 @@ class SET(_EnumeratedValues):
         super_convert = super(SET, self).bind_processor(dialect)

         def process(value):
-            if value is None or isinstance(value, util.int_types + util.string_types):
+            if value is None or isinstance(
+                    value, util.int_types + util.string_types):
                 pass
             else:
                 if None in value:
@@ -1509,7 +1519,8 @@ class MySQLCompiler(compiler.SQLCompiler):
         elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime,
                                 sqltypes.Date, sqltypes.Time)):
             return self.dialect.type_compiler.process(type_)
-        elif isinstance(type_, sqltypes.String) and not isinstance(type_, (ENUM, SET)):
+        elif isinstance(type_, sqltypes.String) \
+                and not isinstance(type_, (ENUM, SET)):
             adapted = CHAR._adapt_string_for_cast(type_)
             return self.dialect.type_compiler.process(adapted)
         elif isinstance(type_, sqltypes._Binary):
@@ -1577,7 +1588,8 @@ class MySQLCompiler(compiler.SQLCompiler):
         # The latter is more readable for offsets but we're stuck with the
         # former until we can refine dialects by server revision.

-        limit_clause, offset_clause = select._limit_clause, select._offset_clause
+        limit_clause, offset_clause = select._limit_clause, \
+            select._offset_clause

         if limit_clause is None and offset_clause is None:
             return ''
@@ -1610,7 +1622,8 @@ class MySQLCompiler(compiler.SQLCompiler):
         else:
             return None

-    def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
+    def update_tables_clause(self, update_stmt, from_table,
+                             extra_froms, **kw):
         return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
                          for t in [from_table] + list(extra_froms))

@@ -1706,7 +1719,8 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
                 joiner = '='
                 if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET',
-                           'CHARACTER SET', 'COLLATE', 'PARTITION BY', 'PARTITIONS'):
+                           'CHARACTER SET', 'COLLATE',
+                           'PARTITION BY', 'PARTITIONS'):
                     joiner = ' '

                 table_opts.append(joiner.join((opt, arg)))
@@ -1732,8 +1746,9 @@ class MySQLDDLCompiler(compiler.DDLCompiler):

         if length is not None:

             if isinstance(length, dict):
-                # length value can be a (column_name --> integer value) mapping
-                # specifying the prefix length for each column of the index
+                # length value can be a (column_name --> integer value)
+                # mapping specifying the prefix length for each column of the
+                # index
                 columns = ', '.join(
                     '%s(%d)' % (expr, length[col.name])
                     if col.name in length else
@@ -1898,8 +1913,8 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
         if self._mysql_type(type_) and \
                 type_.scale is not None and \
                 type_.precision is not None:
-            return self._extend_numeric(type_,
-                "FLOAT(%s, %s)" % (type_.precision, type_.scale))
+            return self._extend_numeric(
+                type_, "FLOAT(%s, %s)" % (type_.precision, type_.scale))
         elif type_.precision is not None:
             return self._extend_numeric(type_,
                                         "FLOAT(%s)" % (type_.precision,))
@@ -1908,25 +1923,25 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):

     def visit_INTEGER(self, type_):
         if self._mysql_type(type_) and type_.display_width is not None:
-            return self._extend_numeric(type_,
-                "INTEGER(%(display_width)s)" %
-                {'display_width': type_.display_width})
+            return self._extend_numeric(
+                type_, "INTEGER(%(display_width)s)" %
+                {'display_width': type_.display_width})
         else:
             return self._extend_numeric(type_, "INTEGER")

     def visit_BIGINT(self, type_):
         if self._mysql_type(type_) and type_.display_width is not None:
-            return self._extend_numeric(type_,
-                "BIGINT(%(display_width)s)" %
-                {'display_width': type_.display_width})
+            return self._extend_numeric(
+                type_, "BIGINT(%(display_width)s)" %
+                {'display_width': type_.display_width})
         else:
             return self._extend_numeric(type_, "BIGINT")

     def visit_MEDIUMINT(self, type_):
         if self._mysql_type(type_) and type_.display_width is not None:
-            return self._extend_numeric(type_,
-                "MEDIUMINT(%(display_width)s)" %
-                {'display_width': type_.display_width})
+            return self._extend_numeric(
+                type_, "MEDIUMINT(%(display_width)s)" %
+                {'display_width': type_.display_width})
         else:
             return self._extend_numeric(type_, "MEDIUMINT")

@@ -1996,7 +2011,8 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):

     def visit_VARCHAR(self, type_):
         if type_.length:
-            return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
+            return self._extend_string(
+                type_, {}, "VARCHAR(%d)" % type_.length)
         else:
             raise exc.CompileError(
                 "VARCHAR requires a length on dialect %s" %
@@ -2013,8 +2029,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
         # We'll actually generate the equiv. "NATIONAL VARCHAR" instead
         # of "NVARCHAR".
         if type_.length:
-            return self._extend_string(type_, {'national': True},
-                "VARCHAR(%(length)s)" % {'length': type_.length})
+            return self._extend_string(
+                type_, {'national': True},
+                "VARCHAR(%(length)s)" % {'length': type_.length})
         else:
             raise exc.CompileError(
                 "NVARCHAR requires a length on dialect %s" %
@@ -2024,8 +2041,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
         # We'll actually generate the equiv.
         # "NATIONAL CHAR" instead of "NCHAR".
         if type_.length:
-            return self._extend_string(type_, {'national': True},
-                "CHAR(%(length)s)" % {'length': type_.length})
+            return self._extend_string(
+                type_, {'national': True},
+                "CHAR(%(length)s)" % {'length': type_.length})
         else:
             return self._extend_string(type_, {'national': True}, "CHAR")

@@ -2099,7 +2117,9 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer):

 @log.class_logger
 class MySQLDialect(default.DefaultDialect):
-    """Details of the MySQL dialect. Not used directly in application code."""
+    """Details of the MySQL dialect.
+    Not used directly in application code.
+    """

     name = 'mysql'
     supports_alter = True
@@ -2158,8 +2178,8 @@ class MySQLDialect(default.DefaultDialect):
         else:
             return None

-    _isolation_lookup = set(['SERIALIZABLE',
-        'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
+    _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
+                             'READ COMMITTED', 'REPEATABLE READ'])

     def set_isolation_level(self, connection, level):
         level = level.replace('_', ' ')
@@ -2187,10 +2207,11 @@ class MySQLDialect(default.DefaultDialect):
         """Execute a COMMIT."""

         # COMMIT/ROLLBACK were introduced in 3.23.15.
-        # Yes, we have at least one user who has to talk to these old versions!
+        # Yes, we have at least one user who has to talk to these old
+        # versions!
         #
-        # Ignore commit/rollback if support isn't present, otherwise even basic
-        # operations via autocommit fail.
+        # Ignore commit/rollback if support isn't present, otherwise even
+        # basic operations via autocommit fail.
         try:
             dbapi_connection.commit()
         except:
@@ -2236,7 +2257,8 @@ class MySQLDialect(default.DefaultDialect):
         return [row['data'][0:row['gtrid_length']] for row in resultset]

     def is_disconnect(self, e, connection, cursor):
-        if isinstance(e, (self.dbapi.OperationalError, self.dbapi.ProgrammingError)):
+        if isinstance(e, (self.dbapi.OperationalError,
+                          self.dbapi.ProgrammingError)):
             return self._extract_error_code(e) in \
                 (2006, 2013, 2014, 2045, 2055)
         elif isinstance(e, self.dbapi.InterfaceError):
@@ -2306,8 +2328,8 @@ class MySQLDialect(default.DefaultDialect):
         if self._server_ansiquotes:
             # if ansiquotes == True, build a new IdentifierPreparer
             # with the new setting
-            self.identifier_preparer = self.preparer(self,
-                server_ansiquotes=self._server_ansiquotes)
+            self.identifier_preparer = self.preparer(
+                self, server_ansiquotes=self._server_ansiquotes)

         default.DefaultDialect.initialize(self, connection)

@@ -2331,13 +2353,15 @@ class MySQLDialect(default.DefaultDialect):
         charset = self._connection_charset

         if self.server_version_info < (5, 0, 2):
-            rp = connection.execute("SHOW TABLES FROM %s" %
-                self.identifier_preparer.quote_identifier(current_schema))
+            rp = connection.execute(
+                "SHOW TABLES FROM %s" %
+                self.identifier_preparer.quote_identifier(current_schema))
             return [row[0] for
                     row in self._compat_fetchall(rp, charset=charset)]
         else:
-            rp = connection.execute("SHOW FULL TABLES FROM %s" %
-                self.identifier_preparer.quote_identifier(current_schema))
+            rp = connection.execute(
+                "SHOW FULL TABLES FROM %s" %
+                self.identifier_preparer.quote_identifier(current_schema))

             return [row[0]
                     for row in self._compat_fetchall(rp, charset=charset)
@@ -2352,8 +2376,9 @@ class MySQLDialect(default.DefaultDialect):
         if self.server_version_info < (5, 0, 2):
             return self.get_table_names(connection, schema)
         charset = self._connection_charset
-        rp = connection.execute("SHOW FULL TABLES FROM %s" %
-            self.identifier_preparer.quote_identifier(schema))
+        rp = connection.execute(
+            "SHOW FULL TABLES FROM %s" %
+            self.identifier_preparer.quote_identifier(schema))
         return [row[0]
                 for row in self._compat_fetchall(rp, charset=charset)
                 if row[1] in ('VIEW', 'SYSTEM VIEW')]
@@ -2394,7 +2419,8 @@ class MySQLDialect(default.DefaultDialect):
         for spec in parsed_state.constraints:
             # only FOREIGN KEYs
             ref_name = spec['table'][-1]
-            ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema
+            ref_schema = len(spec['table']) > 1 and \
+                spec['table'][-2] or schema

             if not ref_schema:
                 if default_schema is None:
@@ -2985,7 +3011,8 @@ class MySQLTableDefinitionParser(object):
             r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
             r'FOREIGN KEY +'
             r'\((?P<local>[^\)]+?)\) REFERENCES +'
-            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+            r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
             r'\((?P<foreign>[^\)]+?)\)'
             r'(?: +(?P<match>MATCH \w+))?'
             r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
@@ -3014,8 +3041,9 @@ class MySQLTableDefinitionParser(object):
         self._add_option_regex('UNION', r'\([^\)]+\)')
         self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
-        self._add_option_regex('RAID_TYPE',
-            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
+        self._add_option_regex(
+            'RAID_TYPE',
+            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')

     _optional_equals = r'(?:\s*(?:=\s*)|\s+)'

     def _add_option_string(self, directive):
         regex = (r'(?P<directive>%s)%s'
                  r"'(?P<val>(?:[^']|'')*?)'(?!')" %
                  (re.escape(directive), self._optional_equals))
-        self._pr_options.append(_pr_compile(regex, lambda v:
-            v.replace("\\\\", "\\").replace("''", "'")))
+        self._pr_options.append(_pr_compile(
+            regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+        ))

     def _add_option_word(self, directive):
         regex = (r'(?P<directive>%s)%s'
diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py
index 7bf8fac66..51b63044e 100644
--- a/lib/sqlalchemy/dialects/mysql/cymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/cymysql.py
@@ -10,7 +10,8 @@
 .. dialect:: mysql+cymysql
     :name: CyMySQL
     :dbapi: cymysql
-    :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
+    :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
     :url: https://github.com/nakagami/CyMySQL

 """
diff --git a/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
index 56a4af205..0059f5a65 100644
--- a/lib/sqlalchemy/dialects/mysql/gaerdbms.py
+++ b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -9,10 +9,11 @@
     :name: Google Cloud SQL
     :dbapi: rdbms
     :connectstring: mysql+gaerdbms:///<dbname>?instance=<instancename>
-    :url: https://developers.google.com/appengine/docs/python/cloud-sql/developers-guide
+    :url: https://developers.google.com/appengine/docs/python/cloud-sql/\
+developers-guide

-    This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal
-    changes.
+    This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with
+    minimal changes.

     .. versionadded:: 0.7.8

diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
index 8d38ef4a0..e51e80005 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
@@ -9,14 +9,15 @@
 .. dialect:: mysql+mysqlconnector
     :name: MySQL Connector/Python
     :dbapi: myconnpy
-    :connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
+    :connectstring: mysql+mysqlconnector://<user>:<password>@\
+<host>[:<port>]/<dbname>
     :url: http://dev.mysql.com/downloads/connector/python/

 """

-from .base import (MySQLDialect,
-    MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer,
+from .base import (MySQLDialect, MySQLExecutionContext,
+                   MySQLCompiler, MySQLIdentifierPreparer,
     BIT)

 from ... import util
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
index 06f38fa93..73210d67a 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -130,9 +130,9 @@ class MySQLDialect_mysqldb(MySQLDialect):
         util.coerce_kw_type(opts, 'read_timeout', int)
         util.coerce_kw_type(opts, 'client_flag', int)
         util.coerce_kw_type(opts, 'local_infile', int)
-        # Note: using either of the below will cause all strings to be returned
-        # as Unicode, both in raw SQL operations and with column types like
-        # String and MSString.
+        # Note: using either of the below will cause all strings to be
+        # returned as Unicode, both in raw SQL operations and with column
+        # types like String and MSString.
         util.coerce_kw_type(opts, 'use_unicode', bool)
         util.coerce_kw_type(opts, 'charset', str)

diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py
index eba117c5f..fa127f3b0 100644
--- a/lib/sqlalchemy/dialects/mysql/oursql.py
+++ b/lib/sqlalchemy/dialects/mysql/oursql.py
@@ -80,7 +80,8 @@ class MySQLDialect_oursql(MySQLDialect):
         return __import__('oursql')

     def do_execute(self, cursor, statement, parameters, context=None):
-        """Provide an implementation of *cursor.execute(statement, parameters)*."""
+        """Provide an implementation of
+        *cursor.execute(statement, parameters)*."""

         if context and context.plain_query:
             cursor.execute(statement, plain_query=True)
@@ -192,7 +193,8 @@ class MySQLDialect_oursql(MySQLDialect):

     def is_disconnect(self, e, connection, cursor):
         if isinstance(e, self.dbapi.ProgrammingError):
-            return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
+            return e.errno is None and 'cursor' not in e.args[1] \
+                and e.args[1].endswith('closed')
         else:
             return e.errno in (2006, 2013, 2014, 2045, 2055)

diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py
index e3fdea753..31226cea0 100644
--- a/lib/sqlalchemy/dialects/mysql/pymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/pymysql.py
@@ -10,15 +10,16 @@
 .. dialect:: mysql+pymysql
     :name: PyMySQL
     :dbapi: pymysql
-    :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
+    :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
+[?<options>]
     :url: http://code.google.com/p/pymysql/

 MySQL-Python Compatibility
 --------------------------

 The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
-and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
-the pymysql driver as well.
+and targets 100% compatibility. Most behavioral notes for MySQL-python apply
+to the pymysql driver as well.

 """
diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py
index c45c673a0..58e8b30fe 100644
--- a/lib/sqlalchemy/dialects/mysql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py
@@ -67,8 +67,8 @@ class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
             if opts.get(key, None):
                 return opts[key]

-        util.warn(
-            "Could not detect the connection character set. Assuming latin1.")
+        util.warn("Could not detect the connection character set. "
+                  "Assuming latin1.")
         return 'latin1'

     def _extract_error_code(self, exception):
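(Editor's aside, not part of the patch.) The pyodbc hunk above, and the zxjdbc hunk just below, rewrap a warning that fires when the connection character set cannot be detected. Naming the charset in the URL, as the connect strings above allow, sidesteps the latin1 fallback; credentials and database are placeholders::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mysql+pymysql://user:password@localhost/test?charset=utf8")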
diff --git a/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
index 81a4c1e13..0cf92cd13 100644
--- a/lib/sqlalchemy/dialects/mysql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
@@ -10,7 +10,8 @@
 .. dialect:: mysql+zxjdbc
     :name: zxjdbc for Jython
     :dbapi: zxjdbc
-    :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/<database>
+    :connectstring: mysql+zxjdbc://<user>:<password>@<hostname>[:<port>]/\
+<database>
    :driverurl: http://dev.mysql.com/downloads/connector/j/

 Character Sets
@@ -83,8 +84,8 @@ class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect):
             if opts.get(key, None):
                 return opts[key]

-        util.warn(
-            "Could not detect the connection character set. Assuming latin1.")
+        util.warn("Could not detect the connection character set. "
+                  "Assuming latin1.")
         return 'latin1'

     def _driver_kwargs(self):
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index e872a3f9a..40ba051f7 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -9,31 +9,37 @@
 .. dialect:: oracle
     :name: Oracle

-    Oracle version 8 through current (11g at the time of this writing) are supported.
+    Oracle version 8 through current (11g at the time of this writing) are
+    supported.

 Connect Arguments
 -----------------

-The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
-affect the behavior of the dialect regardless of driver in use.
+The dialect supports several :func:`~sqlalchemy.create_engine()` arguments
+which affect the behavior of the dialect regardless of driver in use.

-* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
-  to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins.
+* ``use_ansi`` - Use ANSI JOIN constructs (see the section on Oracle 8).
+  Defaults to ``True``. If ``False``, Oracle-8 compatible constructs are used
+  for joins.

-* ``optimize_limits`` - defaults to ``False``. see the section on LIMIT/OFFSET.
+* ``optimize_limits`` - defaults to ``False``. see the section on
+  LIMIT/OFFSET.

-* ``use_binds_for_limits`` - defaults to ``True``. see the section on LIMIT/OFFSET.
+* ``use_binds_for_limits`` - defaults to ``True``. see the section on
+  LIMIT/OFFSET.

 Auto Increment Behavior
 -----------------------

-SQLAlchemy Table objects which include integer primary keys are usually assumed to have
-"autoincrementing" behavior, meaning they can generate their own primary key values upon
-INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
-to produce these values. With the Oracle dialect, *a sequence must always be explicitly
-specified to enable autoincrement*. This is divergent with the majority of documentation
-examples which assume the usage of an autoincrement-capable database. To specify sequences,
-use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
+SQLAlchemy Table objects which include integer primary keys are usually
+assumed to have "autoincrementing" behavior, meaning they can generate their
+own primary key values upon INSERT. Since Oracle has no "autoincrement"
+feature, SQLAlchemy relies upon sequences to produce these values. With the
+Oracle dialect, *a sequence must always be explicitly specified to enable
+autoincrement*. This is divergent with the majority of documentation
+examples which assume the usage of an autoincrement-capable database. To
+specify sequences, use the sqlalchemy.schema.Sequence object which is passed
+to a Column construct::

   t = Table('mytable', metadata,
       Column('id', Integer, Sequence('id_seq'), primary_key=True),
@@ -50,15 +56,16 @@ This step is also required when using table reflection, i.e. autoload=True::

 Identifier Casing
 -----------------

-In Oracle, the data dictionary represents all case insensitive identifier names
-using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
-name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
-to and from those two formats during schema level communication, such as reflection of
-tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
-case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
-against data dictionary data received from Oracle, so unless identifier names have been
-truly created as case sensitive (i.e. using quoted names), all lowercase names should be
-used on the SQLAlchemy side.
+In Oracle, the data dictionary represents all case insensitive identifier
+names using UPPERCASE text. SQLAlchemy on the other hand considers an
+all-lower case identifier name to be case insensitive. The Oracle dialect
+converts all case insensitive identifiers to and from those two formats during
+schema level communication, such as reflection of tables and indexes. Using
+an UPPERCASE name on the SQLAlchemy side indicates a case sensitive
+identifier, and SQLAlchemy will quote the name - this will cause mismatches
+against data dictionary data received from Oracle, so unless identifier names
+have been truly created as case sensitive (i.e. using quoted names), all
+lowercase names should be used on the SQLAlchemy side.


 LIMIT/OFFSET Support
@@ -71,44 +78,49 @@ http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .

 There are two options which affect its behavior:

-* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this
-  optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
-* the values passed for the limit/offset are sent as bound parameters. Some users have observed
-  that Oracle produces a poor query plan when the values are sent as binds and not
-  rendered literally. To render the limit/offset values literally within the SQL
-  statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
-
-Some users have reported better performance when the entirely different approach of a
-window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
-that the majority of users don't observe this). To suit this case the
-method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
+* the "FIRST ROWS()" optimization keyword is not used by default. To enable
+  the usage of this optimization directive, specify ``optimize_limits=True``
+  to :func:`.create_engine`.
+* the values passed for the limit/offset are sent as bound parameters. Some
+  users have observed that Oracle produces a poor query plan when the values
+  are sent as binds and not rendered literally. To render the limit/offset
+  values literally within the SQL statement, specify
+  ``use_binds_for_limits=False`` to :func:`.create_engine`.
+
+Some users have reported better performance when the entirely different
+approach of a window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to
+provide LIMIT/OFFSET (note that the majority of users don't observe this).
+To suit this case the method used for LIMIT/OFFSET can be replaced entirely.
+See the recipe at
 http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
-which installs a select compiler that overrides the generation of limit/offset with
-a window function.
+which installs a select compiler that overrides the generation of limit/offset
+with a window function.

 .. _oracle_returning:

 RETURNING Support
 -----------------

-The Oracle database supports a limited form of RETURNING, in order to retrieve result
-sets of matched rows from INSERT, UPDATE and DELETE statements. Oracle's
Oracle's -RETURNING..INTO syntax only supports one row being returned, as it relies upon -OUT parameters in order to function. In addition, supported DBAPIs have further -limitations (see :ref:`cx_oracle_returning`). +The Oracle database supports a limited form of RETURNING, in order to retrieve +result sets of matched rows from INSERT, UPDATE and DELETE statements. +Oracle's RETURNING..INTO syntax only supports one row being returned, as it +relies upon OUT parameters in order to function. In addition, supported +DBAPIs have further limitations (see :ref:`cx_oracle_returning`). -SQLAlchemy's "implicit returning" feature, which employs RETURNING within an INSERT -and sometimes an UPDATE statement in order to fetch newly generated primary key values -and other SQL defaults and expressions, is normally enabled on the Oracle -backend. By default, "implicit returning" typically only fetches the value of a -single ``nextval(some_seq)`` expression embedded into an INSERT in order to increment -a sequence within an INSERT statement and get the value back at the same time. -To disable this feature across the board, specify ``implicit_returning=False`` to -:func:`.create_engine`:: +SQLAlchemy's "implicit returning" feature, which employs RETURNING within an +INSERT and sometimes an UPDATE statement in order to fetch newly generated +primary key values and other SQL defaults and expressions, is normally enabled +on the Oracle backend. By default, "implicit returning" typically only +fetches the value of a single ``nextval(some_seq)`` expression embedded into +an INSERT in order to increment a sequence within an INSERT statement and get +the value back at the same time. To disable this feature across the board, +specify ``implicit_returning=False`` to :func:`.create_engine`:: - engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False) + engine = create_engine("oracle://scott:tiger@dsn", + implicit_returning=False) -Implicit returning can also be disabled on a table-by-table basis as a table option:: +Implicit returning can also be disabled on a table-by-table basis as a table +option:: # Core Table my_table = Table("my_table", metadata, ..., implicit_returning=False) @@ -121,13 +133,15 @@ Implicit returning can also be disabled on a table-by-table basis as a table opt .. seealso:: - :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on implicit returning. + :ref:`cx_oracle_returning` - additional cx_oracle-specific restrictions on + implicit returning. ON UPDATE CASCADE ----------------- -Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution -is available at http://asktom.oracle.com/tkyte/update_cascade/index.html . +Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based +solution is available at +http://asktom.oracle.com/tkyte/update_cascade/index.html . When using the SQLAlchemy ORM, the ORM has limited ability to manually issue cascading updates - specify ForeignKey objects using the @@ -137,29 +151,32 @@ and specify "passive_updates=False" on each relationship(). Oracle 8 Compatibility ---------------------- -When Oracle 8 is detected, the dialect internally configures itself to the following -behaviors: +When Oracle 8 is detected, the dialect internally configures itself to the +following behaviors: * the use_ansi flag is set to False. This has the effect of converting all JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN makes use of Oracle's (+) operator. 
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
-  the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
-  instead. This because these types don't seem to work correctly on Oracle 8
-  even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
-  and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
+  the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are
+  issued instead. This is because these types don't seem to work correctly on
+  Oracle 8 even though they are available. The
+  :class:`~sqlalchemy.types.NVARCHAR` and
+  :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate
+  NVARCHAR2 and NCLOB.

* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
-  encodes all Python unicode objects to "string" before passing in as bind parameters.
+  encodes all Python unicode objects to "string" before passing in as bind
+  parameters.

Synonym/DBLINK Reflection
-------------------------

-When using reflection with Table objects, the dialect can optionally search for tables
-indicated by synonyms, either in local or remote schemas or accessed over DBLINK,
-by passing the flag ``oracle_resolve_synonyms=True`` as a
-keyword argument to the :class:`.Table` construct::
+When using reflection with Table objects, the dialect can optionally search
+for tables indicated by synonyms, either in local or remote schemas or
+accessed over DBLINK, by passing the flag ``oracle_resolve_synonyms=True`` as
+a keyword argument to the :class:`.Table` construct::

    some_table = Table('some_table', autoload=True,
                autoload_with=some_engine,
@@ -167,9 +184,10 @@ keyword argument to the :class:`.Table` construct::

When this flag is set, the given name (such as ``some_table`` above) will
be searched not just in the ``ALL_TABLES`` view, but also within the
-``ALL_SYNONYMS`` view to see if this name is actually a synonym to another name.
-If the synonym is located and refers to a DBLINK, the oracle dialect knows
-how to locate the table's information using DBLINK syntax (e.g. ``@dblink``).
+``ALL_SYNONYMS`` view to see if this name is actually a synonym to another
+name. If the synonym is located and refers to a DBLINK, the oracle dialect
+knows how to locate the table's information using DBLINK syntax (e.g.
+``@dblink``).

``oracle_resolve_synonyms`` is accepted wherever reflection arguments are
accepted, including methods such as :meth:`.MetaData.reflect` and
@@ -202,7 +220,8 @@ import re
from sqlalchemy import util, sql
from sqlalchemy.engine import default, base, reflection
from sqlalchemy.sql import compiler, visitors, expression
-from sqlalchemy.sql import operators as sql_operators, functions as sql_functions
+from sqlalchemy.sql import (operators as sql_operators,
+                            functions as sql_functions)
from sqlalchemy import types as sqltypes, schema as sa_schema
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
    BLOB, CLOB, TIMESTAMP, FLOAT
@@ -308,10 +327,11 @@ class INTERVAL(sqltypes.TypeEngine):
    This is due to a lack of support for YEAR TO MONTH intervals
    within available DBAPIs (cx_oracle and zxjdbc).

-    :param day_precision: the day precision value. this is the number of digits
-      to store for the day field. Defaults to "2"
-    :param second_precision: the second precision value. this is the number of digits
-      to store for the fractional seconds field. Defaults to "6".
+    :param day_precision: the day precision value. this is the number of
+      digits to store for the day field.
Defaults to "2" + :param second_precision: the second precision value. this is the + number of digits to store for the fractional seconds field. + Defaults to "6". """ self.day_precision = day_precision @@ -557,8 +577,8 @@ class OracleCompiler(compiler.SQLCompiler): binary.left = _OuterJoinColumn(binary.left) elif join.right.is_derived_from(binary.right.table): binary.right = _OuterJoinColumn(binary.right) - clauses.append(visitors.cloned_traverse(join.onclause, {}, - {'binary': visit_binary})) + clauses.append(visitors.cloned_traverse( + join.onclause, {}, {'binary': visit_binary})) else: clauses.append(join.onclause) @@ -581,10 +601,13 @@ class OracleCompiler(compiler.SQLCompiler): return self.process(vc.column) + "(+)" def visit_sequence(self, seq): - return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval" + return (self.dialect.identifier_preparer.format_sequence(seq) + + ".nextval") def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): - """Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??""" + """Oracle doesn't like ``FROM table AS alias``. Is the AS standard + SQL?? + """ if asfrom or ashint: alias_name = isinstance(alias.name, expression._truncated_label) and \ @@ -601,7 +624,8 @@ class OracleCompiler(compiler.SQLCompiler): def returning_clause(self, stmt, returning_cols): columns = [] binds = [] - for i, column in enumerate(expression._select_iterables(returning_cols)): + for i, column in enumerate( + expression._select_iterables(returning_cols)): if column.type._has_column_expression: col_expr = column.type.column_expression(column) else: @@ -610,7 +634,8 @@ class OracleCompiler(compiler.SQLCompiler): self.binds[outparam.key] = outparam binds.append( self.bindparam_string(self._truncate_bindparam(outparam))) - columns.append(self.process(col_expr, within_columns_clause=False)) + columns.append( + self.process(col_expr, within_columns_clause=False)) self.result_map[outparam.key] = ( outparam.key, (column, getattr(column, 'name', None), @@ -621,7 +646,9 @@ class OracleCompiler(compiler.SQLCompiler): return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) def _TODO_visit_compound_select(self, select): - """Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle.""" + """Need to determine how to get ``LIMIT``/``OFFSET`` into a + ``UNION`` for Oracle. + """ pass def visit_select(self, select, **kwargs): @@ -641,15 +668,17 @@ class OracleCompiler(compiler.SQLCompiler): limit_clause = select._limit_clause offset_clause = select._offset_clause if limit_clause is not None or offset_clause is not None: - # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html + # See http://www.oracle.com/technology/oramag/oracle/06-sep/\ + # o56asktom.html # # Generalized form of an Oracle pagination query: # select ... from ( - # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from ( - # select distinct ... where ... order by ... + # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from + # ( select distinct ... where ... order by ... # ) where ROWNUM <= :limit+:offset # ) where ora_rn > :offset - # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0 + # Outer select and "ROWNUM as ora_rn" can be dropped if + # limit=0 # TODO: use annotations instead of clone + attr set ? 
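            # Illustrative sketch, not from the original source: with
            # limit=10 and offset=5, the generalized form above renders
            # roughly as
            #   select ... from (
            #       select ...., rownum as ora_rn from ( select ... )
            #       where ROWNUM <= 10 + 5
            #   ) where ora_rn > 5
            # with the two values sent as bound parameters by default, or
            # inlined literally when use_binds_for_limits=False.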
select = select._generate() @@ -745,8 +774,9 @@ class OracleDDLCompiler(compiler.DDLCompiler): if constraint.onupdate is not None: util.warn( "Oracle does not contain native UPDATE CASCADE " - "functionality - onupdates will not be rendered for foreign keys. " - "Consider using deferrable=True, initially='deferred' or triggers.") + "functionality - onupdates will not be rendered for foreign " + "keys. Consider using deferrable=True, initially='deferred' " + "or triggers.") return text @@ -770,14 +800,16 @@ class OracleIdentifierPreparer(compiler.IdentifierPreparer): def format_savepoint(self, savepoint): name = re.sub(r'^_+', '', savepoint.ident) - return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name) + return super( + OracleIdentifierPreparer, self).format_savepoint(savepoint, name) class OracleExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): - return self._execute_scalar("SELECT " + - self.dialect.identifier_preparer.format_sequence(seq) + - ".nextval FROM DUAL", type_) + return self._execute_scalar( + "SELECT " + + self.dialect.identifier_preparer.format_sequence(seq) + + ".nextval FROM DUAL", type_) class OracleDialect(default.DefaultDialect): @@ -858,7 +890,8 @@ class OracleDialect(default.DefaultDialect): cursor = connection.execute( sql.text("SELECT table_name FROM all_tables " "WHERE table_name = :name AND owner = :schema_name"), - name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema)) + name=self.denormalize_name(table_name), + schema_name=self.denormalize_name(schema)) return cursor.first() is not None def has_sequence(self, connection, sequence_name, schema=None): @@ -866,8 +899,10 @@ class OracleDialect(default.DefaultDialect): schema = self.default_schema_name cursor = connection.execute( sql.text("SELECT sequence_name FROM all_sequences " - "WHERE sequence_name = :name AND sequence_owner = :schema_name"), - name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema)) + "WHERE sequence_name = :name AND " + "sequence_owner = :schema_name"), + name=self.denormalize_name(sequence_name), + schema_name=self.denormalize_name(schema)) return cursor.first() is not None def normalize_name(self, name): @@ -876,8 +911,8 @@ class OracleDialect(default.DefaultDialect): if util.py2k: if isinstance(name, str): name = name.decode(self.encoding) - if name.upper() == name and \ - not self.identifier_preparer._requires_quotes(name.lower()): + if name.upper() == name and not \ + self.identifier_preparer._requires_quotes(name.lower()): return name.lower() else: return name @@ -885,7 +920,8 @@ class OracleDialect(default.DefaultDialect): def denormalize_name(self, name): if name is None: return None - elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()): + elif name.lower() == name and not \ + self.identifier_preparer._requires_quotes(name.lower()): name = name.upper() if util.py2k: if not self.supports_unicode_binds: @@ -895,14 +931,17 @@ class OracleDialect(default.DefaultDialect): return name def _get_default_schema_name(self, connection): - return self.normalize_name(connection.execute('SELECT USER FROM DUAL').scalar()) + return self.normalize_name( + connection.execute('SELECT USER FROM DUAL').scalar()) - def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None): + def _resolve_synonym(self, connection, desired_owner=None, + desired_synonym=None, desired_table=None): """search for a local synonym 
matching the given desired owner/name. if desired_owner is None, attempts to locate a distinct owner. - returns the actual name, owner, dblink name, and synonym name if found. + returns the actual name, owner, dblink name, and synonym name if + found. """ q = "SELECT owner, table_owner, table_name, db_link, "\ @@ -925,17 +964,20 @@ class OracleDialect(default.DefaultDialect): if desired_owner: row = result.first() if row: - return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] + return (row['table_name'], row['table_owner'], + row['db_link'], row['synonym_name']) else: return None, None, None, None else: rows = result.fetchall() if len(rows) > 1: raise AssertionError( - "There are multiple tables visible to the schema, you must specify owner") + "There are multiple tables visible to the schema, you " + "must specify owner") elif len(rows) == 1: row = rows[0] - return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] + return (row['table_name'], row['table_owner'], + row['db_link'], row['synonym_name']) else: return None, None, None, None @@ -984,7 +1026,8 @@ class OracleDialect(default.DefaultDialect): schema = self.default_schema_name s = sql.text( "SELECT table_name FROM all_tables " - "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') " + "WHERE nvl(tablespace_name, 'no tablespace') NOT IN " + "('SYSTEM', 'SYSAUX') " "AND OWNER = :owner " "AND IOT_NAME IS NULL") cursor = connection.execute(s, owner=schema) @@ -1126,8 +1169,8 @@ class OracleDialect(default.DefaultDialect): for rset in rp: if rset.index_name != last_index_name: remove_if_primary_key(index) - index = dict( - name=self.normalize_name(rset.index_name), column_names=[]) + index = dict(name=self.normalize_name(rset.index_name), + column_names=[]) indexes.append(index) index['unique'] = uniqueness.get(rset.uniqueness, False) @@ -1191,9 +1234,9 @@ class OracleDialect(default.DefaultDialect): info_cache=info_cache) pkeys = [] constraint_name = None - constraint_data = self._get_constraint_data(connection, table_name, - schema, dblink, - info_cache=kw.get('info_cache')) + constraint_data = self._get_constraint_data( + connection, table_name, schema, dblink, + info_cache=kw.get('info_cache')) for row in constraint_data: (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ @@ -1226,9 +1269,9 @@ class OracleDialect(default.DefaultDialect): resolve_synonyms, dblink, info_cache=info_cache) - constraint_data = self._get_constraint_data(connection, table_name, - schema, dblink, - info_cache=kw.get('info_cache')) + constraint_data = self._get_constraint_data( + connection, table_name, schema, dblink, + info_cache=kw.get('info_cache')) def fkey_rec(): return { @@ -1276,7 +1319,8 @@ class OracleDialect(default.DefaultDialect): rec['referred_table'] = remote_table - if requested_schema is not None or self.denormalize_name(remote_owner) != schema: + if requested_schema is not None or \ + self.denormalize_name(remote_owner) != schema: rec['referred_schema'] = remote_owner local_cols.append(local_column) diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index e9f5780d2..4a1ceecb1 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -10,7 +10,8 @@ .. dialect:: oracle+cx_oracle :name: cx-Oracle :dbapi: cx_oracle - :connectstring: oracle+cx_oracle://user:pass@host:port/dbname[?key=value&key=value...] 
+ :connectstring: oracle+cx_oracle://user:pass@host:port/dbname\ +[?key=value&key=value...] :url: http://cx-oracle.sourceforge.net/ Additional Connect Arguments @@ -52,21 +53,21 @@ on the URL, or as keyword arguments to :func:`.create_engine()` are: .. versionadded:: 0.8 specific DBAPI types can be excluded from the auto_setinputsizes feature via the exclude_setinputsizes attribute. -* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or alternatively - an integer value. This value is only available as a URL query string - argument. +* ``mode`` - This is given the string value of SYSDBA or SYSOPER, or + alternatively an integer value. This value is only available as a URL query + string argument. -* ``threaded`` - enable multithreaded access to cx_oracle connections. Defaults - to ``True``. Note that this is the opposite default of the cx_Oracle DBAPI - itself. +* ``threaded`` - enable multithreaded access to cx_oracle connections. + Defaults to ``True``. Note that this is the opposite default of the + cx_Oracle DBAPI itself. .. _cx_oracle_unicode: Unicode ------- -The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the ability -to return string results as Python unicode objects natively. +The cx_Oracle DBAPI as of version 5 fully supports unicode, and has the +ability to return string results as Python unicode objects natively. When used in Python 3, cx_Oracle returns all strings as Python unicode objects (that is, plain ``str`` in Python 3). In Python 2, it will return as Python @@ -74,37 +75,39 @@ unicode those column values that are of type ``NVARCHAR`` or ``NCLOB``. For column values that are of type ``VARCHAR`` or other non-unicode string types, it will return values as Python strings (e.g. bytestrings). -The cx_Oracle SQLAlchemy dialect presents two different options for the use case of -returning ``VARCHAR`` column values as Python unicode objects under Python 2: +The cx_Oracle SQLAlchemy dialect presents two different options for the use +case of returning ``VARCHAR`` column values as Python unicode objects under +Python 2: * the cx_Oracle DBAPI has the ability to coerce all string results to Python unicode objects unconditionally using output type handlers. This has the advantage that the unicode conversion is global to all statements at the cx_Oracle driver level, meaning it works with raw textual SQL statements that have no typing information associated. However, this system - has been observed to incur signfiicant performance overhead, not only because - it takes effect for all string values unconditionally, but also because cx_Oracle under - Python 2 seems to use a pure-Python function call in order to do the - decode operation, which under cPython can orders of magnitude slower - than doing it using C functions alone. - -* SQLAlchemy has unicode-decoding services built in, and when using SQLAlchemy's - C extensions, these functions do not use any Python function calls and - are very fast. The disadvantage to this approach is that the unicode - conversion only takes effect for statements where the :class:`.Unicode` type - or :class:`.String` type with ``convert_unicode=True`` is explicitly - associated with the result column. This is the case for any ORM or Core - query or SQL expression as well as for a :func:`.text` construct that specifies - output column types, so in the vast majority of cases this is not an issue. 
-  However, when sending a completely raw string to :meth:`.Connection.execute`,
-  this typing information isn't present, unless the string is handled
-  within a :func:`.text` construct that adds typing information.
+  has been observed to incur significant performance overhead, not only
+  because it takes effect for all string values unconditionally, but also
+  because cx_Oracle under Python 2 seems to use a pure-Python function call in
+  order to do the decode operation, which under CPython can be orders of
+  magnitude slower than doing it using C functions alone.
+
+* SQLAlchemy has unicode-decoding services built in, and when using
+  SQLAlchemy's C extensions, these functions do not use any Python function
+  calls and are very fast. The disadvantage to this approach is that the
+  unicode conversion only takes effect for statements where the
+  :class:`.Unicode` type or :class:`.String` type with
+  ``convert_unicode=True`` is explicitly associated with the result column.
+  This is the case for any ORM or Core query or SQL expression as well as for
+  a :func:`.text` construct that specifies output column types, so in the vast
+  majority of cases this is not an issue. However, when sending a completely
+  raw string to :meth:`.Connection.execute`, this typing information isn't
+  present, unless the string is handled within a :func:`.text` construct that
+  adds typing information.

As of version 0.9.2 of SQLAlchemy, the default approach is to use
SQLAlchemy's typing system. This keeps cx_Oracle's expensive Python 2 approach
-disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy detects
-that cx_Oracle is returning unicode objects natively and cx_Oracle's system
-is used.
+disabled unless the user explicitly wants it. Under Python 3, SQLAlchemy
+detects that cx_Oracle is returning unicode objects natively and cx_Oracle's
+system is used.

To re-enable cx_Oracle's output type handler under Python 2, the
``coerce_to_unicode=True`` flag (new in 0.9.4) can be passed to
@@ -117,12 +120,13 @@ as Python unicode under Python 2 without using cx_Oracle's native handlers,
the :func:`.text` feature can be used::

    from sqlalchemy import text, Unicode
-    result = conn.execute(text("select username from user").columns(username=Unicode))
+    result = conn.execute(
+        text("select username from user").columns(username=Unicode))

-.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used for
-   unicode results of non-unicode datatypes in Python 2, after they were identified as a major
-   performance bottleneck. SQLAlchemy's own unicode facilities are used
-   instead.
+.. versionchanged:: 0.9.2 cx_Oracle's outputtypehandlers are no longer used
+   for unicode results of non-unicode datatypes in Python 2, after they were
+   identified as a major performance bottleneck. SQLAlchemy's own unicode
+   facilities are used instead.

.. versionadded:: 0.9.4 Added the ``coerce_to_unicode`` flag, to re-enable
   cx_Oracle's outputtypehandler and revert to pre-0.9.2 behavior.

@@ -132,38 +136,43 @@ the :func:`.text` feature can be used::

RETURNING Support
-----------------

-The cx_oracle DBAPI supports a limited subset of Oracle's already limited RETURNING support.
-Typically, results can only be guaranteed for at most one column being returned;
-this is the typical case when SQLAlchemy uses RETURNING to get just the value of a
-primary-key-associated sequence value.
Additional column expressions will -cause problems in a non-determinative way, due to cx_oracle's lack of support for -the OCI_DATA_AT_EXEC API which is required for more complex RETURNING scenarios. +The cx_oracle DBAPI supports a limited subset of Oracle's already limited +RETURNING support. Typically, results can only be guaranteed for at most one +column being returned; this is the typical case when SQLAlchemy uses RETURNING +to get just the value of a primary-key-associated sequence value. +Additional column expressions will cause problems in a non-determinative way, +due to cx_oracle's lack of support for the OCI_DATA_AT_EXEC API which is +required for more complex RETURNING scenarios. -For this reason, stability may be enhanced by disabling RETURNING support completely; -SQLAlchemy otherwise will use RETURNING to fetch newly sequence-generated -primary keys. As illustrated in :ref:`oracle_returning`:: +For this reason, stability may be enhanced by disabling RETURNING support +completely; SQLAlchemy otherwise will use RETURNING to fetch newly +sequence-generated primary keys. As illustrated in :ref:`oracle_returning`:: - engine = create_engine("oracle://scott:tiger@dsn", implicit_returning=False) + engine = create_engine("oracle://scott:tiger@dsn", + implicit_returning=False) .. seealso:: - http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 - OCI documentation for RETURNING + http://docs.oracle.com/cd/B10501_01/appdev.920/a96584/oci05bnd.htm#420693 + - OCI documentation for RETURNING - http://sourceforge.net/mailarchive/message.php?msg_id=31338136 - cx_oracle developer commentary + http://sourceforge.net/mailarchive/message.php?msg_id=31338136 + - cx_oracle developer commentary .. _cx_oracle_lob: LOB Objects ----------- -cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts -these to strings so that the interface of the Binary type is consistent with that of -other backends, and so that the linkage to a live cursor is not needed in scenarios -like result.fetchmany() and result.fetchall(). This means that by default, LOB -objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live -cursor is broken. +cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy +converts these to strings so that the interface of the Binary type is +consistent with that of other backends, and so that the linkage to a live +cursor is not needed in scenarios like result.fetchmany() and +result.fetchall(). This means that by default, LOB objects are fully fetched +unconditionally by SQLAlchemy, and the linkage to a live cursor is broken. -To disable this processing, pass ``auto_convert_lobs=False`` to :func:`.create_engine()`. +To disable this processing, pass ``auto_convert_lobs=False`` to +:func:`.create_engine()`. 
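As a minimal sketch of this flag in use (the DSN here is hypothetical)::

    from sqlalchemy import create_engine

    # LOB values will be returned as cx_oracle.LOB objects rather than
    # being fetched into strings up front
    engine = create_engine(
        "oracle://scott:tiger@dsn",
        auto_convert_lobs=False
    )

With the flag disabled, each LOB must be consumed (e.g. via its ``read()``
method) while its originating cursor is still open, which is the tradeoff
described above.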
Two Phase Transaction Support ----------------------------- @@ -363,7 +372,8 @@ class _NativeUnicodeMixin(object): return unicode(value) return process else: - return super(_NativeUnicodeMixin, self).bind_processor(dialect) + return super( + _NativeUnicodeMixin, self).bind_processor(dialect) # we apply a connection output handler that returns # unicode in all cases, so the "native_unicode" flag @@ -397,7 +407,8 @@ class _OracleString(_NativeUnicodeMixin, sqltypes.String): pass -class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): +class _OracleUnicodeText( + _LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): def get_dbapi_type(self, dbapi): return dbapi.NCLOB @@ -487,9 +498,10 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): # cx_oracle really has issues when you setinputsizes # on String, including that outparams/RETURNING # breaks for varchars - self.set_input_sizes(quoted_bind_names, - exclude_types=self.dialect.exclude_setinputsizes - ) + self.set_input_sizes( + quoted_bind_names, + exclude_types=self.dialect.exclude_setinputsizes + ) # if a single execute, check for outparams if len(self.compiled_parameters) == 1: @@ -566,7 +578,8 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): return result -class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle): +class OracleExecutionContext_cx_oracle_with_unicode( + OracleExecutionContext_cx_oracle): """Support WITH_UNICODE in Python 2.xx. WITH_UNICODE allows cx_Oracle's Python 3 unicode handling @@ -590,7 +603,8 @@ class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_or class ReturningResultProxy(_result.FullyBufferedResultProxy): - """Result proxy which stuffs the _returning clause + outparams into the fetch.""" + """Result proxy which stuffs the _returning clause + outparams + into the fetch.""" def __init__(self, context, returning_params): self._returning_params = returning_params @@ -604,8 +618,10 @@ class ReturningResultProxy(_result.FullyBufferedResultProxy): ] def _buffer_rows(self): - return collections.deque([tuple(self._returning_params["ret_%d" % i] - for i, c in enumerate(self._returning_params))]) + return collections.deque( + [tuple(self._returning_params["ret_%d" % i] + for i, c in enumerate(self._returning_params))] + ) class OracleDialect_cx_oracle(OracleDialect): @@ -695,7 +711,8 @@ class OracleDialect_cx_oracle(OracleDialect): # this occurs in tests with mock DBAPIs self._cx_oracle_string_types = set() self._cx_oracle_with_unicode = False - elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'): + elif self.cx_oracle_ver >= (5,) and not \ + hasattr(self.dbapi, 'UNICODE'): # cx_Oracle WITH_UNICODE mode. *only* python # unicode objects accepted for anything self.supports_unicode_statements = True @@ -703,15 +720,15 @@ class OracleDialect_cx_oracle(OracleDialect): self._cx_oracle_with_unicode = True if util.py2k: - # There's really no reason to run with WITH_UNICODE under Python 2.x. - # Give the user a hint. + # There's really no reason to run with WITH_UNICODE under + # Python 2.x. Give the user a hint. util.warn( "cx_Oracle is compiled under Python 2.xx using the " "WITH_UNICODE flag. Consider recompiling cx_Oracle " - "without this flag, which is in no way necessary for full " - "support of Unicode. 
Otherwise, all string-holding bind " - "parameters must be explicitly typed using SQLAlchemy's " - "String type or one of its subtypes," + "without this flag, which is in no way necessary for " + "full support of Unicode. Otherwise, all string-holding " + "bind parameters must be explicitly typed using " + "SQLAlchemy's String type or one of its subtypes," "or otherwise be passed as Python unicode. " "Plain Python strings passed as bind parameters will be " "silently corrupted by cx_Oracle." @@ -727,8 +744,8 @@ class OracleDialect_cx_oracle(OracleDialect): self.dbapi_type_map = {} else: # only use this for LOB objects. using it for strings, dates - # etc. leads to a little too much magic, reflection doesn't know if it should - # expect encoded strings or unicodes, etc. + # etc. leads to a little too much magic, reflection doesn't know + # if it should expect encoded strings or unicodes, etc. self.dbapi_type_map = { self.dbapi.CLOB: oracle.CLOB(), self.dbapi.NCLOB: oracle.NCLOB(), diff --git a/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/sqlalchemy/dialects/oracle/zxjdbc.py index b3bae1ca0..82c8e2f0f 100644 --- a/lib/sqlalchemy/dialects/oracle/zxjdbc.py +++ b/lib/sqlalchemy/dialects/oracle/zxjdbc.py @@ -10,7 +10,8 @@ :name: zxJDBC for Jython :dbapi: zxjdbc :connectstring: oracle+zxjdbc://user:pass@host/dbname - :driverurl: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html. + :driverurl: http://www.oracle.com/technology/software/tech/java/\ +sqlj_jdbc/index.html. """ import decimal @@ -18,7 +19,9 @@ import re from sqlalchemy import sql, types as sqltypes, util from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext +from sqlalchemy.dialects.oracle.base import (OracleCompiler, + OracleDialect, + OracleExecutionContext) from sqlalchemy.engine import result as _result from sqlalchemy.sql import expression import collections @@ -65,7 +68,8 @@ class OracleCompiler_zxjdbc(OracleCompiler): expression._select_iterables(returning_cols)) # within_columns_clause=False so that labels (foo AS bar) don't render - columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) + columns = [self.process(c, within_columns_clause=False, + result_map=self.result_map) for c in self.returning_cols] if not hasattr(self, 'returning_parameters'): @@ -108,8 +112,11 @@ class OracleExecutionContext_zxjdbc(OracleExecutionContext): msg += ' [SQLState: %s]' % sqle.getSQLState() raise zxJDBC.Error(msg) else: - row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype) - for index, dbtype in self.compiled.returning_parameters) + row = tuple( + self.cursor.datahandler.getPyObject( + rrs, index, dbtype) + for index, dbtype in + self.compiled.returning_parameters) return ReturningResultProxy(self, row) finally: if rrs is not None: @@ -170,8 +177,8 @@ class ReturningParam(object): def __repr__(self): kls = self.__class__ - return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self), - self.type) + return '<%s.%s object at 0x%x type=%s>' % ( + kls.__module__, kls.__name__, id(self), self.type) class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): @@ -212,10 +219,12 @@ class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): def initialize(self, connection): super(OracleDialect_zxjdbc, self).initialize(connection) - self.implicit_returning = connection.connection.driverversion >= '10.2' + self.implicit_returning = \ + 
connection.connection.driverversion >= '10.2' def _create_jdbc_url(self, url): - return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database) + return 'jdbc:oracle:thin:@%s:%s:%s' % ( + url.host, url.port or 1521, url.database) def _get_server_version_info(self, connection): version = re.search( diff --git a/lib/sqlalchemy/dialects/postgres.py b/lib/sqlalchemy/dialects/postgres.py index f9725b2a5..f813e0003 100644 --- a/lib/sqlalchemy/dialects/postgres.py +++ b/lib/sqlalchemy/dialects/postgres.py @@ -9,8 +9,9 @@ from sqlalchemy.util import warn_deprecated warn_deprecated( - "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. " - "The new URL format is postgresql[+driver]://<user>:<pass>@<host>/<dbname>" + "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to " + "'postgresql'. The new URL format is " + "postgresql[+driver]://<user>:<pass>@<host>/<dbname>" ) from sqlalchemy.dialects.postgresql import * diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index c033a792d..9e5934f9e 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -50,22 +50,25 @@ Transaction Isolation Level All Postgresql dialects support setting of transaction isolation level both via a dialect-specific parameter ``isolation_level`` accepted by :func:`.create_engine`, -as well as the ``isolation_level`` argument as passed to :meth:`.Connection.execution_options`. -When using a non-psycopg2 dialect, this feature works by issuing the -command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL -<level>`` for each new connection. +as well as the ``isolation_level`` argument as passed to +:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect, +this feature works by issuing the command +``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL <level>`` for +each new connection. To set isolation level using :func:`.create_engine`:: engine = create_engine( - "postgresql+pg8000://scott:tiger@localhost/test", - isolation_level="READ UNCOMMITTED" - ) + "postgresql+pg8000://scott:tiger@localhost/test", + isolation_level="READ UNCOMMITTED" + ) To set using per-connection execution options:: connection = engine.connect() - connection = connection.execution_options(isolation_level="READ COMMITTED") + connection = connection.execution_options( + isolation_level="READ COMMITTED" + ) Valid values for ``isolation_level`` include: @@ -93,12 +96,13 @@ The Postgresql dialect can reflect tables from any schema. The :paramref:`.Table.schema` argument, or alternatively the :paramref:`.MetaData.reflect.schema` argument determines which schema will be searched for the table or tables. The reflected :class:`.Table` objects -will in all cases retain this ``.schema`` attribute as was specified. However, -with regards to tables which these :class:`.Table` objects refer to via -foreign key constraint, a decision must be made as to how the ``.schema`` +will in all cases retain this ``.schema`` attribute as was specified. +However, with regards to tables which these :class:`.Table` objects refer to +via foreign key constraint, a decision must be made as to how the ``.schema`` is represented in those remote tables, in the case where that remote schema name is also a member of the current -`Postgresql search path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_. 
+`Postgresql search path +<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_. By default, the Postgresql dialect mimics the behavior encouraged by Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function @@ -115,7 +119,8 @@ illustrates this behavior:: CREATE TABLE test=> SET search_path TO public, test_schema; test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM - test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f' test-> ; @@ -124,10 +129,11 @@ illustrates this behavior:: FOREIGN KEY (referred_id) REFERENCES referred(id) (1 row) -Above, we created a table ``referred`` as a member of the remote schema ``test_schema``, however -when we added ``test_schema`` to the PG ``search_path`` and then asked ``pg_get_constraintdef()`` -for the ``FOREIGN KEY`` syntax, ``test_schema`` was not included in the -output of the function. +Above, we created a table ``referred`` as a member of the remote schema +``test_schema``, however when we added ``test_schema`` to the +PG ``search_path`` and then asked ``pg_get_constraintdef()`` for the +``FOREIGN KEY`` syntax, ``test_schema`` was not included in the output of +the function. On the other hand, if we set the search path back to the typical default of ``public``:: @@ -139,7 +145,8 @@ The same query against ``pg_get_constraintdef()`` now returns the fully schema-qualified name for us:: test=> SELECT pg_catalog.pg_get_constraintdef(r.oid, true) FROM - test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace + test-> pg_catalog.pg_class c JOIN pg_catalog.pg_namespace n + test-> ON n.oid = c.relnamespace test-> JOIN pg_catalog.pg_constraint r ON c.oid = r.conrelid test-> WHERE c.relname='referring' AND r.contype = 'f'; pg_get_constraintdef @@ -157,7 +164,8 @@ reflection process as follows:: >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() - ... referring = Table('referring', meta, autoload=True, autoload_with=conn) + ... referring = Table('referring', meta, + ... autoload=True, autoload_with=conn) ... <sqlalchemy.engine.result.ResultProxy object at 0x101612ed0> @@ -167,16 +175,18 @@ The above process would deliver to the :attr:`.MetaData.tables` collection >>> meta.tables['referred'].schema is None True -To alter the behavior of reflection such that the referred schema is maintained -regardless of the ``search_path`` setting, use the ``postgresql_ignore_search_path`` -option, which can be specified as a dialect-specific argument to both -:class:`.Table` as well as :meth:`.MetaData.reflect`:: +To alter the behavior of reflection such that the referred schema is +maintained regardless of the ``search_path`` setting, use the +``postgresql_ignore_search_path`` option, which can be specified as a +dialect-specific argument to both :class:`.Table` as well as +:meth:`.MetaData.reflect`:: >>> with engine.connect() as conn: ... conn.execute("SET search_path TO test_schema, public") ... meta = MetaData() - ... referring = Table('referring', meta, autoload=True, autoload_with=conn, - ... postgresql_ignore_search_path=True) + ... referring = Table('referring', meta, autoload=True, + ... autoload_with=conn, + ... postgresql_ignore_search_path=True) ... 
<sqlalchemy.engine.result.ResultProxy object at 0x1016126d0> @@ -187,29 +197,33 @@ We will now have ``test_schema.referred`` stored as schema-qualified:: .. sidebar:: Best Practices for Postgresql Schema reflection - The description of Postgresql schema reflection behavior is complex, and is - the product of many years of dealing with widely varied use cases and user preferences. - But in fact, there's no need to understand any of it if you just stick to the simplest - use pattern: leave the ``search_path`` set to its default of ``public`` only, never refer - to the name ``public`` as an explicit schema name otherwise, and - refer to all other schema names explicitly when building - up a :class:`.Table` object. The options described here are only for those users - who can't, or prefer not to, stay within these guidelines. - -Note that **in all cases**, the "default" schema is always reflected as ``None``. -The "default" schema on Postgresql is that which is returned by the -Postgresql ``current_schema()`` function. On a typical Postgresql installation, -this is the name ``public``. So a table that refers to another which is -in the ``public`` (i.e. default) schema will always have the ``.schema`` attribute -set to ``None``. + The description of Postgresql schema reflection behavior is complex, and + is the product of many years of dealing with widely varied use cases and + user preferences. But in fact, there's no need to understand any of it if + you just stick to the simplest use pattern: leave the ``search_path`` set + to its default of ``public`` only, never refer to the name ``public`` as + an explicit schema name otherwise, and refer to all other schema names + explicitly when building up a :class:`.Table` object. The options + described here are only for those users who can't, or prefer not to, stay + within these guidelines. + +Note that **in all cases**, the "default" schema is always reflected as +``None``. The "default" schema on Postgresql is that which is returned by the +Postgresql ``current_schema()`` function. On a typical Postgresql +installation, this is the name ``public``. So a table that refers to another +which is in the ``public`` (i.e. default) schema will always have the +``.schema`` attribute set to ``None``. .. versionadded:: 0.9.2 Added the ``postgresql_ignore_search_path`` - dialect-level option accepted by :class:`.Table` and :meth:`.MetaData.reflect`. + dialect-level option accepted by :class:`.Table` and + :meth:`.MetaData.reflect`. .. seealso:: - `The Schema Search Path <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_ - on the Postgresql website. + `The Schema Search Path + <http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_ + - on the Postgresql website. INSERT/UPDATE...RETURNING ------------------------- @@ -273,19 +287,19 @@ produces a statement equivalent to:: SELECT CAST('some text' AS TSVECTOR) AS anon_1 Full Text Searches in Postgresql are influenced by a combination of: the -PostgresSQL setting of ``default_text_search_config``, the ``regconfig`` used to -build the GIN/GiST indexes, and the ``regconfig`` optionally passed in during a -query. +PostgresSQL setting of ``default_text_search_config``, the ``regconfig`` used +to build the GIN/GiST indexes, and the ``regconfig`` optionally passed in +during a query. 
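As a sketch of how such a pre-computed index might be declared (the table and
index names here are hypothetical, and assume the expression-based
:class:`.Index` support available in recent SQLAlchemy versions), an index
over a ``to_tsvector()`` expression with an explicit ``regconfig`` can be
created using the ``postgresql_using`` keyword::

    from sqlalchemy import Index, func

    idx = Index(
        'ix_mytable_title_tsv',
        func.to_tsvector('english', mytable.c.title),
        postgresql_using='gin'
    )

The ``'english'`` regconfig baked into the index expression is what a query
must match in order for the planner to use the index, which motivates the
``postgresql_regconfig`` option described next.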
When performing a Full Text Search against a column that has a GIN or
-GiST index that is already pre-computed (which is common on full text searches)
-one may need to explicitly pass in a particular PostgresSQL ``regconfig`` value
-to ensure the query-planner utilizes the index and does not re-compute the
-column on demand.
+GiST index that is already pre-computed (which is common on full text
+searches) one may need to explicitly pass in a particular Postgresql
+``regconfig`` value to ensure the query-planner utilizes the index and does
+not re-compute the column on demand.

-In order to provide for this explicit query planning, or to use different search
-strategies, the ``match`` method accepts a ``postgresql_regconfig`` keyword
-argument.
+In order to provide for this explicit query planning, or to use different
+search strategies, the ``match`` method accepts a ``postgresql_regconfig``
+keyword argument::

    select([mytable.c.id]).where(
        mytable.c.title.match('somestring', postgresql_regconfig='english')
    )

Emits the equivalent of::

    SELECT mytable.id FROM mytable
    WHERE mytable.title @@ to_tsquery('english', 'somestring')

-One can also specifically pass in a `'regconfig'` value to the ``to_tsvector()``
-command as the initial argument.
+One can also specifically pass in a `'regconfig'` value to the
+``to_tsvector()`` command as the initial argument::

    select([mytable.c.id]).where(
            func.to_tsvector('english', mytable.c.title )\
            .match('somestring', postgresql_regconfig='english')
    )

produces a statement equivalent to::

    SELECT mytable.id FROM mytable
    WHERE to_tsvector('english', mytable.title) @@
        to_tsquery('english', 'somestring')

-It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from PostgresSQL
-to ensure that you are generating queries with SQLAlchemy that take full
-advantage of any indexes you may have created for full text search.
+It is recommended that you use the ``EXPLAIN ANALYZE...`` tool from
+Postgresql to ensure that you are generating queries with SQLAlchemy that
+take full advantage of any indexes you may have created for full text search.

FROM ONLY ...
------------------------

@@ -413,11 +427,11 @@ RESERVED_WORDS = set(
    "default", "deferrable", "desc", "distinct", "do", "else", "end",
    "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
    "having", "in", "initially", "intersect", "into", "leading", "limit",
-    "localtime", "localtimestamp", "new", "not", "null", "of", "off", "offset",
-    "old", "on", "only", "or", "order", "placing", "primary", "references",
-    "returning", "select", "session_user", "some", "symmetric", "table",
-    "then", "to", "trailing", "true", "union", "unique", "user", "using",
-    "variadic", "when", "where", "window", "with", "authorization",
+    "localtime", "localtimestamp", "new", "not", "null", "of", "off",
+    "offset", "old", "on", "only", "or", "order", "placing", "primary",
+    "references", "returning", "select", "session_user", "some", "symmetric",
+    "table", "then", "to", "trailing", "true", "union", "unique", "user",
+    "using", "variadic", "when", "where", "window", "with", "authorization",
    "between", "binary", "cross", "current_schema", "freeze", "full",
    "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
    "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
@@ -534,7 +548,8 @@ class UUID(sqltypes.TypeEngine):
        """
        if as_uuid and _python_UUID is None:
            raise NotImplementedError(
-                "This version of Python does not support the native UUID type."
+                "This version of Python does not support "
+                "the native UUID type."
) self.as_uuid = as_uuid @@ -1047,7 +1062,8 @@ class ENUM(sqltypes.Enum): return if not checkfirst or \ - not bind.dialect.has_type(bind, self.name, schema=self.schema): + not bind.dialect.has_type( + bind, self.name, schema=self.schema): bind.execute(CreateEnumType(self)) def drop(self, bind=None, checkfirst=True): @@ -1557,16 +1573,19 @@ class DropEnumType(schema._CreateDropBase): class PGExecutionContext(default.DefaultExecutionContext): def fire_sequence(self, seq, type_): - return self._execute_scalar(("select nextval('%s')" % - self.dialect.identifier_preparer.format_sequence(seq)), type_) + return self._execute_scalar(( + "select nextval('%s')" % + self.dialect.identifier_preparer.format_sequence(seq)), type_) def get_insert_default(self, column): - if column.primary_key and column is column.table._autoincrement_column: + if column.primary_key and \ + column is column.table._autoincrement_column: if column.server_default and column.server_default.has_argument: # pre-execute passive defaults on primary key columns return self._execute_scalar("select %s" % - column.server_default.arg, column.type) + column.server_default.arg, + column.type) elif (column.default is None or (column.default.is_sequence and @@ -1679,8 +1698,8 @@ class PGDialect(default.DefaultDialect): else: return None - _isolation_lookup = set(['SERIALIZABLE', - 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) + _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED', + 'READ COMMITTED', 'REPEATABLE READ']) def set_isolation_level(self, connection, level): level = level.replace('_', ' ') @@ -1745,7 +1764,8 @@ class PGDialect(default.DefaultDialect): return connection.scalar("select current_schema()") def has_schema(self, connection, schema): - query = "select nspname from pg_namespace where lower(nspname)=:schema" + query = ("select nspname from pg_namespace " + "where lower(nspname)=:schema") cursor = connection.execute( sql.text( query, @@ -1764,8 +1784,8 @@ class PGDialect(default.DefaultDialect): cursor = connection.execute( sql.text( "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=current_schema() and " - "relname=:name", + "n.oid=c.relnamespace where n.nspname=current_schema() " + "and relname=:name", bindparams=[ sql.bindparam('name', util.text_type(table_name), type_=sqltypes.Unicode)] @@ -1779,9 +1799,11 @@ class PGDialect(default.DefaultDialect): "relname=:name", bindparams=[ sql.bindparam('name', - util.text_type(table_name), type_=sqltypes.Unicode), + util.text_type(table_name), + type_=sqltypes.Unicode), sql.bindparam('schema', - util.text_type(schema), type_=sqltypes.Unicode)] + util.text_type(schema), + type_=sqltypes.Unicode)] ) ) return bool(cursor.first()) @@ -1810,7 +1832,8 @@ class PGDialect(default.DefaultDialect): sql.bindparam('name', util.text_type(sequence_name), type_=sqltypes.Unicode), sql.bindparam('schema', - util.text_type(schema), type_=sqltypes.Unicode) + util.text_type(schema), + type_=sqltypes.Unicode) ] ) ) @@ -1996,7 +2019,8 @@ class PGDialect(default.DefaultDialect): bindparams=[ sql.bindparam('table_oid', type_=sqltypes.Integer)], typemap={ - 'attname': sqltypes.Unicode, 'default': sqltypes.Unicode} + 'attname': sqltypes.Unicode, + 'default': sqltypes.Unicode} ) c = connection.execute(s, table_oid=table_oid) rows = c.fetchall() @@ -2196,8 +2220,10 @@ class PGDialect(default.DefaultDialect): FK_REGEX = re.compile( r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)' r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?' 
- r'[\s]?(ON UPDATE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' - r'[\s]?(ON DELETE (CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(ON UPDATE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' + r'[\s]?(ON DELETE ' + r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?' r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?' r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?' ) @@ -2218,7 +2244,8 @@ class PGDialect(default.DefaultDialect): if deferrable is not None: deferrable = True if deferrable == 'DEFERRABLE' else False constrained_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s*', constrained_columns)] + for x in re.split( + r'\s*,\s*', constrained_columns)] if postgresql_ignore_search_path: # when ignoring search path, we use the actual schema @@ -2240,7 +2267,8 @@ class PGDialect(default.DefaultDialect): referred_table = preparer._unquote_identifier(referred_table) referred_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s', referred_columns)] + for x in + re.split(r'\s*,\s', referred_columns)] fkey_d = { 'name': conname, 'constrained_columns': constrained_columns, @@ -2360,7 +2388,8 @@ class PGDialect(default.DefaultDialect): FROM pg_catalog.pg_constraint cons join pg_attribute a - on cons.conrelid = a.attrelid AND a.attnum = ANY(cons.conkey) + on cons.conrelid = a.attrelid AND + a.attnum = ANY(cons.conkey) WHERE cons.conrelid = :table_oid AND cons.contype = 'u' diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py index 02d7a8998..e8ebc75dd 100644 --- a/lib/sqlalchemy/dialects/postgresql/constraints.py +++ b/lib/sqlalchemy/dialects/postgresql/constraints.py @@ -13,7 +13,8 @@ class ExcludeConstraint(ColumnCollectionConstraint): Defines an EXCLUDE constraint as described in the `postgres documentation`__. - __ http://www.postgresql.org/docs/9.0/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE + __ http://www.postgresql.org/docs/9.0/\ +static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE """ __visit_name__ = 'exclude_constraint' diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py index 8db55d6bc..9601edc41 100644 --- a/lib/sqlalchemy/dialects/postgresql/hstore.py +++ b/lib/sqlalchemy/dialects/postgresql/hstore.py @@ -69,7 +69,8 @@ def _parse_hstore(hstore_str): pair_match = HSTORE_PAIR_RE.match(hstore_str) while pair_match is not None: - key = pair_match.group('key').replace(r'\"', '"').replace("\\\\", "\\") + key = pair_match.group('key').replace(r'\"', '"').replace( + "\\\\", "\\") if pair_match.group('value_null'): value = None else: @@ -141,15 +142,16 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine): data_table.c.data + {"k1": "v1"} - For a full list of special methods see :class:`.HSTORE.comparator_factory`. + For a full list of special methods see + :class:`.HSTORE.comparator_factory`. For usage with the SQLAlchemy ORM, it may be desirable to combine the usage of :class:`.HSTORE` with :class:`.MutableDict` dictionary now part of the :mod:`sqlalchemy.ext.mutable` extension. This extension will allow "in-place" changes to the dictionary, e.g. 
addition of new keys or replacement/removal of existing - keys to/from the current dictionary, to produce events which will be detected - by the unit of work:: + keys to/from the current dictionary, to produce events which will be + detected by the unit of work:: from sqlalchemy.ext.mutable import MutableDict @@ -168,9 +170,9 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine): session.commit() When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM - will not be alerted to any changes to the contents of an existing dictionary, - unless that dictionary value is re-assigned to the HSTORE-attribute itself, - thus generating a change event. + will not be alerted to any changes to the contents of an existing + dictionary, unless that dictionary value is re-assigned to the + HSTORE-attribute itself, thus generating a change event. .. versionadded:: 0.8 diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py index 6e0c5a4b1..25ac342af 100644 --- a/lib/sqlalchemy/dialects/postgresql/json.py +++ b/lib/sqlalchemy/dialects/postgresql/json.py @@ -27,12 +27,13 @@ class JSONElement(elements.BinaryExpression): expr = mytable.c.json_data['some_key'] The expression typically compiles to a JSON access such as ``col -> key``. - Modifiers are then available for typing behavior, including :meth:`.JSONElement.cast` - and :attr:`.JSONElement.astext`. + Modifiers are then available for typing behavior, including + :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`. """ - def __init__(self, left, right, astext=False, opstring=None, result_type=None): + def __init__(self, left, right, astext=False, + opstring=None, result_type=None): self._astext = astext if opstring is None: if hasattr(right, '__iter__') and \ @@ -129,15 +130,16 @@ class JSON(sqltypes.TypeEngine): * Path index operations returning text (required for text comparison):: - data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value' + data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\ + 'some value' - Index operations return an instance of :class:`.JSONElement`, which represents - an expression such as ``column -> index``. This element then defines - methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast` - for setting up type behavior. + Index operations return an instance of :class:`.JSONElement`, which + represents an expression such as ``column -> index``. This element then + defines methods such as :attr:`.JSONElement.astext` and + :meth:`.JSONElement.cast` for setting up type behavior. - The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect - in-place mutations to the structure. In order to detect these, the + The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not + detect in-place mutations to the structure. In order to detect these, the :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will allow "in-place" changes to the datastructure to produce events which will be detected by the unit of work. See the example at :class:`.HSTORE` @@ -241,15 +243,16 @@ class JSONB(JSON): * Path index operations returning text (required for text comparison):: - data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == 'some value' + data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\ + 'some value' - Index operations return an instance of :class:`.JSONElement`, which represents - an expression such as ``column -> index``. 
This element then defines - methods such as :attr:`.JSONElement.astext` and :meth:`.JSONElement.cast` - for setting up type behavior. + Index operations return an instance of :class:`.JSONElement`, which + represents an expression such as ``column -> index``. This element then + defines methods such as :attr:`.JSONElement.astext` and + :meth:`.JSONElement.cast` for setting up type behavior. - The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not detect - in-place mutations to the structure. In order to detect these, the + The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not + detect in-place mutations to the structure. In order to detect these, the :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will allow "in-place" changes to the datastructure to produce events which will be detected by the unit of work. See the example at :class:`.HSTORE` diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index b7971e8de..e6450c97f 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -9,7 +9,8 @@ .. dialect:: postgresql+psycopg2 :name: psycopg2 :dbapi: psycopg2 - :connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...] + :connectstring: postgresql+psycopg2://user:password@host:port/dbname\ +[?key=value&key=value...] :url: http://pypi.python.org/pypi/psycopg2/ psycopg2 Connect Arguments @@ -21,9 +22,9 @@ psycopg2-specific keyword arguments which are accepted by * ``server_side_cursors``: Enable the usage of "server side cursors" for SQL statements which support this feature. What this essentially means from a psycopg2 point of view is that the cursor is created using a name, e.g. - ``connection.cursor('some name')``, which has the effect that result rows are - not immediately pre-fetched and buffered after statement execution, but are - instead left on the server and only retrieved as needed. SQLAlchemy's + ``connection.cursor('some name')``, which has the effect that result rows + are not immediately pre-fetched and buffered after statement execution, but + are instead left on the server and only retrieved as needed. SQLAlchemy's :class:`~sqlalchemy.engine.ResultProxy` uses special row-buffering behavior when this feature is enabled, such that groups of 100 rows at a time are fetched over the wire to reduce conversational overhead. @@ -54,7 +55,8 @@ using ``host`` as an additional keyword argument:: See also: -`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_ +`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static\ +/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_ Per-Statement/Connection Execution Options ------------------------------------------- @@ -90,11 +92,13 @@ Typically, this can be changed to ``utf-8``, as a more useful default:: A second way to affect the client encoding is to set it within Psycopg2 locally. 
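As a rough sketch of the ``stream_results`` execution option listed above
(the URL and table name here are purely illustrative)::

    from sqlalchemy import create_engine, text

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test")

    with engine.connect() as conn:
        # request a named (server-side) cursor for statements on this
        # connection; rows are then fetched in batches rather than
        # buffered all at once
        streaming = conn.execution_options(stream_results=True)
        for row in streaming.execute(text("SELECT * FROM big_table")):
            print(row)
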
SQLAlchemy will call psycopg2's ``set_client_encoding()`` -method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) +method (see: +http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding) on all new connections based on the value passed to :func:`.create_engine` using the ``client_encoding`` parameter:: - engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8') + engine = create_engine("postgresql://user:pass@host/dbname", + client_encoding='utf8') This overrides the encoding specified in the Postgresql client configuration. @@ -128,11 +132,12 @@ Psycopg2 Transaction Isolation Level As discussed in :ref:`postgresql_isolation_level`, all Postgresql dialects support setting of transaction isolation level both via the ``isolation_level`` parameter passed to :func:`.create_engine`, -as well as the ``isolation_level`` argument used by :meth:`.Connection.execution_options`. -When using the psycopg2 dialect, these options make use of -psycopg2's ``set_isolation_level()`` connection method, rather than -emitting a Postgresql directive; this is because psycopg2's API-level -setting is always emitted at the start of each transaction in any case. +as well as the ``isolation_level`` argument used by +:meth:`.Connection.execution_options`. When using the psycopg2 dialect, these +options make use of psycopg2's ``set_isolation_level()`` connection method, +rather than emitting a Postgresql directive; this is because psycopg2's +API-level setting is always emitted at the start of each transaction in any +case. The psycopg2 dialect supports these constants for isolation level: @@ -166,35 +171,38 @@ The psycopg2 dialect will log Postgresql NOTICE messages via the HSTORE type ------------ -The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of the -HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension +The ``psycopg2`` DBAPI includes an extension to natively handle marshalling of +the HSTORE type. The SQLAlchemy psycopg2 dialect will enable this extension by default when it is detected that the target database has the HSTORE type set up for use. In other words, when the dialect makes the first connection, a sequence like the following is performed: -1. Request the available HSTORE oids using ``psycopg2.extras.HstoreAdapter.get_oids()``. - If this function returns a list of HSTORE identifiers, we then determine that - the ``HSTORE`` extension is present. +1. Request the available HSTORE oids using + ``psycopg2.extras.HstoreAdapter.get_oids()``. + If this function returns a list of HSTORE identifiers, we then determine + that the ``HSTORE`` extension is present. 2. If the ``use_native_hstore`` flag is at its default of ``True``, and we've detected that ``HSTORE`` oids are available, the ``psycopg2.extensions.register_hstore()`` extension is invoked for all connections. -The ``register_hstore()`` extension has the effect of **all Python dictionaries -being accepted as parameters regardless of the type of target column in SQL**. -The dictionaries are converted by this extension into a textual HSTORE expression. -If this behavior is not desired, disable the -use of the hstore extension by setting ``use_native_hstore`` to ``False`` as follows:: +The ``register_hstore()`` extension has the effect of **all Python +dictionaries being accepted as parameters regardless of the type of target +column in SQL**. The dictionaries are converted by this extension into a +textual HSTORE expression. 
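For example, given a hypothetical table with a plain ``VARCHAR`` column, the
dictionary below is not rejected; with ``register_hstore()`` in effect it is
converted into a textual HSTORE expression (roughly ``'"k1"=>"v1"'``) and
passed along as-is (a sketch only; the engine URL and table are
illustrative)::

    from sqlalchemy import (create_engine, MetaData, Table, Column,
                            Integer, String)

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test")
    metadata = MetaData()

    # "name" is an ordinary VARCHAR column, *not* HSTORE
    t = Table('t', metadata,
              Column('id', Integer, primary_key=True),
              Column('name', String(100)))

    engine.execute(t.insert(), name={"k1": "v1"})
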
If this behavior is not desired, disable the +use of the hstore extension by setting ``use_native_hstore`` to ``False`` as +follows:: engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test", use_native_hstore=False) -The ``HSTORE`` type is **still supported** when the ``psycopg2.extensions.register_hstore()`` -extension is not used. It merely means that the coercion between Python dictionaries and the HSTORE +The ``HSTORE`` type is **still supported** when the +``psycopg2.extensions.register_hstore()`` extension is not used. It merely +means that the coercion between Python dictionaries and the HSTORE string format, on both the parameter side and the result side, will take -place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` which -may be more performant. +place within SQLAlchemy's own marshalling logic, and not that of ``psycopg2`` +which may be more performant. """ from __future__ import absolute_import @@ -294,12 +302,15 @@ class PGExecutionContext_psycopg2(PGExecutionContext): if self.dialect.server_side_cursors: is_server_side = \ self.execution_options.get('stream_results', True) and ( - (self.compiled and isinstance(self.compiled.statement, expression.Selectable) + (self.compiled and isinstance(self.compiled.statement, + expression.Selectable) or ( (not self.compiled or - isinstance(self.compiled.statement, expression.TextClause)) - and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement)) + isinstance(self.compiled.statement, + expression.TextClause)) + and self.statement and SERVER_SIDE_CURSOR_RE.match( + self.statement)) ) ) else: @@ -310,7 +321,8 @@ class PGExecutionContext_psycopg2(PGExecutionContext): if is_server_side: # use server-side cursors: # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html - ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) + ident = "c_%s_%s" % (hex(id(self))[2:], + hex(_server_side_id())[2:]) return self._dbapi_connection.cursor(ident) else: return self._dbapi_connection.cursor() diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py index fc785d450..3ebd0135f 100644 --- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py +++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py @@ -9,7 +9,8 @@ .. dialect:: postgresql+pypostgresql :name: py-postgresql :dbapi: pypostgresql - :connectstring: postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...] + :connectstring: postgresql+pypostgresql://user:password@host:port/dbname\ +[?key=value&key=value...] :url: http://python.projects.pgfoundry.org/ diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 7455bf3fe..306f45023 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -39,8 +39,8 @@ Two things to note: one column, if the table has a composite (i.e. multi-column) primary key. This is regardless of the AUTOINCREMENT keyword being present or not. -To specifically render the AUTOINCREMENT keyword on the primary key column when -rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table +To specifically render the AUTOINCREMENT keyword on the primary key column +when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table construct:: Table('sometable', metadata, @@ -63,29 +63,29 @@ Database Locking Behavior / Concurrency Note that SQLite is not designed for a high level of concurrency. 
The database itself, being a file, is locked completely during write operations and within transactions, meaning exactly one connection has exclusive access to the -database during this period - all other connections will be blocked during this -time. - -The Python DBAPI specification also calls for a connection model that is always -in a transaction; there is no BEGIN method, only commit and rollback. This -implies that a SQLite DBAPI driver would technically allow only serialized -access to a particular database file at all times. The pysqlite driver attempts -to ameliorate this by deferring the actual BEGIN statement until the first DML -(INSERT, UPDATE, or DELETE) is received within a transaction. While this breaks -serializable isolation, it at least delays the exclusive locking inherent in -SQLite's design. - -SQLAlchemy's default mode of usage with the ORM is known as "autocommit=False", -which means the moment the :class:`.Session` begins to be used, a transaction -is begun. As the :class:`.Session` is used, the autoflush feature, also on by -default, will flush out pending changes to the database before each query. The -effect of this is that a :class:`.Session` used in its default mode will often -emit DML early on, long before the transaction is actually committed. This -again will have the effect of serializing access to the SQLite database. If -highly concurrent reads are desired against the SQLite database, it is advised -that the autoflush feature be disabled, and potentially even that autocommit be -re-enabled, which has the effect of each SQL statement and flush committing -changes immediately. +database during this period - all other connections will be blocked during +this time. + +The Python DBAPI specification also calls for a connection model that is +always in a transaction; there is no BEGIN method, only commit and rollback. +This implies that a SQLite DBAPI driver would technically allow only +serialized access to a particular database file at all times. The pysqlite +driver attempts to ameliorate this by deferring the actual BEGIN statement +until the first DML (INSERT, UPDATE, or DELETE) is received within a +transaction. While this breaks serializable isolation, it at least delays the +exclusive locking inherent in SQLite's design. + +SQLAlchemy's default mode of usage with the ORM is known as +"autocommit=False", which means the moment the :class:`.Session` begins to be +used, a transaction is begun. As the :class:`.Session` is used, the autoflush +feature, also on by default, will flush out pending changes to the database +before each query. The effect of this is that a :class:`.Session` used in its +default mode will often emit DML early on, long before the transaction is +actually committed. This again will have the effect of serializing access to +the SQLite database. If highly concurrent reads are desired against the SQLite +database, it is advised that the autoflush feature be disabled, and +potentially even that autocommit be re-enabled, which has the effect of each +SQL statement and flush committing changes immediately. For more information on SQLite's lack of concurrency by design, please see `Situations Where Another RDBMS May Work Better - High Concurrency @@ -105,8 +105,8 @@ Constraint checking on SQLite has three prerequisites: * At least version 3.6.19 of SQLite must be in use * The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY or SQLITE_OMIT_TRIGGER symbols enabled. 
-* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections - before use. +* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all + connections before use. SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically for new connections through the usage of events:: @@ -122,8 +122,8 @@ new connections through the usage of events:: .. seealso:: - `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ - on - the SQLite web site. + `SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ + - on the SQLite web site. :ref:`event_toplevel` - SQLAlchemy event API. @@ -189,8 +189,9 @@ from ... import util from ...engine import default, reflection from ...sql import compiler -from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, INTEGER, REAL, - NUMERIC, SMALLINT, TEXT, TIMESTAMP, VARCHAR) +from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT, + INTEGER, REAL, NUMERIC, SMALLINT, TEXT, + TIMESTAMP, VARCHAR) class _DateTimeMixin(object): @@ -225,7 +226,8 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime): The default string storage format is:: - "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:%(second)02d.%(microsecond)06d" + "%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(min)02d:\ +%(second)02d.%(microsecond)06d" e.g.:: @@ -238,12 +240,13 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime): from sqlalchemy.dialects.sqlite import DATETIME dt = DATETIME( - storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(min)02d:%(second)02d", + storage_format="%(year)04d/%(month)02d/%(day)02d %(hour)02d:\ +%(min)02d:%(second)02d", regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)" ) - :param storage_format: format string which will be applied to the dict with - keys year, month, day, hour, minute, second, and microsecond. + :param storage_format: format string which will be applied to the dict + with keys year, month, day, hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is @@ -391,12 +394,13 @@ class TIME(_DateTimeMixin, sqltypes.Time): from sqlalchemy.dialects.sqlite import TIME t = TIME( - storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d", + storage_format="%(hour)02d-%(minute)02d-%(second)02d-\ +%(microsecond)06d", regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") ) - :param storage_format: format string which will be applied to the dict with - keys hour, minute, second, and microsecond. + :param storage_format: format string which will be applied to the dict + with keys hour, minute, second, and microsecond. :param regexp: regular expression which will be applied to incoming result rows. If the regexp contains named groups, the resulting match dict is @@ -582,8 +586,9 @@ class SQLiteDDLCompiler(compiler.DDLCompiler): if local_table.schema != remote_table.schema: return None else: - return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint( - constraint) + return super( + SQLiteDDLCompiler, + self).visit_foreign_key_constraint(constraint) def define_constraint_remote_table(self, constraint, table, preparer): """Format the remote table clause of a CREATE CONSTRAINT clause.""" @@ -631,8 +636,8 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): if (not self.omit_schema and use_schema and getattr(index.table, "schema", None)): - result = self.quote_schema(index.table.schema, - index.table.quote_schema) + "." 
+ result
+            result = self.quote_schema(
+                index.table.schema, index.table.quote_schema) + "." + result
         return result

@@ -642,8 +647,8 @@ class SQLiteExecutionContext(default.DefaultExecutionContext):
         return self.execution_options.get("sqlite_raw_colnames", False)

     def _translate_colname(self, colname):
-        # adjust for dotted column names. SQLite in the case of UNION may store
-        # col names as "tablename.colname" in cursor.description
+        # adjust for dotted column names. SQLite in the case of UNION may
+        # store col names as "tablename.colname" in cursor.description
         if not self._preserve_raw_colnames and "." in colname:
             return colname.split(".")[1], colname
         else:
@@ -686,9 +691,10 @@ class SQLiteDialect(default.DefaultDialect):
         default.DefaultDialect.__init__(self, **kwargs)
         self.isolation_level = isolation_level

-        # this flag used by pysqlite dialect, and perhaps others in the future,
-        # to indicate the driver is handling date/timestamp conversions (and
-        # perhaps datetime/time as well on some hypothetical driver ?)
+        # this flag is used by the pysqlite dialect, and perhaps others in
+        # the future, to indicate the driver is handling date/timestamp
+        # conversions (and perhaps datetime/time as well on some hypothetical
+        # driver ?)
         self.native_datetime = native_datetime

         if self.dbapi is not None:
@@ -953,7 +959,8 @@ class SQLiteDialect(default.DefaultDialect):
             row = c.fetchone()
             if row is None:
                 break
-            (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
+            (numerical_id, rtbl, lcol, rcol) = (
+                row[0], row[2], row[3], row[4])
             self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol)

         return fkeys
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
index 54bc19763..c67333283 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -88,7 +88,8 @@ nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES can
 be forced if one configures "native_datetime=True" on create_engine()::

     engine = create_engine('sqlite://',
-        connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
+        connect_args={'detect_types':
+                      sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
         native_datetime=True
     )

@@ -96,7 +97,8 @@ With this flag enabled, the DATE and TIMESTAMP types (but note - not the
 DATETIME or TIME types...confused yet ?) will not perform any bind parameter
 or result processing. Execution of "func.current_date()" will return a string.
 "func.current_timestamp()" is registered as returning a DATETIME type in
-SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
+SQLAlchemy, so this function still receives SQLAlchemy-level result
+processing.

 .. _pysqlite_threading_pooling:

@@ -111,12 +113,12 @@ did not allow a ``:memory:`` database to be used in multiple threads under any
 circumstances.

 Pysqlite does include a now-undocumented flag known as
-``check_same_thread`` which will disable this check, however note that pysqlite
-connections are still not safe to use in concurrently in multiple threads.
-In particular, any statement execution calls would need to be externally
-mutexed, as Pysqlite does not provide for thread-safe propagation of error
-messages among other things. So while even ``:memory:`` databases can be
-shared among threads in modern SQLite, Pysqlite doesn't provide enough
+``check_same_thread`` which will disable this check; however, note that
+pysqlite connections are still not safe to use concurrently in multiple
+threads.
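The flag travels to pysqlite through SQLAlchemy's generic ``connect_args``
mechanism; a minimal sketch, essentially the same recipe as the
memory-database example further below::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import StaticPool

    # pass the raw pysqlite flag through connect_args; StaticPool keeps
    # a single connection that all threads will share
    engine = create_engine("sqlite://",
                           connect_args={'check_same_thread': False},
                           poolclass=StaticPool)

Even so, the caveats above still apply.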
In particular, any statement execution calls would need to be
+externally mutexed, as Pysqlite does not provide for thread-safe propagation
+of error messages among other things. So while even ``:memory:`` databases
+can be shared among threads in modern SQLite, Pysqlite doesn't provide enough
 thread-safety to make this usage worth it.

 SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
@@ -142,8 +144,8 @@ SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
 Using a Memory Database in Multiple Threads
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

-To use a ``:memory:`` database in a multithreaded scenario, the same connection
-object must be shared among threads, since the database exists
+To use a ``:memory:`` database in a multithreaded scenario, the same
+connection object must be shared among threads, since the database exists
 only within the scope of that connection. The
 :class:`.StaticPool` implementation will maintain a single connection
 globally, and the ``check_same_thread`` flag can be passed to Pysqlite
@@ -164,10 +166,10 @@ Due to the way SQLite deals with temporary tables, if you wish to use a
 temporary table in a file-based SQLite database across multiple checkouts
 from the connection pool, such as when using an ORM :class:`.Session` where
 the temporary table should continue to remain after :meth:`.Session.commit` or
-:meth:`.Session.rollback` is called, a pool which maintains a single connection must
-be used. Use :class:`.SingletonThreadPool` if the scope is only needed
-within the current thread, or :class:`.StaticPool` is scope is needed within
-multiple threads for this case::
+:meth:`.Session.rollback` is called, a pool which maintains a single
+connection must be used. Use :class:`.SingletonThreadPool` if the scope is
+only needed within the current thread, or :class:`.StaticPool` if scope is
+needed within multiple threads for this case::

     # maintain the same connection per thread
     from sqlalchemy.pool import SingletonThreadPool
@@ -215,7 +217,8 @@ a :meth:`.ConnectionEvents.begin` handler to achieve this::

     from sqlalchemy import create_engine, event

-    engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE')
+    engine = create_engine("sqlite:///myfile.db",
+                           isolation_level='SERIALIZABLE')

     @event.listens_for(engine, "begin")
     def do_begin(conn):
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index 713405e1b..26f5ef04a 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -273,8 +273,9 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
                     self._enable_identity_insert = False

         if self._enable_identity_insert:
-            self.cursor.execute("SET IDENTITY_INSERT %s ON" %
-                                self.dialect.identifier_preparer.format_table(tbl))
+            self.cursor.execute(
+                "SET IDENTITY_INSERT %s ON" %
+                self.dialect.identifier_preparer.format_table(tbl))

         if self.isddl:
             # TODO: to enhance this, we can detect "ddl in tran" on the
@@ -329,9 +330,9 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
         limit = select._limit
         if limit:
             # if select._limit == 1:
-            #s += "FIRST "
+            # s += "FIRST "
             # else:
-            #s += "TOP %s " % (select._limit,)
+            # s += "TOP %s " % (select._limit,)
             s += "TOP %s " % (limit,)
         offset = select._offset
         if offset:
@@ -520,8 +521,8 @@ class SybaseDialect(default.DefaultDialect):
         for (name, type_, nullable, autoincrement, default, precision, scale,
              length) in results:
             col_info = self._get_column_info(name, type_, bool(nullable),
-                                             bool(
-                                                 autoincrement), default, precision, scale,
+                                             bool(autoincrement),
+                                             default, precision, scale,
                                              length)

             columns.append(col_info)
diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py
index b4c139ea0..cb76d1379 100644
--- a/lib/sqlalchemy/dialects/sybase/pyodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -9,7 +9,8 @@
 .. dialect:: sybase+pyodbc
     :name: PyODBC
     :dbapi: pyodbc
-    :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>[/<database>]
+    :connectstring: sybase+pyodbc://<username>:<password>@<dsnname>\
+[/<database>]
     :url: http://pypi.python.org/pypi/pyodbc/

diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py
index a60a8fea2..6843eb480 100644
--- a/lib/sqlalchemy/dialects/sybase/pysybase.py
+++ b/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -9,7 +9,8 @@
 .. dialect:: sybase+pysybase
     :name: Python-Sybase
     :dbapi: Sybase
-    :connectstring: sybase+pysybase://<username>:<password>@<dsn>/[database name]
+    :connectstring: sybase+pysybase://<username>:<password>@<dsn>/\
+[database name]
     :url: http://python-sybase.sourceforge.net/

 Unicode Support