Diffstat (limited to 'lib/sqlalchemy/dialects')
 lib/sqlalchemy/dialects/firebird/base.py           |  20
 lib/sqlalchemy/dialects/mssql/base.py              | 173
 lib/sqlalchemy/dialects/mysql/base.py              | 255
 lib/sqlalchemy/dialects/mysql/gaerdbms.py          |  16
 lib/sqlalchemy/dialects/mysql/mysqldb.py           |  23
 lib/sqlalchemy/dialects/mysql/pymysql.py           |   3
 lib/sqlalchemy/dialects/oracle/base.py             | 248
 lib/sqlalchemy/dialects/postgresql/__init__.py     |   2
 lib/sqlalchemy/dialects/postgresql/base.py         |  99
 lib/sqlalchemy/dialects/postgresql/json.py         |   7
 lib/sqlalchemy/dialects/postgresql/psycopg2.py     |  26
 lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py |  49
 lib/sqlalchemy/dialects/sqlite/base.py             | 384
 lib/sqlalchemy/dialects/sybase/base.py             |  27
 14 files changed, 939 insertions(+), 393 deletions(-)
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index 36229a105..74e8abfc2 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -180,16 +180,16 @@ ischema_names = {
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
- def visit_boolean(self, type_):
- return self.visit_SMALLINT(type_)
+ def visit_boolean(self, type_, **kw):
+ return self.visit_SMALLINT(type_, **kw)
- def visit_datetime(self, type_):
- return self.visit_TIMESTAMP(type_)
+ def visit_datetime(self, type_, **kw):
+ return self.visit_TIMESTAMP(type_, **kw)
- def visit_TEXT(self, type_):
+ def visit_TEXT(self, type_, **kw):
return "BLOB SUB_TYPE 1"
- def visit_BLOB(self, type_):
+ def visit_BLOB(self, type_, **kw):
return "BLOB SUB_TYPE 0"
def _extend_string(self, type_, basic):
@@ -199,16 +199,16 @@ class FBTypeCompiler(compiler.GenericTypeCompiler):
else:
return '%s CHARACTER SET %s' % (basic, charset)
- def visit_CHAR(self, type_):
- basic = super(FBTypeCompiler, self).visit_CHAR(type_)
+ def visit_CHAR(self, type_, **kw):
+ basic = super(FBTypeCompiler, self).visit_CHAR(type_, **kw)
return self._extend_string(type_, basic)
- def visit_VARCHAR(self, type_):
+ def visit_VARCHAR(self, type_, **kw):
if not type_.length:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
- basic = super(FBTypeCompiler, self).visit_VARCHAR(type_)
+ basic = super(FBTypeCompiler, self).visit_VARCHAR(type_, **kw)
return self._extend_string(type_, basic)
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index dad02ee0f..92d7e4ab3 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -226,6 +226,53 @@ The DATE and TIME types are not available for MSSQL 2005 and
previous - if a server version below 2008 is detected, DDL
for these types will be issued as DATETIME.
+.. _mssql_large_type_deprecation:
+
+Large Text/Binary Type Deprecation
+----------------------------------
+
+Per `SQL Server 2012/2014 Documentation <http://technet.microsoft.com/en-us/library/ms187993.aspx>`_,
+the ``NTEXT``, ``TEXT`` and ``IMAGE`` datatypes are to be removed from SQL Server
+in a future release. SQLAlchemy normally relates these types to the
+:class:`.UnicodeText`, :class:`.Text` and :class:`.LargeBinary` datatypes.
+
+In order to accommodate this change, a new flag ``deprecate_large_types``
+is added to the dialect, which will be automatically set based on detection
+of the server version in use, if not otherwise set by the user. The
+behavior of this flag is as follows:
+
+* When this flag is ``True``, the :class:`.UnicodeText`, :class:`.Text` and
+ :class:`.LargeBinary` datatypes, when used to render DDL, will render the
+ types ``NVARCHAR(max)``, ``VARCHAR(max)``, and ``VARBINARY(max)``,
+ respectively. This is a new behavior as of the addition of this flag.
+
+* When this flag is ``False``, the :class:`.UnicodeText`, :class:`.Text` and
+ :class:`.LargeBinary` datatypes, when used to render DDL, will render the
+ types ``NTEXT``, ``TEXT``, and ``IMAGE``,
+ respectively. This is the long-standing behavior of these types.
+
+* The flag begins with the value ``None``, before a database connection is
+ established. If the dialect is used to render DDL without the flag being
+ set, it is interpreted the same as ``False``.
+
+* On first connection, the dialect detects whether SQL Server version 2012
+ or greater is in use; if the flag is still ``None``, it is set to ``True``
+ or ``False`` accordingly.
+
+* The flag can be set to either ``True`` or ``False`` when the dialect
+ is created, typically via :func:`.create_engine`::
+
+ eng = create_engine("mssql+pymssql://user:pass@host/db",
+ deprecate_large_types=True)
+
+* Complete control over whether the "old" or "new" types are rendered is
+ available in all SQLAlchemy versions by using the UPPERCASE type objects
+ instead: :class:`.NVARCHAR`, :class:`.VARCHAR`, :class:`.types.VARBINARY`,
+ :class:`.TEXT`, :class:`.mssql.NTEXT`, and :class:`.mssql.IMAGE`; these
+ always remain fixed and always output exactly that type.
+
+.. versionadded:: 1.0.0
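
A minimal sketch of the flag's effect on rendered DDL (table and column
names here are hypothetical)::

    from sqlalchemy import Column, MetaData, Table, Text
    from sqlalchemy.dialects.mssql.base import MSDialect
    from sqlalchemy.schema import CreateTable

    dialect = MSDialect(deprecate_large_types=True)
    table = Table('some_table', MetaData(), Column('data', Text))

    # with the flag on, Text renders as VARCHAR(max); with it off, TEXT
    print(CreateTable(table).compile(dialect=dialect))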
+
.. _mssql_indexes:
Clustered Index Support
@@ -367,19 +414,20 @@ import operator
import re
from ... import sql, schema as sa_schema, exc, util
-from ...sql import compiler, expression, \
- util as sql_util, cast
+from ...sql import compiler, expression, util as sql_util
from ... import engine
from ...engine import reflection, default
from ... import types as sqltypes
from ...types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \
FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\
- VARBINARY, TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
+ TEXT, VARCHAR, NVARCHAR, CHAR, NCHAR
from ...util import update_wrapper
from . import information_schema as ischema
+# http://sqlserverbuilds.blogspot.com/
+MS_2012_VERSION = (11,)
MS_2008_VERSION = (10,)
MS_2005_VERSION = (9,)
MS_2000_VERSION = (8,)
@@ -545,6 +593,26 @@ class NTEXT(sqltypes.UnicodeText):
__visit_name__ = 'NTEXT'
+class VARBINARY(sqltypes.VARBINARY, sqltypes.LargeBinary):
+ """The MSSQL VARBINARY type.
+
+ This type extends both :class:`.types.VARBINARY` and
+ :class:`.types.LargeBinary`. In "deprecate_large_types" mode,
+ the :class:`.types.LargeBinary` type will produce ``VARBINARY(max)``
+ on SQL Server.
+
+ .. versionadded:: 1.0.0
+
+ .. seealso::
+
+ :ref:`mssql_large_type_deprecation`
+
+ """
+ __visit_name__ = 'VARBINARY'
+
+
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
@@ -626,7 +694,6 @@ ischema_names = {
class MSTypeCompiler(compiler.GenericTypeCompiler):
-
def _extend(self, spec, type_, length=None):
"""Extend a string-type declaration with standard SQL
COLLATE annotations.
@@ -647,103 +714,115 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return ' '.join([c for c in (spec, collation)
if c is not None])
- def visit_FLOAT(self, type_):
+ def visit_FLOAT(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision is None:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': precision}
- def visit_TINYINT(self, type_):
+ def visit_TINYINT(self, type_, **kw):
return "TINYINT"
- def visit_DATETIMEOFFSET(self, type_):
+ def visit_DATETIMEOFFSET(self, type_, **kw):
if type_.precision:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
- def visit_TIME(self, type_):
+ def visit_TIME(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision:
return "TIME(%s)" % precision
else:
return "TIME"
- def visit_DATETIME2(self, type_):
+ def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, 'precision', None)
if precision:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
- def visit_SMALLDATETIME(self, type_):
+ def visit_SMALLDATETIME(self, type_, **kw):
return "SMALLDATETIME"
- def visit_unicode(self, type_):
- return self.visit_NVARCHAR(type_)
+ def visit_unicode(self, type_, **kw):
+ return self.visit_NVARCHAR(type_, **kw)
+
+ def visit_text(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_VARCHAR(type_, **kw)
+ else:
+ return self.visit_TEXT(type_, **kw)
- def visit_unicode_text(self, type_):
- return self.visit_NTEXT(type_)
+ def visit_unicode_text(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_NVARCHAR(type_, **kw)
+ else:
+ return self.visit_NTEXT(type_, **kw)
- def visit_NTEXT(self, type_):
+ def visit_NTEXT(self, type_, **kw):
return self._extend("NTEXT", type_)
- def visit_TEXT(self, type_):
+ def visit_TEXT(self, type_, **kw):
return self._extend("TEXT", type_)
- def visit_VARCHAR(self, type_):
+ def visit_VARCHAR(self, type_, **kw):
return self._extend("VARCHAR", type_, length=type_.length or 'max')
- def visit_CHAR(self, type_):
+ def visit_CHAR(self, type_, **kw):
return self._extend("CHAR", type_)
- def visit_NCHAR(self, type_):
+ def visit_NCHAR(self, type_, **kw):
return self._extend("NCHAR", type_)
- def visit_NVARCHAR(self, type_):
+ def visit_NVARCHAR(self, type_, **kw):
return self._extend("NVARCHAR", type_, length=type_.length or 'max')
- def visit_date(self, type_):
+ def visit_date(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_)
+ return self.visit_DATETIME(type_, **kw)
else:
- return self.visit_DATE(type_)
+ return self.visit_DATE(type_, **kw)
- def visit_time(self, type_):
+ def visit_time(self, type_, **kw):
if self.dialect.server_version_info < MS_2008_VERSION:
- return self.visit_DATETIME(type_)
+ return self.visit_DATETIME(type_, **kw)
else:
- return self.visit_TIME(type_)
+ return self.visit_TIME(type_, **kw)
- def visit_large_binary(self, type_):
- return self.visit_IMAGE(type_)
+ def visit_large_binary(self, type_, **kw):
+ if self.dialect.deprecate_large_types:
+ return self.visit_VARBINARY(type_, **kw)
+ else:
+ return self.visit_IMAGE(type_, **kw)
- def visit_IMAGE(self, type_):
+ def visit_IMAGE(self, type_, **kw):
return "IMAGE"
- def visit_VARBINARY(self, type_):
+ def visit_VARBINARY(self, type_, **kw):
return self._extend(
"VARBINARY",
type_,
length=type_.length or 'max')
- def visit_boolean(self, type_):
+ def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
- def visit_BIT(self, type_):
+ def visit_BIT(self, type_, **kw):
return "BIT"
- def visit_MONEY(self, type_):
+ def visit_MONEY(self, type_, **kw):
return "MONEY"
- def visit_SMALLMONEY(self, type_):
+ def visit_SMALLMONEY(self, type_, **kw):
return 'SMALLMONEY'
- def visit_UNIQUEIDENTIFIER(self, type_):
+ def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
- def visit_SQL_VARIANT(self, type_):
+ def visit_SQL_VARIANT(self, type_, **kw):
return 'SQL_VARIANT'
@@ -1160,8 +1239,11 @@ class MSSQLStrictCompiler(MSSQLCompiler):
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
- colspec = (self.preparer.format_column(column) + " "
- + self.dialect.type_compiler.process(column.type))
+ colspec = (
+ self.preparer.format_column(column) + " "
+ + self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ )
if column.nullable is not None:
if not column.nullable or column.primary_key or \
@@ -1370,13 +1452,15 @@ class MSDialect(default.DefaultDialect):
query_timeout=None,
use_scope_identity=True,
max_identifier_length=None,
- schema_name="dbo", **opts):
+ schema_name="dbo",
+ deprecate_large_types=None, **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
self.use_scope_identity = use_scope_identity
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
+ self.deprecate_large_types = deprecate_large_types
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
@@ -1390,6 +1474,9 @@ class MSDialect(default.DefaultDialect):
def initialize(self, connection):
super(MSDialect, self).initialize(connection)
+ self._setup_version_attributes()
+
+ def _setup_version_attributes(self):
if self.server_version_info[0] not in list(range(8, 17)):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
@@ -1405,6 +1492,9 @@ class MSDialect(default.DefaultDialect):
self.implicit_returning = True
if self.server_version_info >= MS_2008_VERSION:
self.supports_multivalues_insert = True
+ if self.deprecate_large_types is None:
+ self.deprecate_large_types = \
+ self.server_version_info >= MS_2012_VERSION
def _get_default_schema_name(self, connection):
if self.server_version_info < MS_2005_VERSION:
@@ -1592,12 +1682,11 @@ class MSDialect(default.DefaultDialect):
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
+ if charlen == -1:
+ charlen = 'max'
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
- if coltype == MSText or \
- (coltype in (MSString, MSNVarchar) and charlen == -1):
- kwargs.pop('length')
if coltype is None:
util.warn(
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 58eb3afa0..c8e33bfb2 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -106,7 +106,7 @@ to be used.
Transaction Isolation Level
---------------------------
-:func:`.create_engine` accepts an ``isolation_level``
+:func:`.create_engine` accepts an :paramref:`.create_engine.isolation_level`
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
@@ -602,6 +602,14 @@ class _StringType(sqltypes.String):
to_inspect=[_StringType, sqltypes.String])
+class _MatchType(sqltypes.Float, sqltypes.MatchType):
+ def __init__(self, **kw):
+ # TODO: float arguments?
+ sqltypes.Float.__init__(self)
+ sqltypes.MatchType.__init__(self)
+
+
class NUMERIC(_NumericType, sqltypes.NUMERIC):
"""MySQL NUMERIC type."""
@@ -1420,32 +1428,28 @@ class SET(_EnumeratedValues):
Column('myset', SET("foo", "bar", "baz"))
- :param values: The range of valid values for this SET. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
- .. versionchanged:: 0.9.0 quoting is applied automatically to
- :class:`.mysql.SET` in the same way as for :class:`.mysql.ENUM`.
+ The list of potential values is required in the case that this
+ set will be used to generate DDL for a table, or if the
+ :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
+ :param values: The range of valid values for this SET.
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
+ :param convert_unicode: Same flag as that of
+ :paramref:`.String.convert_unicode`.
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
+ :param collation: same as that of :paramref:`.String.collation`
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
+ :param charset: same as that of :paramref:`.VARCHAR.charset`.
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
+ :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
+ :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
+
+ :param binary: same as that of :paramref:`.VARCHAR.binary`.
+
+ :param quoting: Defaults to 'auto': automatically determine set value
+ quoting. If all values are surrounded by the same quoting
character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
'quoted': values in enums are already quoted, they will be used
@@ -1460,50 +1464,117 @@ class SET(_EnumeratedValues):
.. versionadded:: 0.9.0
+ :param retrieve_as_bitwise: if True, the data for the set type will be
+ persisted and selected using an integer value, where a set is coerced
+ into a bitwise mask for persistence. MySQL allows this mode, which
+ has the advantage of being able to store values unambiguously,
+ such as the blank string ``''``. The datatype will appear
+ as the expression ``col + 0`` in a SELECT statement, so that the
+ value is coerced into an integer value in result sets.
+ This flag is required if one wishes
+ to persist a set that can store the blank string ``''`` as a value.
+
+ .. warning::
+
+ When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
+ essential that the list of set values is expressed in the
+ **exact same order** as exists on the MySQL database.
+
+ .. versionadded:: 1.0.0
+
+
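+ For illustration, a hedged sketch (table and column names hypothetical)
+ of a SET that stores the blank string, which requires this flag::
+
+     from sqlalchemy import Column, MetaData, Table
+     from sqlalchemy.dialects.mysql import SET
+
+     flags_table = Table('flags_table', MetaData(),
+                         Column('flags', SET('', 'a', 'b',
+                                             retrieve_as_bitwise=True)))
+
+     # values bind as a bitmask: {'a', 'b'} -> 2 | 4 == 6
+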
"""
+ self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
values, length = self._init_values(values, kw)
self.values = tuple(values)
-
+ if not self.retrieve_as_bitwise and '' in values:
+ raise exc.ArgumentError(
+ "Can't use the blank value '' in a SET without "
+ "setting retrieve_as_bitwise=True")
+ if self.retrieve_as_bitwise:
+ self._bitmap = dict(
+ (value, 2 ** idx)
+ for idx, value in enumerate(self.values)
+ )
+ self._bitmap.update(
+ (2 ** idx, value)
+ for idx, value in enumerate(self.values)
+ )
kw.setdefault('length', length)
super(SET, self).__init__(**kw)
+ def column_expression(self, colexpr):
+ if self.retrieve_as_bitwise:
+ return colexpr + 0
+ else:
+ return colexpr
+
def result_processor(self, dialect, coltype):
- def process(value):
- # The good news:
- # No ',' quoting issues- commas aren't allowed in SET values
- # The bad news:
- # Plenty of driver inconsistencies here.
- if isinstance(value, set):
- # ..some versions convert '' to an empty set
- if not value:
- value.add('')
- return value
- # ...and some versions return strings
- if value is not None:
- return set(value.split(','))
- else:
- return value
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is not None:
+ value = int(value)
+
+ return set(
+ util.map_bits(self._bitmap.__getitem__, value)
+ )
+ else:
+ return None
+ else:
+ super_convert = super(SET, self).result_processor(dialect, coltype)
+
+ def process(value):
+ if isinstance(value, util.string_types):
+ # MySQLdb returns a string, let's parse
+ if super_convert:
+ value = super_convert(value)
+ return set(re.findall(r'[^,]+', value))
+ else:
+ # mysql-connector-python does a naive
+ # split(",") which throws in an empty string
+ if value is not None:
+ value.discard('')
+ return value
return process
def bind_processor(self, dialect):
super_convert = super(SET, self).bind_processor(dialect)
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, util.int_types + util.string_types):
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ else:
+ int_value = 0
+ for v in value:
+ int_value |= self._bitmap[v]
+ return int_value
+ else:
- def process(value):
- if value is None or isinstance(
- value, util.int_types + util.string_types):
- pass
- else:
- if None in value:
- value = set(value)
- value.remove(None)
- value.add('')
- value = ','.join(value)
- if super_convert:
- return super_convert(value)
- else:
- return value
+ def process(value):
+ # accept strings and int (actually bitflag) values directly
+ if value is not None and not isinstance(
+ value, util.int_types + util.string_types):
+ value = ",".join(value)
+
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
return process
+ def adapt(self, impltype, **kw):
+ kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
+ return util.constructor_copy(
+ self, impltype,
+ *self.values,
+ **kw
+ )
+
# old names
MSTime = TIME
MSSet = SET
@@ -1544,6 +1615,7 @@ colspecs = {
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
+ sqltypes.MatchType: _MatchType
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
@@ -1758,10 +1830,10 @@ class MySQLCompiler(compiler.SQLCompiler):
# creation of foreign key constraints fails."
class MySQLDDLCompiler(compiler.DDLCompiler):
- def create_table_constraints(self, table):
+ def create_table_constraints(self, table, **kw):
"""Get table constraints."""
constraint_string = super(
- MySQLDDLCompiler, self).create_table_constraints(table)
+ MySQLDDLCompiler, self).create_table_constraints(table, **kw)
# why self.dialect.name and not 'mysql'? because of drizzle
is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
@@ -1787,9 +1859,11 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
- colspec = [self.preparer.format_column(column),
- self.dialect.type_compiler.process(column.type)
- ]
+ colspec = [
+ self.preparer.format_column(column),
+ self.dialect.type_compiler.process(
+ column.type, type_expression=column)
+ ]
default = self.get_column_default_string(column)
if default is not None:
@@ -1987,7 +2061,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _mysql_type(self, type_):
return isinstance(type_, (_StringType, _NumericType))
- def visit_NUMERIC(self, type_):
+ def visit_NUMERIC(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
@@ -2000,7 +2074,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
{'precision': type_.precision,
'scale': type_.scale})
- def visit_DECIMAL(self, type_):
+ def visit_DECIMAL(self, type_, **kw):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
@@ -2013,7 +2087,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
{'precision': type_.precision,
'scale': type_.scale})
- def visit_DOUBLE(self, type_):
+ def visit_DOUBLE(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
"DOUBLE(%(precision)s, %(scale)s)" %
@@ -2022,7 +2096,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, 'DOUBLE')
- def visit_REAL(self, type_):
+ def visit_REAL(self, type_, **kw):
if type_.precision is not None and type_.scale is not None:
return self._extend_numeric(type_,
"REAL(%(precision)s, %(scale)s)" %
@@ -2031,7 +2105,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, 'REAL')
- def visit_FLOAT(self, type_):
+ def visit_FLOAT(self, type_, **kw):
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
@@ -2043,7 +2117,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, "FLOAT")
- def visit_INTEGER(self, type_):
+ def visit_INTEGER(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "INTEGER(%(display_width)s)" %
@@ -2051,7 +2125,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, "INTEGER")
- def visit_BIGINT(self, type_):
+ def visit_BIGINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "BIGINT(%(display_width)s)" %
@@ -2059,7 +2133,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, "BIGINT")
- def visit_MEDIUMINT(self, type_):
+ def visit_MEDIUMINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(
type_, "MEDIUMINT(%(display_width)s)" %
@@ -2067,14 +2141,14 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, "MEDIUMINT")
- def visit_TINYINT(self, type_):
+ def visit_TINYINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"TINYINT(%s)" % type_.display_width)
else:
return self._extend_numeric(type_, "TINYINT")
- def visit_SMALLINT(self, type_):
+ def visit_SMALLINT(self, type_, **kw):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
@@ -2083,55 +2157,55 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_numeric(type_, "SMALLINT")
- def visit_BIT(self, type_):
+ def visit_BIT(self, type_, **kw):
if type_.length is not None:
return "BIT(%s)" % type_.length
else:
return "BIT"
- def visit_DATETIME(self, type_):
+ def visit_DATETIME(self, type_, **kw):
if getattr(type_, 'fsp', None):
return "DATETIME(%d)" % type_.fsp
else:
return "DATETIME"
- def visit_DATE(self, type_):
+ def visit_DATE(self, type_, **kw):
return "DATE"
- def visit_TIME(self, type_):
+ def visit_TIME(self, type_, **kw):
if getattr(type_, 'fsp', None):
return "TIME(%d)" % type_.fsp
else:
return "TIME"
- def visit_TIMESTAMP(self, type_):
+ def visit_TIMESTAMP(self, type_, **kw):
if getattr(type_, 'fsp', None):
return "TIMESTAMP(%d)" % type_.fsp
else:
return "TIMESTAMP"
- def visit_YEAR(self, type_):
+ def visit_YEAR(self, type_, **kw):
if type_.display_width is None:
return "YEAR"
else:
return "YEAR(%s)" % type_.display_width
- def visit_TEXT(self, type_):
+ def visit_TEXT(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "TEXT(%d)" % type_.length)
else:
return self._extend_string(type_, {}, "TEXT")
- def visit_TINYTEXT(self, type_):
+ def visit_TINYTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "TINYTEXT")
- def visit_MEDIUMTEXT(self, type_):
+ def visit_MEDIUMTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "MEDIUMTEXT")
- def visit_LONGTEXT(self, type_):
+ def visit_LONGTEXT(self, type_, **kw):
return self._extend_string(type_, {}, "LONGTEXT")
- def visit_VARCHAR(self, type_):
+ def visit_VARCHAR(self, type_, **kw):
if type_.length:
return self._extend_string(
type_, {}, "VARCHAR(%d)" % type_.length)
@@ -2140,14 +2214,14 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
- def visit_CHAR(self, type_):
+ def visit_CHAR(self, type_, **kw):
if type_.length:
return self._extend_string(type_, {}, "CHAR(%(length)s)" %
{'length': type_.length})
else:
return self._extend_string(type_, {}, "CHAR")
- def visit_NVARCHAR(self, type_):
+ def visit_NVARCHAR(self, type_, **kw):
# We'll actually generate the equiv. "NATIONAL VARCHAR" instead
# of "NVARCHAR".
if type_.length:
@@ -2159,7 +2233,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
"NVARCHAR requires a length on dialect %s" %
self.dialect.name)
- def visit_NCHAR(self, type_):
+ def visit_NCHAR(self, type_, **kw):
# We'll actually generate the equiv.
# "NATIONAL CHAR" instead of "NCHAR".
if type_.length:
@@ -2169,31 +2243,31 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
else:
return self._extend_string(type_, {'national': True}, "CHAR")
- def visit_VARBINARY(self, type_):
+ def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
- def visit_large_binary(self, type_):
+ def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
- def visit_enum(self, type_):
+ def visit_enum(self, type_, **kw):
if not type_.native_enum:
return super(MySQLTypeCompiler, self).visit_enum(type_)
else:
return self._visit_enumerated_values("ENUM", type_, type_.enums)
- def visit_BLOB(self, type_):
+ def visit_BLOB(self, type_, **kw):
if type_.length:
return "BLOB(%d)" % type_.length
else:
return "BLOB"
- def visit_TINYBLOB(self, type_):
+ def visit_TINYBLOB(self, type_, **kw):
return "TINYBLOB"
- def visit_MEDIUMBLOB(self, type_):
+ def visit_MEDIUMBLOB(self, type_, **kw):
return "MEDIUMBLOB"
- def visit_LONGBLOB(self, type_):
+ def visit_LONGBLOB(self, type_, **kw):
return "LONGBLOB"
def _visit_enumerated_values(self, name, type_, enumerated_values):
@@ -2204,15 +2278,15 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
name, ",".join(quoted_enums))
)
- def visit_ENUM(self, type_):
+ def visit_ENUM(self, type_, **kw):
return self._visit_enumerated_values("ENUM", type_,
type_._enumerated_values)
- def visit_SET(self, type_):
+ def visit_SET(self, type_, **kw):
return self._visit_enumerated_values("SET", type_,
type_._enumerated_values)
- def visit_BOOLEAN(self, type):
+ def visit_BOOLEAN(self, type, **kw):
return "BOOL"
@@ -2963,6 +3037,9 @@ class MySQLTableDefinitionParser(object):
if issubclass(col_type, _EnumeratedValues):
type_args = _EnumeratedValues._strip_values(type_args)
+ if issubclass(col_type, SET) and '' in type_args:
+ type_kw['retrieve_as_bitwise'] = True
+
type_instance = col_type(*type_args, **type_kw)
col_args, col_kw = [], {}
diff --git a/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
index 0059f5a65..284b51bde 100644
--- a/lib/sqlalchemy/dialects/mysql/gaerdbms.py
+++ b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -17,6 +17,13 @@ developers-guide
.. versionadded:: 0.7.8
+ .. deprecated:: 1.0 This dialect is **no longer necessary** for
+ Google Cloud SQL; the MySQLdb dialect can be used directly.
+ Cloud SQL now recommends creating connections via the
+ mysql dialect using the URL format
+
+ ``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
+
Pooling
-------
@@ -33,6 +40,7 @@ import os
from .mysqldb import MySQLDialect_mysqldb
from ...pool import NullPool
import re
+from sqlalchemy.util import warn_deprecated
def _is_dev_environment():
@@ -43,6 +51,14 @@ class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
+
+ warn_deprecated(
+ "Google Cloud SQL now recommends creating connections via the "
+ "MySQLdb dialect directly, using the URL format "
+ "mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/"
+ "<projectid>:<instancename>"
+ )
+
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
index 73210d67a..929317467 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -39,6 +39,14 @@ MySQL-python version 1.2.2 has a serious memory leak related
to unicode conversion, a feature which is disabled via ``use_unicode=0``.
It is strongly advised to use the latest version of MySQL-Python.
+Using MySQLdb with Google Cloud SQL
+-----------------------------------
+
+Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
+using a URL like the following::
+
+ mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
+
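+For example, with hypothetical database, project and instance names::
+
+    from sqlalchemy import create_engine
+
+    engine = create_engine(
+        "mysql+mysqldb://root@/mydb"
+        "?unix_socket=/cloudsql/myproject:myinstance")
+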
"""
from .base import (MySQLDialect, MySQLExecutionContext,
@@ -77,7 +85,7 @@ class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer):
class MySQLDialect_mysqldb(MySQLDialect):
driver = 'mysqldb'
- supports_unicode_statements = False
+ supports_unicode_statements = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
@@ -102,12 +110,13 @@ class MySQLDialect_mysqldb(MySQLDialect):
# https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
# specific issue w/ the utf8_bin collation and unicode returns
- has_utf8_bin = connection.scalar(
- "show collation where %s = 'utf8' and %s = 'utf8_bin'"
- % (
- self.identifier_preparer.quote("Charset"),
- self.identifier_preparer.quote("Collation")
- ))
+ has_utf8_bin = self.server_version_info > (5, ) and \
+ connection.scalar(
+ "show collation where %s = 'utf8' and %s = 'utf8_bin'"
+ % (
+ self.identifier_preparer.quote("Charset"),
+ self.identifier_preparer.quote("Collation")
+ ))
if has_utf8_bin:
additional_tests = [
sql.collate(sql.cast(
diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py
index 31226cea0..8df2ba03f 100644
--- a/lib/sqlalchemy/dialects/mysql/pymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/pymysql.py
@@ -31,8 +31,7 @@ class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
description_encoding = None
- if py3k:
- supports_unicode_statements = True
+ supports_unicode_statements = True
@classmethod
def dbapi(cls):
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index 6df38e57e..b482c9069 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -213,6 +213,8 @@ is reflected and the type is reported as ``DATE``, the time-supporting
examining the type of column for use in special Python translations or
for migrating schemas to other database backends.
+.. _oracle_table_options:
+
Oracle Table Options
-------------------------
@@ -228,15 +230,63 @@ in conjunction with the :class:`.Table` construct:
.. versionadded:: 1.0.0
+* ``COMPRESS``::
+
+ Table('mytable', metadata, Column('data', String(32)),
+ oracle_compress=True)
+
+ Table('mytable', metadata, Column('data', String(32)),
+ oracle_compress=6)
+
+ The ``oracle_compress`` parameter accepts either an integer compression
+ level, or ``True`` to use the default compression level.
+
+.. versionadded:: 1.0.0
+
+.. _oracle_index_options:
+
+Oracle Specific Index Options
+-----------------------------
+
+Bitmap Indexes
+~~~~~~~~~~~~~~
+
+You can specify the ``oracle_bitmap`` parameter to create a bitmap index
+instead of a B-tree index::
+
+ Index('my_index', my_table.c.data, oracle_bitmap=True)
+
+Bitmap indexes cannot be unique and cannot be compressed. SQLAlchemy does not
+check for these limitations; only the database will.
+
+.. versionadded:: 1.0.0
+
+Index compression
+~~~~~~~~~~~~~~~~~
+
+Oracle has a more efficient storage mode for indexes containing lots of
+repeated values. Use the ``oracle_compress`` parameter to turn on key
+compression::
+
+ Index('my_index', my_table.c.data, oracle_compress=True)
+
+ Index('my_index', my_table.c.data1, my_table.c.data2, unique=True,
+ oracle_compress=1)
+
+The ``oracle_compress`` parameter accepts either an integer specifying the
+number of prefix columns to compress, or ``True`` to use the default (all
+columns for non-unique indexes, all but the last column for unique indexes).
+
+.. versionadded:: 1.0.0
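+
+As a rough sketch (table and index names hypothetical), the DDL emitted for
+a compressed index can be previewed without a live connection::
+
+    from sqlalchemy import Column, Index, Integer, MetaData, Table
+    from sqlalchemy.dialects.oracle.base import OracleDialect
+    from sqlalchemy.schema import CreateIndex
+
+    t = Table('my_table', MetaData(), Column('data', Integer))
+    ix = Index('my_index', t.c.data, oracle_compress=True)
+
+    # expected: CREATE INDEX my_index ON my_table (data) COMPRESS
+    print(CreateIndex(ix).compile(dialect=OracleDialect()))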
+
"""
import re
from sqlalchemy import util, sql
-from sqlalchemy.engine import default, base, reflection
+from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, visitors, expression
-from sqlalchemy.sql import (operators as sql_operators,
- functions as sql_functions)
+from sqlalchemy.sql import operators as sql_operators
from sqlalchemy import types as sqltypes, schema as sa_schema
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
BLOB, CLOB, TIMESTAMP, FLOAT
@@ -407,19 +457,19 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
# Oracle does not allow milliseconds in DATE
# Oracle does not support TIME columns
- def visit_datetime(self, type_):
- return self.visit_DATE(type_)
+ def visit_datetime(self, type_, **kw):
+ return self.visit_DATE(type_, **kw)
- def visit_float(self, type_):
- return self.visit_FLOAT(type_)
+ def visit_float(self, type_, **kw):
+ return self.visit_FLOAT(type_, **kw)
- def visit_unicode(self, type_):
+ def visit_unicode(self, type_, **kw):
if self.dialect._supports_nchar:
- return self.visit_NVARCHAR2(type_)
+ return self.visit_NVARCHAR2(type_, **kw)
else:
- return self.visit_VARCHAR2(type_)
+ return self.visit_VARCHAR2(type_, **kw)
- def visit_INTERVAL(self, type_):
+ def visit_INTERVAL(self, type_, **kw):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
"(%d)" % type_.day_precision or
@@ -429,22 +479,22 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
"",
)
- def visit_LONG(self, type_):
+ def visit_LONG(self, type_, **kw):
return "LONG"
- def visit_TIMESTAMP(self, type_):
+ def visit_TIMESTAMP(self, type_, **kw):
if type_.timezone:
return "TIMESTAMP WITH TIME ZONE"
else:
return "TIMESTAMP"
- def visit_DOUBLE_PRECISION(self, type_):
- return self._generate_numeric(type_, "DOUBLE PRECISION")
+ def visit_DOUBLE_PRECISION(self, type_, **kw):
+ return self._generate_numeric(type_, "DOUBLE PRECISION", **kw)
def visit_NUMBER(self, type_, **kw):
return self._generate_numeric(type_, "NUMBER", **kw)
- def _generate_numeric(self, type_, name, precision=None, scale=None):
+ def _generate_numeric(self, type_, name, precision=None, scale=None, **kw):
if precision is None:
precision = type_.precision
@@ -460,17 +510,17 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
n = "%(name)s(%(precision)s, %(scale)s)"
return n % {'name': name, 'precision': precision, 'scale': scale}
- def visit_string(self, type_):
- return self.visit_VARCHAR2(type_)
+ def visit_string(self, type_, **kw):
+ return self.visit_VARCHAR2(type_, **kw)
- def visit_VARCHAR2(self, type_):
+ def visit_VARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, '', '2')
- def visit_NVARCHAR2(self, type_):
+ def visit_NVARCHAR2(self, type_, **kw):
return self._visit_varchar(type_, 'N', '2')
visit_NVARCHAR = visit_NVARCHAR2
- def visit_VARCHAR(self, type_):
+ def visit_VARCHAR(self, type_, **kw):
return self._visit_varchar(type_, '', '')
def _visit_varchar(self, type_, n, num):
@@ -483,31 +533,31 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
varchar = "%(n)sVARCHAR%(two)s(%(length)s)"
return varchar % {'length': type_.length, 'two': num, 'n': n}
- def visit_text(self, type_):
- return self.visit_CLOB(type_)
+ def visit_text(self, type_, **kw):
+ return self.visit_CLOB(type_, **kw)
- def visit_unicode_text(self, type_):
+ def visit_unicode_text(self, type_, **kw):
if self.dialect._supports_nchar:
- return self.visit_NCLOB(type_)
+ return self.visit_NCLOB(type_, **kw)
else:
- return self.visit_CLOB(type_)
+ return self.visit_CLOB(type_, **kw)
- def visit_large_binary(self, type_):
- return self.visit_BLOB(type_)
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BLOB(type_, **kw)
- def visit_big_integer(self, type_):
- return self.visit_NUMBER(type_, precision=19)
+ def visit_big_integer(self, type_, **kw):
+ return self.visit_NUMBER(type_, precision=19, **kw)
- def visit_boolean(self, type_):
- return self.visit_SMALLINT(type_)
+ def visit_boolean(self, type_, **kw):
+ return self.visit_SMALLINT(type_, **kw)
- def visit_RAW(self, type_):
+ def visit_RAW(self, type_, **kw):
if type_.length:
return "RAW(%(length)s)" % {'length': type_.length}
else:
return "RAW"
- def visit_ROWID(self, type_):
+ def visit_ROWID(self, type_, **kw):
return "ROWID"
@@ -549,6 +599,9 @@ class OracleCompiler(compiler.SQLCompiler):
def visit_false(self, expr, **kw):
return '0'
+ def get_cte_preamble(self, recursive):
+ return "WITH"
+
def get_select_hint_text(self, byfroms):
return " ".join(
"/*+ %s */" % text for table, text in byfroms.items()
@@ -619,22 +672,10 @@ class OracleCompiler(compiler.SQLCompiler):
return (self.dialect.identifier_preparer.format_sequence(seq) +
".nextval")
- def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs):
- """Oracle doesn't like ``FROM table AS alias``. Is the AS standard
- SQL??
- """
-
- if asfrom or ashint:
- alias_name = isinstance(alias.name, expression._truncated_label) and \
- self._truncated_identifier("alias", alias.name) or alias.name
+ def get_render_as_alias_suffix(self, alias_name_text):
+ """Oracle doesn't like ``FROM table AS alias``"""
- if ashint:
- return alias_name
- elif asfrom:
- return self.process(alias.original, asfrom=asfrom, **kwargs) + \
- " " + self.preparer.format_alias(alias, alias_name)
- else:
- return self.process(alias.original, **kwargs)
+ return " " + alias_name_text
def returning_clause(self, stmt, returning_cols):
columns = []
@@ -795,9 +836,32 @@ class OracleDDLCompiler(compiler.DDLCompiler):
return text
- def visit_create_index(self, create, **kw):
- return super(OracleDDLCompiler, self).\
- visit_create_index(create, include_schema=True)
+ def visit_create_index(self, create):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ if index.dialect_options['oracle']['bitmap']:
+ text += "BITMAP "
+ text += "INDEX %s ON %s (%s)" % (
+ self._prepared_index_name(index, include_schema=True),
+ preparer.format_table(index.table, use_schema=True),
+ ', '.join(
+ self.sql_compiler.process(
+ expr,
+ include_table=False, literal_binds=True)
+ for expr in index.expressions)
+ )
+ if index.dialect_options['oracle']['compress'] is not False:
+ if index.dialect_options['oracle']['compress'] is True:
+ text += " COMPRESS"
+ else:
+ text += " COMPRESS %d" % (
+ index.dialect_options['oracle']['compress']
+ )
+ return text
def post_create_table(self, table):
table_opts = []
@@ -807,6 +871,14 @@ class OracleDDLCompiler(compiler.DDLCompiler):
on_commit_options = opts['on_commit'].replace("_", " ").upper()
table_opts.append('\n ON COMMIT %s' % on_commit_options)
+ if opts['compress']:
+ if opts['compress'] is True:
+ table_opts.append("\n COMPRESS")
+ else:
+ table_opts.append("\n COMPRESS FOR %s" % (
+ opts['compress']
+ ))
+
return ''.join(table_opts)
@@ -870,7 +942,12 @@ class OracleDialect(default.DefaultDialect):
construct_arguments = [
(sa_schema.Table, {
"resolve_synonyms": False,
- "on_commit": None
+ "on_commit": None,
+ "compress": False
+ }),
+ (sa_schema.Index, {
+ "bitmap": False,
+ "compress": False
})
]
@@ -902,6 +979,16 @@ class OracleDialect(default.DefaultDialect):
self.server_version_info < (9, )
@property
+ def _supports_table_compression(self):
+ return self.server_version_info and \
+ self.server_version_info >= (9, 2, )
+
+ @property
+ def _supports_table_compress_for(self):
+ return self.server_version_info and \
+ self.server_version_info >= (11, )
+
+ @property
def _supports_char_length(self):
return not self._is_oracle_8
@@ -1084,6 +1171,50 @@ class OracleDialect(default.DefaultDialect):
return [self.normalize_name(row[0]) for row in cursor]
@reflection.cache
+ def get_table_options(self, connection, table_name, schema=None, **kw):
+ options = {}
+
+ resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
+ dblink = kw.get('dblink', '')
+ info_cache = kw.get('info_cache')
+
+ (table_name, schema, dblink, synonym) = \
+ self._prepare_reflection_args(connection, table_name, schema,
+ resolve_synonyms, dblink,
+ info_cache=info_cache)
+
+ params = {"table_name": table_name}
+
+ columns = ["table_name"]
+ if self._supports_table_compression:
+ columns.append("compression")
+ if self._supports_table_compress_for:
+ columns.append("compress_for")
+
+ text = "SELECT %(columns)s "\
+ "FROM ALL_TABLES%(dblink)s "\
+ "WHERE table_name = :table_name"
+
+ if schema is not None:
+ params['owner'] = schema
+ text += " AND owner = :owner "
+ text = text % {'dblink': dblink, 'columns': ", ".join(columns)}
+
+ result = connection.execute(sql.text(text), **params)
+
+ enabled = dict(DISABLED=False, ENABLED=True)
+
+ row = result.first()
+ if row:
+ if "compression" in row and enabled.get(row.compression, False):
+ if "compress_for" in row:
+ options['oracle_compress'] = row.compress_for
+ else:
+ options['oracle_compress'] = True
+
+ return options
+
+ @reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
"""
@@ -1168,7 +1299,8 @@ class OracleDialect(default.DefaultDialect):
params = {'table_name': table_name}
text = \
- "SELECT a.index_name, a.column_name, b.uniqueness "\
+ "SELECT a.index_name, a.column_name, "\
+ "\nb.index_type, b.uniqueness, b.compression, b.prefix_length "\
"\nFROM ALL_IND_COLUMNS%(dblink)s a, "\
"\nALL_INDEXES%(dblink)s b "\
"\nWHERE "\
@@ -1194,6 +1326,7 @@ class OracleDialect(default.DefaultDialect):
dblink=dblink, info_cache=kw.get('info_cache'))
pkeys = pk_constraint['constrained_columns']
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
+ enabled = dict(DISABLED=False, ENABLED=True)
oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
@@ -1213,10 +1346,15 @@ class OracleDialect(default.DefaultDialect):
if rset.index_name != last_index_name:
remove_if_primary_key(index)
index = dict(name=self.normalize_name(rset.index_name),
- column_names=[])
+ column_names=[], dialect_options={})
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
+ if rset.index_type in ('BITMAP', 'FUNCTION-BASED BITMAP'):
+ index['dialect_options']['oracle_bitmap'] = True
+ if enabled.get(rset.compression, False):
+ index['dialect_options']['oracle_compress'] = rset.prefix_length
+
# filter out Oracle SYS_NC names. could also do an outer join
# to the all_tab_columns table and check for real col names there.
if not oracle_sys_col.match(rset.column_name):
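The reflected table options surface through the standard inspection
interface; a hedged usage sketch (connection URL and table name
hypothetical)::

    from sqlalchemy import create_engine, inspect

    engine = create_engine("oracle://scott:tiger@localhost/xe")
    insp = inspect(engine)

    # may return e.g. {'oracle_compress': 'BASIC'} where compression
    # is enabled and the server supports COMPRESS FOR
    print(insp.get_table_options('my_compressed_table'))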
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
index 1cff8e3a0..01a846314 100644
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ b/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -5,7 +5,7 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from . import base, psycopg2, pg8000, pypostgresql, zxjdbc
+from . import base, psycopg2, pg8000, pypostgresql, zxjdbc, psycopg2cffi
base.dialect = psycopg2.dialect
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index baa640eaa..1935d0cad 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -48,7 +48,7 @@ Transaction Isolation Level
---------------------------
All Postgresql dialects support setting of transaction isolation level
-both via a dialect-specific parameter ``isolation_level``
+both via a dialect-specific parameter :paramref:`.create_engine.isolation_level`
accepted by :func:`.create_engine`,
as well as the ``isolation_level`` argument as passed to
:meth:`.Connection.execution_options`. When using a non-psycopg2 dialect,
@@ -266,7 +266,7 @@ will emit to the database::
The Postgresql text search functions such as ``to_tsquery()``
and ``to_tsvector()`` are available
-explicitly using the standard :attr:`.func` construct. For example::
+explicitly using the standard :data:`.func` construct. For example::
select([
func.to_tsvector('fat cats ate rats').match('cat & rat')
@@ -299,7 +299,7 @@ not re-compute the column on demand.
In order to provide for this explicit query planning, or to use different
search strategies, the ``match`` method accepts a ``postgresql_regconfig``
-keyword argument.
+keyword argument::
select([mytable.c.id]).where(
mytable.c.title.match('somestring', postgresql_regconfig='english')
@@ -311,7 +311,7 @@ Emits the equivalent of::
WHERE mytable.title @@ to_tsquery('english', 'somestring')
One can also specifically pass in a `'regconfig'` value to the
-``to_tsvector()`` command as the initial argument.
+``to_tsvector()`` command as the initial argument::
select([mytable.c.id]).where(
func.to_tsvector('english', mytable.c.title )\
@@ -483,7 +483,7 @@ import re
from ... import sql, schema, exc, util
from ...engine import default, reflection
-from ...sql import compiler, expression, operators
+from ...sql import compiler, expression, operators, default_comparator
from ... import types as sqltypes
try:
@@ -680,10 +680,10 @@ class _Slice(expression.ColumnElement):
type = sqltypes.NULLTYPE
def __init__(self, slice_, source_comparator):
- self.start = source_comparator._check_literal(
+ self.start = default_comparator._check_literal(
source_comparator.expr,
operators.getitem, slice_.start)
- self.stop = source_comparator._check_literal(
+ self.stop = default_comparator._check_literal(
source_comparator.expr,
operators.getitem, slice_.stop)
@@ -876,8 +876,9 @@ class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
index += shift_indexes
return_type = self.type.item_type
- return self._binary_operate(self.expr, operators.getitem, index,
- result_type=return_type)
+ return default_comparator._binary_operate(
+ self.expr, operators.getitem, index,
+ result_type=return_type)
def any(self, other, operator=operators.eq):
"""Return ``other operator ANY (array)`` clause.
@@ -1424,7 +1425,8 @@ class PGDDLCompiler(compiler.DDLCompiler):
else:
colspec += " SERIAL"
else:
- colspec += " " + self.dialect.type_compiler.process(column.type)
+ colspec += " " + self.dialect.type_compiler.process(column.type,
+ type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
@@ -1476,8 +1478,13 @@ class PGDDLCompiler(compiler.DDLCompiler):
if not isinstance(expr, expression.ColumnClause)
else expr,
include_table=False, literal_binds=True) +
- (c.key in ops and (' ' + ops[c.key]) or '')
- for expr, c in zip(index.expressions, index.columns)])
+ (
+ (' ' + ops[expr.key])
+ if hasattr(expr, 'key')
+ and expr.key in ops else ''
+ )
+ for expr in index.expressions
+ ])
)
whereclause = index.dialect_options["postgresql"]["where"]
@@ -1539,94 +1546,93 @@ class PGDDLCompiler(compiler.DDLCompiler):
class PGTypeCompiler(compiler.GenericTypeCompiler):
-
- def visit_TSVECTOR(self, type):
+ def visit_TSVECTOR(self, type, **kw):
return "TSVECTOR"
- def visit_INET(self, type_):
+ def visit_INET(self, type_, **kw):
return "INET"
- def visit_CIDR(self, type_):
+ def visit_CIDR(self, type_, **kw):
return "CIDR"
- def visit_MACADDR(self, type_):
+ def visit_MACADDR(self, type_, **kw):
return "MACADDR"
- def visit_OID(self, type_):
+ def visit_OID(self, type_, **kw):
return "OID"
- def visit_FLOAT(self, type_):
+ def visit_FLOAT(self, type_, **kw):
if not type_.precision:
return "FLOAT"
else:
return "FLOAT(%(precision)s)" % {'precision': type_.precision}
- def visit_DOUBLE_PRECISION(self, type_):
+ def visit_DOUBLE_PRECISION(self, type_, **kw):
return "DOUBLE PRECISION"
- def visit_BIGINT(self, type_):
+ def visit_BIGINT(self, type_, **kw):
return "BIGINT"
- def visit_HSTORE(self, type_):
+ def visit_HSTORE(self, type_, **kw):
return "HSTORE"
- def visit_JSON(self, type_):
+ def visit_JSON(self, type_, **kw):
return "JSON"
- def visit_JSONB(self, type_):
+ def visit_JSONB(self, type_, **kw):
return "JSONB"
- def visit_INT4RANGE(self, type_):
+ def visit_INT4RANGE(self, type_, **kw):
return "INT4RANGE"
- def visit_INT8RANGE(self, type_):
+ def visit_INT8RANGE(self, type_, **kw):
return "INT8RANGE"
- def visit_NUMRANGE(self, type_):
+ def visit_NUMRANGE(self, type_, **kw):
return "NUMRANGE"
- def visit_DATERANGE(self, type_):
+ def visit_DATERANGE(self, type_, **kw):
return "DATERANGE"
- def visit_TSRANGE(self, type_):
+ def visit_TSRANGE(self, type_, **kw):
return "TSRANGE"
- def visit_TSTZRANGE(self, type_):
+ def visit_TSTZRANGE(self, type_, **kw):
return "TSTZRANGE"
- def visit_datetime(self, type_):
- return self.visit_TIMESTAMP(type_)
+ def visit_datetime(self, type_, **kw):
+ return self.visit_TIMESTAMP(type_, **kw)
- def visit_enum(self, type_):
+ def visit_enum(self, type_, **kw):
if not type_.native_enum or not self.dialect.supports_native_enum:
- return super(PGTypeCompiler, self).visit_enum(type_)
+ return super(PGTypeCompiler, self).visit_enum(type_, **kw)
else:
- return self.visit_ENUM(type_)
+ return self.visit_ENUM(type_, **kw)
- def visit_ENUM(self, type_):
+ def visit_ENUM(self, type_, **kw):
return self.dialect.identifier_preparer.format_type(type_)
- def visit_TIMESTAMP(self, type_):
+ def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
- def visit_TIME(self, type_):
+ def visit_TIME(self, type_, **kw):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
- def visit_INTERVAL(self, type_):
+ def visit_INTERVAL(self, type_, **kw):
if type_.precision is not None:
return "INTERVAL(%d)" % type_.precision
else:
return "INTERVAL"
- def visit_BIT(self, type_):
+ def visit_BIT(self, type_, **kw):
if type_.varying:
compiled = "BIT VARYING"
if type_.length is not None:
@@ -1635,16 +1641,16 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
compiled = "BIT(%d)" % type_.length
return compiled
- def visit_UUID(self, type_):
+ def visit_UUID(self, type_, **kw):
return "UUID"
- def visit_large_binary(self, type_):
- return self.visit_BYTEA(type_)
+ def visit_large_binary(self, type_, **kw):
+ return self.visit_BYTEA(type_, **kw)
- def visit_BYTEA(self, type_):
+ def visit_BYTEA(self, type_, **kw):
return "BYTEA"
- def visit_ARRAY(self, type_):
+ def visit_ARRAY(self, type_, **kw):
return self.process(type_.item_type) + ('[]' * (type_.dimensions
if type_.dimensions
is not None else 1))
@@ -1942,7 +1948,8 @@ class PGDialect(default.DefaultDialect):
cursor = connection.execute(
sql.text(
"select relname from pg_class c join pg_namespace n on "
- "n.oid=c.relnamespace where n.nspname=current_schema() "
+ "n.oid=c.relnamespace where "
+ "pg_catalog.pg_table_is_visible(c.oid) "
"and relname=:name",
bindparams=[
sql.bindparam('name', util.text_type(table_name),
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index 250bf5e9d..f38c4a56a 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -12,7 +12,7 @@ from .base import ischema_names
from ... import types as sqltypes
from ...sql.operators import custom_op
from ... import sql
-from ...sql import elements
+from ...sql import elements, default_comparator
from ... import util
__all__ = ('JSON', 'JSONElement', 'JSONB')
@@ -46,7 +46,8 @@ class JSONElement(elements.BinaryExpression):
self._json_opstring = opstring
operator = custom_op(opstring, precedence=5)
- right = left._check_literal(left, operator, right)
+ right = default_comparator._check_literal(
+ left, operator, right)
super(JSONElement, self).__init__(
left, right, operator, type_=result_type)
@@ -77,7 +78,7 @@ class JSONElement(elements.BinaryExpression):
def cast(self, type_):
"""Convert this :class:`.JSONElement` to apply both the 'astext' operator
- as well as an explicit type cast when evaulated.
+ as well as an explicit type cast when evaluated.
E.g.::
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index f67b2e3b0..5246abf1c 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -66,12 +66,13 @@ in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
- create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
+ create_engine("postgresql+psycopg2://user:password@/dbname?\
+host=/var/lib/postgresql")
See also:
-`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static\
-/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/\
+libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
Per-Statement/Connection Execution Options
-------------------------------------------
@@ -237,7 +238,7 @@ The psycopg2 dialect supports these constants for isolation level:
* ``AUTOCOMMIT``
.. versionadded:: 0.8.2 support for AUTOCOMMIT isolation level when using
- psycopg2.
+ psycopg2.
.. seealso::
@@ -511,9 +512,19 @@ class PGDialect_psycopg2(PGDialect):
import psycopg2
return psycopg2
+ @classmethod
+ def _psycopg2_extensions(cls):
+ from psycopg2 import extensions
+ return extensions
+
+ @classmethod
+ def _psycopg2_extras(cls):
+ from psycopg2 import extras
+ return extras
+
@util.memoized_property
def _isolation_lookup(self):
- from psycopg2 import extensions
+ extensions = self._psycopg2_extensions()
return {
'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
@@ -535,7 +546,8 @@ class PGDialect_psycopg2(PGDialect):
connection.set_isolation_level(level)
def on_connect(self):
- from psycopg2 import extras, extensions
+ extras = self._psycopg2_extras()
+ extensions = self._psycopg2_extensions()
fns = []
if self.client_encoding is not None:
@@ -585,7 +597,7 @@ class PGDialect_psycopg2(PGDialect):
@util.memoized_instancemethod
def _hstore_oids(self, conn):
if self.psycopg2_version >= (2, 4):
- from psycopg2 import extras
+ extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
return oids[0:2]
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
new file mode 100644
index 000000000..f5c475d90
--- /dev/null
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -0,0 +1,49 @@
+# dialects/postgresql/psycopg2cffi.py
+# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+"""
+.. dialect:: postgresql+psycopg2cffi
+ :name: psycopg2cffi
+ :dbapi: psycopg2cffi
+ :connectstring: \
+postgresql+psycopg2cffi://user:password@host:port/dbname\
+[?key=value&key=value...]
+ :url: http://pypi.python.org/pypi/psycopg2cffi/
+
+``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
+layer. This makes it suitable for use in e.g. PyPy. Documentation
+is as per ``psycopg2``.
+
+.. versionadded:: 1.0.0
+
+.. seealso::
+
+ :mod:`sqlalchemy.dialects.postgresql.psycopg2`
+
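+A hedged usage sketch (credentials hypothetical); the dialect is selected
+via the URL scheme::
+
+    from sqlalchemy import create_engine
+
+    engine = create_engine(
+        "postgresql+psycopg2cffi://scott:tiger@localhost/test")
+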
+"""
+from .psycopg2 import PGDialect_psycopg2
+
+
+class PGDialect_psycopg2cffi(PGDialect_psycopg2):
+ driver = 'psycopg2cffi'
+ supports_unicode_statements = True
+
+ @classmethod
+ def dbapi(cls):
+ return __import__('psycopg2cffi')
+
+ @classmethod
+ def _psycopg2_extensions(cls):
+ root = __import__('psycopg2cffi', fromlist=['extensions'])
+ return root.extensions
+
+ @classmethod
+ def _psycopg2_extras(cls):
+ root = __import__('psycopg2cffi', fromlist=['extras'])
+ return root.extras
+
+
+dialect = PGDialect_psycopg2cffi
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index 33003297c..1ed89bacb 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -9,6 +9,7 @@
.. dialect:: sqlite
:name: SQLite
+.. _sqlite_datetime:
Date and Time Types
-------------------
@@ -23,6 +24,20 @@ These types represent dates and times as ISO formatted strings, which also
nicely support ordering. There's no reliance on typical "libc" internals for
these functions so historical dates are fully supported.
+Ensuring Text affinity
+^^^^^^^^^^^^^^^^^^^^^^
+
+The DDL rendered for these types is the standard ``DATE``, ``TIME``
+and ``DATETIME`` indicators. However, custom storage formats can also be
+applied to these types. When the storage format is detected as containing
+no non-numeric characters, the DDL for these types is rendered as
+``DATE_CHAR``, ``TIME_CHAR``, and ``DATETIME_CHAR``, so that the column
+continues to have textual affinity.
+
+.. seealso::
+
+ `Type Affinity <http://www.sqlite.org/datatype3.html#affinity>`_ - in the SQLite documentation
+
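+As a hedged illustration (table and column names hypothetical), a purely
+numeric storage format triggers the ``_CHAR`` rendering::
+
+    from sqlalchemy import Column, MetaData, Table
+    from sqlalchemy.dialects.sqlite import DATE
+
+    t = Table('t', MetaData(),
+              Column('d', DATE(
+                  storage_format="%(year)04d%(month)02d%(day)02d")))
+
+    # CREATE TABLE then renders "d DATE_CHAR" rather than "d DATE"
+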
.. _sqlite_autoincrement:
SQLite Auto Incrementing Behavior
@@ -92,8 +107,10 @@ The following subsections introduce areas that are impacted by SQLite's
file-based architecture and additionally will usually require workarounds to
work when using the pysqlite driver.
+.. _sqlite_isolation_level:
+
Transaction Isolation Level
-===========================
+----------------------------
SQLite supports "transaction isolation" in a non-standard way, along two
axes. One is that of the `PRAGMA read_uncommitted <http://www.sqlite.org/pragma.html#pragma_read_uncommitted>`_
@@ -126,7 +143,7 @@ by *not even emitting BEGIN* until the first write operation.
for techniques to work around this behavior.
SAVEPOINT Support
-=================
+----------------------------
SQLite supports SAVEPOINTs, which only function once a transaction is
begun. SQLAlchemy's SAVEPOINT support is available using the
@@ -142,7 +159,7 @@ won't work at all with pysqlite unless workarounds are taken.
for techniques to work around this behavior.
Transactional DDL
-=================
+----------------------------
The SQLite database supports transactional :term:`DDL` as well.
In this case, the pysqlite driver is not only failing to start transactions,
@@ -186,6 +203,15 @@ new connections through the usage of events::
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
+.. warning::
+
+ When SQLite foreign keys are enabled, it is **not possible**
+ to emit CREATE or DROP statements for tables that contain
+ mutually-dependent foreign key constraints;
+ to emit the DDL for these tables requires that ALTER TABLE be used to
+ create or drop these constraints separately, for which SQLite has
+ no support.
+
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_
@@ -193,6 +219,9 @@ new connections through the usage of events::
:ref:`event_toplevel` - SQLAlchemy event API.
+ :ref:`use_alter` - more information on SQLAlchemy's facilities for handling
+ mutually-dependent foreign key constraints.
+
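+For reference, a sketch of the complete event recipe excerpted above
+(this restates the standard recipe from the SQLAlchemy event
+documentation)::
+
+    from sqlalchemy import event
+    from sqlalchemy.engine import Engine
+
+    @event.listens_for(Engine, "connect")
+    def set_sqlite_pragma(dbapi_connection, connection_record):
+        cursor = dbapi_connection.cursor()
+        cursor.execute("PRAGMA foreign_keys=ON")
+        cursor.close()
+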
.. _sqlite_type_reflection:
Type Reflection
@@ -255,7 +284,7 @@ from ... import util
from ...engine import default, reflection
from ...sql import compiler
-from ...types import (BLOB, BOOLEAN, CHAR, DATE, DECIMAL, FLOAT,
+from ...types import (BLOB, BOOLEAN, CHAR, DECIMAL, FLOAT,
INTEGER, REAL, NUMERIC, SMALLINT, TEXT,
TIMESTAMP, VARCHAR)
@@ -271,6 +300,25 @@ class _DateTimeMixin(object):
if storage_format is not None:
self._storage_format = storage_format
+ @property
+ def format_is_text_affinity(self):
+ """return True if the storage format will automatically imply
+ a TEXT affinity.
+
+        If the storage format contains no non-numeric characters,
+        it will imply NUMERIC affinity on SQLite; in this case,
+        the type will generate its DDL as DATE_CHAR, DATETIME_CHAR,
+        TIME_CHAR.
+
+ .. versionadded:: 1.0.0
+
+ """
+ spec = self._storage_format % {
+ "year": 0, "month": 0, "day": 0, "hour": 0,
+ "minute": 0, "second": 0, "microsecond": 0
+ }
+ return bool(re.search(r'[^0-9]', spec))
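+
+    # Hedged illustration of the check above, as a standalone snippet:
+    #
+    #     import re
+    #     spec = "%(year)04d-%(month)02d-%(day)02d" % {
+    #         "year": 0, "month": 0, "day": 0}     # -> "0000-00-00"
+    #     bool(re.search(r'[^0-9]', spec))         # True: text affinity
+    #
+    #     spec = "%(year)04d%(month)02d%(day)02d" % {
+    #         "year": 0, "month": 0, "day": 0}     # -> "00000000"
+    #     bool(re.search(r'[^0-9]', spec))         # False -> DATE_CHAR DDL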
+
def adapt(self, cls, **kw):
if issubclass(cls, _DateTimeMixin):
if self._storage_format:
@@ -526,7 +574,9 @@ ischema_names = {
'BOOLEAN': sqltypes.BOOLEAN,
'CHAR': sqltypes.CHAR,
'DATE': sqltypes.DATE,
+ 'DATE_CHAR': sqltypes.DATE,
'DATETIME': sqltypes.DATETIME,
+ 'DATETIME_CHAR': sqltypes.DATETIME,
'DOUBLE': sqltypes.FLOAT,
'DECIMAL': sqltypes.DECIMAL,
'FLOAT': sqltypes.FLOAT,
@@ -537,6 +587,7 @@ ischema_names = {
'SMALLINT': sqltypes.SMALLINT,
'TEXT': sqltypes.TEXT,
'TIME': sqltypes.TIME,
+ 'TIME_CHAR': sqltypes.TIME,
'TIMESTAMP': sqltypes.TIMESTAMP,
'VARCHAR': sqltypes.VARCHAR,
'NVARCHAR': sqltypes.NVARCHAR,
@@ -611,7 +662,8 @@ class SQLiteCompiler(compiler.SQLCompiler):
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
- coltype = self.dialect.type_compiler.process(column.type)
+ coltype = self.dialect.type_compiler.process(
+ column.type, type_expression=column)
colspec = self.preparer.format_column(column) + " " + coltype
default = self.get_column_default_string(column)
if default is not None:
@@ -667,9 +719,30 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
- def visit_large_binary(self, type_):
+ def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
+ def visit_DATETIME(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_DATETIME(type_)
+ else:
+ return "DATETIME_CHAR"
+
+ def visit_DATE(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_DATE(type_)
+ else:
+ return "DATE_CHAR"
+
+ def visit_TIME(self, type_, **kw):
+ if not isinstance(type_, _DateTimeMixin) or \
+ type_.format_is_text_affinity:
+ return super(SQLiteTypeCompiler, self).visit_TIME(type_)
+ else:
+ return "TIME_CHAR"
+
class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = set([
@@ -855,22 +928,9 @@ class SQLiteDialect(default.DefaultDialect):
return [row[0] for row in rs]
def has_table(self, connection, table_name, schema=None):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%stable_info(%s)" % (pragma, qtable)
- cursor = _pragma_cursor(connection.execute(statement))
- row = cursor.fetchone()
-
- # consume remaining rows, to work around
- # http://www.sqlite.org/cvstrac/tktview?tn=1884
- while not cursor.closed and cursor.fetchone() is not None:
- pass
-
- return row is not None
+ info = self._get_table_pragma(
+ connection, "table_info", table_name, schema=schema)
+ return bool(info)
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
@@ -912,18 +972,11 @@ class SQLiteDialect(default.DefaultDialect):
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%stable_info(%s)" % (pragma, qtable)
- c = _pragma_cursor(connection.execute(statement))
+ info = self._get_table_pragma(
+ connection, "table_info", table_name, schema=schema)
- rows = c.fetchall()
columns = []
- for row in rows:
+ for row in info:
(name, type_, nullable, default, primary_key) = (
row[1], row[2].upper(), not row[3], row[4], row[5])
@@ -1010,92 +1063,192 @@ class SQLiteDialect(default.DefaultDialect):
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- qtable = quote(table_name)
- statement = "%sforeign_key_list(%s)" % (pragma, qtable)
- c = _pragma_cursor(connection.execute(statement))
- fkeys = []
+ # sqlite makes this *extremely difficult*.
+ # First, use the pragma to get the actual FKs.
+ pragma_fks = self._get_table_pragma(
+ connection, "foreign_key_list",
+ table_name, schema=schema
+ )
+
fks = {}
- while True:
- row = c.fetchone()
- if row is None:
- break
+
+ for row in pragma_fks:
(numerical_id, rtbl, lcol, rcol) = (
row[0], row[2], row[3], row[4])
- self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol)
- return fkeys
+ if rcol is None:
+ rcol = lcol
- def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol):
- # sqlite won't return rcol if the table was created with REFERENCES
- # <tablename>, no col
- if rcol is None:
- rcol = lcol
+ if self._broken_fk_pragma_quotes:
+ rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)
- if self._broken_fk_pragma_quotes:
- rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)
+ if numerical_id in fks:
+ fk = fks[numerical_id]
+ else:
+ fk = fks[numerical_id] = {
+ 'name': None,
+ 'constrained_columns': [],
+ 'referred_schema': None,
+ 'referred_table': rtbl,
+ 'referred_columns': [],
+ }
- try:
- fk = fks[numerical_id]
- except KeyError:
- fk = {
- 'name': None,
- 'constrained_columns': [],
- 'referred_schema': None,
- 'referred_table': rtbl,
- 'referred_columns': [],
- }
- fkeys.append(fk)
- fks[numerical_id] = fk
-
- if lcol not in fk['constrained_columns']:
fk['constrained_columns'].append(lcol)
- if rcol not in fk['referred_columns']:
fk['referred_columns'].append(rcol)
- return fk
+
+ def fk_sig(constrained_columns, referred_table, referred_columns):
+ return tuple(constrained_columns) + (referred_table,) + \
+ tuple(referred_columns)
+
+ # then, parse the actual SQL and attempt to find DDL that matches
+ # the names as well. SQLite saves the DDL in whatever format
+        # it was typed in as, so we need to be liberal here.
+
+ keys_by_signature = dict(
+ (
+ fk_sig(
+ fk['constrained_columns'],
+ fk['referred_table'], fk['referred_columns']),
+ fk
+ ) for fk in fks.values()
+ )
+
+ table_data = self._get_table_sql(connection, table_name, schema=schema)
+ if table_data is None:
+ # system tables, etc.
+ return []
+
+ def parse_fks():
+ FK_PATTERN = (
+ '(?:CONSTRAINT (\w+) +)?'
+ 'FOREIGN KEY *\( *(.+?) *\) +'
+ 'REFERENCES +(?:(?:"(.+?)")|([a-z0-9_]+)) *\((.+?)\)'
+ )
+
+ for match in re.finditer(FK_PATTERN, table_data, re.I):
+ (
+ constraint_name, constrained_columns,
+ referred_quoted_name, referred_name,
+ referred_columns) = match.group(1, 2, 3, 4, 5)
+ constrained_columns = list(
+ self._find_cols_in_sig(constrained_columns))
+ if not referred_columns:
+ referred_columns = constrained_columns
+ else:
+ referred_columns = list(
+ self._find_cols_in_sig(referred_columns))
+ referred_name = referred_quoted_name or referred_name
+ yield (
+ constraint_name, constrained_columns,
+ referred_name, referred_columns)
+ fkeys = []
+
+ for (
+ constraint_name, constrained_columns,
+ referred_name, referred_columns) in parse_fks():
+ sig = fk_sig(
+ constrained_columns, referred_name, referred_columns)
+ if sig not in keys_by_signature:
+ util.warn(
+                    "SQL-parsed foreign key constraint "
+                    "'%s' could not be located in PRAGMA "
+                    "foreign_key_list for table %s" % (
+ sig,
+ table_name
+ ))
+ continue
+ key = keys_by_signature.pop(sig)
+ key['name'] = constraint_name
+ fkeys.append(key)
+ # assume the remainders are the unnamed, inline constraints, just
+ # use them as is as it's extremely difficult to parse inline
+ # constraints
+ fkeys.extend(keys_by_signature.values())
+ return fkeys
+
+ def _find_cols_in_sig(self, sig):
+ for match in re.finditer(r'(?:"(.+?)")|([a-z0-9_]+)', sig, re.I):
+ yield match.group(1) or match.group(2)
+
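+    # Hedged end-to-end sketch of the effect of the SQL-parsing step in
+    # get_foreign_keys() above (engine URL and table names hypothetical):
+    #
+    #     from sqlalchemy import create_engine, inspect
+    #
+    #     e = create_engine("sqlite://")
+    #     e.execute("CREATE TABLE parent (id INTEGER PRIMARY KEY)")
+    #     e.execute(
+    #         "CREATE TABLE child (id INTEGER PRIMARY KEY, pid INTEGER, "
+    #         "CONSTRAINT fk_parent "
+    #         "FOREIGN KEY(pid) REFERENCES parent(id))")
+    #     inspect(e).get_foreign_keys("child")
+    #     # -> [{'name': 'fk_parent', 'constrained_columns': ['pid'],
+    #     #      'referred_table': 'parent', 'referred_columns': ['id'],
+    #     #      ...}]
+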
+ @reflection.cache
+ def get_unique_constraints(self, connection, table_name,
+ schema=None, **kw):
+
+ auto_index_by_sig = {}
+ for idx in self.get_indexes(
+ connection, table_name, schema=schema,
+ include_auto_indexes=True, **kw):
+ if not idx['name'].startswith("sqlite_autoindex"):
+ continue
+ sig = tuple(idx['column_names'])
+ auto_index_by_sig[sig] = idx
+
+ table_data = self._get_table_sql(
+ connection, table_name, schema=schema, **kw)
+ if not table_data:
+ return []
+
+ unique_constraints = []
+
+ def parse_uqs():
+ UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) +)?UNIQUE *\((.+?)\)'
+ INLINE_UNIQUE_PATTERN = (
+            '(?:(".+?")|([a-z0-9_]+)) '
+ '+[a-z0-9_ ]+? +UNIQUE')
+
+ for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
+ name, cols = match.group(1, 2)
+ yield name, list(self._find_cols_in_sig(cols))
+
+ # we need to match inlines as well, as we seek to differentiate
+ # a UNIQUE constraint from a UNIQUE INDEX, even though these
+ # are kind of the same thing :)
+ for match in re.finditer(INLINE_UNIQUE_PATTERN, table_data, re.I):
+ cols = list(
+ self._find_cols_in_sig(match.group(1) or match.group(2)))
+ yield None, cols
+
+ for name, cols in parse_uqs():
+ sig = tuple(cols)
+ if sig in auto_index_by_sig:
+ auto_index_by_sig.pop(sig)
+ parsed_constraint = {
+ 'name': name,
+ 'column_names': cols
+ }
+ unique_constraints.append(parsed_constraint)
+ # NOTE: auto_index_by_sig might not be empty here,
+ # the PRIMARY KEY may have an entry.
+ return unique_constraints
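+
+    # Hedged sketch (names hypothetical): a table-level UNIQUE constraint
+    # is returned with its name, while the sqlite_autoindex entry that
+    # backs it is filtered out of get_indexes() by default:
+    #
+    #     e.execute(
+    #         "CREATE TABLE t (x INTEGER, CONSTRAINT uq_x UNIQUE (x))")
+    #     inspect(e).get_unique_constraints("t")
+    #     # -> [{'name': 'uq_x', 'column_names': ['x']}]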
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
- quote = self.identifier_preparer.quote_identifier
- if schema is not None:
- pragma = "PRAGMA %s." % quote(schema)
- else:
- pragma = "PRAGMA "
- include_auto_indexes = kw.pop('include_auto_indexes', False)
- qtable = quote(table_name)
- statement = "%sindex_list(%s)" % (pragma, qtable)
- c = _pragma_cursor(connection.execute(statement))
+ pragma_indexes = self._get_table_pragma(
+ connection, "index_list", table_name, schema=schema)
indexes = []
- while True:
- row = c.fetchone()
- if row is None:
- break
+
+ include_auto_indexes = kw.pop('include_auto_indexes', False)
+ for row in pragma_indexes:
# ignore implicit primary key index.
# http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html
- elif (not include_auto_indexes and
- row[1].startswith('sqlite_autoindex')):
+ if (not include_auto_indexes and
+ row[1].startswith('sqlite_autoindex')):
continue
indexes.append(dict(name=row[1], column_names=[], unique=row[2]))
+
        # loop through the collected indexes to get the column names.
for idx in indexes:
- statement = "%sindex_info(%s)" % (pragma, quote(idx['name']))
- c = connection.execute(statement)
- cols = idx['column_names']
- while True:
- row = c.fetchone()
- if row is None:
- break
- cols.append(row[2])
+ pragma_index = self._get_table_pragma(
+ connection, "index_info", idx['name'])
+
+ for row in pragma_index:
+ idx['column_names'].append(row[2])
return indexes
@reflection.cache
- def get_unique_constraints(self, connection, table_name,
- schema=None, **kw):
+ def _get_table_sql(self, connection, table_name, schema=None, **kw):
try:
s = ("SELECT sql FROM "
" (SELECT * FROM sqlite_master UNION ALL "
@@ -1107,27 +1260,22 @@ class SQLiteDialect(default.DefaultDialect):
s = ("SELECT sql FROM sqlite_master WHERE name = '%s' "
"AND type = 'table'") % table_name
rs = connection.execute(s)
- row = rs.fetchone()
- if row is None:
- # sqlite won't return the schema for the sqlite_master or
- # sqlite_temp_master tables from this query. These tables
- # don't have any unique constraints anyway.
- return []
- table_data = row[0]
-
- UNIQUE_PATTERN = 'CONSTRAINT (\w+) UNIQUE \(([^\)]+)\)'
- return [
- {'name': name,
- 'column_names': [col.strip(' "') for col in cols.split(',')]}
- for name, cols in re.findall(UNIQUE_PATTERN, table_data)
- ]
+ return rs.scalar()
-
-def _pragma_cursor(cursor):
- """work around SQLite issue whereby cursor.description
- is blank when PRAGMA returns no rows."""
-
- if cursor.closed:
- cursor.fetchone = lambda: None
- cursor.fetchall = lambda: []
- return cursor
+ def _get_table_pragma(self, connection, pragma, table_name, schema=None):
+ quote = self.identifier_preparer.quote_identifier
+ if schema is not None:
+ statement = "PRAGMA %s." % quote(schema)
+ else:
+ statement = "PRAGMA "
+ qtable = quote(table_name)
+ statement = "%s%s(%s)" % (statement, pragma, qtable)
+ cursor = connection.execute(statement)
+ if not cursor.closed:
+ # work around SQLite issue whereby cursor.description
+ # is blank when PRAGMA returns no rows:
+ # http://www.sqlite.org/cvstrac/tktview?tn=1884
+ result = cursor.fetchall()
+ else:
+ result = []
+ return result
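+
+# Hedged illustration of the statements the helper above emits, with
+# identifiers double-quoted by quote_identifier (names hypothetical):
+#
+#     _get_table_pragma(conn, "table_info", "some table")
+#         -> PRAGMA table_info("some table")
+#     _get_table_pragma(conn, "foreign_key_list", "child", schema="aux")
+#         -> PRAGMA "aux".foreign_key_list("child")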
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index f65a76a27..369420358 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -146,40 +146,40 @@ class IMAGE(sqltypes.LargeBinary):
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
- def visit_large_binary(self, type_):
+ def visit_large_binary(self, type_, **kw):
return self.visit_IMAGE(type_)
- def visit_boolean(self, type_):
+ def visit_boolean(self, type_, **kw):
return self.visit_BIT(type_)
- def visit_unicode(self, type_):
+ def visit_unicode(self, type_, **kw):
return self.visit_NVARCHAR(type_)
- def visit_UNICHAR(self, type_):
+ def visit_UNICHAR(self, type_, **kw):
return "UNICHAR(%d)" % type_.length
- def visit_UNIVARCHAR(self, type_):
+ def visit_UNIVARCHAR(self, type_, **kw):
return "UNIVARCHAR(%d)" % type_.length
- def visit_UNITEXT(self, type_):
+ def visit_UNITEXT(self, type_, **kw):
return "UNITEXT"
- def visit_TINYINT(self, type_):
+ def visit_TINYINT(self, type_, **kw):
return "TINYINT"
- def visit_IMAGE(self, type_):
+ def visit_IMAGE(self, type_, **kw):
return "IMAGE"
- def visit_BIT(self, type_):
+ def visit_BIT(self, type_, **kw):
return "BIT"
- def visit_MONEY(self, type_):
+ def visit_MONEY(self, type_, **kw):
return "MONEY"
- def visit_SMALLMONEY(self, type_):
+ def visit_SMALLMONEY(self, type_, **kw):
return "SMALLMONEY"
- def visit_UNIQUEIDENTIFIER(self, type_):
+ def visit_UNIQUEIDENTIFIER(self, type_, **kw):
return "UNIQUEIDENTIFIER"
ischema_names = {
@@ -377,7 +377,8 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
class SybaseDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column) + " " + \
- self.dialect.type_compiler.process(column.type)
+ self.dialect.type_compiler.process(
+ column.type, type_expression=column)
if column.table is None:
raise exc.CompileError(