summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy/dialects
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2016-03-15 16:41:17 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2016-03-15 16:41:17 -0400
commit224b03f9c006b12e3bbae9190ca9d0132e843208 (patch)
tree4c54f3404af67962835c3a78849f8e89ebb98da0 /lib/sqlalchemy/dialects
parenta87b3c2101114d82f999c23d113ad2018629ed48 (diff)
parent8bc370ed382a45654101fa34bac4a2886ce089c3 (diff)
downloadsqlalchemy-224b03f9c006b12e3bbae9190ca9d0132e843208.tar.gz
Merge branch 'master' into pr157
Diffstat (limited to 'lib/sqlalchemy/dialects')
-rw-r--r--lib/sqlalchemy/dialects/__init__.py13
-rw-r--r--lib/sqlalchemy/dialects/firebird/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/firebird/base.py16
-rw-r--r--lib/sqlalchemy/dialects/firebird/fdb.py2
-rw-r--r--lib/sqlalchemy/dialects/firebird/kinterbasdb.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/adodbapi.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/base.py185
-rw-r--r--lib/sqlalchemy/dialects/mssql/information_schema.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/mxodbc.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/pymssql.py14
-rw-r--r--lib/sqlalchemy/dialects/mssql/pyodbc.py31
-rw-r--r--lib/sqlalchemy/dialects/mssql/zxjdbc.py4
-rw-r--r--lib/sqlalchemy/dialects/mysql/__init__.py8
-rw-r--r--lib/sqlalchemy/dialects/mysql/base.py1662
-rw-r--r--lib/sqlalchemy/dialects/mysql/cymysql.py2
-rw-r--r--lib/sqlalchemy/dialects/mysql/enumerated.py299
-rw-r--r--lib/sqlalchemy/dialects/mysql/gaerdbms.py4
-rw-r--r--lib/sqlalchemy/dialects/mysql/json.py90
-rw-r--r--lib/sqlalchemy/dialects/mysql/mysqlconnector.py8
-rw-r--r--lib/sqlalchemy/dialects/mysql/mysqldb.py29
-rw-r--r--lib/sqlalchemy/dialects/mysql/oursql.py18
-rw-r--r--lib/sqlalchemy/dialects/mysql/pymysql.py15
-rw-r--r--lib/sqlalchemy/dialects/mysql/pyodbc.py15
-rw-r--r--lib/sqlalchemy/dialects/mysql/reflection.py449
-rw-r--r--lib/sqlalchemy/dialects/mysql/types.py766
-rw-r--r--lib/sqlalchemy/dialects/mysql/zxjdbc.py5
-rw-r--r--lib/sqlalchemy/dialects/oracle/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/oracle/base.py21
-rw-r--r--lib/sqlalchemy/dialects/oracle/cx_oracle.py25
-rw-r--r--lib/sqlalchemy/dialects/oracle/zxjdbc.py11
-rw-r--r--lib/sqlalchemy/dialects/postgres.py18
-rw-r--r--lib/sqlalchemy/dialects/postgresql/__init__.py19
-rw-r--r--lib/sqlalchemy/dialects/postgresql/array.py314
-rw-r--r--lib/sqlalchemy/dialects/postgresql/base.py831
-rw-r--r--lib/sqlalchemy/dialects/postgresql/constraints.py75
-rw-r--r--lib/sqlalchemy/dialects/postgresql/ext.py168
-rw-r--r--lib/sqlalchemy/dialects/postgresql/hstore.py281
-rw-r--r--lib/sqlalchemy/dialects/postgresql/json.py360
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pg8000.py94
-rw-r--r--lib/sqlalchemy/dialects/postgresql/psycopg2.py47
-rw-r--r--lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py14
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pypostgresql.py19
-rw-r--r--lib/sqlalchemy/dialects/postgresql/ranges.py2
-rw-r--r--lib/sqlalchemy/dialects/postgresql/zxjdbc.py2
-rw-r--r--lib/sqlalchemy/dialects/sqlite/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/sqlite/base.py291
-rw-r--r--lib/sqlalchemy/dialects/sqlite/pysqlcipher.py2
-rw-r--r--lib/sqlalchemy/dialects/sqlite/pysqlite.py2
-rw-r--r--lib/sqlalchemy/dialects/sybase/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/sybase/base.py25
-rw-r--r--lib/sqlalchemy/dialects/sybase/mxodbc.py2
-rw-r--r--lib/sqlalchemy/dialects/sybase/pyodbc.py2
-rw-r--r--lib/sqlalchemy/dialects/sybase/pysybase.py2
54 files changed, 3635 insertions, 2645 deletions
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
index 74c48820d..bf9c6d38e 100644
--- a/lib/sqlalchemy/dialects/__init__.py
+++ b/lib/sqlalchemy/dialects/__init__.py
@@ -1,5 +1,5 @@
# dialects/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -17,6 +17,7 @@ __all__ = (
from .. import util
+_translates = {'postgres': 'postgresql'}
def _auto_fn(name):
"""default dialect importer.
@@ -30,6 +31,14 @@ def _auto_fn(name):
else:
dialect = name
driver = "base"
+
+ if dialect in _translates:
+ translated = _translates[dialect]
+ util.warn_deprecated(
+ "The '%s' dialect name has been "
+ "renamed to '%s'" % (dialect, translated)
+ )
+ dialect = translated
try:
module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects
except ImportError:
@@ -43,3 +52,5 @@ def _auto_fn(name):
return None
registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)
+
+plugins = util.PluginLoader("sqlalchemy.plugins") \ No newline at end of file
diff --git a/lib/sqlalchemy/dialects/firebird/__init__.py b/lib/sqlalchemy/dialects/firebird/__init__.py
index 9e8a88245..f27bdc05b 100644
--- a/lib/sqlalchemy/dialects/firebird/__init__.py
+++ b/lib/sqlalchemy/dialects/firebird/__init__.py
@@ -1,5 +1,5 @@
# firebird/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index 74e8abfc2..16e2c55b8 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -1,5 +1,5 @@
# firebird/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -293,22 +293,22 @@ class FBCompiler(sql.compiler.SQLCompiler):
def visit_sequence(self, seq):
return "gen_id(%s, 1)" % self.preparer.format_sequence(seq)
- def get_select_precolumns(self, select):
+ def get_select_precolumns(self, select, **kw):
"""Called when building a ``SELECT`` statement, position is just
before column list Firebird puts the limit and offset right
after the ``SELECT``...
"""
result = ""
- if select._limit_clause:
- result += "FIRST %s " % self.process(select._limit_clause)
- if select._offset_clause:
- result += "SKIP %s " % self.process(select._offset_clause)
+ if select._limit_clause is not None:
+ result += "FIRST %s " % self.process(select._limit_clause, **kw)
+ if select._offset_clause is not None:
+ result += "SKIP %s " % self.process(select._offset_clause, **kw)
if select._distinct:
result += "DISTINCT "
return result
- def limit_clause(self, select):
+ def limit_clause(self, select, **kw):
"""Already taken care of in the `get_select_precolumns` method."""
return ""
@@ -648,7 +648,7 @@ class FBDialect(default.DefaultDialect):
'type': coltype,
'nullable': not bool(row['null_flag']),
'default': defvalue,
- 'autoincrement': defvalue is None
+ 'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
diff --git a/lib/sqlalchemy/dialects/firebird/fdb.py b/lib/sqlalchemy/dialects/firebird/fdb.py
index ddffc80f5..aff8cff15 100644
--- a/lib/sqlalchemy/dialects/firebird/fdb.py
+++ b/lib/sqlalchemy/dialects/firebird/fdb.py
@@ -1,5 +1,5 @@
# firebird/fdb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
index 6bd7887f7..3df9f736b 100644
--- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
@@ -1,5 +1,5 @@
# firebird/kinterbasdb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mssql/__init__.py b/lib/sqlalchemy/dialects/mssql/__init__.py
index d0047765e..8c9e85862 100644
--- a/lib/sqlalchemy/dialects/mssql/__init__.py
+++ b/lib/sqlalchemy/dialects/mssql/__init__.py
@@ -1,5 +1,5 @@
# mssql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mssql/adodbapi.py b/lib/sqlalchemy/dialects/mssql/adodbapi.py
index e9927f8ed..60fa25d34 100644
--- a/lib/sqlalchemy/dialects/mssql/adodbapi.py
+++ b/lib/sqlalchemy/dialects/mssql/adodbapi.py
@@ -1,5 +1,5 @@
# mssql/adodbapi.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 92d7e4ab3..31c55917b 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -1,5 +1,5 @@
# mssql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -166,7 +166,6 @@ how SQLAlchemy handles this:
This
is an auxilliary use case suitable for testing and bulk insert scenarios.
-
Collation Support
-----------------
@@ -187,7 +186,7 @@ CREATE TABLE statement for this column will yield::
LIMIT/OFFSET Support
--------------------
-MSSQL has no support for the LIMIT or OFFSET keysowrds. LIMIT is
+MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is
supported directly through the ``TOP`` Transact SQL keyword::
select.limit
@@ -273,6 +272,41 @@ behavior of this flag is as follows:
.. versionadded:: 1.0.0
+.. _legacy_schema_rendering:
+
+Legacy Schema Mode
+------------------
+
+Very old versions of the MSSQL dialect introduced the behavior such that a
+schema-qualified table would be auto-aliased when used in a
+SELECT statement; given a table::
+
+ account_table = Table(
+ 'account', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('info', String(100)),
+ schema="customer_schema"
+ )
+
+this legacy mode of rendering would assume that "customer_schema.account"
+would not be accepted by all parts of the SQL statement, as illustrated
+below::
+
+ >>> eng = create_engine("mssql+pymssql://mydsn", legacy_schema_aliasing=True)
+ >>> print(account_table.select().compile(eng))
+ SELECT account_1.id, account_1.info
+ FROM customer_schema.account AS account_1
+
+This mode of behavior is now off by default, as it appears to have served
+no purpose; however in the case that legacy applications rely upon it,
+it is available using the ``legacy_schema_aliasing`` argument to
+:func:`.create_engine` as illustrated above.
+
+.. versionchanged:: 1.1 the ``legacy_schema_aliasing`` flag introduced
+ in version 1.0.5 to allow disabling of legacy mode for schemas now
+ defaults to False.
+
+
.. _mssql_indexes:
Clustered Index Support
@@ -384,6 +418,40 @@ Declarative form::
This option can also be specified engine-wide using the
``implicit_returning=False`` argument on :func:`.create_engine`.
+.. _mssql_rowcount_versioning:
+
+Rowcount Support / ORM Versioning
+---------------------------------
+
+The SQL Server drivers have very limited ability to return the number
+of rows updated from an UPDATE or DELETE statement. In particular, the
+pymssql driver has no support, whereas the pyodbc driver can only return
+this value under certain conditions.
+
+In particular, updated rowcount is not available when OUTPUT INSERTED
+is used. This impacts the SQLAlchemy ORM's versioning feature when
+server-side versioning schemes are used. When
+using pyodbc, the "implicit_returning" flag needs to be set to false
+for any ORM mapped class that uses a version_id column in conjunction with
+a server-side version generator::
+
+ class MyTable(Base):
+ __tablename__ = 'mytable'
+ id = Column(Integer, primary_key=True)
+ stuff = Column(String(10))
+ timestamp = Column(TIMESTAMP(), default=text('DEFAULT'))
+ __mapper_args__ = {
+ 'version_id_col': timestamp,
+ 'version_id_generator': False,
+ }
+ __table_args__ = {
+ 'implicit_returning': False
+ }
+
+Without the implicit_returning flag above, the UPDATE statement will
+use ``OUTPUT inserted.timestamp`` and the rowcount will be returned as
+-1, causing the versioning logic to fail.
+
Enabling Snapshot Isolation
---------------------------
@@ -499,9 +567,13 @@ class _MSDate(sqltypes.Date):
if isinstance(value, datetime.datetime):
return value.date()
elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a date value" % (value, ))
return datetime.date(*[
int(x or 0)
- for x in self._reg.match(value).groups()
+ for x in m.groups()
])
else:
return value
@@ -533,9 +605,13 @@ class TIME(sqltypes.TIME):
if isinstance(value, datetime.datetime):
return value.time()
elif isinstance(value, util.string_types):
+ m = self._reg.match(value)
+ if not m:
+ raise ValueError(
+ "could not parse %r as a time value" % (value, ))
return datetime.time(*[
int(x or 0)
- for x in self._reg.match(value).groups()])
+ for x in m.groups()])
else:
return value
return process
@@ -725,21 +801,21 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return "TINYINT"
def visit_DATETIMEOFFSET(self, type_, **kw):
- if type_.precision:
+ if type_.precision is not None:
return "DATETIMEOFFSET(%s)" % type_.precision
else:
return "DATETIMEOFFSET"
def visit_TIME(self, type_, **kw):
precision = getattr(type_, 'precision', None)
- if precision:
+ if precision is not None:
return "TIME(%s)" % precision
else:
return "TIME"
def visit_DATETIME2(self, type_, **kw):
precision = getattr(type_, 'precision', None)
- if precision:
+ if precision is not None:
return "DATETIME2(%s)" % precision
else:
return "DATETIME2"
@@ -951,6 +1027,15 @@ class MSSQLCompiler(compiler.SQLCompiler):
self.tablealiases = {}
super(MSSQLCompiler, self).__init__(*args, **kwargs)
+ def _with_legacy_schema_aliasing(fn):
+ def decorate(self, *arg, **kw):
+ if self.dialect.legacy_schema_aliasing:
+ return fn(self, *arg, **kw)
+ else:
+ super_ = getattr(super(MSSQLCompiler, self), fn.__name__)
+ return super_(*arg, **kw)
+ return decorate
+
def visit_now_func(self, fn, **kw):
return "CURRENT_TIMESTAMP"
@@ -979,7 +1064,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
self.process(binary.left, **kw),
self.process(binary.right, **kw))
- def get_select_precolumns(self, select):
+ def get_select_precolumns(self, select, **kw):
""" MS-SQL puts TOP, it's version of LIMIT here """
s = ""
@@ -995,7 +1080,8 @@ class MSSQLCompiler(compiler.SQLCompiler):
if s:
return s
else:
- return compiler.SQLCompiler.get_select_precolumns(self, select)
+ return compiler.SQLCompiler.get_select_precolumns(
+ self, select, **kw)
def get_from_hint_text(self, table, text):
return text
@@ -1031,6 +1117,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
_order_by_clauses = select._order_by_clause.clauses
limit_clause = select._limit_clause
offset_clause = select._offset_clause
+ kwargs['select_wraps_for'] = select
select = select._generate()
select._mssql_visit = True
select = select.column(
@@ -1048,18 +1135,11 @@ class MSSQLCompiler(compiler.SQLCompiler):
else:
limitselect.append_whereclause(
mssql_rn <= (limit_clause))
- return self.process(limitselect, iswrapper=True, **kwargs)
+ return self.process(limitselect, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
- def _schema_aliased_table(self, table):
- if getattr(table, 'schema', None) is not None:
- if table not in self.tablealiases:
- self.tablealiases[table] = table.alias()
- return self.tablealiases[table]
- else:
- return None
-
+ @_with_legacy_schema_aliasing
def visit_table(self, table, mssql_aliased=False, iscrud=False, **kwargs):
if mssql_aliased is table or iscrud:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
@@ -1071,25 +1151,14 @@ class MSSQLCompiler(compiler.SQLCompiler):
else:
return super(MSSQLCompiler, self).visit_table(table, **kwargs)
- def visit_alias(self, alias, **kwargs):
+ @_with_legacy_schema_aliasing
+ def visit_alias(self, alias, **kw):
# translate for schema-qualified table aliases
- kwargs['mssql_aliased'] = alias.original
- return super(MSSQLCompiler, self).visit_alias(alias, **kwargs)
+ kw['mssql_aliased'] = alias.original
+ return super(MSSQLCompiler, self).visit_alias(alias, **kw)
- def visit_extract(self, extract, **kw):
- field = self.extract_map.get(extract.field, extract.field)
- return 'DATEPART("%s", %s)' % \
- (field, self.process(extract.expr, **kw))
-
- def visit_savepoint(self, savepoint_stmt):
- return "SAVE TRANSACTION %s" % \
- self.preparer.format_savepoint(savepoint_stmt)
-
- def visit_rollback_to_savepoint(self, savepoint_stmt):
- return ("ROLLBACK TRANSACTION %s"
- % self.preparer.format_savepoint(savepoint_stmt))
-
- def visit_column(self, column, add_to_result_map=None, **kwargs):
+ @_with_legacy_schema_aliasing
+ def visit_column(self, column, add_to_result_map=None, **kw):
if column.table is not None and \
(not self.isupdate and not self.isdelete) or \
self.is_subquery():
@@ -1107,10 +1176,31 @@ class MSSQLCompiler(compiler.SQLCompiler):
)
return super(MSSQLCompiler, self).\
- visit_column(converted, **kwargs)
+ visit_column(converted, **kw)
return super(MSSQLCompiler, self).visit_column(
- column, add_to_result_map=add_to_result_map, **kwargs)
+ column, add_to_result_map=add_to_result_map, **kw)
+
+ def _schema_aliased_table(self, table):
+ if getattr(table, 'schema', None) is not None:
+ if table not in self.tablealiases:
+ self.tablealiases[table] = table.alias()
+ return self.tablealiases[table]
+ else:
+ return None
+
+ def visit_extract(self, extract, **kw):
+ field = self.extract_map.get(extract.field, extract.field)
+ return 'DATEPART(%s, %s)' % \
+ (field, self.process(extract.expr, **kw))
+
+ def visit_savepoint(self, savepoint_stmt):
+ return "SAVE TRANSACTION %s" % \
+ self.preparer.format_savepoint(savepoint_stmt)
+
+ def visit_rollback_to_savepoint(self, savepoint_stmt):
+ return ("ROLLBACK TRANSACTION %s"
+ % self.preparer.format_savepoint(savepoint_stmt))
def visit_binary(self, binary, **kwargs):
"""Move bind parameters to the right-hand side of an operator, where
@@ -1422,6 +1512,10 @@ class MSDialect(default.DefaultDialect):
sqltypes.Time: TIME,
}
+ engine_config_types = default.DefaultDialect.engine_config_types.union([
+ ('legacy_schema_aliasing', util.asbool),
+ ])
+
ischema_names = ischema_names
supports_native_boolean = False
@@ -1453,7 +1547,8 @@ class MSDialect(default.DefaultDialect):
use_scope_identity=True,
max_identifier_length=None,
schema_name="dbo",
- deprecate_large_types=None, **opts):
+ deprecate_large_types=None,
+ legacy_schema_aliasing=False, **opts):
self.query_timeout = int(query_timeout or 0)
self.schema_name = schema_name
@@ -1461,6 +1556,8 @@ class MSDialect(default.DefaultDialect):
self.max_identifier_length = int(max_identifier_length or 0) or \
self.max_identifier_length
self.deprecate_large_types = deprecate_large_types
+ self.legacy_schema_aliasing = legacy_schema_aliasing
+
super(MSDialect, self).__init__(**opts)
def do_savepoint(self, connection, name):
@@ -1481,11 +1578,15 @@ class MSDialect(default.DefaultDialect):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
# that is. So emit warning.
+ # Use TDS Version 7.0 through 7.3, per the MS information here:
+ # https://msdn.microsoft.com/en-us/library/dd339982.aspx
+ # and FreeTDS information here (7.3 highest supported version):
+ # http://www.freetds.org/userguide/choosingtdsprotocol.htm
util.warn(
"Unrecognized server version info '%s'. Version specific "
"behaviors may not function properly. If using ODBC "
- "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, "
- "is configured in the FreeTDS configuration." %
+ "with FreeTDS, ensure TDS_VERSION 7.0 through 7.3, not "
+ "4.2, is configured in the FreeTDS configuration." %
".".join(str(x) for x in self.server_version_info))
if self.server_version_info >= MS_2005_VERSION and \
'implicit_returning' not in self.__dict__:
@@ -1683,7 +1784,7 @@ class MSDialect(default.DefaultDialect):
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
if charlen == -1:
- charlen = 'max'
+ charlen = None
kwargs['length'] = charlen
if collation:
kwargs['collation'] = collation
diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py
index 371a1edcc..e2c0a466c 100644
--- a/lib/sqlalchemy/dialects/mssql/information_schema.py
+++ b/lib/sqlalchemy/dialects/mssql/information_schema.py
@@ -1,5 +1,5 @@
# mssql/information_schema.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py
index ffe38d8dd..5e20ed11b 100644
--- a/lib/sqlalchemy/dialects/mssql/mxodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py
@@ -1,5 +1,5 @@
# mssql/mxodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index b5a1bc566..e3a4db8ab 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -1,5 +1,5 @@
# mssql/pymssql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -46,11 +46,12 @@ class MSDialect_pymssql(MSDialect):
@classmethod
def dbapi(cls):
module = __import__('pymssql')
- # pymmsql doesn't have a Binary method. we use string
- # TODO: monkeypatching here is less than ideal
- module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
-
+ # pymmsql < 2.1.1 doesn't have a Binary method. we use string
client_ver = tuple(int(x) for x in module.__version__.split("."))
+ if client_ver < (2, 1, 1):
+ # TODO: monkeypatching here is less than ideal
+ module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
+
if client_ver < (1, ):
util.warn("The pymssql dialect expects at least "
"the 1.0 series of the pymssql DBAPI.")
@@ -84,7 +85,8 @@ class MSDialect_pymssql(MSDialect):
"message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
- "Connection is closed"
+ "Connection is closed",
+ "message 20006", # Write to the server failed
):
if msg in str(e):
return True
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
index 445584d24..45c091cfb 100644
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -1,5 +1,5 @@
# mssql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -93,9 +93,16 @@ for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
+Rowcount Support
+----------------
+
+Pyodbc only has partial support for rowcount. See the notes at
+:ref:`mssql_rowcount_versioning` for important notes when using ORM
+versioning.
+
"""
-from .base import MSExecutionContext, MSDialect
+from .base import MSExecutionContext, MSDialect, VARBINARY
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
import decimal
@@ -174,6 +181,22 @@ class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
pass
+class _VARBINARY_pyodbc(VARBINARY):
+ def bind_processor(self, dialect):
+ if dialect.dbapi is None:
+ return None
+
+ DBAPIBinary = dialect.dbapi.Binary
+
+ def process(value):
+ if value is not None:
+ return DBAPIBinary(value)
+ else:
+ # pyodbc-specific
+ return dialect.dbapi.BinaryNull
+ return process
+
+
class MSExecutionContext_pyodbc(MSExecutionContext):
_embedded_scope_identity = False
@@ -230,7 +253,9 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
MSDialect.colspecs,
{
sqltypes.Numeric: _MSNumeric_pyodbc,
- sqltypes.Float: _MSFloat_pyodbc
+ sqltypes.Float: _MSFloat_pyodbc,
+ VARBINARY: _VARBINARY_pyodbc,
+ sqltypes.LargeBinary: _VARBINARY_pyodbc,
}
)
diff --git a/lib/sqlalchemy/dialects/mssql/zxjdbc.py b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
index b23a010e7..0bf68c2a2 100644
--- a/lib/sqlalchemy/dialects/mssql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mssql/zxjdbc.py
@@ -1,5 +1,5 @@
# mssql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -13,6 +13,8 @@
[?key=value&key=value...]
:driverurl: http://jtds.sourceforge.net/
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
"""
from ...connectors.zxJDBC import ZxJDBCConnector
diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py
index 498603cf7..a17bcb402 100644
--- a/lib/sqlalchemy/dialects/mysql/__init__.py
+++ b/lib/sqlalchemy/dialects/mysql/__init__.py
@@ -1,5 +1,5 @@
# mysql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -15,7 +15,7 @@ base.dialect = mysqldb.dialect
from .base import \
BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, DECIMAL,\
- FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
+ FLOAT, INTEGER, INTEGER, JSON, LONGBLOB, LONGTEXT, MEDIUMBLOB, \
MEDIUMINT, MEDIUMTEXT, NCHAR, \
NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \
TINYBLOB, TINYINT, TINYTEXT,\
@@ -24,8 +24,8 @@ from .base import \
__all__ = (
'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME',
'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER',
- 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT', 'NCHAR',
- 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
+ 'JSON', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', 'MEDIUMTEXT',
+ 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME',
'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR',
'YEAR', 'dialect'
)
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index c45f8c3ee..b04070162 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -1,5 +1,5 @@
# mysql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -32,6 +32,11 @@ the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
+.. seealso::
+
+ :ref:`pool_setting_recycle` - full description of the pool recycle feature.
+
+
.. _mysql_storage_engines:
CREATE TABLE arguments including Storage Engines
@@ -146,6 +151,90 @@ multi-column key for some storage engines::
Column('id', Integer, primary_key=True)
)
+.. _mysql_unicode:
+
+Unicode
+-------
+
+Charset Selection
+~~~~~~~~~~~~~~~~~
+
+Most MySQL DBAPIs offer the option to set the client character set for
+a connection. This is typically delivered using the ``charset`` parameter
+in the URL, such as::
+
+ e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8")
+
+This charset is the **client character set** for the connection. Some
+MySQL DBAPIs will default this to a value such as ``latin1``, and some
+will make use of the ``default-character-set`` setting in the ``my.cnf``
+file as well. Documentation for the DBAPI in use should be consulted
+for specific behavior.
+
+The encoding used for Unicode has traditionally been ``'utf8'``. However,
+for MySQL versions 5.5.3 on forward, a new MySQL-specific encoding
+``'utf8mb4'`` has been introduced. The rationale for this new encoding
+is due to the fact that MySQL's utf-8 encoding only supports
+codepoints up to three bytes instead of four. Therefore,
+when communicating with a MySQL database
+that includes codepoints more than three bytes in size,
+this new charset is preferred, if supported by both the database as well
+as the client DBAPI, as in::
+
+ e = create_engine("mysql+pymysql://scott:tiger@localhost/\
+test?charset=utf8mb4")
+
+At the moment, up-to-date versions of MySQLdb and PyMySQL support the
+``utf8mb4`` charset. Other DBAPIs such as MySQL-Connector and OurSQL
+may **not** support it as of yet.
+
+In order to use ``utf8mb4`` encoding, changes to
+the MySQL schema and/or server configuration may be required.
+
+.. seealso::
+
+ `The utf8mb4 Character Set \
+<http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html>`_ - \
+in the MySQL documentation
+
+Unicode Encoding / Decoding
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+All modern MySQL DBAPIs all offer the service of handling the encoding and
+decoding of unicode data between the Python application space and the database.
+As this was not always the case, SQLAlchemy also includes a comprehensive system
+of performing the encode/decode task as well. As only one of these systems
+should be in use at at time, SQLAlchemy has long included functionality
+to automatically detect upon first connection whether or not the DBAPI is
+automatically handling unicode.
+
+Whether or not the MySQL DBAPI will handle encoding can usually be configured
+using a DBAPI flag ``use_unicode``, which is known to be supported at least
+by MySQLdb, PyMySQL, and MySQL-Connector. Setting this value to ``0``
+in the "connect args" or query string will have the effect of disabling the
+DBAPI's handling of unicode, such that it instead will return data of the
+``str`` type or ``bytes`` type, with data in the configured charset::
+
+ # connect while disabling the DBAPI's unicode encoding/decoding
+ e = create_engine("mysql+mysqldb://scott:tiger@localhost/test?charset=utf8&use_unicode=0")
+
+Current recommendations for modern DBAPIs are as follows:
+
+* It is generally always safe to leave the ``use_unicode`` flag set at
+ its default; that is, don't use it at all.
+* Under Python 3, the ``use_unicode=0`` flag should **never be used**.
+ SQLAlchemy under Python 3 generally assumes the DBAPI receives and returns
+ string values as Python 3 strings, which are inherently unicode objects.
+* Under Python 2 with MySQLdb, the ``use_unicode=0`` flag will **offer
+ superior performance**, as MySQLdb's unicode converters under Python 2 only
+ have been observed to have unusually slow performance compared to SQLAlchemy's
+ fast C-based encoders/decoders.
+
+In short: don't specify ``use_unicode`` *at all*, with the possible
+exception of ``use_unicode=0`` on MySQLdb with Python 2 **only** for a
+potential performance gain.
+
Ansi Quoting Style
------------------
@@ -447,13 +536,13 @@ output::
"""
-import datetime
import re
import sys
+import json
from ... import schema as sa_schema
from ... import exc, log, sql, util
-from ...sql import compiler
+from ...sql import compiler, elements
from array import array as _array
from ...engine import reflection
@@ -463,6 +552,17 @@ from ...util import topological
from ...types import DATE, BOOLEAN, \
BLOB, BINARY, VARBINARY
+from . import reflection as _reflection
+from .types import BIGINT, BIT, CHAR, DECIMAL, DATETIME, \
+ DOUBLE, FLOAT, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, \
+ MEDIUMTEXT, NCHAR, NUMERIC, NVARCHAR, REAL, SMALLINT, TEXT, TIME, \
+ TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT, VARCHAR, YEAR
+from .types import _StringType, _IntegerType, _NumericType, \
+ _FloatType, _MatchType
+from .enumerated import ENUM, SET
+from .json import JSON, JSONIndexType, JSONPathType
+
+
RESERVED_WORDS = set(
['accessible', 'add', 'all', 'alter', 'analyze', 'and', 'as', 'asc',
'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both',
@@ -513,6 +613,8 @@ RESERVED_WORDS = set(
'get', 'io_after_gtids', 'io_before_gtids', 'master_bind', 'one_shot',
'partition', 'sql_after_gtids', 'sql_before_gtids', # 5.6
+ 'generated', 'optimizer_costs', 'stored', 'virtual', # 5.7
+
])
AUTOCOMMIT_RE = re.compile(
@@ -523,1050 +625,6 @@ SET_RE = re.compile(
re.I | re.UNICODE)
-class _NumericType(object):
- """Base for MySQL numeric types.
-
- This is the base both for NUMERIC as well as INTEGER, hence
- it's a mixin.
-
- """
-
- def __init__(self, unsigned=False, zerofill=False, **kw):
- self.unsigned = unsigned
- self.zerofill = zerofill
- super(_NumericType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_NumericType, sqltypes.Numeric])
-
-
-class _FloatType(_NumericType, sqltypes.Float):
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- if isinstance(self, (REAL, DOUBLE)) and \
- (
- (precision is None and scale is not None) or
- (precision is not None and scale is None)
- ):
- raise exc.ArgumentError(
- "You must specify both precision and scale or omit "
- "both altogether.")
- super(_FloatType, self).__init__(
- precision=precision, asdecimal=asdecimal, **kw)
- self.scale = scale
-
- def __repr__(self):
- return util.generic_repr(self, to_inspect=[_FloatType,
- _NumericType,
- sqltypes.Float])
-
-
-class _IntegerType(_NumericType, sqltypes.Integer):
- def __init__(self, display_width=None, **kw):
- self.display_width = display_width
- super(_IntegerType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self, to_inspect=[_IntegerType,
- _NumericType,
- sqltypes.Integer])
-
-
-class _StringType(sqltypes.String):
- """Base for MySQL string types."""
-
- def __init__(self, charset=None, collation=None,
- ascii=False, binary=False, unicode=False,
- national=False, **kw):
- self.charset = charset
-
- # allow collate= or collation=
- kw.setdefault('collation', kw.pop('collate', collation))
-
- self.ascii = ascii
- self.unicode = unicode
- self.binary = binary
- self.national = national
- super(_StringType, self).__init__(**kw)
-
- def __repr__(self):
- return util.generic_repr(self,
- to_inspect=[_StringType, sqltypes.String])
-
-
-class _MatchType(sqltypes.Float, sqltypes.MatchType):
- def __init__(self, **kw):
- # TODO: float arguments?
- sqltypes.Float.__init__(self)
- sqltypes.MatchType.__init__(self)
-
-
-
-class NUMERIC(_NumericType, sqltypes.NUMERIC):
- """MySQL NUMERIC type."""
-
- __visit_name__ = 'NUMERIC'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a NUMERIC.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(NUMERIC, self).__init__(precision=precision,
- scale=scale, asdecimal=asdecimal, **kw)
-
-
-class DECIMAL(_NumericType, sqltypes.DECIMAL):
- """MySQL DECIMAL type."""
-
- __visit_name__ = 'DECIMAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DECIMAL.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DECIMAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class DOUBLE(_FloatType):
- """MySQL DOUBLE type."""
-
- __visit_name__ = 'DOUBLE'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a DOUBLE.
-
- .. note::
-
- The :class:`.DOUBLE` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits.
- Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
- to change this scale, or ``asdecimal=False`` to return values
- directly as Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(DOUBLE, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class REAL(_FloatType, sqltypes.REAL):
- """MySQL REAL type."""
-
- __visit_name__ = 'REAL'
-
- def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
- """Construct a REAL.
-
- .. note::
-
- The :class:`.REAL` type by default converts from float
- to Decimal, using a truncation that defaults to 10 digits.
- Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
- to change this scale, or ``asdecimal=False`` to return values
- directly as Python floating points.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(REAL, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
-
-class FLOAT(_FloatType, sqltypes.FLOAT):
- """MySQL FLOAT type."""
-
- __visit_name__ = 'FLOAT'
-
- def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
- """Construct a FLOAT.
-
- :param precision: Total digits in this number. If scale and precision
- are both None, values are stored to limits allowed by the server.
-
- :param scale: The number of digits after the decimal point.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(FLOAT, self).__init__(precision=precision, scale=scale,
- asdecimal=asdecimal, **kw)
-
- def bind_processor(self, dialect):
- return None
-
-
-class INTEGER(_IntegerType, sqltypes.INTEGER):
- """MySQL INTEGER type."""
-
- __visit_name__ = 'INTEGER'
-
- def __init__(self, display_width=None, **kw):
- """Construct an INTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(INTEGER, self).__init__(display_width=display_width, **kw)
-
-
-class BIGINT(_IntegerType, sqltypes.BIGINT):
- """MySQL BIGINTEGER type."""
-
- __visit_name__ = 'BIGINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a BIGINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(BIGINT, self).__init__(display_width=display_width, **kw)
-
-
-class MEDIUMINT(_IntegerType):
- """MySQL MEDIUMINTEGER type."""
-
- __visit_name__ = 'MEDIUMINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a MEDIUMINTEGER
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
-
-
-class TINYINT(_IntegerType):
- """MySQL TINYINT type."""
-
- __visit_name__ = 'TINYINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a TINYINT.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(TINYINT, self).__init__(display_width=display_width, **kw)
-
-
-class SMALLINT(_IntegerType, sqltypes.SMALLINT):
- """MySQL SMALLINTEGER type."""
-
- __visit_name__ = 'SMALLINT'
-
- def __init__(self, display_width=None, **kw):
- """Construct a SMALLINTEGER.
-
- :param display_width: Optional, maximum display width for this number.
-
- :param unsigned: a boolean, optional.
-
- :param zerofill: Optional. If true, values will be stored as strings
- left-padded with zeros. Note that this does not effect the values
- returned by the underlying database API, which continue to be
- numeric.
-
- """
- super(SMALLINT, self).__init__(display_width=display_width, **kw)
-
-
-class BIT(sqltypes.TypeEngine):
- """MySQL BIT type.
-
- This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
- for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
- MSTinyInteger() type.
-
- """
-
- __visit_name__ = 'BIT'
-
- def __init__(self, length=None):
- """Construct a BIT.
-
- :param length: Optional, number of bits.
-
- """
- self.length = length
-
- def result_processor(self, dialect, coltype):
- """Convert a MySQL's 64 bit, variable length binary string to a long.
-
- TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
- already do this, so this logic should be moved to those dialects.
-
- """
-
- def process(value):
- if value is not None:
- v = 0
- for i in map(ord, value):
- v = v << 8 | i
- return v
- return value
- return process
-
-
-class TIME(sqltypes.TIME):
- """MySQL TIME type. """
-
- __visit_name__ = 'TIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8 The MySQL-specific TIME
- type as well as fractional seconds support.
-
- """
- super(TIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
- def result_processor(self, dialect, coltype):
- time = datetime.time
-
- def process(value):
- # convert from a timedelta value
- if value is not None:
- microseconds = value.microseconds
- seconds = value.seconds
- minutes = seconds // 60
- return time(minutes // 60,
- minutes % 60,
- seconds - minutes * 60,
- microsecond=microseconds)
- else:
- return None
- return process
-
-
-class TIMESTAMP(sqltypes.TIMESTAMP):
- """MySQL TIMESTAMP type.
-
- """
-
- __visit_name__ = 'TIMESTAMP'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL TIMESTAMP type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the TIMESTAMP type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
- with fractional seconds support.
-
- """
- super(TIMESTAMP, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class DATETIME(sqltypes.DATETIME):
- """MySQL DATETIME type.
-
- """
-
- __visit_name__ = 'DATETIME'
-
- def __init__(self, timezone=False, fsp=None):
- """Construct a MySQL DATETIME type.
-
- :param timezone: not used by the MySQL dialect.
- :param fsp: fractional seconds precision value.
- MySQL 5.6.4 supports storage of fractional seconds;
- this parameter will be used when emitting DDL
- for the DATETIME type.
-
- .. note::
-
- DBAPI driver support for fractional seconds may
- be limited; current support includes
- MySQL Connector/Python.
-
- .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
- with fractional seconds support.
-
- """
- super(DATETIME, self).__init__(timezone=timezone)
- self.fsp = fsp
-
-
-class YEAR(sqltypes.TypeEngine):
- """MySQL YEAR type, for single byte storage of years 1901-2155."""
-
- __visit_name__ = 'YEAR'
-
- def __init__(self, display_width=None):
- self.display_width = display_width
-
-
-class TEXT(_StringType, sqltypes.TEXT):
- """MySQL TEXT type, for text up to 2^16 characters."""
-
- __visit_name__ = 'TEXT'
-
- def __init__(self, length=None, **kw):
- """Construct a TEXT.
-
- :param length: Optional, if provided the server may optimize storage
- by substituting the smallest TEXT type sufficient to store
- ``length`` characters.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TEXT, self).__init__(length=length, **kw)
-
-
-class TINYTEXT(_StringType):
- """MySQL TINYTEXT type, for text up to 2^8 characters."""
-
- __visit_name__ = 'TINYTEXT'
-
- def __init__(self, **kwargs):
- """Construct a TINYTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(TINYTEXT, self).__init__(**kwargs)
-
-
-class MEDIUMTEXT(_StringType):
- """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
-
- __visit_name__ = 'MEDIUMTEXT'
-
- def __init__(self, **kwargs):
- """Construct a MEDIUMTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(MEDIUMTEXT, self).__init__(**kwargs)
-
-
-class LONGTEXT(_StringType):
- """MySQL LONGTEXT type, for text up to 2^32 characters."""
-
- __visit_name__ = 'LONGTEXT'
-
- def __init__(self, **kwargs):
- """Construct a LONGTEXT.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(LONGTEXT, self).__init__(**kwargs)
-
-
-class VARCHAR(_StringType, sqltypes.VARCHAR):
- """MySQL VARCHAR type, for variable-length character data."""
-
- __visit_name__ = 'VARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a VARCHAR.
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param national: Optional. If true, use the server's configured
- national character set.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- """
- super(VARCHAR, self).__init__(length=length, **kwargs)
-
-
-class CHAR(_StringType, sqltypes.CHAR):
- """MySQL CHAR type, for fixed-length character data."""
-
- __visit_name__ = 'CHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct a CHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- super(CHAR, self).__init__(length=length, **kwargs)
-
- @classmethod
- def _adapt_string_for_cast(self, type_):
- # copy the given string type into a CHAR
- # for the purposes of rendering a CAST expression
- type_ = sqltypes.to_instance(type_)
- if isinstance(type_, sqltypes.CHAR):
- return type_
- elif isinstance(type_, _StringType):
- return CHAR(
- length=type_.length,
- charset=type_.charset,
- collation=type_.collation,
- ascii=type_.ascii,
- binary=type_.binary,
- unicode=type_.unicode,
- national=False # not supported in CAST
- )
- else:
- return CHAR(length=type_.length)
-
-
-class NVARCHAR(_StringType, sqltypes.NVARCHAR):
- """MySQL NVARCHAR type.
-
- For variable-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NVARCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NVARCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NVARCHAR, self).__init__(length=length, **kwargs)
-
-
-class NCHAR(_StringType, sqltypes.NCHAR):
- """MySQL NCHAR type.
-
- For fixed-length character data in the server's configured national
- character set.
- """
-
- __visit_name__ = 'NCHAR'
-
- def __init__(self, length=None, **kwargs):
- """Construct an NCHAR.
-
- :param length: Maximum data length, in characters.
-
- :param binary: Optional, use the default binary collation for the
- national character set. This does not affect the type of data
- stored, use a BINARY type for binary data.
-
- :param collation: Optional, request a particular collation. Must be
- compatible with the national character set.
-
- """
- kwargs['national'] = True
- super(NCHAR, self).__init__(length=length, **kwargs)
-
-
-class TINYBLOB(sqltypes._Binary):
- """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
-
- __visit_name__ = 'TINYBLOB'
-
-
-class MEDIUMBLOB(sqltypes._Binary):
- """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
-
- __visit_name__ = 'MEDIUMBLOB'
-
-
-class LONGBLOB(sqltypes._Binary):
- """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
-
- __visit_name__ = 'LONGBLOB'
-
-
-class _EnumeratedValues(_StringType):
- def _init_values(self, values, kw):
- self.quoting = kw.pop('quoting', 'auto')
-
- if self.quoting == 'auto' and len(values):
- # What quoting character are we using?
- q = None
- for e in values:
- if len(e) == 0:
- self.quoting = 'unquoted'
- break
- elif q is None:
- q = e[0]
-
- if len(e) == 1 or e[0] != q or e[-1] != q:
- self.quoting = 'unquoted'
- break
- else:
- self.quoting = 'quoted'
-
- if self.quoting == 'quoted':
- util.warn_deprecated(
- 'Manually quoting %s value literals is deprecated. Supply '
- 'unquoted values and use the quoting= option in cases of '
- 'ambiguity.' % self.__class__.__name__)
-
- values = self._strip_values(values)
-
- self._enumerated_values = values
- length = max([len(v) for v in values] + [0])
- return values, length
-
- @classmethod
- def _strip_values(cls, values):
- strip_values = []
- for a in values:
- if a[0:1] == '"' or a[0:1] == "'":
- # strip enclosing quotes and unquote interior
- a = a[1:-1].replace(a[0] * 2, a[0])
- strip_values.append(a)
- return strip_values
-
-
-class ENUM(sqltypes.Enum, _EnumeratedValues):
- """MySQL ENUM type."""
-
- __visit_name__ = 'ENUM'
-
- def __init__(self, *enums, **kw):
- """Construct an ENUM.
-
- E.g.::
-
- Column('myenum', ENUM("foo", "bar", "baz"))
-
- :param enums: The range of valid values for this ENUM. Values will be
- quoted when generating the schema according to the quoting flag (see
- below).
-
- :param strict: Defaults to False: ensure that a given value is in this
- ENUM's range of permissible values when inserting or updating rows.
- Note that MySQL will not raise a fatal error if you attempt to store
- an out of range value- an alternate value will be stored instead.
- (See MySQL ENUM documentation.)
-
- :param charset: Optional, a column-level character set for this string
- value. Takes precedence to 'ascii' or 'unicode' short-hand.
-
- :param collation: Optional, a column-level collation for this string
- value. Takes precedence to 'binary' short-hand.
-
- :param ascii: Defaults to False: short-hand for the ``latin1``
- character set, generates ASCII in schema.
-
- :param unicode: Defaults to False: short-hand for the ``ucs2``
- character set, generates UNICODE in schema.
-
- :param binary: Defaults to False: short-hand, pick the binary
- collation type that matches the column's character set. Generates
- BINARY in schema. This does not affect the type of data stored,
- only the collation of character data.
-
- :param quoting: Defaults to 'auto': automatically determine enum value
- quoting. If all enum values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- """
- values, length = self._init_values(enums, kw)
- self.strict = kw.pop('strict', False)
- kw.pop('metadata', None)
- kw.pop('schema', None)
- kw.pop('name', None)
- kw.pop('quote', None)
- kw.pop('native_enum', None)
- kw.pop('inherit_schema', None)
- _StringType.__init__(self, length=length, **kw)
- sqltypes.Enum.__init__(self, *values)
-
- def __repr__(self):
- return util.generic_repr(
- self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
-
- def bind_processor(self, dialect):
- super_convert = super(ENUM, self).bind_processor(dialect)
-
- def process(value):
- if self.strict and value is not None and value not in self.enums:
- raise exc.InvalidRequestError('"%s" not a valid value for '
- 'this enum' % value)
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
- def adapt(self, cls, **kw):
- if issubclass(cls, ENUM):
- kw['strict'] = self.strict
- return sqltypes.Enum.adapt(self, cls, **kw)
-
-
-class SET(_EnumeratedValues):
- """MySQL SET type."""
-
- __visit_name__ = 'SET'
-
- def __init__(self, *values, **kw):
- """Construct a SET.
-
- E.g.::
-
- Column('myset', SET("foo", "bar", "baz"))
-
-
- The list of potential values is required in the case that this
- set will be used to generate DDL for a table, or if the
- :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
-
- :param values: The range of valid values for this SET.
-
- :param convert_unicode: Same flag as that of
- :paramref:`.String.convert_unicode`.
-
- :param collation: same as that of :paramref:`.String.collation`
-
- :param charset: same as that of :paramref:`.VARCHAR.charset`.
-
- :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
-
- :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
-
- :param binary: same as that of :paramref:`.VARCHAR.binary`.
-
- :param quoting: Defaults to 'auto': automatically determine set value
- quoting. If all values are surrounded by the same quoting
- character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
-
- 'quoted': values in enums are already quoted, they will be used
- directly when generating the schema - this usage is deprecated.
-
- 'unquoted': values in enums are not quoted, they will be escaped and
- surrounded by single quotes when generating the schema.
-
- Previous versions of this type always required manually quoted
- values to be supplied; future versions will always quote the string
- literals for you. This is a transitional option.
-
- .. versionadded:: 0.9.0
-
- :param retrieve_as_bitwise: if True, the data for the set type will be
- persisted and selected using an integer value, where a set is coerced
- into a bitwise mask for persistence. MySQL allows this mode which
- has the advantage of being able to store values unambiguously,
- such as the blank string ``''``. The datatype will appear
- as the expression ``col + 0`` in a SELECT statement, so that the
- value is coerced into an integer value in result sets.
- This flag is required if one wishes
- to persist a set that can store the blank string ``''`` as a value.
-
- .. warning::
-
- When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
- essential that the list of set values is expressed in the
- **exact same order** as exists on the MySQL database.
-
- .. versionadded:: 1.0.0
-
-
- """
- self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
- values, length = self._init_values(values, kw)
- self.values = tuple(values)
- if not self.retrieve_as_bitwise and '' in values:
- raise exc.ArgumentError(
- "Can't use the blank value '' in a SET without "
- "setting retrieve_as_bitwise=True")
- if self.retrieve_as_bitwise:
- self._bitmap = dict(
- (value, 2 ** idx)
- for idx, value in enumerate(self.values)
- )
- self._bitmap.update(
- (2 ** idx, value)
- for idx, value in enumerate(self.values)
- )
- kw.setdefault('length', length)
- super(SET, self).__init__(**kw)
-
- def column_expression(self, colexpr):
- if self.retrieve_as_bitwise:
- return colexpr + 0
- else:
- return colexpr
-
- def result_processor(self, dialect, coltype):
- if self.retrieve_as_bitwise:
- def process(value):
- if value is not None:
- value = int(value)
-
- return set(
- util.map_bits(self._bitmap.__getitem__, value)
- )
- else:
- return None
- else:
- super_convert = super(SET, self).result_processor(dialect, coltype)
-
- def process(value):
- if isinstance(value, util.string_types):
- # MySQLdb returns a string, let's parse
- if super_convert:
- value = super_convert(value)
- return set(re.findall(r'[^,]+', value))
- else:
- # mysql-connector-python does a naive
- # split(",") which throws in an empty string
- if value is not None:
- value.discard('')
- return value
- return process
-
- def bind_processor(self, dialect):
- super_convert = super(SET, self).bind_processor(dialect)
- if self.retrieve_as_bitwise:
- def process(value):
- if value is None:
- return None
- elif isinstance(value, util.int_types + util.string_types):
- if super_convert:
- return super_convert(value)
- else:
- return value
- else:
- int_value = 0
- for v in value:
- int_value |= self._bitmap[v]
- return int_value
- else:
-
- def process(value):
- # accept strings and int (actually bitflag) values directly
- if value is not None and not isinstance(
- value, util.int_types + util.string_types):
- value = ",".join(value)
-
- if super_convert:
- return super_convert(value)
- else:
- return value
- return process
-
- def adapt(self, impltype, **kw):
- kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
- return util.constructor_copy(
- self, impltype,
- *self.values,
- **kw
- )
-
# old names
MSTime = TIME
MSSet = SET
@@ -1607,7 +665,11 @@ colspecs = {
sqltypes.Float: FLOAT,
sqltypes.Time: TIME,
sqltypes.Enum: ENUM,
- sqltypes.MatchType: _MatchType
+ sqltypes.MatchType: _MatchType,
+ sqltypes.JSON: JSON,
+ sqltypes.JSON.JSONIndexType: JSONIndexType,
+ sqltypes.JSON.JSONPathType: JSONPathType
+
}
# Everything 3.23 through 5.1 excepting OpenGIS types.
@@ -1627,6 +689,7 @@ ischema_names = {
'float': FLOAT,
'int': INTEGER,
'integer': INTEGER,
+ 'json': JSON,
'longblob': LONGBLOB,
'longtext': LONGTEXT,
'mediumblob': MEDIUMBLOB,
@@ -1672,6 +735,16 @@ class MySQLCompiler(compiler.SQLCompiler):
def visit_sysdate_func(self, fn, **kw):
return "SYSDATE()"
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left),
+ self.process(binary.right))
+
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ return "JSON_EXTRACT(%s, %s)" % (
+ self.process(binary.left),
+ self.process(binary.right))
+
def visit_concat_op_binary(self, binary, operator, **kw):
return "concat(%s, %s)" % (self.process(binary.left),
self.process(binary.right))
@@ -1704,6 +777,8 @@ class MySQLCompiler(compiler.SQLCompiler):
return self.dialect.type_compiler.process(adapted)
elif isinstance(type_, sqltypes._Binary):
return 'BINARY'
+ elif isinstance(type_, sqltypes.JSON):
+ return "JSON"
elif isinstance(type_, sqltypes.NUMERIC):
return self.dialect.type_compiler.process(
type_).replace('NUMERIC', 'DECIMAL')
@@ -1742,7 +817,7 @@ class MySQLCompiler(compiler.SQLCompiler):
def visit_false(self, element, **kw):
return "false"
- def get_select_precolumns(self, select):
+ def get_select_precolumns(self, select, **kw):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
@@ -1826,38 +901,7 @@ class MySQLCompiler(compiler.SQLCompiler):
return None
-# ug. "InnoDB needs indexes on foreign keys and referenced keys [...].
-# Starting with MySQL 4.1.2, these indexes are created automatically.
-# In older versions, the indexes must be created explicitly or the
-# creation of foreign key constraints fails."
-
class MySQLDDLCompiler(compiler.DDLCompiler):
- def create_table_constraints(self, table, **kw):
- """Get table constraints."""
- constraint_string = super(
- MySQLDDLCompiler, self).create_table_constraints(table, **kw)
-
- # why self.dialect.name and not 'mysql'? because of drizzle
- is_innodb = 'engine' in table.dialect_options[self.dialect.name] and \
- table.dialect_options[self.dialect.name][
- 'engine'].lower() == 'innodb'
-
- auto_inc_column = table._autoincrement_column
-
- if is_innodb and \
- auto_inc_column is not None and \
- auto_inc_column is not list(table.primary_key)[0]:
- if constraint_string:
- constraint_string += ", \n\t"
- constraint_string += "KEY %s (%s)" % (
- self.preparer.quote(
- "idx_autoinc_%s" % auto_inc_column.name
- ),
- self.preparer.format_column(auto_inc_column)
- )
-
- return constraint_string
-
def get_column_specification(self, column, **kw):
"""Builds column DDL."""
@@ -1908,7 +952,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
('PARTITION_BY', 'PARTITIONS'), # only for test consistency
], opts):
arg = opts[opt]
- if opt in _options_of_type_string:
+ if opt in _reflection._options_of_type_string:
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
@@ -2250,6 +1294,9 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARBINARY(self, type_, **kw):
return "VARBINARY(%d)" % type_.length
+ def visit_JSON(self, type_, **kw):
+ return "JSON"
+
def visit_large_binary(self, type_, **kw):
return self.visit_BLOB(type_)
@@ -2369,10 +1416,13 @@ class MySQLDialect(default.DefaultDialect):
})
]
- def __init__(self, isolation_level=None, **kwargs):
+ def __init__(self, isolation_level=None, json_serializer=None,
+ json_deserializer=None, **kwargs):
kwargs.pop('use_ansiquotes', None) # legacy
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
+ self._json_serializer = json_serializer
+ self._json_deserializer = json_deserializer
def on_connect(self):
if self.isolation_level is not None:
@@ -2547,6 +1597,10 @@ class MySQLDialect(default.DefaultDialect):
default.DefaultDialect.initialize(self, connection)
@property
+ def _is_mariadb(self):
+ return 'MariaDB' in self.server_version_info
+
+ @property
def _supports_cast(self):
return self.server_version_info is None or \
self.server_version_info >= (4, 0, 2)
@@ -2738,7 +1792,7 @@ class MySQLDialect(default.DefaultDialect):
preparer = self.preparer(self, server_ansiquotes=False)
else:
preparer = self.identifier_preparer
- return MySQLTableDefinitionParser(self, preparer)
+ return _reflection.MySQLTableDefinitionParser(self, preparer)
@reflection.cache
def _setup_parser(self, connection, table_name, schema=None, **kw):
@@ -2748,7 +1802,7 @@ class MySQLDialect(default.DefaultDialect):
schema, table_name))
sql = self._show_create_table(connection, None, charset,
full_name=full_name)
- if sql.startswith('CREATE ALGORITHM'):
+ if re.match(r'^CREATE (?:ALGORITHM)?.* VIEW', sql):
# Adapt views to something table-like.
columns = self._describe_table(connection, None, charset,
full_name=full_name)
@@ -2870,425 +1924,6 @@ class MySQLDialect(default.DefaultDialect):
return rows
-class ReflectedState(object):
- """Stores raw information about a SHOW CREATE TABLE statement."""
-
- def __init__(self):
- self.columns = []
- self.table_options = {}
- self.table_name = None
- self.keys = []
- self.constraints = []
-
-
-@log.class_logger
-class MySQLTableDefinitionParser(object):
- """Parses the results of a SHOW CREATE TABLE statement."""
-
- def __init__(self, dialect, preparer):
- self.dialect = dialect
- self.preparer = preparer
- self._prep_regexes()
-
- def parse(self, show_create, charset):
- state = ReflectedState()
- state.charset = charset
- for line in re.split(r'\r?\n', show_create):
- if line.startswith(' ' + self.preparer.initial_quote):
- self._parse_column(line, state)
- # a regular table options line
- elif line.startswith(') '):
- self._parse_table_options(line, state)
- # an ANSI-mode table options line
- elif line == ')':
- pass
- elif line.startswith('CREATE '):
- self._parse_table_name(line, state)
- # Not present in real reflection, but may be if
- # loading from a file.
- elif not line:
- pass
- else:
- type_, spec = self._parse_constraints(line)
- if type_ is None:
- util.warn("Unknown schema content: %r" % line)
- elif type_ == 'key':
- state.keys.append(spec)
- elif type_ == 'constraint':
- state.constraints.append(spec)
- else:
- pass
- return state
-
- def _parse_constraints(self, line):
- """Parse a KEY or CONSTRAINT line.
-
- :param line: A line of SHOW CREATE TABLE output
- """
-
- # KEY
- m = self._re_key.match(line)
- if m:
- spec = m.groupdict()
- # convert columns into name, length pairs
- spec['columns'] = self._parse_keyexprs(spec['columns'])
- return 'key', spec
-
- # CONSTRAINT
- m = self._re_constraint.match(line)
- if m:
- spec = m.groupdict()
- spec['table'] = \
- self.preparer.unformat_identifiers(spec['table'])
- spec['local'] = [c[0]
- for c in self._parse_keyexprs(spec['local'])]
- spec['foreign'] = [c[0]
- for c in self._parse_keyexprs(spec['foreign'])]
- return 'constraint', spec
-
- # PARTITION and SUBPARTITION
- m = self._re_partition.match(line)
- if m:
- # Punt!
- return 'partition', line
-
- # No match.
- return (None, line)
-
- def _parse_table_name(self, line, state):
- """Extract the table name.
-
- :param line: The first line of SHOW CREATE TABLE
- """
-
- regex, cleanup = self._pr_name
- m = regex.match(line)
- if m:
- state.table_name = cleanup(m.group('name'))
-
- def _parse_table_options(self, line, state):
- """Build a dictionary of all reflected table-level options.
-
- :param line: The final line of SHOW CREATE TABLE output.
- """
-
- options = {}
-
- if not line or line == ')':
- pass
-
- else:
- rest_of_line = line[:]
- for regex, cleanup in self._pr_options:
- m = regex.search(rest_of_line)
- if not m:
- continue
- directive, value = m.group('directive'), m.group('val')
- if cleanup:
- value = cleanup(value)
- options[directive.lower()] = value
- rest_of_line = regex.sub('', rest_of_line)
-
- for nope in ('auto_increment', 'data directory', 'index directory'):
- options.pop(nope, None)
-
- for opt, val in options.items():
- state.table_options['%s_%s' % (self.dialect.name, opt)] = val
-
- def _parse_column(self, line, state):
- """Extract column details.
-
- Falls back to a 'minimal support' variant if full parse fails.
-
- :param line: Any column-bearing line from SHOW CREATE TABLE
- """
-
- spec = None
- m = self._re_column.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = True
- else:
- m = self._re_column_loose.match(line)
- if m:
- spec = m.groupdict()
- spec['full'] = False
- if not spec:
- util.warn("Unknown column definition %r" % line)
- return
- if not spec['full']:
- util.warn("Incomplete reflection of column definition %r" % line)
-
- name, type_, args = spec['name'], spec['coltype'], spec['arg']
-
- try:
- col_type = self.dialect.ischema_names[type_]
- except KeyError:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (type_, name))
- col_type = sqltypes.NullType
-
- # Column type positional arguments eg. varchar(32)
- if args is None or args == '':
- type_args = []
- elif args[0] == "'" and args[-1] == "'":
- type_args = self._re_csv_str.findall(args)
- else:
- type_args = [int(v) for v in self._re_csv_int.findall(args)]
-
- # Column type keyword options
- type_kw = {}
- for kw in ('unsigned', 'zerofill'):
- if spec.get(kw, False):
- type_kw[kw] = True
- for kw in ('charset', 'collate'):
- if spec.get(kw, False):
- type_kw[kw] = spec[kw]
- if issubclass(col_type, _EnumeratedValues):
- type_args = _EnumeratedValues._strip_values(type_args)
-
- if issubclass(col_type, SET) and '' in type_args:
- type_kw['retrieve_as_bitwise'] = True
-
- type_instance = col_type(*type_args, **type_kw)
-
- col_kw = {}
-
- # NOT NULL
- col_kw['nullable'] = True
- # this can be "NULL" in the case of TIMESTAMP
- if spec.get('notnull', False) == 'NOT NULL':
- col_kw['nullable'] = False
-
- # AUTO_INCREMENT
- if spec.get('autoincr', False):
- col_kw['autoincrement'] = True
- elif issubclass(col_type, sqltypes.Integer):
- col_kw['autoincrement'] = False
-
- # DEFAULT
- default = spec.get('default', None)
-
- if default == 'NULL':
- # eliminates the need to deal with this later.
- default = None
-
- col_d = dict(name=name, type=type_instance, default=default)
- col_d.update(col_kw)
- state.columns.append(col_d)
-
- def _describe_to_create(self, table_name, columns):
- """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
-
- DESCRIBE is a much simpler reflection and is sufficient for
- reflecting views for runtime use. This method formats DDL
- for columns only- keys are omitted.
-
- :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
- SHOW FULL COLUMNS FROM rows must be rearranged for use with
- this function.
- """
-
- buffer = []
- for row in columns:
- (name, col_type, nullable, default, extra) = \
- [row[i] for i in (0, 1, 2, 4, 5)]
-
- line = [' ']
- line.append(self.preparer.quote_identifier(name))
- line.append(col_type)
- if not nullable:
- line.append('NOT NULL')
- if default:
- if 'auto_increment' in default:
- pass
- elif (col_type.startswith('timestamp') and
- default.startswith('C')):
- line.append('DEFAULT')
- line.append(default)
- elif default == 'NULL':
- line.append('DEFAULT')
- line.append(default)
- else:
- line.append('DEFAULT')
- line.append("'%s'" % default.replace("'", "''"))
- if extra:
- line.append(extra)
-
- buffer.append(' '.join(line))
-
- return ''.join([('CREATE TABLE %s (\n' %
- self.preparer.quote_identifier(table_name)),
- ',\n'.join(buffer),
- '\n) '])
-
- def _parse_keyexprs(self, identifiers):
- """Unpack '"col"(2),"col" ASC'-ish strings into components."""
-
- return self._re_keyexprs.findall(identifiers)
-
- def _prep_regexes(self):
- """Pre-compile regular expressions."""
-
- self._re_columns = []
- self._pr_options = []
-
- _final = self.preparer.final_quote
-
- quotes = dict(zip(('iq', 'fq', 'esc_fq'),
- [re.escape(s) for s in
- (self.preparer.initial_quote,
- _final,
- self.preparer._escape_identifier(_final))]))
-
- self._pr_name = _pr_compile(
- r'^CREATE (?:\w+ +)?TABLE +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
- self.preparer._unescape_identifier)
-
- # `col`,`col2`(32),`col3`(15) DESC
- #
- # Note: ASC and DESC aren't reflected, so we'll punt...
- self._re_keyexprs = _re_compile(
- r'(?:'
- r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
- r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
-
- # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
- self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
-
- # 123 or 123,456
- self._re_csv_int = _re_compile(r'\d+')
-
- # `colname` <type> [type opts]
- # (NOT NULL | NULL)
- # DEFAULT ('value' | CURRENT_TIMESTAMP...)
- # COMMENT 'comment'
- # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
- # STORAGE (DISK|MEMORY)
- self._re_column = _re_compile(
- r' '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
- r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
- r'(?: +(?P<unsigned>UNSIGNED))?'
- r'(?: +(?P<zerofill>ZEROFILL))?'
- r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
- r'(?: +COLLATE +(?P<collate>[\w_]+))?'
- r'(?: +(?P<notnull>(?:NOT )?NULL))?'
- r'(?: +DEFAULT +(?P<default>'
- r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
- r'(?: +ON UPDATE \w+)?)'
- r'))?'
- r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
- r'(?: +COMMENT +(P<comment>(?:\x27\x27|[^\x27])+))?'
- r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
- r'(?: +STORAGE +(?P<storage>\w+))?'
- r'(?: +(?P<extra>.*))?'
- r',?$'
- % quotes
- )
-
- # Fallback, try to parse as little as possible
- self._re_column_loose = _re_compile(
- r' '
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'(?P<coltype>\w+)'
- r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
- r'.*?(?P<notnull>(?:NOT )NULL)?'
- % quotes
- )
-
- # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
- # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
- # KEY_BLOCK_SIZE size | WITH PARSER name
- self._re_key = _re_compile(
- r' '
- r'(?:(?P<type>\S+) )?KEY'
- r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
- r'(?: +USING +(?P<using_pre>\S+))?'
- r' +\((?P<columns>.+?)\)'
- r'(?: +USING +(?P<using_post>\S+))?'
- r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
- r'(?: +WITH PARSER +(?P<parser>\S+))?'
- r',?$'
- % quotes
- )
-
- # CONSTRAINT `name` FOREIGN KEY (`local_col`)
- # REFERENCES `remote` (`remote_col`)
- # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
- # ON DELETE CASCADE ON UPDATE RESTRICT
- #
- # unique constraints come back as KEYs
- kw = quotes.copy()
- kw['on'] = 'RESTRICT|CASCADE|SET NULL|NOACTION'
- self._re_constraint = _re_compile(
- r' '
- r'CONSTRAINT +'
- r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
- r'FOREIGN KEY +'
- r'\((?P<local>[^\)]+?)\) REFERENCES +'
- r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
- r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
- r'\((?P<foreign>[^\)]+?)\)'
- r'(?: +(?P<match>MATCH \w+))?'
- r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
- r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
- % kw
- )
-
- # PARTITION
- #
- # punt!
- self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
-
- # Table-level options (COLLATE, ENGINE, etc.)
- # Do the string options first, since they have quoted
- # strings we need to get rid of.
- for option in _options_of_type_string:
- self._add_option_string(option)
-
- for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
- 'AVG_ROW_LENGTH', 'CHARACTER SET',
- 'DEFAULT CHARSET', 'CHECKSUM',
- 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
- 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
- 'KEY_BLOCK_SIZE'):
- self._add_option_word(option)
-
- self._add_option_regex('UNION', r'\([^\)]+\)')
- self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
- self._add_option_regex(
- 'RAID_TYPE',
- r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
-
- _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
-
- def _add_option_string(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r"'(?P<val>(?:[^']|'')*?)'(?!')" %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(
- regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
- ))
-
- def _add_option_word(self, directive):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>\w+)' %
- (re.escape(directive), self._optional_equals))
- self._pr_options.append(_pr_compile(regex))
-
- def _add_option_regex(self, directive, regex):
- regex = (r'(?P<directive>%s)%s'
- r'(?P<val>%s)' %
- (re.escape(directive), self._optional_equals, regex))
- self._pr_options.append(_pr_compile(regex))
-
-_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
- 'PASSWORD', 'CONNECTION')
-
class _DecodingRowProxy(object):
"""Return unicode-decoded values based on type inspection.
@@ -3303,9 +1938,17 @@ class _DecodingRowProxy(object):
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
+ _encoding_compat = {
+ 'koi8r': 'koi8_r',
+ 'koi8u': 'koi8_u',
+ 'utf16': 'utf-16-be', # MySQL's utf16 is always big-endian
+ 'utf8mb4': 'utf8', # real utf8
+ 'eucjpms': 'ujis',
+ }
+
def __init__(self, rowproxy, charset):
self.rowproxy = rowproxy
- self.charset = charset
+ self.charset = self._encoding_compat.get(charset, charset)
def __getitem__(self, index):
item = self.rowproxy[index]
@@ -3326,14 +1969,3 @@ class _DecodingRowProxy(object):
else:
return item
-
-def _pr_compile(regex, cleanup=None):
- """Prepare a 2-tuple of compiled regex and callable."""
-
- return (_re_compile(regex), cleanup)
-
-
-def _re_compile(regex):
- """Compile a string to regex, I and UNICODE."""
-
- return re.compile(regex, re.I | re.UNICODE)
diff --git a/lib/sqlalchemy/dialects/mysql/cymysql.py b/lib/sqlalchemy/dialects/mysql/cymysql.py
index 51b63044e..8bc0ae3be 100644
--- a/lib/sqlalchemy/dialects/mysql/cymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/cymysql.py
@@ -1,5 +1,5 @@
# mysql/cymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/mysql/enumerated.py b/lib/sqlalchemy/dialects/mysql/enumerated.py
new file mode 100644
index 000000000..567e95288
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/enumerated.py
@@ -0,0 +1,299 @@
+# mysql/enumerated.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+
+from .types import _StringType
+from ... import exc, sql, util
+from ... import types as sqltypes
+
+
+class _EnumeratedValues(_StringType):
+ def _init_values(self, values, kw):
+ self.quoting = kw.pop('quoting', 'auto')
+
+ if self.quoting == 'auto' and len(values):
+ # What quoting character are we using?
+ q = None
+ for e in values:
+ if len(e) == 0:
+ self.quoting = 'unquoted'
+ break
+ elif q is None:
+ q = e[0]
+
+ if len(e) == 1 or e[0] != q or e[-1] != q:
+ self.quoting = 'unquoted'
+ break
+ else:
+ self.quoting = 'quoted'
+
+ if self.quoting == 'quoted':
+ util.warn_deprecated(
+ 'Manually quoting %s value literals is deprecated. Supply '
+ 'unquoted values and use the quoting= option in cases of '
+ 'ambiguity.' % self.__class__.__name__)
+
+ values = self._strip_values(values)
+
+ self._enumerated_values = values
+ length = max([len(v) for v in values] + [0])
+ return values, length
+
+ @classmethod
+ def _strip_values(cls, values):
+ strip_values = []
+ for a in values:
+ if a[0:1] == '"' or a[0:1] == "'":
+ # strip enclosing quotes and unquote interior
+ a = a[1:-1].replace(a[0] * 2, a[0])
+ strip_values.append(a)
+ return strip_values
+
+
+class ENUM(sqltypes.Enum, _EnumeratedValues):
+ """MySQL ENUM type."""
+
+ __visit_name__ = 'ENUM'
+
+ def __init__(self, *enums, **kw):
+ """Construct an ENUM.
+
+ E.g.::
+
+ Column('myenum', ENUM("foo", "bar", "baz"))
+
+ :param enums: The range of valid values for this ENUM. Values will be
+ quoted when generating the schema according to the quoting flag (see
+ below). This object may also be a PEP-435-compliant enumerated
+ type.
+
+ .. versionadded:: 1.1 added support for PEP-435-compliant enumerated
+ types.
+
+ :param strict: This flag has no effect.
+
+ .. versionchanged:: 1.1 The MySQL ENUM type as well as the base Enum
+ type now validates all Python data values.
+
+ :param charset: Optional, a column-level character set for this string
+ value. Takes precedence to 'ascii' or 'unicode' short-hand.
+
+ :param collation: Optional, a column-level collation for this string
+ value. Takes precedence to 'binary' short-hand.
+
+ :param ascii: Defaults to False: short-hand for the ``latin1``
+ character set, generates ASCII in schema.
+
+ :param unicode: Defaults to False: short-hand for the ``ucs2``
+ character set, generates UNICODE in schema.
+
+ :param binary: Defaults to False: short-hand, pick the binary
+ collation type that matches the column's character set. Generates
+ BINARY in schema. This does not affect the type of data stored,
+ only the collation of character data.
+
+ :param quoting: Defaults to 'auto': automatically determine enum value
+ quoting. If all enum values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+ 'quoted': values in enums are already quoted, they will be used
+ directly when generating the schema - this usage is deprecated.
+
+ 'unquoted': values in enums are not quoted, they will be escaped and
+ surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
+
+ """
+
+ kw.pop('strict', None)
+ sqltypes.Enum.__init__(self, *enums)
+ kw.pop('metadata', None)
+ kw.pop('schema', None)
+ kw.pop('name', None)
+ kw.pop('quote', None)
+ kw.pop('native_enum', None)
+ kw.pop('inherit_schema', None)
+ kw.pop('_create_events', None)
+ _StringType.__init__(self, length=self.length, **kw)
+
+ def _setup_for_values(self, values, objects, kw):
+ values, length = self._init_values(values, kw)
+ return sqltypes.Enum._setup_for_values(self, values, objects, kw)
+
+ def __repr__(self):
+ return util.generic_repr(
+ self, to_inspect=[ENUM, _StringType, sqltypes.Enum])
+
+ def adapt(self, cls, **kw):
+ return sqltypes.Enum.adapt(self, cls, **kw)
+
+
+class SET(_EnumeratedValues):
+ """MySQL SET type."""
+
+ __visit_name__ = 'SET'
+
+ def __init__(self, *values, **kw):
+ """Construct a SET.
+
+ E.g.::
+
+ Column('myset', SET("foo", "bar", "baz"))
+
+
+ The list of potential values is required in the case that this
+ set will be used to generate DDL for a table, or if the
+ :paramref:`.SET.retrieve_as_bitwise` flag is set to True.
+
+ :param values: The range of valid values for this SET.
+
+ :param convert_unicode: Same flag as that of
+ :paramref:`.String.convert_unicode`.
+
+ :param collation: same as that of :paramref:`.String.collation`
+
+ :param charset: same as that of :paramref:`.VARCHAR.charset`.
+
+ :param ascii: same as that of :paramref:`.VARCHAR.ascii`.
+
+ :param unicode: same as that of :paramref:`.VARCHAR.unicode`.
+
+ :param binary: same as that of :paramref:`.VARCHAR.binary`.
+
+ :param quoting: Defaults to 'auto': automatically determine set value
+ quoting. If all values are surrounded by the same quoting
+ character, then use 'quoted' mode. Otherwise, use 'unquoted' mode.
+
+ 'quoted': values in enums are already quoted, they will be used
+ directly when generating the schema - this usage is deprecated.
+
+ 'unquoted': values in enums are not quoted, they will be escaped and
+ surrounded by single quotes when generating the schema.
+
+ Previous versions of this type always required manually quoted
+ values to be supplied; future versions will always quote the string
+ literals for you. This is a transitional option.
+
+ .. versionadded:: 0.9.0
+
+ :param retrieve_as_bitwise: if True, the data for the set type will be
+ persisted and selected using an integer value, where a set is coerced
+ into a bitwise mask for persistence. MySQL allows this mode which
+ has the advantage of being able to store values unambiguously,
+ such as the blank string ``''``. The datatype will appear
+ as the expression ``col + 0`` in a SELECT statement, so that the
+ value is coerced into an integer value in result sets.
+ This flag is required if one wishes
+ to persist a set that can store the blank string ``''`` as a value.
+
+ .. warning::
+
+ When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
+ essential that the list of set values is expressed in the
+ **exact same order** as exists on the MySQL database.
+
+ .. versionadded:: 1.0.0
+
+
+ """
+ self.retrieve_as_bitwise = kw.pop('retrieve_as_bitwise', False)
+ values, length = self._init_values(values, kw)
+ self.values = tuple(values)
+ if not self.retrieve_as_bitwise and '' in values:
+ raise exc.ArgumentError(
+ "Can't use the blank value '' in a SET without "
+ "setting retrieve_as_bitwise=True")
+ if self.retrieve_as_bitwise:
+ self._bitmap = dict(
+ (value, 2 ** idx)
+ for idx, value in enumerate(self.values)
+ )
+ self._bitmap.update(
+ (2 ** idx, value)
+ for idx, value in enumerate(self.values)
+ )
+ kw.setdefault('length', length)
+ super(SET, self).__init__(**kw)
+
+ def column_expression(self, colexpr):
+ if self.retrieve_as_bitwise:
+ return sql.type_coerce(
+ sql.type_coerce(colexpr, sqltypes.Integer) + 0,
+ self
+ )
+ else:
+ return colexpr
+
+ def result_processor(self, dialect, coltype):
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is not None:
+ value = int(value)
+
+ return set(
+ util.map_bits(self._bitmap.__getitem__, value)
+ )
+ else:
+ return None
+ else:
+ super_convert = super(SET, self).result_processor(dialect, coltype)
+
+ def process(value):
+ if isinstance(value, util.string_types):
+ # MySQLdb returns a string, let's parse
+ if super_convert:
+ value = super_convert(value)
+ return set(re.findall(r'[^,]+', value))
+ else:
+ # mysql-connector-python does a naive
+ # split(",") which throws in an empty string
+ if value is not None:
+ value.discard('')
+ return value
+ return process
+
+ def bind_processor(self, dialect):
+ super_convert = super(SET, self).bind_processor(dialect)
+ if self.retrieve_as_bitwise:
+ def process(value):
+ if value is None:
+ return None
+ elif isinstance(value, util.int_types + util.string_types):
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ else:
+ int_value = 0
+ for v in value:
+ int_value |= self._bitmap[v]
+ return int_value
+ else:
+
+ def process(value):
+ # accept strings and int (actually bitflag) values directly
+ if value is not None and not isinstance(
+ value, util.int_types + util.string_types):
+ value = ",".join(value)
+
+ if super_convert:
+ return super_convert(value)
+ else:
+ return value
+ return process
+
+ def adapt(self, impltype, **kw):
+ kw['retrieve_as_bitwise'] = self.retrieve_as_bitwise
+ return util.constructor_copy(
+ self, impltype,
+ *self.values,
+ **kw
+ )
diff --git a/lib/sqlalchemy/dialects/mysql/gaerdbms.py b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
index 284b51bde..4e365884e 100644
--- a/lib/sqlalchemy/dialects/mysql/gaerdbms.py
+++ b/lib/sqlalchemy/dialects/mysql/gaerdbms.py
@@ -1,5 +1,5 @@
# mysql/gaerdbms.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -22,7 +22,7 @@ developers-guide
Cloud SQL now recommends creating connections via the
mysql dialect using the URL format
- `mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>`
+ ``mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>``
Pooling
diff --git a/lib/sqlalchemy/dialects/mysql/json.py b/lib/sqlalchemy/dialects/mysql/json.py
new file mode 100644
index 000000000..b2d5a78b5
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/json.py
@@ -0,0 +1,90 @@
+# mysql/json.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from __future__ import absolute_import
+
+import json
+
+from ...sql import elements
+from ... import types as sqltypes
+from ... import util
+
+
+class JSON(sqltypes.JSON):
+ """MySQL JSON type.
+
+ MySQL supports JSON as of version 5.7. Note that MariaDB does **not**
+ support JSON at the time of this writing.
+
+ The :class:`.mysql.JSON` type supports persistence of JSON values
+ as well as the core index operations provided by :class:`.types.JSON`
+ datatype, by adapting the operations to render the ``JSON_EXTRACT``
+ function at the database level.
+
+ .. versionadded:: 1.1
+
+ """
+
+ @util.memoized_property
+ def _str_impl(self):
+ return sqltypes.String(convert_unicode=True)
+
+ def bind_processor(self, dialect):
+ string_process = self._str_impl.bind_processor(dialect)
+
+ json_serializer = dialect._json_serializer or json.dumps
+
+ def process(value):
+ if value is self.NULL:
+ value = None
+ elif isinstance(value, elements.Null) or (
+ value is None and self.none_as_null
+ ):
+ return None
+
+ serialized = json_serializer(value)
+ if string_process:
+ serialized = string_process(serialized)
+ return serialized
+
+ return process
+
+ def result_processor(self, dialect, coltype):
+ string_process = self._str_impl.result_processor(dialect, coltype)
+ json_deserializer = dialect._json_deserializer or json.loads
+
+ def process(value):
+ if value is None:
+ return None
+ if string_process:
+ value = string_process(value)
+ return json_deserializer(value)
+ return process
+
+
+class JSONIndexType(sqltypes.JSON.JSONIndexType):
+ def bind_processor(self, dialect):
+ def process(value):
+ if isinstance(value, int):
+ return "$[%s]" % value
+ else:
+ return '$."%s"' % value
+
+ return process
+
+
+class JSONPathType(sqltypes.JSON.JSONPathType):
+ def bind_processor(self, dialect):
+ def process(value):
+ return "$%s" % (
+ "".join([
+ "[%s]" % elem if isinstance(elem, int)
+ else '."%s"' % elem for elem in value
+ ])
+ )
+
+ return process
diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
index 4dc803d2c..8fe6a9182 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py
@@ -1,5 +1,5 @@
# mysql/mysqlconnector.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -14,6 +14,12 @@
:url: http://dev.mysql.com/downloads/connector/python/
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
+
"""
from .base import (MySQLDialect, MySQLExecutionContext,
diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py
index 67d2f3d1d..aa8377b27 100644
--- a/lib/sqlalchemy/dialects/mysql/mysqldb.py
+++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py
@@ -1,5 +1,5 @@
# mysql/mysqldb.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -13,31 +13,22 @@
:connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://sourceforge.net/projects/mysql-python
+.. _mysqldb_unicode:
Unicode
-------
-MySQLdb requires a "charset" parameter to be passed in order for it
-to handle non-ASCII characters correctly. When this parameter is passed,
-MySQLdb will also implicitly set the "use_unicode" flag to true, which means
-that it will return Python unicode objects instead of bytestrings.
-However, SQLAlchemy's decode process, when C extensions are enabled,
-is orders of magnitude faster than that of MySQLdb as it does not call into
-Python functions to do so. Therefore, the **recommended URL to use for
-unicode** will include both charset and use_unicode=0::
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
- create_engine("mysql+mysqldb://user:pass@host/dbname?charset=utf8&use_unicode=0")
+Py3K Support
+------------
-As of this writing, MySQLdb only runs on Python 2. It is not known how
-MySQLdb behaves on Python 3 as far as unicode decoding.
+Currently, MySQLdb only runs on Python 2 and development has been stopped.
+`mysqlclient`_ is fork of MySQLdb and provides Python 3 support as well
+as some bugfixes.
-
-Known Issues
--------------
-
-MySQL-python version 1.2.2 has a serious memory leak related
-to unicode conversion, a feature which is disabled via ``use_unicode=0``.
-It is strongly advised to use the latest version of MySQL-Python.
+.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python
Using MySQLdb with Google Cloud SQL
-----------------------------------
diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py
index fa127f3b0..b91db1836 100644
--- a/lib/sqlalchemy/dialects/mysql/oursql.py
+++ b/lib/sqlalchemy/dialects/mysql/oursql.py
@@ -1,5 +1,5 @@
# mysql/oursql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -16,22 +16,10 @@
Unicode
-------
-oursql defaults to using ``utf8`` as the connection charset, but other
-encodings may be used instead. Like the MySQL-Python driver, unicode support
-can be completely disabled::
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
- # oursql sets the connection charset to utf8 automatically; all strings come
- # back as utf8 str
- create_engine('mysql+oursql:///mydb?use_unicode=0')
-To not automatically use ``utf8`` and instead use whatever the connection
-defaults to, there is a separate parameter::
-
- # use the default connection charset; all strings come back as unicode
- create_engine('mysql+oursql:///mydb?default_charset=1')
-
- # use latin1 as the connection charset; all strings come back as unicode
- create_engine('mysql+oursql:///mydb?charset=latin1')
"""
import re
diff --git a/lib/sqlalchemy/dialects/mysql/pymysql.py b/lib/sqlalchemy/dialects/mysql/pymysql.py
index 8df2ba03f..3c493fbfc 100644
--- a/lib/sqlalchemy/dialects/mysql/pymysql.py
+++ b/lib/sqlalchemy/dialects/mysql/pymysql.py
@@ -1,5 +1,5 @@
# mysql/pymysql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -12,7 +12,13 @@
:dbapi: pymysql
:connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>\
[?<options>]
- :url: http://code.google.com/p/pymysql/
+ :url: http://www.pymysql.org/
+
+Unicode
+-------
+
+Please see :ref:`mysql_unicode` for current recommendations on unicode
+handling.
MySQL-Python Compatibility
--------------------------
@@ -31,7 +37,12 @@ class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
description_encoding = None
+
+ # generally, these two values should be both True
+ # or both False. PyMySQL unicode tests pass all the way back
+ # to 0.4 either way. See [ticket:3337]
supports_unicode_statements = True
+ supports_unicode_binds = True
@classmethod
def dbapi(cls):
diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py
index 58e8b30fe..882d3ea4e 100644
--- a/lib/sqlalchemy/dialects/mysql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py
@@ -1,5 +1,5 @@
# mysql/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -14,14 +14,11 @@
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
-
-Limitations
------------
-
-The mysql-pyodbc dialect is subject to unresolved character encoding issues
-which exist within the current ODBC drivers available.
-(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
-of OurSQL, MySQLdb, or MySQL-connector/Python.
+ .. note:: The PyODBC for MySQL dialect is not well supported, and
+ is subject to unresolved character encoding issues
+ which exist within the current ODBC drivers available.
+ (see http://code.google.com/p/pyodbc/issues/detail?id=25).
+ Other dialects for MySQL are recommended.
"""
diff --git a/lib/sqlalchemy/dialects/mysql/reflection.py b/lib/sqlalchemy/dialects/mysql/reflection.py
new file mode 100644
index 000000000..77bf9105e
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/reflection.py
@@ -0,0 +1,449 @@
+# mysql/reflection.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import re
+from ... import log, util
+from ... import types as sqltypes
+from .enumerated import _EnumeratedValues, SET
+from .types import DATETIME, TIME, TIMESTAMP
+
+
+class ReflectedState(object):
+ """Stores raw information about a SHOW CREATE TABLE statement."""
+
+ def __init__(self):
+ self.columns = []
+ self.table_options = {}
+ self.table_name = None
+ self.keys = []
+ self.constraints = []
+
+
+@log.class_logger
+class MySQLTableDefinitionParser(object):
+ """Parses the results of a SHOW CREATE TABLE statement."""
+
+ def __init__(self, dialect, preparer):
+ self.dialect = dialect
+ self.preparer = preparer
+ self._prep_regexes()
+
+ def parse(self, show_create, charset):
+ state = ReflectedState()
+ state.charset = charset
+ for line in re.split(r'\r?\n', show_create):
+ if line.startswith(' ' + self.preparer.initial_quote):
+ self._parse_column(line, state)
+ # a regular table options line
+ elif line.startswith(') '):
+ self._parse_table_options(line, state)
+ # an ANSI-mode table options line
+ elif line == ')':
+ pass
+ elif line.startswith('CREATE '):
+ self._parse_table_name(line, state)
+ # Not present in real reflection, but may be if
+ # loading from a file.
+ elif not line:
+ pass
+ else:
+ type_, spec = self._parse_constraints(line)
+ if type_ is None:
+ util.warn("Unknown schema content: %r" % line)
+ elif type_ == 'key':
+ state.keys.append(spec)
+ elif type_ == 'constraint':
+ state.constraints.append(spec)
+ else:
+ pass
+ return state
+
+ def _parse_constraints(self, line):
+ """Parse a KEY or CONSTRAINT line.
+
+ :param line: A line of SHOW CREATE TABLE output
+ """
+
+ # KEY
+ m = self._re_key.match(line)
+ if m:
+ spec = m.groupdict()
+ # convert columns into name, length pairs
+ spec['columns'] = self._parse_keyexprs(spec['columns'])
+ return 'key', spec
+
+ # CONSTRAINT
+ m = self._re_constraint.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['table'] = \
+ self.preparer.unformat_identifiers(spec['table'])
+ spec['local'] = [c[0]
+ for c in self._parse_keyexprs(spec['local'])]
+ spec['foreign'] = [c[0]
+ for c in self._parse_keyexprs(spec['foreign'])]
+ return 'constraint', spec
+
+ # PARTITION and SUBPARTITION
+ m = self._re_partition.match(line)
+ if m:
+ # Punt!
+ return 'partition', line
+
+ # No match.
+ return (None, line)
+
+ def _parse_table_name(self, line, state):
+ """Extract the table name.
+
+ :param line: The first line of SHOW CREATE TABLE
+ """
+
+ regex, cleanup = self._pr_name
+ m = regex.match(line)
+ if m:
+ state.table_name = cleanup(m.group('name'))
+
+ def _parse_table_options(self, line, state):
+ """Build a dictionary of all reflected table-level options.
+
+ :param line: The final line of SHOW CREATE TABLE output.
+ """
+
+ options = {}
+
+ if not line or line == ')':
+ pass
+
+ else:
+ rest_of_line = line[:]
+ for regex, cleanup in self._pr_options:
+ m = regex.search(rest_of_line)
+ if not m:
+ continue
+ directive, value = m.group('directive'), m.group('val')
+ if cleanup:
+ value = cleanup(value)
+ options[directive.lower()] = value
+ rest_of_line = regex.sub('', rest_of_line)
+
+ for nope in ('auto_increment', 'data directory', 'index directory'):
+ options.pop(nope, None)
+
+ for opt, val in options.items():
+ state.table_options['%s_%s' % (self.dialect.name, opt)] = val
+
+ def _parse_column(self, line, state):
+ """Extract column details.
+
+ Falls back to a 'minimal support' variant if full parse fails.
+
+ :param line: Any column-bearing line from SHOW CREATE TABLE
+ """
+
+ spec = None
+ m = self._re_column.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = True
+ else:
+ m = self._re_column_loose.match(line)
+ if m:
+ spec = m.groupdict()
+ spec['full'] = False
+ if not spec:
+ util.warn("Unknown column definition %r" % line)
+ return
+ if not spec['full']:
+ util.warn("Incomplete reflection of column definition %r" % line)
+
+ name, type_, args = spec['name'], spec['coltype'], spec['arg']
+
+ try:
+ col_type = self.dialect.ischema_names[type_]
+ except KeyError:
+ util.warn("Did not recognize type '%s' of column '%s'" %
+ (type_, name))
+ col_type = sqltypes.NullType
+
+ # Column type positional arguments eg. varchar(32)
+ if args is None or args == '':
+ type_args = []
+ elif args[0] == "'" and args[-1] == "'":
+ type_args = self._re_csv_str.findall(args)
+ else:
+ type_args = [int(v) for v in self._re_csv_int.findall(args)]
+
+ # Column type keyword options
+ type_kw = {}
+
+ if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
+ if type_args:
+ type_kw['fsp'] = type_args.pop(0)
+
+ for kw in ('unsigned', 'zerofill'):
+ if spec.get(kw, False):
+ type_kw[kw] = True
+ for kw in ('charset', 'collate'):
+ if spec.get(kw, False):
+ type_kw[kw] = spec[kw]
+ if issubclass(col_type, _EnumeratedValues):
+ type_args = _EnumeratedValues._strip_values(type_args)
+
+ if issubclass(col_type, SET) and '' in type_args:
+ type_kw['retrieve_as_bitwise'] = True
+
+ type_instance = col_type(*type_args, **type_kw)
+
+ col_kw = {}
+
+ # NOT NULL
+ col_kw['nullable'] = True
+ # this can be "NULL" in the case of TIMESTAMP
+ if spec.get('notnull', False) == 'NOT NULL':
+ col_kw['nullable'] = False
+
+ # AUTO_INCREMENT
+ if spec.get('autoincr', False):
+ col_kw['autoincrement'] = True
+ elif issubclass(col_type, sqltypes.Integer):
+ col_kw['autoincrement'] = False
+
+ # DEFAULT
+ default = spec.get('default', None)
+
+ if default == 'NULL':
+ # eliminates the need to deal with this later.
+ default = None
+
+ col_d = dict(name=name, type=type_instance, default=default)
+ col_d.update(col_kw)
+ state.columns.append(col_d)
+
+ def _describe_to_create(self, table_name, columns):
+ """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
+
+ DESCRIBE is a much simpler reflection and is sufficient for
+ reflecting views for runtime use. This method formats DDL
+ for columns only- keys are omitted.
+
+ :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
+ SHOW FULL COLUMNS FROM rows must be rearranged for use with
+ this function.
+ """
+
+ buffer = []
+ for row in columns:
+ (name, col_type, nullable, default, extra) = \
+ [row[i] for i in (0, 1, 2, 4, 5)]
+
+ line = [' ']
+ line.append(self.preparer.quote_identifier(name))
+ line.append(col_type)
+ if not nullable:
+ line.append('NOT NULL')
+ if default:
+ if 'auto_increment' in default:
+ pass
+ elif (col_type.startswith('timestamp') and
+ default.startswith('C')):
+ line.append('DEFAULT')
+ line.append(default)
+ elif default == 'NULL':
+ line.append('DEFAULT')
+ line.append(default)
+ else:
+ line.append('DEFAULT')
+ line.append("'%s'" % default.replace("'", "''"))
+ if extra:
+ line.append(extra)
+
+ buffer.append(' '.join(line))
+
+ return ''.join([('CREATE TABLE %s (\n' %
+ self.preparer.quote_identifier(table_name)),
+ ',\n'.join(buffer),
+ '\n) '])
+
+ def _parse_keyexprs(self, identifiers):
+ """Unpack '"col"(2),"col" ASC'-ish strings into components."""
+
+ return self._re_keyexprs.findall(identifiers)
+
+ def _prep_regexes(self):
+ """Pre-compile regular expressions."""
+
+ self._re_columns = []
+ self._pr_options = []
+
+ _final = self.preparer.final_quote
+
+ quotes = dict(zip(('iq', 'fq', 'esc_fq'),
+ [re.escape(s) for s in
+ (self.preparer.initial_quote,
+ _final,
+ self.preparer._escape_identifier(_final))]))
+
+ self._pr_name = _pr_compile(
+ r'^CREATE (?:\w+ +)?TABLE +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
+ self.preparer._unescape_identifier)
+
+ # `col`,`col2`(32),`col3`(15) DESC
+ #
+ # Note: ASC and DESC aren't reflected, so we'll punt...
+ self._re_keyexprs = _re_compile(
+ r'(?:'
+ r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
+ r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
+
+ # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
+ self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
+
+ # 123 or 123,456
+ self._re_csv_int = _re_compile(r'\d+')
+
+ # `colname` <type> [type opts]
+ # (NOT NULL | NULL)
+ # DEFAULT ('value' | CURRENT_TIMESTAMP...)
+ # COMMENT 'comment'
+ # COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
+ # STORAGE (DISK|MEMORY)
+ self._re_column = _re_compile(
+ r' '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
+ r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
+ r'(?: +(?P<unsigned>UNSIGNED))?'
+ r'(?: +(?P<zerofill>ZEROFILL))?'
+ r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
+ r'(?: +COLLATE +(?P<collate>[\w_]+))?'
+ r'(?: +(?P<notnull>(?:NOT )?NULL))?'
+ r'(?: +DEFAULT +(?P<default>'
+ r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
+ r'(?: +ON UPDATE \w+)?)'
+ r'))?'
+ r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
+            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
+ r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
+ r'(?: +STORAGE +(?P<storage>\w+))?'
+ r'(?: +(?P<extra>.*))?'
+ r',?$'
+ % quotes
+ )
+
+ # Fallback, try to parse as little as possible
+ self._re_column_loose = _re_compile(
+ r' '
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'(?P<coltype>\w+)'
+ r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
+            r'.*?(?P<notnull>(?:NOT )?NULL)?'
+ % quotes
+ )
+
+ # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
+ # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
+ # KEY_BLOCK_SIZE size | WITH PARSER name
+ self._re_key = _re_compile(
+ r' '
+ r'(?:(?P<type>\S+) )?KEY'
+ r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
+ r'(?: +USING +(?P<using_pre>\S+))?'
+ r' +\((?P<columns>.+?)\)'
+ r'(?: +USING +(?P<using_post>\S+))?'
+ r'(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?'
+ r'(?: +WITH PARSER +(?P<parser>\S+))?'
+ r',?$'
+ % quotes
+ )
+
+ # CONSTRAINT `name` FOREIGN KEY (`local_col`)
+ # REFERENCES `remote` (`remote_col`)
+ # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
+ # ON DELETE CASCADE ON UPDATE RESTRICT
+ #
+ # unique constraints come back as KEYs
+ kw = quotes.copy()
+        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NO ACTION'
+ self._re_constraint = _re_compile(
+ r' '
+ r'CONSTRAINT +'
+ r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
+ r'FOREIGN KEY +'
+ r'\((?P<local>[^\)]+?)\) REFERENCES +'
+ r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s'
+ r'(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
+ r'\((?P<foreign>[^\)]+?)\)'
+ r'(?: +(?P<match>MATCH \w+))?'
+ r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
+ r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
+ % kw
+ )
+
+ # PARTITION
+ #
+ # punt!
+ self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
+
+ # Table-level options (COLLATE, ENGINE, etc.)
+ # Do the string options first, since they have quoted
+ # strings we need to get rid of.
+ for option in _options_of_type_string:
+ self._add_option_string(option)
+
+ for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
+ 'AVG_ROW_LENGTH', 'CHARACTER SET',
+ 'DEFAULT CHARSET', 'CHECKSUM',
+ 'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
+ 'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
+ 'KEY_BLOCK_SIZE'):
+ self._add_option_word(option)
+
+ self._add_option_regex('UNION', r'\([^\)]+\)')
+ self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
+ self._add_option_regex(
+ 'RAID_TYPE',
+            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+\s*RAID_CHUNKSIZE\s*=\s*\w+')
+
+ _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
+
+ def _add_option_string(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r"'(?P<val>(?:[^']|'')*?)'(?!')" %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(
+ regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")
+ ))
+
+ def _add_option_word(self, directive):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>\w+)' %
+ (re.escape(directive), self._optional_equals))
+ self._pr_options.append(_pr_compile(regex))
+
+ def _add_option_regex(self, directive, regex):
+ regex = (r'(?P<directive>%s)%s'
+ r'(?P<val>%s)' %
+ (re.escape(directive), self._optional_equals, regex))
+ self._pr_options.append(_pr_compile(regex))
+
+_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
+ 'PASSWORD', 'CONNECTION')
+
+
+def _pr_compile(regex, cleanup=None):
+ """Prepare a 2-tuple of compiled regex and callable."""
+
+ return (_re_compile(regex), cleanup)
+
+
+def _re_compile(regex):
+ """Compile a string to regex, I and UNICODE."""
+
+ return re.compile(regex, re.I | re.UNICODE)
diff --git a/lib/sqlalchemy/dialects/mysql/types.py b/lib/sqlalchemy/dialects/mysql/types.py
new file mode 100644
index 000000000..d20b8bdfc
--- /dev/null
+++ b/lib/sqlalchemy/dialects/mysql/types.py
@@ -0,0 +1,766 @@
+# mysql/types.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+import datetime
+from ... import exc, util
+from ... import types as sqltypes
+
+
+class _NumericType(object):
+ """Base for MySQL numeric types.
+
+ This is the base both for NUMERIC as well as INTEGER, hence
+ it's a mixin.
+
+ """
+
+ def __init__(self, unsigned=False, zerofill=False, **kw):
+ self.unsigned = unsigned
+ self.zerofill = zerofill
+ super(_NumericType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_NumericType, sqltypes.Numeric])
+
+
+class _FloatType(_NumericType, sqltypes.Float):
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ if isinstance(self, (REAL, DOUBLE)) and \
+ (
+ (precision is None and scale is not None) or
+ (precision is not None and scale is None)
+ ):
+ raise exc.ArgumentError(
+ "You must specify both precision and scale or omit "
+ "both altogether.")
+ super(_FloatType, self).__init__(
+ precision=precision, asdecimal=asdecimal, **kw)
+ self.scale = scale
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_FloatType,
+ _NumericType,
+ sqltypes.Float])
+
+
+class _IntegerType(_NumericType, sqltypes.Integer):
+ def __init__(self, display_width=None, **kw):
+ self.display_width = display_width
+ super(_IntegerType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self, to_inspect=[_IntegerType,
+ _NumericType,
+ sqltypes.Integer])
+
+
+class _StringType(sqltypes.String):
+ """Base for MySQL string types."""
+
+ def __init__(self, charset=None, collation=None,
+ ascii=False, binary=False, unicode=False,
+ national=False, **kw):
+ self.charset = charset
+
+ # allow collate= or collation=
+ kw.setdefault('collation', kw.pop('collate', collation))
+
+ self.ascii = ascii
+ self.unicode = unicode
+ self.binary = binary
+ self.national = national
+ super(_StringType, self).__init__(**kw)
+
+ def __repr__(self):
+ return util.generic_repr(self,
+ to_inspect=[_StringType, sqltypes.String])
+
+
+class _MatchType(sqltypes.Float, sqltypes.MatchType):
+ def __init__(self, **kw):
+ # TODO: float arguments?
+ sqltypes.Float.__init__(self)
+ sqltypes.MatchType.__init__(self)
+
+
+
+class NUMERIC(_NumericType, sqltypes.NUMERIC):
+ """MySQL NUMERIC type."""
+
+ __visit_name__ = 'NUMERIC'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a NUMERIC.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(NUMERIC, self).__init__(precision=precision,
+ scale=scale, asdecimal=asdecimal, **kw)
+
+
+class DECIMAL(_NumericType, sqltypes.DECIMAL):
+ """MySQL DECIMAL type."""
+
+ __visit_name__ = 'DECIMAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DECIMAL.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DECIMAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class DOUBLE(_FloatType):
+ """MySQL DOUBLE type."""
+
+ __visit_name__ = 'DOUBLE'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a DOUBLE.
+
+ .. note::
+
+ The :class:`.DOUBLE` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(DOUBLE, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class REAL(_FloatType, sqltypes.REAL):
+ """MySQL REAL type."""
+
+ __visit_name__ = 'REAL'
+
+ def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
+ """Construct a REAL.
+
+ .. note::
+
+ The :class:`.REAL` type by default converts from float
+ to Decimal, using a truncation that defaults to 10 digits.
+ Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
+ to change this scale, or ``asdecimal=False`` to return values
+ directly as Python floating points.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(REAL, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+
+class FLOAT(_FloatType, sqltypes.FLOAT):
+ """MySQL FLOAT type."""
+
+ __visit_name__ = 'FLOAT'
+
+ def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
+ """Construct a FLOAT.
+
+ :param precision: Total digits in this number. If scale and precision
+ are both None, values are stored to limits allowed by the server.
+
+ :param scale: The number of digits after the decimal point.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(FLOAT, self).__init__(precision=precision, scale=scale,
+ asdecimal=asdecimal, **kw)
+
+ def bind_processor(self, dialect):
+ return None
+
+
+class INTEGER(_IntegerType, sqltypes.INTEGER):
+ """MySQL INTEGER type."""
+
+ __visit_name__ = 'INTEGER'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct an INTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(INTEGER, self).__init__(display_width=display_width, **kw)
+
+
+class BIGINT(_IntegerType, sqltypes.BIGINT):
+ """MySQL BIGINTEGER type."""
+
+ __visit_name__ = 'BIGINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a BIGINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(BIGINT, self).__init__(display_width=display_width, **kw)
+
+
+class MEDIUMINT(_IntegerType):
+ """MySQL MEDIUMINTEGER type."""
+
+ __visit_name__ = 'MEDIUMINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a MEDIUMINTEGER
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(MEDIUMINT, self).__init__(display_width=display_width, **kw)
+
+
+class TINYINT(_IntegerType):
+ """MySQL TINYINT type."""
+
+ __visit_name__ = 'TINYINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a TINYINT.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(TINYINT, self).__init__(display_width=display_width, **kw)
+
+
+class SMALLINT(_IntegerType, sqltypes.SMALLINT):
+ """MySQL SMALLINTEGER type."""
+
+ __visit_name__ = 'SMALLINT'
+
+ def __init__(self, display_width=None, **kw):
+ """Construct a SMALLINTEGER.
+
+ :param display_width: Optional, maximum display width for this number.
+
+ :param unsigned: a boolean, optional.
+
+ :param zerofill: Optional. If true, values will be stored as strings
+ left-padded with zeros. Note that this does not effect the values
+ returned by the underlying database API, which continue to be
+ numeric.
+
+ """
+ super(SMALLINT, self).__init__(display_width=display_width, **kw)
+
+
+class BIT(sqltypes.TypeEngine):
+ """MySQL BIT type.
+
+ This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
+ for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
+ MSTinyInteger() type.
+
+ """
+
+ __visit_name__ = 'BIT'
+
+ def __init__(self, length=None):
+ """Construct a BIT.
+
+ :param length: Optional, number of bits.
+
+ """
+ self.length = length
+
+ def result_processor(self, dialect, coltype):
+ """Convert a MySQL's 64 bit, variable length binary string to a long.
+
+ TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector
+ already do this, so this logic should be moved to those dialects.
+
+ """
+
+ def process(value):
+ if value is not None:
+ v = 0
+ for i in value:
+ if not isinstance(i, int):
+ i = ord(i) # convert byte to int on Python 2
+ v = v << 8 | i
+ return v
+ return value
+ return process
+
+
+class TIME(sqltypes.TIME):
+ """MySQL TIME type. """
+
+ __visit_name__ = 'TIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8 The MySQL-specific TIME
+ type as well as fractional seconds support.
+
+ """
+ super(TIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+ def result_processor(self, dialect, coltype):
+ time = datetime.time
+
+ def process(value):
+ # convert from a timedelta value
+ if value is not None:
+ microseconds = value.microseconds
+ seconds = value.seconds
+ minutes = seconds // 60
+ return time(minutes // 60,
+ minutes % 60,
+ seconds - minutes * 60,
+ microsecond=microseconds)
+ else:
+ return None
+ return process
+
+
+class TIMESTAMP(sqltypes.TIMESTAMP):
+ """MySQL TIMESTAMP type.
+
+ """
+
+ __visit_name__ = 'TIMESTAMP'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL TIMESTAMP type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the TIMESTAMP type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.TIMESTAMP`
+ with fractional seconds support.
+
+ """
+ super(TIMESTAMP, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class DATETIME(sqltypes.DATETIME):
+ """MySQL DATETIME type.
+
+ """
+
+ __visit_name__ = 'DATETIME'
+
+ def __init__(self, timezone=False, fsp=None):
+ """Construct a MySQL DATETIME type.
+
+ :param timezone: not used by the MySQL dialect.
+ :param fsp: fractional seconds precision value.
+ MySQL 5.6.4 supports storage of fractional seconds;
+ this parameter will be used when emitting DDL
+ for the DATETIME type.
+
+ .. note::
+
+ DBAPI driver support for fractional seconds may
+ be limited; current support includes
+ MySQL Connector/Python.
+
+ .. versionadded:: 0.8.5 Added MySQL-specific :class:`.mysql.DATETIME`
+ with fractional seconds support.
+
+ """
+ super(DATETIME, self).__init__(timezone=timezone)
+ self.fsp = fsp
+
+
+class YEAR(sqltypes.TypeEngine):
+ """MySQL YEAR type, for single byte storage of years 1901-2155."""
+
+ __visit_name__ = 'YEAR'
+
+ def __init__(self, display_width=None):
+ self.display_width = display_width
+
+
+class TEXT(_StringType, sqltypes.TEXT):
+    """MySQL TEXT type, for text up to 2^16 characters."""
+
+    __visit_name__ = 'TEXT'
+
+    def __init__(self, length=None, **kw):
+        """Construct a TEXT.
+
+        :param length: Optional, if provided the server may optimize storage
+         by substituting the smallest TEXT type sufficient to store
+         ``length`` characters.
+
+        :param charset: Optional, a column-level character set for this string
+         value.  Takes precedence to 'ascii' or 'unicode' short-hand.
+
+        :param collation: Optional, a column-level collation for this string
+         value.  Takes precedence to 'binary' short-hand.
+
+        :param ascii: Defaults to False: short-hand for the ``latin1``
+         character set, generates ASCII in schema.
+
+        :param unicode: Defaults to False: short-hand for the ``ucs2``
+         character set, generates UNICODE in schema.
+
+        :param national: Optional. If true, use the server's configured
+         national character set.
+
+        :param binary: Defaults to False: short-hand, pick the binary
+         collation type that matches the column's character set.  Generates
+         BINARY in schema.  This does not affect the type of data stored,
+         only the collation of character data.
+
+        """
+        # _StringType consumes the MySQL-specific charset/collation keywords
+        super(TEXT, self).__init__(length=length, **kw)
+
+
+class TINYTEXT(_StringType):
+    """MySQL TINYTEXT type, for text up to 2^8 characters."""
+
+    __visit_name__ = 'TINYTEXT'
+
+    def __init__(self, **kwargs):
+        """Construct a TINYTEXT.
+
+        :param charset: Optional, a column-level character set for this string
+         value.  Takes precedence to 'ascii' or 'unicode' short-hand.
+
+        :param collation: Optional, a column-level collation for this string
+         value.  Takes precedence to 'binary' short-hand.
+
+        :param ascii: Defaults to False: short-hand for the ``latin1``
+         character set, generates ASCII in schema.
+
+        :param unicode: Defaults to False: short-hand for the ``ucs2``
+         character set, generates UNICODE in schema.
+
+        :param national: Optional. If true, use the server's configured
+         national character set.
+
+        :param binary: Defaults to False: short-hand, pick the binary
+         collation type that matches the column's character set.  Generates
+         BINARY in schema.  This does not affect the type of data stored,
+         only the collation of character data.
+
+        """
+        # no ``length`` parameter: TINYTEXT's capacity is fixed by the server
+        super(TINYTEXT, self).__init__(**kwargs)
+
+
+class MEDIUMTEXT(_StringType):
+    """MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
+
+    __visit_name__ = 'MEDIUMTEXT'
+
+    def __init__(self, **kwargs):
+        """Construct a MEDIUMTEXT.
+
+        :param charset: Optional, a column-level character set for this string
+         value.  Takes precedence to 'ascii' or 'unicode' short-hand.
+
+        :param collation: Optional, a column-level collation for this string
+         value.  Takes precedence to 'binary' short-hand.
+
+        :param ascii: Defaults to False: short-hand for the ``latin1``
+         character set, generates ASCII in schema.
+
+        :param unicode: Defaults to False: short-hand for the ``ucs2``
+         character set, generates UNICODE in schema.
+
+        :param national: Optional. If true, use the server's configured
+         national character set.
+
+        :param binary: Defaults to False: short-hand, pick the binary
+         collation type that matches the column's character set.  Generates
+         BINARY in schema.  This does not affect the type of data stored,
+         only the collation of character data.
+
+        """
+        # no ``length`` parameter: MEDIUMTEXT's capacity is fixed by the server
+        super(MEDIUMTEXT, self).__init__(**kwargs)
+
+
+class LONGTEXT(_StringType):
+    """MySQL LONGTEXT type, for text up to 2^32 characters."""
+
+    __visit_name__ = 'LONGTEXT'
+
+    def __init__(self, **kwargs):
+        """Construct a LONGTEXT.
+
+        :param charset: Optional, a column-level character set for this string
+         value.  Takes precedence to 'ascii' or 'unicode' short-hand.
+
+        :param collation: Optional, a column-level collation for this string
+         value.  Takes precedence to 'binary' short-hand.
+
+        :param ascii: Defaults to False: short-hand for the ``latin1``
+         character set, generates ASCII in schema.
+
+        :param unicode: Defaults to False: short-hand for the ``ucs2``
+         character set, generates UNICODE in schema.
+
+        :param national: Optional. If true, use the server's configured
+         national character set.
+
+        :param binary: Defaults to False: short-hand, pick the binary
+         collation type that matches the column's character set.  Generates
+         BINARY in schema.  This does not affect the type of data stored,
+         only the collation of character data.
+
+        """
+        # no ``length`` parameter: LONGTEXT's capacity is fixed by the server
+        super(LONGTEXT, self).__init__(**kwargs)
+
+
+class VARCHAR(_StringType, sqltypes.VARCHAR):
+    """MySQL VARCHAR type, for variable-length character data."""
+
+    __visit_name__ = 'VARCHAR'
+
+    def __init__(self, length=None, **kwargs):
+        """Construct a VARCHAR.
+
+        :param length: Maximum data length, in characters.
+
+        :param charset: Optional, a column-level character set for this string
+         value.  Takes precedence to 'ascii' or 'unicode' short-hand.
+
+        :param collation: Optional, a column-level collation for this string
+         value.  Takes precedence to 'binary' short-hand.
+
+        :param ascii: Defaults to False: short-hand for the ``latin1``
+         character set, generates ASCII in schema.
+
+        :param unicode: Defaults to False: short-hand for the ``ucs2``
+         character set, generates UNICODE in schema.
+
+        :param national: Optional. If true, use the server's configured
+         national character set.
+
+        :param binary: Defaults to False: short-hand, pick the binary
+         collation type that matches the column's character set.  Generates
+         BINARY in schema.  This does not affect the type of data stored,
+         only the collation of character data.
+
+        """
+        super(VARCHAR, self).__init__(length=length, **kwargs)
+
+
+class CHAR(_StringType, sqltypes.CHAR):
+    """MySQL CHAR type, for fixed-length character data."""
+
+    __visit_name__ = 'CHAR'
+
+    def __init__(self, length=None, **kwargs):
+        """Construct a CHAR.
+
+        :param length: Maximum data length, in characters.
+
+        :param binary: Optional, use the default binary collation for the
+         national character set.  This does not affect the type of data
+         stored, use a BINARY type for binary data.
+
+        :param collation: Optional, request a particular collation.  Must be
+         compatible with the national character set.
+
+        """
+        super(CHAR, self).__init__(length=length, **kwargs)
+
+    # NOTE(review): declared @classmethod but the first argument is named
+    # ``self``; it receives the class here, not an instance.
+    @classmethod
+    def _adapt_string_for_cast(self, type_):
+        # copy the given string type into a CHAR
+        # for the purposes of rendering a CAST expression
+        type_ = sqltypes.to_instance(type_)
+        if isinstance(type_, sqltypes.CHAR):
+            # already a CHAR; use as-is
+            return type_
+        elif isinstance(type_, _StringType):
+            # carry over MySQL-specific charset/collation flags
+            return CHAR(
+                length=type_.length,
+                charset=type_.charset,
+                collation=type_.collation,
+                ascii=type_.ascii,
+                binary=type_.binary,
+                unicode=type_.unicode,
+                national=False  # not supported in CAST
+            )
+        else:
+            # generic string type: keep only the length
+            return CHAR(length=type_.length)
+
+
+class NVARCHAR(_StringType, sqltypes.NVARCHAR):
+    """MySQL NVARCHAR type.
+
+    For variable-length character data in the server's configured national
+    character set.
+    """
+
+    __visit_name__ = 'NVARCHAR'
+
+    def __init__(self, length=None, **kwargs):
+        """Construct an NVARCHAR.
+
+        :param length: Maximum data length, in characters.
+
+        :param binary: Optional, use the default binary collation for the
+         national character set.  This does not affect the type of data
+         stored, use a BINARY type for binary data.
+
+        :param collation: Optional, request a particular collation.  Must be
+         compatible with the national character set.
+
+        """
+        # N-types always target the server's national character set
+        kwargs['national'] = True
+        super(NVARCHAR, self).__init__(length=length, **kwargs)
+
+
+class NCHAR(_StringType, sqltypes.NCHAR):
+    """MySQL NCHAR type.
+
+    For fixed-length character data in the server's configured national
+    character set.
+    """
+
+    __visit_name__ = 'NCHAR'
+
+    def __init__(self, length=None, **kwargs):
+        """Construct an NCHAR.
+
+        :param length: Maximum data length, in characters.
+
+        :param binary: Optional, use the default binary collation for the
+         national character set.  This does not affect the type of data
+         stored, use a BINARY type for binary data.
+
+        :param collation: Optional, request a particular collation.  Must be
+         compatible with the national character set.
+
+        """
+        # N-types always target the server's national character set
+        kwargs['national'] = True
+        super(NCHAR, self).__init__(length=length, **kwargs)
+
+
+# The BLOB variants below differ only in the DDL keyword emitted; all
+# runtime behavior comes from sqltypes._Binary.
+class TINYBLOB(sqltypes._Binary):
+    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""
+
+    __visit_name__ = 'TINYBLOB'
+
+
+class MEDIUMBLOB(sqltypes._Binary):
+    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""
+
+    __visit_name__ = 'MEDIUMBLOB'
+
+
+class LONGBLOB(sqltypes._Binary):
+    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""
+
+    __visit_name__ = 'LONGBLOB'
diff --git a/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
index 0cf92cd13..fe4c13705 100644
--- a/lib/sqlalchemy/dialects/mysql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/mysql/zxjdbc.py
@@ -1,5 +1,5 @@
# mysql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -14,6 +14,9 @@
<database>
:driverurl: http://dev.mysql.com/downloads/connector/j/
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
+
Character Sets
--------------
diff --git a/lib/sqlalchemy/dialects/oracle/__init__.py b/lib/sqlalchemy/dialects/oracle/__init__.py
index fd32f2235..0c5c3174b 100644
--- a/lib/sqlalchemy/dialects/oracle/__init__.py
+++ b/lib/sqlalchemy/dialects/oracle/__init__.py
@@ -1,5 +1,5 @@
# oracle/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index b482c9069..6992670c4 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -1,5 +1,5 @@
# oracle/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -287,6 +287,7 @@ from sqlalchemy import util, sql
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, visitors, expression
from sqlalchemy.sql import operators as sql_operators
+from sqlalchemy.sql.elements import quoted_name
from sqlalchemy import types as sqltypes, schema as sa_schema
from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, \
BLOB, CLOB, TIMESTAMP, FLOAT
@@ -665,8 +666,8 @@ class OracleCompiler(compiler.SQLCompiler):
else:
return sql.and_(*clauses)
- def visit_outer_join_column(self, vc):
- return self.process(vc.column) + "(+)"
+ def visit_outer_join_column(self, vc, **kw):
+ return self.process(vc.column, **kw) + "(+)"
def visit_sequence(self, seq):
return (self.dialect.identifier_preparer.format_sequence(seq) +
@@ -692,8 +693,9 @@ class OracleCompiler(compiler.SQLCompiler):
self.bindparam_string(self._truncate_bindparam(outparam)))
columns.append(
self.process(col_expr, within_columns_clause=False))
- self.result_map[outparam.key] = (
- outparam.key,
+
+ self._add_to_result_map(
+ outparam.key, outparam.key,
(column, getattr(column, 'name', None),
getattr(column, 'key', None)),
column.type
@@ -736,7 +738,7 @@ class OracleCompiler(compiler.SQLCompiler):
# Outer select and "ROWNUM as ora_rn" can be dropped if
# limit=0
- # TODO: use annotations instead of clone + attr set ?
+ kwargs['select_wraps_for'] = select
select = select._generate()
select._oracle_visit = True
@@ -793,7 +795,6 @@ class OracleCompiler(compiler.SQLCompiler):
offsetselect._for_update_arg = select._for_update_arg
select = offsetselect
- kwargs['iswrapper'] = getattr(select, '_is_wrapper', False)
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
def limit_clause(self, select, **kw):
@@ -919,6 +920,8 @@ class OracleDialect(default.DefaultDialect):
supports_sane_rowcount = True
supports_sane_multi_rowcount = False
+ supports_simple_order_by_label = False
+
supports_sequences = True
sequences_optional = False
postfetch_lastrowid = False
@@ -1030,6 +1033,8 @@ class OracleDialect(default.DefaultDialect):
if name.upper() == name and not \
self.identifier_preparer._requires_quotes(name.lower()):
return name.lower()
+ elif name.lower() == name:
+ return quoted_name(name, quote=True)
else:
return name
@@ -1278,7 +1283,7 @@ class OracleDialect(default.DefaultDialect):
'type': coltype,
'nullable': nullable,
'default': default,
- 'autoincrement': default is None
+ 'autoincrement': 'auto',
}
if orig_colname.lower() == orig_colname:
cdict['quote'] = True
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index 6fc88da8e..0c93ced97 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -1,5 +1,5 @@
# oracle/cx_oracle.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -293,6 +293,7 @@ from .base import OracleCompiler, OracleDialect, OracleExecutionContext
from . import base as oracle
from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
+from sqlalchemy import util
import random
import collections
import decimal
@@ -414,6 +415,21 @@ class _OracleLong(oracle.LONG):
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
pass
+class _OracleEnum(_NativeUnicodeMixin, sqltypes.Enum):
+ def bind_processor(self, dialect):
+ enum_proc = sqltypes.Enum.bind_processor(self, dialect)
+ if util.py2k:
+ unicode_proc = _NativeUnicodeMixin.bind_processor(self, dialect)
+ else:
+ unicode_proc = None
+
+ def process(value):
+ raw_str = enum_proc(value)
+ if unicode_proc:
+ raw_str = unicode_proc(raw_str)
+ return raw_str
+ return process
+
class _OracleUnicodeText(
_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText):
@@ -650,6 +666,7 @@ class OracleDialect_cx_oracle(OracleDialect):
sqltypes.String: _OracleString,
sqltypes.UnicodeText: _OracleUnicodeText,
sqltypes.CHAR: _OracleChar,
+ sqltypes.Enum: _OracleEnum,
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
@@ -719,8 +736,10 @@ class OracleDialect_cx_oracle(OracleDialect):
# this occurs in tests with mock DBAPIs
self._cx_oracle_string_types = set()
self._cx_oracle_with_unicode = False
- elif self.cx_oracle_ver >= (5,) and not \
- hasattr(self.dbapi, 'UNICODE'):
+ elif util.py3k or (
+ self.cx_oracle_ver >= (5,) and not \
+ hasattr(self.dbapi, 'UNICODE')
+ ):
# cx_Oracle WITH_UNICODE mode. *only* python
# unicode objects accepted for anything
self.supports_unicode_statements = True
diff --git a/lib/sqlalchemy/dialects/oracle/zxjdbc.py b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
index 82c8e2f0f..c3259feae 100644
--- a/lib/sqlalchemy/dialects/oracle/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/oracle/zxjdbc.py
@@ -1,5 +1,5 @@
# oracle/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -10,8 +10,10 @@
:name: zxJDBC for Jython
:dbapi: zxjdbc
:connectstring: oracle+zxjdbc://user:pass@host/dbname
- :driverurl: http://www.oracle.com/technology/software/tech/java/\
-sqlj_jdbc/index.html.
+ :driverurl: http://www.oracle.com/technetwork/database/features/jdbc/index-091264.html
+
+ .. note:: Jython is not supported by current versions of SQLAlchemy. The
+ zxjdbc dialect should be considered as experimental.
"""
import decimal
@@ -68,8 +70,7 @@ class OracleCompiler_zxjdbc(OracleCompiler):
expression._select_iterables(returning_cols))
# within_columns_clause=False so that labels (foo AS bar) don't render
- columns = [self.process(c, within_columns_clause=False,
- result_map=self.result_map)
+ columns = [self.process(c, within_columns_clause=False)
for c in self.returning_cols]
if not hasattr(self, 'returning_parameters'):
diff --git a/lib/sqlalchemy/dialects/postgres.py b/lib/sqlalchemy/dialects/postgres.py
deleted file mode 100644
index f813e0003..000000000
--- a/lib/sqlalchemy/dialects/postgres.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# dialects/postgres.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-# backwards compat with the old name
-from sqlalchemy.util import warn_deprecated
-
-warn_deprecated(
- "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to "
- "'postgresql'. The new URL format is "
- "postgresql[+driver]://<user>:<pass>@<host>/<dbname>"
-)
-
-from sqlalchemy.dialects.postgresql import *
-from sqlalchemy.dialects.postgresql import base
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
index 01a846314..8aa4509be 100644
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ b/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -1,5 +1,5 @@
# postgresql/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -12,11 +12,13 @@ base.dialect = psycopg2.dialect
from .base import \
INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
INET, CIDR, UUID, BIT, MACADDR, OID, DOUBLE_PRECISION, TIMESTAMP, TIME, \
- DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect, array, Any, All, \
- TSVECTOR, DropEnumType
-from .constraints import ExcludeConstraint
+ DATE, BYTEA, BOOLEAN, INTERVAL, ENUM, dialect, TSVECTOR, DropEnumType, \
+ CreateEnumType
from .hstore import HSTORE, hstore
-from .json import JSON, JSONElement, JSONB
+from .json import JSON, JSONB
+from .array import array, ARRAY, Any, All
+from .ext import aggregate_order_by, ExcludeConstraint, array_agg
+
from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
TSTZRANGE
@@ -24,8 +26,9 @@ __all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'OID',
'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA', 'BOOLEAN',
- 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'Any', 'All', 'array', 'HSTORE',
+ 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'array', 'HSTORE',
'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
- 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'JSONElement',
- 'DropEnumType'
+ 'TSRANGE', 'TSTZRANGE', 'json', 'JSON', 'JSONB', 'Any', 'All',
+ 'DropEnumType', 'CreateEnumType', 'ExcludeConstraint',
+ 'aggregate_order_by', 'array_agg'
)
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py
new file mode 100644
index 000000000..8d811aa55
--- /dev/null
+++ b/lib/sqlalchemy/dialects/postgresql/array.py
@@ -0,0 +1,314 @@
+# postgresql/array.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from .base import ischema_names
+from ...sql import expression, operators
+from ...sql.base import SchemaEventTarget
+from ... import types as sqltypes
+
+try:
+ from uuid import UUID as _python_UUID
+except ImportError:
+ _python_UUID = None
+
+
+def Any(other, arrexpr, operator=operators.eq):
+    """A synonym for the :meth:`.ARRAY.Comparator.any` method.
+
+    This function is legacy and is here for backwards-compatibility.
+
+    .. seealso::
+
+        :func:`.expression.any_`
+
+    """
+
+    return arrexpr.any(other, operator)
+
+
+def All(other, arrexpr, operator=operators.eq):
+    """A synonym for the :meth:`.ARRAY.Comparator.all` method.
+
+    This function is legacy and is here for backwards-compatibility.
+
+    .. seealso::
+
+        :func:`.expression.all_`
+
+    """
+
+    return arrexpr.all(other, operator)
+
+
+class array(expression.Tuple):
+
+    """A Postgresql ARRAY literal.
+
+    This is used to produce ARRAY literals in SQL expressions, e.g.::
+
+        from sqlalchemy.dialects.postgresql import array
+        from sqlalchemy.dialects import postgresql
+        from sqlalchemy import select, func
+
+        stmt = select([
+                    array([1,2]) + array([3,4,5])
+                ])
+
+        print(stmt.compile(dialect=postgresql.dialect()))
+
+    Produces the SQL::
+
+        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
+            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
+
+    An instance of :class:`.array` will always have the datatype
+    :class:`.ARRAY`.  The "inner" type of the array is inferred from
+    the values present, unless the ``type_`` keyword argument is passed::
+
+        array(['foo', 'bar'], type_=CHAR)
+
+    .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
+
+    See also:
+
+    :class:`.postgresql.ARRAY`
+
+    """
+    __visit_name__ = 'array'
+
+    def __init__(self, clauses, **kw):
+        super(array, self).__init__(*clauses, **kw)
+        # wrap the inferred element type in an ARRAY
+        self.type = ARRAY(self.type)
+
+    def _bind_param(self, operator, obj, _assume_scalar=False, type_=None):
+        # produce bound parameters for a literal compared to this array;
+        # scalars bind directly, sequences recurse into a nested array()
+        if _assume_scalar or operator is operators.getitem:
+            # if getitem->slice were called, Indexable produces
+            # a Slice object from that
+            assert isinstance(obj, int)
+            return expression.BindParameter(
+                None, obj, _compared_to_operator=operator,
+                type_=type_,
+                _compared_to_type=self.type, unique=True)
+
+        else:
+            return array([
+                self._bind_param(operator, o, _assume_scalar=True, type_=type_)
+                for o in obj])
+
+    def self_group(self, against=None):
+        # parenthesize only against ANY / ALL / indexing operators
+        if (against in (
+                operators.any_op, operators.all_op, operators.getitem)):
+            return expression.Grouping(self)
+        else:
+            return self
+
+
+# PostgreSQL array containment / overlap operators, used by
+# ARRAY.Comparator below.
+CONTAINS = operators.custom_op("@>", precedence=5)
+
+CONTAINED_BY = operators.custom_op("<@", precedence=5)
+
+OVERLAP = operators.custom_op("&&", precedence=5)
+
+
+class ARRAY(SchemaEventTarget, sqltypes.ARRAY):
+
+    """Postgresql ARRAY type.
+
+    .. versionchanged:: 1.1 The :class:`.postgresql.ARRAY` type is now
+       a subclass of the core :class:`.types.ARRAY` type.
+
+    The :class:`.postgresql.ARRAY` type is constructed in the same way
+    as the core :class:`.types.ARRAY` type; a member type is required, and a
+    number of dimensions is recommended if the type is to be used for more
+    than one dimension::
+
+        from sqlalchemy.dialects import postgresql
+
+        mytable = Table("mytable", metadata,
+                Column("data", postgresql.ARRAY(Integer, dimensions=2))
+            )
+
+    The :class:`.postgresql.ARRAY` type provides all operations defined on the
+    core :class:`.types.ARRAY` type, including support for "dimensions",
+    indexed access, and simple matching such as
+    :meth:`.types.ARRAY.Comparator.any` and
+    :meth:`.types.ARRAY.Comparator.all`.  :class:`.postgresql.ARRAY` class
+    also provides PostgreSQL-specific methods for containment operations,
+    including :meth:`.postgresql.ARRAY.Comparator.contains`
+    :meth:`.postgresql.ARRAY.Comparator.contained_by`,
+    and :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::
+
+        mytable.c.data.contains([1, 2])
+
+    The :class:`.postgresql.ARRAY` type may not be supported on all
+    PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
+
+    Additionally, the :class:`.postgresql.ARRAY` type does not work directly
+    in conjunction with the :class:`.ENUM` type.  For a workaround, see the
+    special type at :ref:`postgresql_array_of_enum`.
+
+    .. seealso::
+
+        :class:`.types.ARRAY` - base array type
+
+        :class:`.postgresql.array` - produces a literal array value.
+
+    """
+
+    class Comparator(sqltypes.ARRAY.Comparator):
+
+        """Define comparison operations for :class:`.ARRAY`.
+
+        Note that these operations are in addition to those provided
+        by the base :class:`.types.ARRAY.Comparator` class, including
+        :meth:`.types.ARRAY.Comparator.any` and
+        :meth:`.types.ARRAY.Comparator.all`.
+
+        """
+
+        def contains(self, other, **kwargs):
+            """Boolean expression.  Test if elements are a superset of the
+            elements of the argument array expression.
+            """
+            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
+
+        def contained_by(self, other):
+            """Boolean expression.  Test if elements are a proper subset of
+            the elements of the argument array expression.
+            """
+            return self.operate(
+                CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+        def overlap(self, other):
+            """Boolean expression.  Test if array has elements in common with
+            an argument array expression.
+            """
+            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
+
+    comparator_factory = Comparator
+
+    def __init__(self, item_type, as_tuple=False, dimensions=None,
+                 zero_indexes=False):
+        """Construct an ARRAY.
+
+        E.g.::
+
+          Column('myarray', ARRAY(Integer))
+
+        Arguments are:
+
+        :param item_type: The data type of items of this array. Note that
+          dimensionality is irrelevant here, so multi-dimensional arrays like
+          ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
+          ``ARRAY(ARRAY(Integer))`` or such.
+
+        :param as_tuple=False: Specify whether return results
+          should be converted to tuples from lists. DBAPIs such
+          as psycopg2 return lists by default. When tuples are
+          returned, the results are hashable.
+
+        :param dimensions: if non-None, the ARRAY will assume a fixed
+         number of dimensions.  This will cause the DDL emitted for this
+         ARRAY to include the exact number of bracket clauses ``[]``,
+         and will also optimize the performance of the type overall.
+         Note that PG arrays are always implicitly "non-dimensioned",
+         meaning they can store any number of dimensions no matter how
+         they were declared.
+
+        :param zero_indexes=False: when True, index values will be converted
+         between Python zero-based and Postgresql one-based indexes, e.g.
+         a value of one will be added to all index values before passing
+         to the database.
+
+         .. versionadded:: 0.9.5
+
+
+        """
+        if isinstance(item_type, ARRAY):
+            raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
+                             "handles multi-dimensional arrays of basetype")
+        if isinstance(item_type, type):
+            # accept a type class and instantiate it with defaults
+            item_type = item_type()
+        self.item_type = item_type
+        self.as_tuple = as_tuple
+        self.dimensions = dimensions
+        self.zero_indexes = zero_indexes
+
+    @property
+    def hashable(self):
+        # hashable only when results are returned as tuples
+        return self.as_tuple
+
+    @property
+    def python_type(self):
+        return list
+
+    def compare_values(self, x, y):
+        """Compare two values for equality."""
+        return x == y
+
+    def _set_parent(self, column):
+        """Support SchemaEventTarget"""
+
+        # propagate schema events (e.g. ENUM create/drop) to the item type
+        if isinstance(self.item_type, SchemaEventTarget):
+            self.item_type._set_parent(column)
+
+    def _set_parent_with_dispatch(self, parent):
+        """Support SchemaEventTarget"""
+
+        if isinstance(self.item_type, SchemaEventTarget):
+            self.item_type._set_parent_with_dispatch(parent)
+
+    def _proc_array(self, arr, itemproc, dim, collection):
+        # recursively apply ``itemproc`` to each element of a (possibly
+        # nested) array, rebuilding with ``collection`` at every level
+        if dim is None:
+            arr = list(arr)
+        if dim == 1 or dim is None and (
+                # this has to be (list, tuple), or at least
+                # not hasattr('__iter__'), since Py3K strings
+                # etc. have __iter__
+                not arr or not isinstance(arr[0], (list, tuple))):
+            if itemproc:
+                return collection(itemproc(x) for x in arr)
+            else:
+                return collection(arr)
+        else:
+            return collection(
+                self._proc_array(
+                    x, itemproc,
+                    dim - 1 if dim is not None else None,
+                    collection)
+                for x in arr
+            )
+
+    def bind_processor(self, dialect):
+        """Return a processor converting Python sequences to lists
+        suitable for the DBAPI."""
+        item_proc = self.item_type.dialect_impl(dialect).\
+            bind_processor(dialect)
+
+        def process(value):
+            if value is None:
+                return value
+            else:
+                return self._proc_array(
+                    value,
+                    item_proc,
+                    self.dimensions,
+                    list)
+        return process
+
+    def result_processor(self, dialect, coltype):
+        """Return a processor converting DBAPI arrays to lists, or
+        tuples when ``as_tuple`` is set."""
+        item_proc = self.item_type.dialect_impl(dialect).\
+            result_processor(dialect, coltype)
+
+        def process(value):
+            if value is None:
+                return value
+            else:
+                return self._proc_array(
+                    value,
+                    item_proc,
+                    self.dimensions,
+                    tuple if self.as_tuple else list)
+        return process
+
+# register ARRAY under the internal '_array' key used by reflection
+ischema_names['_array'] = ARRAY
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index 1935d0cad..b16a82e04 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -1,5 +1,5 @@
# postgresql/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -102,7 +102,7 @@ via foreign key constraint, a decision must be made as to how the ``.schema``
is represented in those remote tables, in the case where that remote
schema name is also a member of the current
`Postgresql search path
-<http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
+<http://www.postgresql.org/docs/current/static/ddl-schemas.html#DDL-SCHEMAS-PATH>`_.
By default, the Postgresql dialect mimics the behavior encouraged by
Postgresql's own ``pg_get_constraintdef()`` builtin procedure. This function
@@ -401,6 +401,37 @@ The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
+.. _postgresql_index_storage:
+
+Index Storage Parameters
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+PostgreSQL allows storage parameters to be set on indexes. The storage
+parameters available depend on the index method used by the index. Storage
+parameters can be specified on :class:`.Index` using the ``postgresql_with``
+keyword argument::
+
+ Index('my_index', my_table.c.data, postgresql_with={"fillfactor": 50})
+
+.. versionadded:: 1.0.6
+
+.. _postgresql_index_concurrently:
+
+Indexes with CONCURRENTLY
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The Postgresql index option CONCURRENTLY is supported by passing the
+flag ``postgresql_concurrently`` to the :class:`.Index` construct::
+
+ tbl = Table('testtbl', m, Column('data', Integer))
+
+ idx1 = Index('test_idx1', tbl.c.data, postgresql_concurrently=True)
+
+The above index construct will render SQL as::
+
+ CREATE INDEX CONCURRENTLY test_idx1 ON testtbl (data)
+
+.. versionadded:: 0.9.9
.. _postgresql_index_reflection:
@@ -475,15 +506,109 @@ dialect in conjunction with the :class:`.Table` construct:
.. seealso::
`Postgresql CREATE TABLE options
- <http://www.postgresql.org/docs/9.3/static/sql-createtable.html>`_
+ <http://www.postgresql.org/docs/current/static/sql-createtable.html>`_
+
+ARRAY Types
+-----------
+
+The Postgresql dialect supports arrays, both as multidimensional column types
+as well as array literals:
+
+* :class:`.postgresql.ARRAY` - ARRAY datatype
+
+* :class:`.postgresql.array` - array literal
+
+* :func:`.postgresql.array_agg` - ARRAY_AGG SQL function
+
+* :class:`.postgresql.aggregate_order_by` - helper for PG's ORDER BY aggregate
+ function syntax.
+
+JSON Types
+----------
+
+The Postgresql dialect supports both JSON and JSONB datatypes, including
+psycopg2's native support and support for all of Postgresql's special
+operators:
+
+* :class:`.postgresql.JSON`
+
+* :class:`.postgresql.JSONB`
+
+HSTORE Type
+-----------
+
+The Postgresql HSTORE type as well as hstore literals are supported:
+
+* :class:`.postgresql.HSTORE` - HSTORE datatype
+
+* :class:`.postgresql.hstore` - hstore literal
+
+ENUM Types
+----------
+
+Postgresql has an independently creatable TYPE structure which is used
+to implement an enumerated type. This approach introduces significant
+complexity on the SQLAlchemy side in terms of when this type should be
+CREATED and DROPPED. The type object is also an independently reflectable
+entity. The following sections should be consulted:
+
+* :class:`.postgresql.ENUM` - DDL and typing support for ENUM.
+
+* :meth:`.PGInspector.get_enums` - retrieve a listing of current ENUM types
+
+* :meth:`.postgresql.ENUM.create` , :meth:`.postgresql.ENUM.drop` - individual
+ CREATE and DROP commands for ENUM.
+
+.. _postgresql_array_of_enum:
+
+Using ENUM with ARRAY
+^^^^^^^^^^^^^^^^^^^^^
+
+The combination of ENUM and ARRAY is not directly supported by backend
+DBAPIs at this time. In order to send and receive an ARRAY of ENUM,
+use the following workaround type::
+
+ class ArrayOfEnum(ARRAY):
+
+ def bind_expression(self, bindvalue):
+ return sa.cast(bindvalue, self)
+
+ def result_processor(self, dialect, coltype):
+ super_rp = super(ArrayOfEnum, self).result_processor(
+ dialect, coltype)
+
+ def handle_raw_string(value):
+ inner = re.match(r"^{(.*)}$", value).group(1)
+ return inner.split(",") if inner else []
+
+ def process(value):
+ if value is None:
+ return None
+ return super_rp(handle_raw_string(value))
+ return process
+
+E.g.::
+
+ Table(
+ 'mydata', metadata,
+ Column('id', Integer, primary_key=True),
+        Column('data', ArrayOfEnum(ENUM('a', 'b', 'c', name='myenum')))
+
+ )
+
+This type is not included as a built-in type as it would be incompatible
+with a DBAPI that suddenly decides to support ARRAY of ENUM directly in
+a new version.
"""
from collections import defaultdict
import re
+import datetime as dt
+
from ... import sql, schema, exc, util
from ...engine import default, reflection
-from ...sql import compiler, expression, operators, default_comparator
+from ...sql import compiler, expression
from ... import types as sqltypes
try:
@@ -586,6 +711,10 @@ class INTERVAL(sqltypes.TypeEngine):
def _type_affinity(self):
return sqltypes.Interval
+ @property
+ def python_type(self):
+ return dt.timedelta
+
PGInterval = INTERVAL
@@ -675,427 +804,81 @@ class TSVECTOR(sqltypes.TypeEngine):
__visit_name__ = 'TSVECTOR'
-class _Slice(expression.ColumnElement):
- __visit_name__ = 'slice'
- type = sqltypes.NULLTYPE
-
- def __init__(self, slice_, source_comparator):
- self.start = default_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.start)
- self.stop = default_comparator._check_literal(
- source_comparator.expr,
- operators.getitem, slice_.stop)
-
-
-class Any(expression.ColumnElement):
-
- """Represent the clause ``left operator ANY (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.any` - ARRAY-bound method
-
- """
- __visit_name__ = 'any'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class All(expression.ColumnElement):
-
- """Represent the clause ``left operator ALL (right)``. ``right`` must be
- an array expression.
-
- .. seealso::
-
- :class:`.postgresql.ARRAY`
-
- :meth:`.postgresql.ARRAY.Comparator.all` - ARRAY-bound method
-
- """
- __visit_name__ = 'all'
-
- def __init__(self, left, right, operator=operators.eq):
- self.type = sqltypes.Boolean()
- self.left = expression._literal_as_binds(left)
- self.right = right
- self.operator = operator
-
-
-class array(expression.Tuple):
-
- """A Postgresql ARRAY literal.
-
- This is used to produce ARRAY literals in SQL expressions, e.g.::
-
- from sqlalchemy.dialects.postgresql import array
- from sqlalchemy.dialects import postgresql
- from sqlalchemy import select, func
-
- stmt = select([
- array([1,2]) + array([3,4,5])
- ])
-
- print stmt.compile(dialect=postgresql.dialect())
-
- Produces the SQL::
-
- SELECT ARRAY[%(param_1)s, %(param_2)s] ||
- ARRAY[%(param_3)s, %(param_4)s, %(param_5)s]) AS anon_1
-
- An instance of :class:`.array` will always have the datatype
- :class:`.ARRAY`. The "inner" type of the array is inferred from
- the values present, unless the ``type_`` keyword argument is passed::
-
- array(['foo', 'bar'], type_=CHAR)
-
- .. versionadded:: 0.8 Added the :class:`~.postgresql.array` literal type.
-
- See also:
-
- :class:`.postgresql.ARRAY`
-
- """
- __visit_name__ = 'array'
-
- def __init__(self, clauses, **kw):
- super(array, self).__init__(*clauses, **kw)
- self.type = ARRAY(self.type)
-
- def _bind_param(self, operator, obj):
- return array([
- expression.BindParameter(None, o, _compared_to_operator=operator,
- _compared_to_type=self.type, unique=True)
- for o in obj
- ])
-
- def self_group(self, against=None):
- return self
-
-
-class ARRAY(sqltypes.Concatenable, sqltypes.TypeEngine):
-
- """Postgresql ARRAY type.
-
- Represents values as Python lists.
-
- An :class:`.ARRAY` type is constructed given the "type"
- of element::
-
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer))
- )
+class ENUM(sqltypes.Enum):
- The above type represents an N-dimensional array,
- meaning Postgresql will interpret values with any number
- of dimensions automatically. To produce an INSERT
- construct that passes in a 1-dimensional array of integers::
+ """Postgresql ENUM type.
- connection.execute(
- mytable.insert(),
- data=[1,2,3]
+ This is a subclass of :class:`.types.Enum` which includes
+ support for PG's ``CREATE TYPE`` and ``DROP TYPE``.
+
+ When the builtin type :class:`.types.Enum` is used and the
+ :paramref:`.Enum.native_enum` flag is left at its default of
+ True, the Postgresql backend will use a :class:`.postgresql.ENUM`
+ type as the implementation, so the special create/drop rules
+ will be used.
+
+ The create/drop behavior of ENUM is necessarily intricate, due to the
+    awkward relationship the ENUM type has to the
+ parent table, in that it may be "owned" by just a single table, or
+ may be shared among many tables.
+
+ When using :class:`.types.Enum` or :class:`.postgresql.ENUM`
+ in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` is emitted
+ corresponding to when the :meth:`.Table.create` and :meth:`.Table.drop`
+ methods are called::
+
+ table = Table('sometable', metadata,
+ Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
)
- The :class:`.ARRAY` type can be constructed given a fixed number
- of dimensions::
+ table.create(engine) # will emit CREATE ENUM and CREATE TABLE
+ table.drop(engine) # will emit DROP TABLE and DROP ENUM
- mytable = Table("mytable", metadata,
- Column("data", ARRAY(Integer, dimensions=2))
- )
+ To use a common enumerated type between multiple tables, the best
+ practice is to declare the :class:`.types.Enum` or
+ :class:`.postgresql.ENUM` independently, and associate it with the
+ :class:`.MetaData` object itself::
- This has the effect of the :class:`.ARRAY` type
- specifying that number of bracketed blocks when a :class:`.Table`
- is used in a CREATE TABLE statement, or when the type is used
- within a :func:`.expression.cast` construct; it also causes
- the bind parameter and result set processing of the type
- to optimize itself to expect exactly that number of dimensions.
- Note that Postgresql itself still allows N dimensions with such a type.
-
- SQL expressions of type :class:`.ARRAY` have support for "index" and
- "slice" behavior. The Python ``[]`` operator works normally here, given
- integer indexes or slices. Note that Postgresql arrays default
- to 1-based indexing. The operator produces binary expression
- constructs which will produce the appropriate SQL, both for
- SELECT statements::
-
- select([mytable.c.data[5], mytable.c.data[2:7]])
-
- as well as UPDATE statements when the :meth:`.Update.values` method
- is used::
-
- mytable.update().values({
- mytable.c.data[5]: 7,
- mytable.c.data[2:7]: [1, 2, 3]
- })
-
- :class:`.ARRAY` provides special methods for containment operations,
- e.g.::
-
- mytable.c.data.contains([1, 2])
-
- For a full list of special methods see :class:`.ARRAY.Comparator`.
-
- .. versionadded:: 0.8 Added support for index and slice operations
- to the :class:`.ARRAY` type, including support for UPDATE
- statements, and special array containment operations.
-
- The :class:`.ARRAY` type may not be supported on all DBAPIs.
- It is known to work on psycopg2 and not pg8000.
-
- See also:
-
- :class:`.postgresql.array` - produce a literal array value.
-
- """
- __visit_name__ = 'ARRAY'
-
- class Comparator(sqltypes.Concatenable.Comparator):
-
- """Define comparison operations for :class:`.ARRAY`."""
-
- def __getitem__(self, index):
- shift_indexes = 1 if self.expr.type.zero_indexes else 0
- if isinstance(index, slice):
- if shift_indexes:
- index = slice(
- index.start + shift_indexes,
- index.stop + shift_indexes,
- index.step
- )
- index = _Slice(index, self)
- return_type = self.type
- else:
- index += shift_indexes
- return_type = self.type.item_type
-
- return default_comparator._binary_operate(
- self.expr, operators.getitem, index,
- result_type=return_type)
-
- def any(self, other, operator=operators.eq):
- """Return ``other operator ANY (array)`` clause.
-
- Argument places are switched, because ANY requires array
- expression to be on the right hand-side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.any(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.Any`
-
- :meth:`.postgresql.ARRAY.Comparator.all`
-
- """
- return Any(other, self.expr, operator=operator)
-
- def all(self, other, operator=operators.eq):
- """Return ``other operator ALL (array)`` clause.
-
- Argument places are switched, because ALL requires array
- expression to be on the right hand-side.
-
- E.g.::
-
- from sqlalchemy.sql import operators
-
- conn.execute(
- select([table.c.data]).where(
- table.c.data.all(7, operator=operators.lt)
- )
- )
-
- :param other: expression to be compared
- :param operator: an operator object from the
- :mod:`sqlalchemy.sql.operators`
- package, defaults to :func:`.operators.eq`.
-
- .. seealso::
-
- :class:`.postgresql.All`
-
- :meth:`.postgresql.ARRAY.Comparator.any`
-
- """
- return All(other, self.expr, operator=operator)
-
- def contains(self, other, **kwargs):
- """Boolean expression. Test if elements are a superset of the
- elements of the argument array expression.
- """
- return self.expr.op('@>')(other)
-
- def contained_by(self, other):
- """Boolean expression. Test if elements are a proper subset of the
- elements of the argument array expression.
- """
- return self.expr.op('<@')(other)
-
- def overlap(self, other):
- """Boolean expression. Test if array has elements in common with
- an argument array expression.
- """
- return self.expr.op('&&')(other)
-
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, operators.custom_op):
- if op.opstring in ['@>', '<@', '&&']:
- return op, sqltypes.Boolean
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
- comparator_factory = Comparator
-
- def __init__(self, item_type, as_tuple=False, dimensions=None,
- zero_indexes=False):
- """Construct an ARRAY.
-
- E.g.::
-
- Column('myarray', ARRAY(Integer))
-
- Arguments are:
+ my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
- :param item_type: The data type of items of this array. Note that
- dimensionality is irrelevant here, so multi-dimensional arrays like
- ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
- ``ARRAY(ARRAY(Integer))`` or such.
-
- :param as_tuple=False: Specify whether return results
- should be converted to tuples from lists. DBAPIs such
- as psycopg2 return lists by default. When tuples are
- returned, the results are hashable.
-
- :param dimensions: if non-None, the ARRAY will assume a fixed
- number of dimensions. This will cause the DDL emitted for this
- ARRAY to include the exact number of bracket clauses ``[]``,
- and will also optimize the performance of the type overall.
- Note that PG arrays are always implicitly "non-dimensioned",
- meaning they can store any number of dimensions no matter how
- they were declared.
-
- :param zero_indexes=False: when True, index values will be converted
- between Python zero-based and Postgresql one-based indexes, e.g.
- a value of one will be added to all index values before passing
- to the database.
-
- .. versionadded:: 0.9.5
-
- """
- if isinstance(item_type, ARRAY):
- raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
- if isinstance(item_type, type):
- item_type = item_type()
- self.item_type = item_type
- self.as_tuple = as_tuple
- self.dimensions = dimensions
- self.zero_indexes = zero_indexes
+ t1 = Table('sometable_one', metadata,
+        Column('some_enum', my_enum)
+ )
- @property
- def python_type(self):
- return list
-
- def compare_values(self, x, y):
- return x == y
-
- def _proc_array(self, arr, itemproc, dim, collection):
- if dim is None:
- arr = list(arr)
- if dim == 1 or dim is None and (
- # this has to be (list, tuple), or at least
- # not hasattr('__iter__'), since Py3K strings
- # etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
- if itemproc:
- return collection(itemproc(x) for x in arr)
- else:
- return collection(arr)
- else:
- return collection(
- self._proc_array(
- x, itemproc,
- dim - 1 if dim is not None else None,
- collection)
- for x in arr
- )
+ t2 = Table('sometable_two', metadata,
+        Column('some_enum', my_enum)
+ )
- def bind_processor(self, dialect):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- bind_processor(dialect)
+ When this pattern is used, care must still be taken at the level
+ of individual table creates. Emitting CREATE TABLE without also
+ specifying ``checkfirst=True`` will still cause issues::
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
- return process
+ t1.create(engine) # will fail: no such type 'myenum'
- def result_processor(self, dialect, coltype):
- item_proc = self.item_type.\
- dialect_impl(dialect).\
- result_processor(dialect, coltype)
+ If we specify ``checkfirst=True``, the individual table-level create
+ operation will check for the ``ENUM`` and create if not exists::
- def process(value):
- if value is None:
- return value
- else:
- return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- tuple if self.as_tuple else list)
- return process
+ # will check if enum exists, and emit CREATE TYPE if not
+ t1.create(engine, checkfirst=True)
-PGArray = ARRAY
+ When using a metadata-level ENUM type, the type will always be created
+    and dropped whenever the metadata-wide create/drop is called::
+ metadata.create_all(engine) # will emit CREATE TYPE
+ metadata.drop_all(engine) # will emit DROP TYPE
-class ENUM(sqltypes.Enum):
+ The type can also be created and dropped directly::
- """Postgresql ENUM type.
+ my_enum.create(engine)
+ my_enum.drop(engine)
- This is a subclass of :class:`.types.Enum` which includes
- support for PG's ``CREATE TYPE``.
-
- :class:`~.postgresql.ENUM` is used automatically when
- using the :class:`.types.Enum` type on PG assuming
- the ``native_enum`` is left as ``True``. However, the
- :class:`~.postgresql.ENUM` class can also be instantiated
- directly in order to access some additional Postgresql-specific
- options, namely finer control over whether or not
- ``CREATE TYPE`` should be emitted.
-
- Note that both :class:`.types.Enum` as well as
- :class:`~.postgresql.ENUM` feature create/drop
- methods; the base :class:`.types.Enum` type ultimately
- delegates to the :meth:`~.postgresql.ENUM.create` and
- :meth:`~.postgresql.ENUM.drop` methods present here.
+ .. versionchanged:: 1.0.0 The Postgresql :class:`.postgresql.ENUM` type
+ now behaves more strictly with regards to CREATE/DROP. A metadata-level
+ ENUM type will only be created and dropped at the metadata level,
+ not the table level, with the exception of
+ ``table.create(checkfirst=True)``.
+ The ``table.drop()`` call will now emit a DROP TYPE for a table-level
+ enumerated type.
"""
@@ -1201,9 +984,18 @@ class ENUM(sqltypes.Enum):
return False
def _on_table_create(self, target, bind, checkfirst, **kw):
- if not self._check_for_name_in_memos(checkfirst, kw):
+ if checkfirst or (
+ not self.metadata and
+ not kw.get('_is_metadata_operation', False)) and \
+ not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
+ def _on_table_drop(self, target, bind, checkfirst, **kw):
+ if not self.metadata and \
+ not kw.get('_is_metadata_operation', False) and \
+ not self._check_for_name_in_memos(checkfirst, kw):
+ self.drop(bind=bind, checkfirst=checkfirst)
+
def _on_metadata_create(self, target, bind, checkfirst, **kw):
if not self._check_for_name_in_memos(checkfirst, kw):
self.create(bind=bind, checkfirst=checkfirst)
@@ -1264,18 +1056,14 @@ class PGCompiler(compiler.SQLCompiler):
self.process(element.stop, **kw),
)
- def visit_any(self, element, **kw):
- return "%s%sANY (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
+ def visit_json_getitem_op_binary(self, binary, operator, **kw):
+ return self._generate_generic_binary(
+ binary, " -> ", **kw
)
- def visit_all(self, element, **kw):
- return "%s%sALL (%s)" % (
- self.process(element.left, **kw),
- compiler.OPERATORS[element.operator],
- self.process(element.right, **kw)
+ def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
+ return self._generate_generic_binary(
+ binary, " #> ", **kw
)
def visit_getitem_binary(self, binary, operator, **kw):
@@ -1284,6 +1072,12 @@ class PGCompiler(compiler.SQLCompiler):
self.process(binary.right, **kw)
)
+ def visit_aggregate_order_by(self, element, **kw):
+ return "%s ORDER BY %s" % (
+ self.process(element.target, **kw),
+ self.process(element.order_by, **kw)
+ )
+
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
@@ -1348,7 +1142,7 @@ class PGCompiler(compiler.SQLCompiler):
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
- def get_select_precolumns(self, select):
+ def get_select_precolumns(self, select, **kw):
if select._distinct is not False:
if select._distinct is True:
return "DISTINCT "
@@ -1357,7 +1151,8 @@ class PGCompiler(compiler.SQLCompiler):
[self.process(col) for col in select._distinct]
) + ") "
else:
- return "DISTINCT ON (" + self.process(select._distinct) + ") "
+ return "DISTINCT ON (" + \
+ self.process(select._distinct, **kw) + ") "
else:
return ""
@@ -1373,7 +1168,7 @@ class PGCompiler(compiler.SQLCompiler):
c.table if isinstance(c, expression.ColumnClause)
else c for c in select._for_update_arg.of)
tmp += " OF " + ", ".join(
- self.process(table, ashint=True, **kw)
+ self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
)
@@ -1425,8 +1220,8 @@ class PGDDLCompiler(compiler.DDLCompiler):
else:
colspec += " SERIAL"
else:
- colspec += " " + self.dialect.type_compiler.process(column.type,
- type_expression=column)
+ colspec += " " + self.dialect.type_compiler.process(
+ column.type, type_expression=column)
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
@@ -1459,7 +1254,13 @@ class PGDDLCompiler(compiler.DDLCompiler):
text = "CREATE "
if index.unique:
text += "UNIQUE "
- text += "INDEX %s ON %s " % (
+ text += "INDEX "
+
+ concurrently = index.dialect_options['postgresql']['concurrently']
+ if concurrently:
+ text += "CONCURRENTLY "
+
+ text += "%s ON %s " % (
self._prepared_index_name(index,
include_schema=False),
preparer.format_table(index.table)
@@ -1487,6 +1288,13 @@ class PGDDLCompiler(compiler.DDLCompiler):
])
)
+ withclause = index.dialect_options['postgresql']['with']
+
+ if withclause:
+ text += " WITH (%s)" % (', '.join(
+ ['%s = %s' % storage_parameter
+ for storage_parameter in withclause.items()]))
+
whereclause = index.dialect_options["postgresql"]["where"]
if whereclause is not None:
@@ -1496,15 +1304,17 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += " WHERE " + where_compiled
return text
- def visit_exclude_constraint(self, constraint):
+ def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
text += "CONSTRAINT %s " % \
self.preparer.format_constraint(constraint)
elements = []
- for c in constraint.columns:
- op = constraint.operators[c.name]
- elements.append(self.preparer.quote(c.name) + ' WITH ' + op)
+ for expr, name, op in constraint._render_exprs:
+ kw['include_table'] = False
+ elements.append(
+ "%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
+ )
text += "EXCLUDE USING %s (%s)" % (constraint.using,
', '.join(elements))
if constraint.where is not None:
@@ -1671,8 +1481,11 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
raise exc.CompileError("Postgresql ENUM type requires a name.")
name = self.quote(type_.name)
- if not self.omit_schema and use_schema and type_.schema is not None:
- name = self.quote_schema(type_.schema) + "." + name
+ effective_schema = self.schema_for_object(type_)
+
+ if not self.omit_schema and use_schema and \
+ effective_schema is not None:
+ name = self.quote_schema(effective_schema) + "." + name
return name
@@ -1765,10 +1578,15 @@ class PGExecutionContext(default.DefaultExecutionContext):
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
- sch = column.table.schema
- if sch is not None:
+ if column.table is not None:
+ effective_schema = self.connection.schema_for_object(
+ column.table)
+ else:
+ effective_schema = None
+
+ if effective_schema is not None:
exc = "select nextval('\"%s\".\"%s\"')" % \
- (sch, seq_name)
+ (effective_schema, seq_name)
else:
exc = "select nextval('\"%s\"')" % \
(seq_name, )
@@ -1812,7 +1630,9 @@ class PGDialect(default.DefaultDialect):
(schema.Index, {
"using": False,
"where": None,
- "ops": {}
+ "ops": {},
+ "concurrently": False,
+ "with": {}
}),
(schema.Table, {
"ignore_search_path": False,
@@ -2165,11 +1985,27 @@ class PGDialect(default.DefaultDialect):
current_schema = schema
else:
current_schema = self.default_schema_name
- s = """
- SELECT definition FROM pg_views
- WHERE schemaname = :schema
- AND viewname = :view_name
- """
+
+ if self.server_version_info >= (9, 3):
+ s = """
+ SELECT definition FROM pg_views
+ WHERE schemaname = :schema
+ AND viewname = :view_name
+
+ UNION
+
+ SELECT definition FROM pg_matviews
+ WHERE schemaname = :schema
+ AND matviewname = :view_name
+
+ """
+ else:
+ s = """
+ SELECT definition FROM pg_views
+ WHERE schemaname = :schema
+ AND viewname = :view_name
+ """
+
rp = connection.execute(sql.text(s),
view_name=view_name, schema=current_schema)
if rp:
@@ -2309,7 +2145,7 @@ class PGDialect(default.DefaultDialect):
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
- coltype = ARRAY(coltype)
+ coltype = self.ischema_names['_array'](coltype)
else:
util.warn("Did not recognize type '%s' of column '%s'" %
(attype, name))
@@ -2496,37 +2332,69 @@ class PGDialect(default.DefaultDialect):
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
- IDX_SQL = """
- SELECT
- i.relname as relname,
- ix.indisunique, ix.indexprs, ix.indpred,
- a.attname, a.attnum, c.conrelid, ix.indkey%s
- FROM
- pg_class t
- join pg_index ix on t.oid = ix.indrelid
- join pg_class i on i.oid = ix.indexrelid
- left outer join
- pg_attribute a
- on t.oid = a.attrelid and %s
- left outer join
- pg_constraint c
- on (ix.indrelid = c.conrelid and
- ix.indexrelid = c.conindid and
- c.contype in ('p', 'u', 'x'))
- WHERE
- t.relkind IN ('r', 'v', 'f', 'm')
- and t.oid = :table_oid
- and ix.indisprimary = 'f'
- ORDER BY
- t.relname,
- i.relname
- """ % (
- # version 8.3 here was based on observing the
- # cast does not work in PG 8.2.4, does work in 8.3.0.
- # nothing in PG changelogs regarding this.
- "::varchar" if self.server_version_info >= (8, 3) else "",
- self._pg_index_any("a.attnum", "ix.indkey")
- )
+ if self.server_version_info < (8, 5):
+ IDX_SQL = """
+ SELECT
+ i.relname as relname,
+ ix.indisunique, ix.indexprs, ix.indpred,
+ a.attname, a.attnum, NULL, ix.indkey%s,
+ %s, am.amname
+ FROM
+ pg_class t
+ join pg_index ix on t.oid = ix.indrelid
+ join pg_class i on i.oid = ix.indexrelid
+ left outer join
+ pg_attribute a
+ on t.oid = a.attrelid and %s
+ left outer join
+ pg_am am
+ on i.relam = am.oid
+ WHERE
+ t.relkind IN ('r', 'v', 'f', 'm')
+ and t.oid = :table_oid
+ and ix.indisprimary = 'f'
+ ORDER BY
+ t.relname,
+ i.relname
+ """ % (
+ # version 8.3 here was based on observing the
+ # cast does not work in PG 8.2.4, does work in 8.3.0.
+ # nothing in PG changelogs regarding this.
+ "::varchar" if self.server_version_info >= (8, 3) else "",
+ "i.reloptions" if self.server_version_info >= (8, 2)
+ else "NULL",
+ self._pg_index_any("a.attnum", "ix.indkey")
+ )
+ else:
+ IDX_SQL = """
+ SELECT
+ i.relname as relname,
+ ix.indisunique, ix.indexprs, ix.indpred,
+ a.attname, a.attnum, c.conrelid, ix.indkey::varchar,
+ i.reloptions, am.amname
+ FROM
+ pg_class t
+ join pg_index ix on t.oid = ix.indrelid
+ join pg_class i on i.oid = ix.indexrelid
+ left outer join
+ pg_attribute a
+ on t.oid = a.attrelid and a.attnum = ANY(ix.indkey)
+ left outer join
+ pg_constraint c
+ on (ix.indrelid = c.conrelid and
+ ix.indexrelid = c.conindid and
+ c.contype in ('p', 'u', 'x'))
+ left outer join
+ pg_am am
+ on i.relam = am.oid
+ WHERE
+ t.relkind IN ('r', 'v', 'f', 'm')
+ and t.oid = :table_oid
+ and ix.indisprimary = 'f'
+ ORDER BY
+ t.relname,
+ i.relname
+ """
t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
@@ -2535,7 +2403,8 @@ class PGDialect(default.DefaultDialect):
sv_idx_name = None
for row in c.fetchall():
- idx_name, unique, expr, prd, col, col_num, conrelid, idx_key = row
+ (idx_name, unique, expr, prd, col,
+ col_num, conrelid, idx_key, options, amname) = row
if expr:
if idx_name != sv_idx_name:
@@ -2561,6 +2430,16 @@ class PGDialect(default.DefaultDialect):
index['unique'] = unique
if conrelid is not None:
index['duplicates_constraint'] = idx_name
+ if options:
+ index['options'] = dict(
+ [option.split("=") for option in options])
+
+ # it *might* be nice to include that this is 'btree' in the
+ # reflection info. But we don't want an Index object
+ # to have a ``postgresql_using`` in it that is just the
+ # default, so for the moment leaving this out.
+ if amname and amname != 'btree':
+ index['amname'] = amname
result = []
for name, idx in indexes.items():
@@ -2571,6 +2450,12 @@ class PGDialect(default.DefaultDialect):
}
if 'duplicates_constraint' in idx:
entry['duplicates_constraint'] = idx['duplicates_constraint']
+ if 'options' in idx:
+ entry.setdefault(
+ 'dialect_options', {})["postgresql_with"] = idx['options']
+ if 'amname' in idx:
+ entry.setdefault(
+ 'dialect_options', {})["postgresql_using"] = idx['amname']
result.append(entry)
return result
diff --git a/lib/sqlalchemy/dialects/postgresql/constraints.py b/lib/sqlalchemy/dialects/postgresql/constraints.py
deleted file mode 100644
index e8ebc75dd..000000000
--- a/lib/sqlalchemy/dialects/postgresql/constraints.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors
-# <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from sqlalchemy.schema import ColumnCollectionConstraint
-from sqlalchemy.sql import expression
-
-
-class ExcludeConstraint(ColumnCollectionConstraint):
- """A table-level EXCLUDE constraint.
-
- Defines an EXCLUDE constraint as described in the `postgres
- documentation`__.
-
- __ http://www.postgresql.org/docs/9.0/\
-static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
- """
-
- __visit_name__ = 'exclude_constraint'
-
- where = None
-
- def __init__(self, *elements, **kw):
- """
- :param \*elements:
- A sequence of two tuples of the form ``(column, operator)`` where
- column must be a column name or Column object and operator must
- be a string containing the operator to use.
-
- :param name:
- Optional, the in-database name of this constraint.
-
- :param deferrable:
- Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
- issuing DDL for this constraint.
-
- :param initially:
- Optional string. If set, emit INITIALLY <value> when issuing DDL
- for this constraint.
-
- :param using:
- Optional string. If set, emit USING <index_method> when issuing DDL
- for this constraint. Defaults to 'gist'.
-
- :param where:
- Optional string. If set, emit WHERE <predicate> when issuing DDL
- for this constraint.
-
- """
- ColumnCollectionConstraint.__init__(
- self,
- *[col for col, op in elements],
- name=kw.get('name'),
- deferrable=kw.get('deferrable'),
- initially=kw.get('initially')
- )
- self.operators = {}
- for col_or_string, op in elements:
- name = getattr(col_or_string, 'name', col_or_string)
- self.operators[name] = op
- self.using = kw.get('using', 'gist')
- where = kw.get('where')
- if where:
- self.where = expression._literal_as_text(where)
-
- def copy(self, **kw):
- elements = [(col, self.operators[col])
- for col in self.columns.keys()]
- c = self.__class__(*elements,
- name=self.name,
- deferrable=self.deferrable,
- initially=self.initially)
- c.dispatch._update(self.dispatch)
- return c
diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py
new file mode 100644
index 000000000..8a6524f42
--- /dev/null
+++ b/lib/sqlalchemy/dialects/postgresql/ext.py
@@ -0,0 +1,168 @@
+# postgresql/ext.py
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
+# <see AUTHORS file>
+#
+# This module is part of SQLAlchemy and is released under
+# the MIT License: http://www.opensource.org/licenses/mit-license.php
+
+from ...sql import expression
+from ...sql import elements
+from ...sql import functions
+from ...sql.schema import ColumnCollectionConstraint
+from .array import ARRAY
+
+
+class aggregate_order_by(expression.ColumnElement):
+ """Represent a Postgresql aggregate order by expression.
+
+ E.g.::
+
+ from sqlalchemy.dialects.postgresql import aggregate_order_by
+ expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
+ stmt = select([expr])
+
+ would represent the expression::
+
+ SELECT array_agg(a ORDER BY b DESC) FROM table;
+
+ Similarly::
+
+ expr = func.string_agg(
+ table.c.a,
+ aggregate_order_by(literal_column("','"), table.c.a)
+ )
+ stmt = select([expr])
+
+ Would represent::
+
+ SELECT string_agg(a, ',' ORDER BY a) FROM table;
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :class:`.array_agg`
+
+ """
+
+ __visit_name__ = 'aggregate_order_by'
+
+ def __init__(self, target, order_by):
+ self.target = elements._literal_as_binds(target)
+ self.order_by = elements._literal_as_binds(order_by)
+
+ def self_group(self, against=None):
+ return self
+
+ def get_children(self, **kwargs):
+ return self.target, self.order_by
+
+ def _copy_internals(self, clone=elements._clone, **kw):
+ self.target = clone(self.target, **kw)
+ self.order_by = clone(self.order_by, **kw)
+
+ @property
+ def _from_objects(self):
+ return self.target._from_objects + self.order_by._from_objects
+
+
+class ExcludeConstraint(ColumnCollectionConstraint):
+ """A table-level EXCLUDE constraint.
+
+ Defines an EXCLUDE constraint as described in the `postgres
+ documentation`__.
+
+ __ http://www.postgresql.org/docs/9.0/\
+static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
+ """
+
+ __visit_name__ = 'exclude_constraint'
+
+ where = None
+
+ def __init__(self, *elements, **kw):
+ """
+ :param \*elements:
+ A sequence of two tuples of the form ``(column, operator)`` where
+ column must be a column name or Column object and operator must
+ be a string containing the operator to use.
+
+ :param name:
+ Optional, the in-database name of this constraint.
+
+ :param deferrable:
+ Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
+ issuing DDL for this constraint.
+
+ :param initially:
+ Optional string. If set, emit INITIALLY <value> when issuing DDL
+ for this constraint.
+
+ :param using:
+ Optional string. If set, emit USING <index_method> when issuing DDL
+ for this constraint. Defaults to 'gist'.
+
+ :param where:
+ Optional string. If set, emit WHERE <predicate> when issuing DDL
+ for this constraint.
+
+ """
+ columns = []
+ render_exprs = []
+ self.operators = {}
+
+ expressions, operators = zip(*elements)
+
+ for (expr, column, strname, add_element), operator in zip(
+ self._extract_col_expression_collection(expressions),
+ operators
+ ):
+ if add_element is not None:
+ columns.append(add_element)
+
+ name = column.name if column is not None else strname
+
+ if name is not None:
+ # backwards compat
+ self.operators[name] = operator
+
+ expr = expression._literal_as_text(expr)
+
+ render_exprs.append(
+ (expr, name, operator)
+ )
+
+ self._render_exprs = render_exprs
+ ColumnCollectionConstraint.__init__(
+ self,
+ *columns,
+ name=kw.get('name'),
+ deferrable=kw.get('deferrable'),
+ initially=kw.get('initially')
+ )
+ self.using = kw.get('using', 'gist')
+ where = kw.get('where')
+ if where is not None:
+ self.where = expression._literal_as_text(where)
+
+ def copy(self, **kw):
+ elements = [(col, self.operators[col])
+ for col in self.columns.keys()]
+ c = self.__class__(*elements,
+ name=self.name,
+ deferrable=self.deferrable,
+ initially=self.initially)
+ c.dispatch._update(self.dispatch)
+ return c
+
+
+def array_agg(*arg, **kw):
+ """Postgresql-specific form of :class:`.array_agg`, ensures
+ return type is :class:`.postgresql.ARRAY` and not
+ the plain :class:`.types.ARRAY`.
+
+ .. versionadded:: 1.1
+
+ """
+ kw['type_'] = ARRAY(functions._type_from_args(arg))
+ return functions.func.array_agg(*arg, **kw)
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index 9601edc41..67923fe39 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -1,5 +1,5 @@
# postgresql/hstore.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,110 +7,42 @@
import re
-from .base import ARRAY, ischema_names
+from .base import ischema_names
+from .array import ARRAY
from ... import types as sqltypes
from ...sql import functions as sqlfunc
-from ...sql.operators import custom_op
+from ...sql import operators
from ... import util
__all__ = ('HSTORE', 'hstore')
-# My best guess at the parsing rules of hstore literals, since no formal
-# grammar is given. This is mostly reverse engineered from PG's input parser
-# behavior.
-HSTORE_PAIR_RE = re.compile(r"""
-(
- "(?P<key> (\\ . | [^"])* )" # Quoted key
-)
-[ ]* => [ ]* # Pair operator, optional adjoining whitespace
-(
- (?P<value_null> NULL ) # NULL value
- | "(?P<value> (\\ . | [^"])* )" # Quoted value
-)
-""", re.VERBOSE)
-
-HSTORE_DELIMITER_RE = re.compile(r"""
-[ ]* , [ ]*
-""", re.VERBOSE)
-
-
-def _parse_error(hstore_str, pos):
- """format an unmarshalling error."""
-
- ctx = 20
- hslen = len(hstore_str)
-
- parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
- residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
- if len(parsed_tail) > ctx:
- parsed_tail = '[...]' + parsed_tail[1:]
- if len(residual) > ctx:
- residual = residual[:-1] + '[...]'
-
- return "After %r, could not parse residual at position %d: %r" % (
- parsed_tail, pos, residual)
-
-
-def _parse_hstore(hstore_str):
- """Parse an hstore from its literal string representation.
-
- Attempts to approximate PG's hstore input parsing rules as closely as
- possible. Although currently this is not strictly necessary, since the
- current implementation of hstore's output syntax is stricter than what it
- accepts as input, the documentation makes no guarantees that will always
- be the case.
-
-
-
- """
- result = {}
- pos = 0
- pair_match = HSTORE_PAIR_RE.match(hstore_str)
-
- while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace(
- "\\\\", "\\")
- if pair_match.group('value_null'):
- value = None
- else:
- value = pair_match.group('value').replace(
- r'\"', '"').replace("\\\\", "\\")
- result[key] = value
-
- pos += pair_match.end()
-
- delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
- if delim_match is not None:
- pos += delim_match.end()
-
- pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
-
- if pos != len(hstore_str):
- raise ValueError(_parse_error(hstore_str, pos))
+GETITEM = operators.custom_op(
+ "->", precedence=15, natural_self_precedent=True,
+)
- return result
+HAS_KEY = operators.custom_op(
+ "?", precedence=15, natural_self_precedent=True
+)
+HAS_ALL = operators.custom_op(
+ "?&", precedence=15, natural_self_precedent=True
+)
-def _serialize_hstore(val):
- """Serialize a dictionary into an hstore literal. Keys and values must
- both be strings (except None for values).
+HAS_ANY = operators.custom_op(
+ "?|", precedence=15, natural_self_precedent=True
+)
- """
- def esc(s, position):
- if position == 'value' and s is None:
- return 'NULL'
- elif isinstance(s, util.string_types):
- return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
- else:
- raise ValueError("%r in %s position is not a string." %
- (s, position))
+CONTAINS = operators.custom_op(
+ "@>", precedence=15, natural_self_precedent=True
+)
- return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
- for k, v in val.items())
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=15, natural_self_precedent=True
+)
-class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
+class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""Represent the Postgresql HSTORE type.
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
@@ -185,51 +117,61 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
__visit_name__ = 'HSTORE'
hashable = False
+ text_type = sqltypes.Text()
+
+ def __init__(self, text_type=None):
+ """Construct a new :class:`.HSTORE`.
+
+ :param text_type: the type that should be used for indexed values.
+ Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1.0
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ """
+ if text_type is not None:
+ self.text_type = text_type
+
+ class Comparator(
+ sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
- return self.expr.op('?')(other)
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
- """Boolean expression. Test for presence of all keys in the PG
- array.
+ """Boolean expression. Test for presence of all keys in jsonb
"""
- return self.expr.op('?&')(other)
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
- """Boolean expression. Test for presence of any key in the PG
- array.
+ """Boolean expression. Test for presence of any key in jsonb
"""
- return self.expr.op('?|')(other)
-
- def defined(self, key):
- """Boolean expression. Test for presence of a non-NULL value for
- the key. Note that the key may be a SQLA expression.
- """
- return _HStoreDefinedFunction(self.expr, key)
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
- """Boolean expression. Test if keys are a superset of the keys of
- the argument hstore expression.
+ """Boolean expression. Test if keys (or array) are a superset
+ of/contained the keys of the argument jsonb expression.
"""
- return self.expr.op('@>')(other)
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
- keys of the argument hstore expression.
+            keys of the argument hstore expression.
"""
- return self.expr.op('<@')(other)
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
- def __getitem__(self, other):
- """Text expression. Get the value at a given key. Note that the
- key may be a SQLA expression.
+ def _setup_getitem(self, index):
+ return GETITEM, index, self.type.text_type
+
+ def defined(self, key):
+ """Boolean expression. Test for presence of a non-NULL value for
+ the key. Note that the key may be a SQLA expression.
"""
- return self.expr.op('->', precedence=5)(other)
+ return _HStoreDefinedFunction(self.expr, key)
def delete(self, key):
"""HStore expression. Returns the contents of this hstore with the
@@ -263,14 +205,7 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
"""Text array expression. Returns array of [key, value] pairs."""
return _HStoreMatrixFunction(self.expr)
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring in ['?', '?&', '?|', '@>', '<@']:
- return op, sqltypes.Boolean
- elif op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
+ comparator_factory = Comparator
def bind_processor(self, dialect):
if util.py2k:
@@ -374,3 +309,105 @@ class _HStoreArrayFunction(sqlfunc.GenericFunction):
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
name = 'hstore_to_matrix'
+
+
+#
+# parsing. note that none of this is used with the psycopg2 backend,
+# which provides its own native extensions.
+#
+
+# My best guess at the parsing rules of hstore literals, since no formal
+# grammar is given. This is mostly reverse engineered from PG's input parser
+# behavior.
+HSTORE_PAIR_RE = re.compile(r"""
+(
+ "(?P<key> (\\ . | [^"])* )" # Quoted key
+)
+[ ]* => [ ]* # Pair operator, optional adjoining whitespace
+(
+ (?P<value_null> NULL ) # NULL value
+ | "(?P<value> (\\ . | [^"])* )" # Quoted value
+)
+""", re.VERBOSE)
+
+HSTORE_DELIMITER_RE = re.compile(r"""
+[ ]* , [ ]*
+""", re.VERBOSE)
+
+
+def _parse_error(hstore_str, pos):
+ """format an unmarshalling error."""
+
+ ctx = 20
+ hslen = len(hstore_str)
+
+ parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
+ residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
+
+ if len(parsed_tail) > ctx:
+ parsed_tail = '[...]' + parsed_tail[1:]
+ if len(residual) > ctx:
+ residual = residual[:-1] + '[...]'
+
+ return "After %r, could not parse residual at position %d: %r" % (
+ parsed_tail, pos, residual)
+
+
+def _parse_hstore(hstore_str):
+ """Parse an hstore from its literal string representation.
+
+ Attempts to approximate PG's hstore input parsing rules as closely as
+ possible. Although currently this is not strictly necessary, since the
+ current implementation of hstore's output syntax is stricter than what it
+ accepts as input, the documentation makes no guarantees that will always
+ be the case.
+
+
+
+ """
+ result = {}
+ pos = 0
+ pair_match = HSTORE_PAIR_RE.match(hstore_str)
+
+ while pair_match is not None:
+ key = pair_match.group('key').replace(r'\"', '"').replace(
+ "\\\\", "\\")
+ if pair_match.group('value_null'):
+ value = None
+ else:
+ value = pair_match.group('value').replace(
+ r'\"', '"').replace("\\\\", "\\")
+ result[key] = value
+
+ pos += pair_match.end()
+
+ delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
+ if delim_match is not None:
+ pos += delim_match.end()
+
+ pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])
+
+ if pos != len(hstore_str):
+ raise ValueError(_parse_error(hstore_str, pos))
+
+ return result
+
+
+def _serialize_hstore(val):
+ """Serialize a dictionary into an hstore literal. Keys and values must
+ both be strings (except None for values).
+
+ """
+ def esc(s, position):
+ if position == 'value' and s is None:
+ return 'NULL'
+ elif isinstance(s, util.string_types):
+ return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
+ else:
+ raise ValueError("%r in %s position is not a string." %
+ (s, position))
+
+ return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
+ for k, v in val.items())
+
+
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index f38c4a56a..b0f0f7cf0 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -1,5 +1,5 @@
# postgresql/json.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,144 +7,104 @@
from __future__ import absolute_import
import json
+import collections
-from .base import ischema_names
+from .base import ischema_names, colspecs
from ... import types as sqltypes
-from ...sql.operators import custom_op
-from ... import sql
-from ...sql import elements, default_comparator
+from ...sql import operators
+from ...sql import elements
from ... import util
-__all__ = ('JSON', 'JSONElement', 'JSONB')
+__all__ = ('JSON', 'JSONB')
+ASTEXT = operators.custom_op(
+ "->>", precedence=15, natural_self_precedent=True,
+)
-class JSONElement(elements.BinaryExpression):
- """Represents accessing an element of a :class:`.JSON` value.
+JSONPATH_ASTEXT = operators.custom_op(
+ "#>>", precedence=15, natural_self_precedent=True,
+)
- The :class:`.JSONElement` is produced whenever using the Python index
- operator on an expression that has the type :class:`.JSON`::
- expr = mytable.c.json_data['some_key']
+HAS_KEY = operators.custom_op(
+ "?", precedence=15, natural_self_precedent=True
+)
- The expression typically compiles to a JSON access such as ``col -> key``.
- Modifiers are then available for typing behavior, including
- :meth:`.JSONElement.cast` and :attr:`.JSONElement.astext`.
+HAS_ALL = operators.custom_op(
+ "?&", precedence=15, natural_self_precedent=True
+)
- """
-
- def __init__(self, left, right, astext=False,
- opstring=None, result_type=None):
- self._astext = astext
- if opstring is None:
- if hasattr(right, '__iter__') and \
- not isinstance(right, util.string_types):
- opstring = "#>"
- right = "{%s}" % (
- ", ".join(util.text_type(elem) for elem in right))
- else:
- opstring = "->"
-
- self._json_opstring = opstring
- operator = custom_op(opstring, precedence=5)
- right = default_comparator._check_literal(
- left, operator, right)
- super(JSONElement, self).__init__(
- left, right, operator, type_=result_type)
-
- @property
- def astext(self):
- """Convert this :class:`.JSONElement` to use the 'astext' operator
- when evaluated.
-
- E.g.::
-
- select([data_table.c.data['some key'].astext])
-
- .. seealso::
-
- :meth:`.JSONElement.cast`
-
- """
- if self._astext:
- return self
- else:
- return JSONElement(
- self.left,
- self.right,
- astext=True,
- opstring=self._json_opstring + ">",
- result_type=sqltypes.String(convert_unicode=True)
- )
+HAS_ANY = operators.custom_op(
+ "?|", precedence=15, natural_self_precedent=True
+)
- def cast(self, type_):
- """Convert this :class:`.JSONElement` to apply both the 'astext' operator
- as well as an explicit type cast when evaluated.
+CONTAINS = operators.custom_op(
+ "@>", precedence=15, natural_self_precedent=True
+)
- E.g.::
+CONTAINED_BY = operators.custom_op(
+ "<@", precedence=15, natural_self_precedent=True
+)
- select([data_table.c.data['some key'].cast(Integer)])
- .. seealso::
+class JSONPathType(sqltypes.JSON.JSONPathType):
+ def bind_processor(self, dialect):
+ def process(value):
+ assert isinstance(value, collections.Sequence)
+ tokens = [util.text_type(elem) for elem in value]
+ return "{%s}" % (", ".join(tokens))
- :attr:`.JSONElement.astext`
+ return process
- """
- if not self._astext:
- return self.astext.cast(type_)
- else:
- return sql.cast(self, type_)
+colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
-class JSON(sqltypes.TypeEngine):
+class JSON(sqltypes.JSON):
"""Represent the Postgresql JSON type.
- The :class:`.JSON` type stores arbitrary JSON format data, e.g.::
-
- data_table = Table('data_table', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', JSON)
- )
+ This type is a specialization of the Core-level :class:`.types.JSON`
+ type. Be sure to read the documentation for :class:`.types.JSON` for
+ important tips regarding treatment of NULL values and ORM use.
- with engine.connect() as conn:
- conn.execute(
- data_table.insert(),
- data = {"key1": "value1", "key2": "value2"}
- )
+ .. versionchanged:: 1.1 :class:`.postgresql.JSON` is now a Postgresql-
+ specific specialization of the new :class:`.types.JSON` type.
- :class:`.JSON` provides several operations:
+ The operators provided by the Postgresql version of :class:`.JSON`
+ include:
- * Index operations::
+ * Index operations (the ``->`` operator)::
data_table.c.data['some key']
- * Index operations returning text (required for text comparison)::
+ data_table.c.data[5]
+
+
+ * Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
- * Index operations with a built-in CAST call::
+ * Index operations with CAST
+ (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
- data_table.c.data['some key'].cast(Integer) == 5
+ data_table.c.data['some key'].astext.cast(Integer) == 5
- * Path index operations::
+ * Path index operations (the ``#>`` operator)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
- * Path index operations returning text (required for text comparison)::
+ * Path index operations returning text (the ``#>>`` operator)::
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
- 'some value'
+ data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == \
+'some value'
- Index operations return an instance of :class:`.JSONElement`, which
- represents an expression such as ``column -> index``. This element then
- defines methods such as :attr:`.JSONElement.astext` and
- :meth:`.JSONElement.cast` for setting up type behavior.
+ .. versionchanged:: 1.1 The :meth:`.ColumnElement.cast` operator on
+ JSON objects now requires that the :attr:`.JSON.Comparator.astext`
+ modifier be called explicitly, if the cast works only from a textual
+ string.
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
- detect in-place mutations to the structure. In order to detect these, the
- :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
- allow "in-place" changes to the datastructure to produce events which
- will be detected by the unit of work. See the example at :class:`.HSTORE`
- for a simple example involving a dictionary.
+ Index operations return an expression object whose type defaults to
+ :class:`.JSON` by default, so that further JSON-oriented instructions
+ may be called upon the result type.
Custom serializers and deserializers are specified at the dialect level,
that is using :func:`.create_engine`. The reason for this is that when
@@ -159,13 +119,17 @@ class JSON(sqltypes.TypeEngine):
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
- .. versionadded:: 0.9
+ .. seealso::
+
+ :class:`.types.JSON` - Core level JSON type
+
+ :class:`.JSONB`
"""
- __visit_name__ = 'JSON'
+ astext_type = sqltypes.Text()
- def __init__(self, none_as_null=False):
+ def __init__(self, none_as_null=False, astext_type=None):
"""Construct a :class:`.JSON` type.
:param none_as_null: if True, persist the value ``None`` as a
@@ -179,61 +143,51 @@ class JSON(sqltypes.TypeEngine):
.. versionchanged:: 0.9.8 - Added ``none_as_null``, and :func:`.null`
is now supported in order to persist a NULL value.
+ .. seealso::
+
+ :attr:`.JSON.NULL`
+
+ :param astext_type: the type to use for the
+ :attr:`.JSON.Comparator.astext`
+ accessor on indexed attributes. Defaults to :class:`.types.Text`.
+
+ .. versionadded:: 1.1
+
"""
- self.none_as_null = none_as_null
+ super(JSON, self).__init__(none_as_null=none_as_null)
+ if astext_type is not None:
+ self.astext_type = astext_type
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ class Comparator(sqltypes.JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
- def __getitem__(self, other):
- """Get the value at a given key."""
+ @property
+ def astext(self):
+ """On an indexed expression, use the "astext" (e.g. "->>")
+ conversion when rendered in SQL.
- return JSONElement(self.expr, other)
+ E.g.::
- def _adapt_expression(self, op, other_comparator):
- if isinstance(op, custom_op):
- if op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
+ select([data_table.c.data['some key'].astext])
- def bind_processor(self, dialect):
- json_serializer = dialect._json_serializer or json.dumps
- if util.py2k:
- encoding = dialect.encoding
-
- def process(value):
- if isinstance(value, elements.Null) or (
- value is None and self.none_as_null
- ):
- return None
- return json_serializer(value).encode(encoding)
- else:
- def process(value):
- if isinstance(value, elements.Null) or (
- value is None and self.none_as_null
- ):
- return None
- return json_serializer(value)
- return process
+ .. seealso::
- def result_processor(self, dialect, coltype):
- json_deserializer = dialect._json_deserializer or json.loads
- if util.py2k:
- encoding = dialect.encoding
-
- def process(value):
- if value is None:
- return None
- return json_deserializer(value.decode(encoding))
- else:
- def process(value):
- if value is None:
- return None
- return json_deserializer(value)
- return process
+ :meth:`.ColumnElement.cast`
+
+ """
+
+ if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
+ return self.expr.left.operate(
+ JSONPATH_ASTEXT,
+ self.expr.right, result_type=self.type.astext_type)
+ else:
+ return self.expr.left.operate(
+ ASTEXT, self.expr.right, result_type=self.type.astext_type)
+
+ comparator_factory = Comparator
+colspecs[sqltypes.JSON] = JSON
ischema_names['json'] = JSON
@@ -253,106 +207,68 @@ class JSONB(JSON):
data = {"key1": "value1", "key2": "value2"}
)
- :class:`.JSONB` provides several operations:
+ The :class:`.JSONB` type includes all operations provided by
+ :class:`.JSON`, including the same behaviors for indexing operations.
+ It also adds additional operators specific to JSONB, including
+ :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
+ :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
+ and :meth:`.JSONB.Comparator.contained_by`.
+
+ Like the :class:`.JSON` type, the :class:`.JSONB` type does not detect
+ in-place changes when used with the ORM, unless the
+ :mod:`sqlalchemy.ext.mutable` extension is used.
+
+ Custom serializers and deserializers
+ are shared with the :class:`.JSON` class, using the ``json_serializer``
+ and ``json_deserializer`` keyword arguments. These must be specified
+ at the dialect level using :func:`.create_engine`. When using
+ psycopg2, the serializers are associated with the jsonb type using
+ ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
+ in the same way that ``psycopg2.extras.register_default_json`` is used
+ to register these handlers with the json type.
- * Index operations::
-
- data_table.c.data['some key']
-
- * Index operations returning text (required for text comparison)::
-
- data_table.c.data['some key'].astext == 'some value'
-
- * Index operations with a built-in CAST call::
-
- data_table.c.data['some key'].cast(Integer) == 5
-
- * Path index operations::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')]
-
- * Path index operations returning text (required for text comparison)::
-
- data_table.c.data[('key_1', 'key_2', ..., 'key_n')].astext == \\
- 'some value'
-
- Index operations return an instance of :class:`.JSONElement`, which
- represents an expression such as ``column -> index``. This element then
- defines methods such as :attr:`.JSONElement.astext` and
- :meth:`.JSONElement.cast` for setting up type behavior.
-
- The :class:`.JSON` type, when used with the SQLAlchemy ORM, does not
- detect in-place mutations to the structure. In order to detect these, the
- :mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
- allow "in-place" changes to the datastructure to produce events which
- will be detected by the unit of work. See the example at :class:`.HSTORE`
- for a simple example involving a dictionary.
-
- Custom serializers and deserializers are specified at the dialect level,
- that is using :func:`.create_engine`. The reason for this is that when
- using psycopg2, the DBAPI only allows serializers at the per-cursor
- or per-connection level. E.g.::
+ .. versionadded:: 0.9.7
- engine = create_engine("postgresql://scott:tiger@localhost/test",
- json_serializer=my_serialize_fn,
- json_deserializer=my_deserialize_fn
- )
+ .. seealso::
- When using the psycopg2 dialect, the json_deserializer is registered
- against the database using ``psycopg2.extras.register_default_json``.
-
- .. versionadded:: 0.9.7
+ :class:`.JSON`
"""
__visit_name__ = 'JSONB'
- hashable = False
- class comparator_factory(sqltypes.Concatenable.Comparator):
+ class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
- def __getitem__(self, other):
- """Get the value at a given key."""
-
- return JSONElement(self.expr, other)
-
- def _adapt_expression(self, op, other_comparator):
- # How does one do equality?? jsonb also has "=" eg.
- # '[1,2,3]'::jsonb = '[1,2,3]'::jsonb
- if isinstance(op, custom_op):
- if op.opstring in ['?', '?&', '?|', '@>', '<@']:
- return op, sqltypes.Boolean
- if op.opstring == '->':
- return op, sqltypes.Text
- return sqltypes.Concatenable.Comparator.\
- _adapt_expression(self, op, other_comparator)
-
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""
- return self.expr.op('?')(other)
+ return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb
"""
- return self.expr.op('?&')(other)
+ return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb
"""
- return self.expr.op('?|')(other)
+ return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
- """Boolean expression. Test if keys (or array) are a superset of/contained
- the keys of the argument jsonb expression.
+ """Boolean expression. Test if keys (or array) are a superset
+ of/contained the keys of the argument jsonb expression.
"""
- return self.expr.op('@>')(other)
+ return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
"""
- return self.expr.op('<@')(other)
+ return self.operate(
+ CONTAINED_BY, other, result_type=sqltypes.Boolean)
+
+ comparator_factory = Comparator
ischema_names['jsonb'] = JSONB
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index 4ccc90208..9daab5851 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -1,5 +1,5 @@
# postgresql/pg8000.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
@@ -13,17 +13,30 @@
postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
:url: https://pythonhosted.org/pg8000/
+
+.. _pg8000_unicode:
+
Unicode
-------
-When communicating with the server, pg8000 **always uses the server-side
-character set**. SQLAlchemy has no ability to modify what character set
-pg8000 chooses to use, and additionally SQLAlchemy does no unicode conversion
-of any kind with the pg8000 backend. The origin of the client encoding setting
-is ultimately the CLIENT_ENCODING setting in postgresql.conf.
+pg8000 will encode / decode string values between it and the server using the
+PostgreSQL ``client_encoding`` parameter; by default this is the value in
+the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
+Typically, this can be changed to ``utf-8``, as a more useful default::
+
+ #client_encoding = sql_ascii # actually, defaults to database
+ # encoding
+ client_encoding = utf8
+
+The ``client_encoding`` can be overridden for a session by executing the SQL:
+
+SET CLIENT_ENCODING TO 'utf8';
+
+SQLAlchemy will execute this SQL on all new connections based on the value
+passed to :func:`.create_engine` using the ``client_encoding`` parameter::
-It is not necessary, though is also harmless, to pass the "encoding" parameter
-to :func:`.create_engine` when using pg8000.
+ engine = create_engine(
+ "postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
.. _pg8000_isolation_level:
@@ -58,6 +71,8 @@ from ... import types as sqltypes
from .base import (
PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext,
_DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES)
+import re
+from sqlalchemy.dialects.postgresql.json import JSON
class _PGNumeric(sqltypes.Numeric):
@@ -88,6 +103,15 @@ class _PGNumericNoBind(_PGNumeric):
return None
+class _PGJSON(JSON):
+
+ def result_processor(self, dialect, coltype):
+ if dialect._dbapi_version > (1, 10, 1):
+ return None # Has native JSON
+ else:
+ return super(_PGJSON, self).result_processor(dialect, coltype)
+
+
class PGExecutionContext_pg8000(PGExecutionContext):
pass
@@ -129,20 +153,30 @@ class PGDialect_pg8000(PGDialect):
PGDialect.colspecs,
{
sqltypes.Numeric: _PGNumericNoBind,
- sqltypes.Float: _PGNumeric
+ sqltypes.Float: _PGNumeric,
+ JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON
}
)
+ def __init__(self, client_encoding=None, **kwargs):
+ PGDialect.__init__(self, **kwargs)
+ self.client_encoding = client_encoding
+
def initialize(self, connection):
- if self.dbapi and hasattr(self.dbapi, '__version__'):
- self._dbapi_version = tuple([
- int(x) for x in
- self.dbapi.__version__.split(".")])
- else:
- self._dbapi_version = (99, 99, 99)
self.supports_sane_multi_rowcount = self._dbapi_version >= (1, 9, 14)
super(PGDialect_pg8000, self).initialize(connection)
+ @util.memoized_property
+ def _dbapi_version(self):
+ if self.dbapi and hasattr(self.dbapi, '__version__'):
+ return tuple(
+ [
+ int(x) for x in re.findall(
+ r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)])
+ else:
+ return (99, 99, 99)
+
@classmethod
def dbapi(cls):
return __import__('pg8000')
@@ -181,6 +215,16 @@ class PGDialect_pg8000(PGDialect):
(level, self.name, ", ".join(self._isolation_lookup))
)
+ def set_client_encoding(self, connection, client_encoding):
+ # adjust for ConnectionFairy possibly being present
+ if hasattr(connection, 'connection'):
+ connection = connection.connection
+
+ cursor = connection.cursor()
+ cursor.execute("SET CLIENT_ENCODING TO '" + client_encoding + "'")
+ cursor.execute("COMMIT")
+ cursor.close()
+
def do_begin_twophase(self, connection, xid):
connection.connection.tpc_begin((0, xid, ''))
@@ -198,4 +242,24 @@ class PGDialect_pg8000(PGDialect):
def do_recover_twophase(self, connection):
return [row[1] for row in connection.connection.tpc_recover()]
+ def on_connect(self):
+ fns = []
+ if self.client_encoding is not None:
+ def on_connect(conn):
+ self.set_client_encoding(conn, self.client_encoding)
+ fns.append(on_connect)
+
+ if self.isolation_level is not None:
+ def on_connect(conn):
+ self.set_isolation_level(conn, self.isolation_level)
+ fns.append(on_connect)
+
+ if len(fns) > 0:
+ def on_connect(conn):
+ for fn in fns:
+ fn(conn)
+ return on_connect
+ else:
+ return None
+
dialect = PGDialect_pg8000
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 4f1e04f20..fe245b21d 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -1,5 +1,5 @@
# postgresql/psycopg2.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -74,6 +74,8 @@ See also:
`PQconnectdbParams <http://www.postgresql.org/docs/9.1/static/\
libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
+.. _psycopg2_execution_options:
+
Per-Statement/Connection Execution Options
-------------------------------------------
@@ -81,16 +83,23 @@ The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
-* isolation_level - Set the transaction isolation level for the lifespan of a
+* ``isolation_level`` - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement
or query). See :ref:`psycopg2_isolation_level`.
-* stream_results - Enable or disable usage of psycopg2 server side cursors -
+* ``stream_results`` - Enable or disable usage of psycopg2 server side cursors -
this feature makes use of "named" cursors in combination with special
result handling methods so that result rows are not fully buffered.
If ``None`` or not set, the ``server_side_cursors`` option of the
:class:`.Engine` is used.
+* ``max_row_buffer`` - when using ``stream_results``, an integer value that
+ specifies the maximum number of rows to buffer at a time. This is
+ interpreted by the :class:`.BufferedRowResultProxy`, and if omitted the
+ buffer will grow to ultimately store 1000 rows at a time.
+
+ .. versionadded:: 1.0.6
+
.. _psycopg2_unicode:
Unicode with Psycopg2
@@ -311,7 +320,7 @@ from ...sql import expression
from ... import types as sqltypes
from .base import PGDialect, PGCompiler, \
PGIdentifierPreparer, PGExecutionContext, \
- ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\
+ ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\
_INT_TYPES, UUID
from .hstore import HSTORE
from .json import JSON, JSONB
@@ -354,7 +363,7 @@ class _PGNumeric(sqltypes.Numeric):
class _PGEnum(ENUM):
def result_processor(self, dialect, coltype):
- if util.py2k and self.convert_unicode is True:
+ if self.native_enum and util.py2k and self.convert_unicode is True:
# we can't easily use PG's extensions here because
# the OID is on the fly, and we need to give it a python
# function anyway - not really worth it.
@@ -501,10 +510,22 @@ class PGDialect_psycopg2(PGDialect):
preparer = PGIdentifierPreparer_psycopg2
psycopg2_version = (0, 0)
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 5),
+ native_jsonb=(2, 5, 4),
+ sane_multi_rowcount=(2, 0, 9),
+ array_oid=(2, 4, 3),
+ hstore_adapter=(2, 4)
+ )
+
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
+ engine_config_types = PGDialect.engine_config_types.union([
+ ('use_native_unicode', util.asbool),
+ ])
+
colspecs = util.update_copy(
PGDialect.colspecs,
{
@@ -513,6 +534,7 @@ class PGDialect_psycopg2(PGDialect):
sqltypes.Enum: _PGEnum, # needs force_unicode
HSTORE: _PGHStore,
JSON: _PGJSON,
+ sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID
}
@@ -543,11 +565,15 @@ class PGDialect_psycopg2(PGDialect):
self._has_native_hstore = self.use_native_hstore and \
self._hstore_oids(connection.connection) \
is not None
- self._has_native_json = self.psycopg2_version >= (2, 5)
- self._has_native_jsonb = self.psycopg2_version >= (2, 5, 4)
+ self._has_native_json = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json']
+ self._has_native_jsonb = \
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb']
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
- self.supports_sane_multi_rowcount = self.psycopg2_version >= (2, 0, 9)
+ self.supports_sane_multi_rowcount = \
+ self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['sane_multi_rowcount']
@classmethod
def dbapi(cls):
@@ -621,7 +647,8 @@ class PGDialect_psycopg2(PGDialect):
kw = {'oid': oid}
if util.py2k:
kw['unicode'] = True
- if self.psycopg2_version >= (2, 4, 3):
+ if self.psycopg2_version >= \
+ self.FEATURE_VERSION_MAP['array_oid']:
kw['array_oid'] = array_oid
extras.register_hstore(conn, **kw)
fns.append(on_connect)
@@ -646,7 +673,7 @@ class PGDialect_psycopg2(PGDialect):
@util.memoized_instancemethod
def _hstore_oids(self, conn):
- if self.psycopg2_version >= (2, 4):
+ if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
index f5c475d90..ab99a8392 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -1,5 +1,5 @@
# testing/engines.py
-# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -31,6 +31,18 @@ class PGDialect_psycopg2cffi(PGDialect_psycopg2):
driver = 'psycopg2cffi'
supports_unicode_statements = True
+ # psycopg2cffi's first release is 2.5.0, but reports
+ # __version__ as 2.4.4. Subsequent releases seem to have
+ # fixed this.
+
+ FEATURE_VERSION_MAP = dict(
+ native_json=(2, 4, 4),
+ native_jsonb=(2, 7, 1),
+ sane_multi_rowcount=(2, 4, 4),
+ array_oid=(2, 4, 4),
+ hstore_adapter=(2, 4, 4)
+ )
+
@classmethod
def dbapi(cls):
return __import__('psycopg2cffi')
diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
index 3ebd0135f..f2b850a9a 100644
--- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
@@ -1,5 +1,5 @@
# postgresql/pypostgresql.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -65,6 +65,23 @@ class PGDialect_pypostgresql(PGDialect):
from postgresql.driver import dbapi20
return dbapi20
+ _DBAPI_ERROR_NAMES = [
+ "Error",
+ "InterfaceError", "DatabaseError", "DataError",
+ "OperationalError", "IntegrityError", "InternalError",
+ "ProgrammingError", "NotSupportedError"
+ ]
+
+ @util.memoized_property
+ def dbapi_exception_translation_map(self):
+ if self.dbapi is None:
+ return {}
+
+ return dict(
+ (getattr(self.dbapi, name).__name__, name)
+ for name in self._DBAPI_ERROR_NAMES
+ )
+
def create_connect_args(self, url):
opts = url.translate_connect_args(username='user')
if 'port' in opts:
diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py
index 28f80d000..42a1cd4b1 100644
--- a/lib/sqlalchemy/dialects/postgresql/ranges.py
+++ b/lib/sqlalchemy/dialects/postgresql/ranges.py
@@ -1,4 +1,4 @@
-# Copyright (C) 2013-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2013-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py b/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
index 00b428f84..cc464601b 100644
--- a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
@@ -1,5 +1,5 @@
# postgresql/zxjdbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sqlite/__init__.py b/lib/sqlalchemy/dialects/sqlite/__init__.py
index a53d53e9d..a8dec300a 100644
--- a/lib/sqlalchemy/dialects/sqlite/__init__.py
+++ b/lib/sqlalchemy/dialects/sqlite/__init__.py
@@ -1,5 +1,5 @@
# sqlite/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py
index 1ed89bacb..b50170759 100644
--- a/lib/sqlalchemy/dialects/sqlite/base.py
+++ b/lib/sqlalchemy/dialects/sqlite/base.py
@@ -1,5 +1,5 @@
# sqlite/base.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -45,14 +45,20 @@ SQLite Auto Incrementing Behavior
Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html
-Two things to note:
+Key concepts:
-* The AUTOINCREMENT keyword is **not** required for SQLite tables to
- generate primary key values automatically. AUTOINCREMENT only means that the
- algorithm used to generate ROWID values should be slightly different.
-* SQLite does **not** generate primary key (i.e. ROWID) values, even for
- one column, if the table has a composite (i.e. multi-column) primary key.
- This is regardless of the AUTOINCREMENT keyword being present or not.
+* SQLite has an implicit "auto increment" feature that takes place for any
+ non-composite primary-key column that is specifically created using
+ "INTEGER PRIMARY KEY" for the type + primary key.
+
+* SQLite also has an explicit "AUTOINCREMENT" keyword, that is **not**
+ equivalent to the implicit autoincrement feature; this keyword is not
+ recommended for general use. SQLAlchemy does not render this keyword
+ unless a special SQLite-specific directive is used (see below). However,
+ it still requires that the column's type is named "INTEGER".
+
+Using the AUTOINCREMENT Keyword
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
@@ -62,6 +68,60 @@ construct::
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
+Allowing autoincrement behavior with SQLAlchemy types other than Integer/INTEGER
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+SQLite's typing model is based on naming conventions. Among
+other things, this means that any type name which contains the
+substring ``"INT"`` will be determined to be of "integer affinity". A
+type named ``"BIGINT"``, ``"SPECIAL_INT"`` or even ``"XYZINTQPR"`` will be considered by
+SQLite to be of "integer" affinity. However, **the SQLite
+autoincrement feature, whether implicitly or explicitly enabled,
+requires that the name of the column's type
+is exactly the string "INTEGER"**. Therefore, if an
+application uses a type like :class:`.BigInteger` for a primary key, on
+SQLite this type will need to be rendered as the name ``"INTEGER"`` when
+emitting the initial ``CREATE TABLE`` statement in order for the autoincrement
+behavior to be available.
+
+One approach to achieve this is to use :class:`.Integer` on SQLite
+only using :meth:`.TypeEngine.with_variant`::
+
+ table = Table(
+ "my_table", metadata,
+ Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
+ )
+
+Another is to use a subclass of :class:`.BigInteger` that overrides its DDL name
+to be ``INTEGER`` when compiled against SQLite::
+
+ from sqlalchemy import BigInteger
+ from sqlalchemy.ext.compiler import compiles
+
+ class SLBigInteger(BigInteger):
+ pass
+
+ @compiles(SLBigInteger, 'sqlite')
+ def bi_c(element, compiler, **kw):
+ return "INTEGER"
+
+ @compiles(SLBigInteger)
+ def bi_c(element, compiler, **kw):
+ return compiler.visit_BIGINT(element, **kw)
+
+
+ table = Table(
+ "my_table", metadata,
+ Column("id", SLBigInteger(), primary_key=True)
+ )
+
+.. seealso::
+
+ :meth:`.TypeEngine.with_variant`
+
+ :ref:`sqlalchemy.ext.compiler_toplevel`
+
+ `Datatypes In SQLite Version 3 <http://sqlite.org/datatype3.html>`_
.. _sqlite_concurrency:
@@ -272,6 +332,137 @@ lookup is used instead:
.. versionadded:: 0.9.3 Support for SQLite type affinity rules when reflecting
columns.
+
+.. _sqlite_partial_index:
+
+Partial Indexes
+---------------
+
+A partial index, e.g. one which uses a WHERE clause, can be specified
+with the DDL system using the argument ``sqlite_where``::
+
+ tbl = Table('testtbl', m, Column('data', Integer))
+ idx = Index('test_idx1', tbl.c.data,
+ sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
+
+The index will be rendered at create time as::
+
+ CREATE INDEX test_idx1 ON testtbl (data)
+ WHERE data > 5 AND data < 10
+
+.. versionadded:: 0.9.9
+
+.. _sqlite_dotted_column_names:
+
+Dotted Column Names
+-------------------
+
+Using table or column names that explicitly have periods in them is
+**not recommended**. While this is generally a bad idea for relational
+databases, as the dot is a syntactically significant character,
+the SQLite driver up until version **3.10.0** of SQLite has a bug which
+requires that SQLAlchemy filter out these dots in result sets.
+
+.. versionchanged:: 1.1
+
+ The following SQLite issue has been resolved as of version 3.10.0
+ of SQLite. SQLAlchemy as of **1.1** automatically disables its internal
+ workarounds based on detection of this version.
+
+The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
+
+ import sqlite3
+
+ assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
+
+ conn = sqlite3.connect(":memory:")
+ cursor = conn.cursor()
+
+ cursor.execute("create table x (a integer, b integer)")
+ cursor.execute("insert into x (a, b) values (1, 1)")
+ cursor.execute("insert into x (a, b) values (2, 2)")
+
+ cursor.execute("select x.a, x.b from x")
+ assert [c[0] for c in cursor.description] == ['a', 'b']
+
+ cursor.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert [c[0] for c in cursor.description] == ['a', 'b'], \\
+ [c[0] for c in cursor.description]
+
+The second assertion fails::
+
+ Traceback (most recent call last):
+ File "test.py", line 19, in <module>
+ [c[0] for c in cursor.description]
+ AssertionError: ['x.a', 'x.b']
+
+Where above, the driver incorrectly reports the names of the columns
+including the name of the table, which is entirely inconsistent vs.
+when the UNION is not present.
+
+SQLAlchemy relies upon column names being predictable in how they match
+to the original statement, so the SQLAlchemy dialect has no choice but
+to filter these out::
+
+
+ from sqlalchemy import create_engine
+
+ eng = create_engine("sqlite://")
+ conn = eng.connect()
+
+ conn.execute("create table x (a integer, b integer)")
+ conn.execute("insert into x (a, b) values (1, 1)")
+ conn.execute("insert into x (a, b) values (2, 2)")
+
+ result = conn.execute("select x.a, x.b from x")
+ assert result.keys() == ["a", "b"]
+
+ result = conn.execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["a", "b"]
+
+Note that above, even though SQLAlchemy filters out the dots, *both
+names are still addressable*::
+
+ >>> row = result.first()
+ >>> row["a"]
+ 1
+ >>> row["x.a"]
+ 1
+ >>> row["b"]
+ 1
+ >>> row["x.b"]
+ 1
+
+Therefore, the workaround applied by SQLAlchemy only impacts
+:meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()` in the public API.
+In the very specific case where
+an application is forced to use column names that contain dots, and the
+functionality of :meth:`.ResultProxy.keys` and :meth:`.RowProxy.keys()`
+is required to return these dotted names unmodified, the ``sqlite_raw_colnames``
+execution option may be provided, either on a per-:class:`.Connection` basis::
+
+ result = conn.execution_options(sqlite_raw_colnames=True).execute('''
+ select x.a, x.b from x where a=1
+ union
+ select x.a, x.b from x where a=2
+ ''')
+ assert result.keys() == ["x.a", "x.b"]
+
+or on a per-:class:`.Engine` basis::
+
+ engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
+
+When using the per-:class:`.Engine` execution option, note that
+**Core and ORM queries that use UNION may not function properly**.
+
"""
import datetime
@@ -672,12 +863,20 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
if not column.nullable:
colspec += " NOT NULL"
- if (column.primary_key and
- column.table.dialect_options['sqlite']['autoincrement'] and
- len(column.table.primary_key.columns) == 1 and
- issubclass(column.type._type_affinity, sqltypes.Integer) and
- not column.foreign_keys):
- colspec += " PRIMARY KEY AUTOINCREMENT"
+ if column.primary_key:
+ if (
+ column.autoincrement is True and
+ len(column.table.primary_key.columns) != 1
+ ):
+ raise exc.CompileError(
+ "SQLite does not support autoincrement for "
+ "composite primary keys")
+
+ if (column.table.dialect_options['sqlite']['autoincrement'] and
+ len(column.table.primary_key.columns) == 1 and
+ issubclass(column.type._type_affinity, sqltypes.Integer) and
+ not column.foreign_keys):
+ colspec += " PRIMARY KEY AUTOINCREMENT"
return colspec
@@ -713,9 +912,34 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
return preparer.format_table(table, use_schema=False)
- def visit_create_index(self, create):
- return super(SQLiteDDLCompiler, self).visit_create_index(
- create, include_table_schema=False)
+ def visit_create_index(self, create, include_schema=False,
+ include_table_schema=True):
+ index = create.element
+ self._verify_index_table(index)
+ preparer = self.preparer
+ text = "CREATE "
+ if index.unique:
+ text += "UNIQUE "
+ text += "INDEX %s ON %s (%s)" \
+ % (
+ self._prepared_index_name(index,
+ include_schema=True),
+ preparer.format_table(index.table,
+ use_schema=False),
+ ', '.join(
+ self.sql_compiler.process(
+ expr, include_table=False, literal_binds=True) for
+ expr in index.expressions)
+ )
+
+ whereclause = index.dialect_options["sqlite"]["where"]
+ if whereclause is not None:
+ where_compiled = self.sql_compiler.process(
+ whereclause, include_table=False,
+ literal_binds=True)
+ text += " WHERE " + where_compiled
+
+ return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
@@ -783,9 +1007,13 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
class SQLiteExecutionContext(default.DefaultExecutionContext):
@util.memoized_property
def _preserve_raw_colnames(self):
- return self.execution_options.get("sqlite_raw_colnames", False)
+ return not self.dialect._broken_dotted_colnames or \
+ self.execution_options.get("sqlite_raw_colnames", False)
def _translate_colname(self, colname):
+ # TODO: detect SQLite version 3.10.0 or greater;
+ # see [ticket:3633]
+
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname", or if using an attached database,
@@ -805,7 +1033,6 @@ class SQLiteDialect(default.DefaultDialect):
supports_empty_insert = False
supports_cast = True
supports_multivalues_insert = True
- supports_right_nested_joins = False
default_paramstyle = 'qmark'
execution_ctx_cls = SQLiteExecutionContext
@@ -823,10 +1050,14 @@ class SQLiteDialect(default.DefaultDialect):
construct_arguments = [
(sa_schema.Table, {
"autoincrement": False
- })
+ }),
+ (sa_schema.Index, {
+ "where": None,
+ }),
]
_broken_fk_pragma_quotes = False
+ _broken_dotted_colnames = False
def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
default.DefaultDialect.__init__(self, **kwargs)
@@ -839,6 +1070,11 @@ class SQLiteDialect(default.DefaultDialect):
self.native_datetime = native_datetime
if self.dbapi is not None:
+ self.supports_right_nested_joins = (
+ self.dbapi.sqlite_version_info >= (3, 7, 16))
+ self._broken_dotted_colnames = (
+ self.dbapi.sqlite_version_info < (3, 10, 0)
+ )
self.supports_default_values = (
self.dbapi.sqlite_version_info >= (3, 3, 8))
self.supports_cast = (
@@ -900,6 +1136,13 @@ class SQLiteDialect(default.DefaultDialect):
return None
@reflection.cache
+ def get_schema_names(self, connection, **kw):
+ s = "PRAGMA database_list"
+ dl = connection.execute(s)
+
+ return [db[1] for db in dl if db[1] != "temp"]
+
+ @reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
qschema = self.identifier_preparer.quote_identifier(schema)
@@ -995,7 +1238,7 @@ class SQLiteDialect(default.DefaultDialect):
'type': coltype,
'nullable': nullable,
'default': default,
- 'autoincrement': default is None,
+ 'autoincrement': 'auto',
'primary_key': primary_key,
}
@@ -1088,7 +1331,7 @@ class SQLiteDialect(default.DefaultDialect):
fk = fks[numerical_id] = {
'name': None,
'constrained_columns': [],
- 'referred_schema': None,
+ 'referred_schema': schema,
'referred_table': rtbl,
'referred_columns': [],
}
@@ -1192,7 +1435,7 @@ class SQLiteDialect(default.DefaultDialect):
unique_constraints = []
def parse_uqs():
- UNIQUE_PATTERN = '(?:CONSTRAINT (\w+) +)?UNIQUE *\((.+?)\)'
+ UNIQUE_PATTERN = '(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
'(?:(".+?")|([a-z0-9]+)) '
'+[a-z0-9_ ]+? +UNIQUE')
@@ -1271,7 +1514,7 @@ class SQLiteDialect(default.DefaultDialect):
qtable = quote(table_name)
statement = "%s%s(%s)" % (statement, pragma, qtable)
cursor = connection.execute(statement)
- if not cursor.closed:
+ if not cursor._soft_closed:
# work around SQLite issue whereby cursor.description
# is blank when PRAGMA returns no rows:
# http://www.sqlite.org/cvstrac/tktview?tn=1884
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
index 3c55a1de7..bbafc8d60 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlcipher.py
@@ -1,5 +1,5 @@
# sqlite/pysqlcipher.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
index 62c19d145..33d04deeb 100644
--- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py
+++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py
@@ -1,5 +1,5 @@
# sqlite/pysqlite.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sybase/__init__.py b/lib/sqlalchemy/dialects/sybase/__init__.py
index eb313592b..18535edcb 100644
--- a/lib/sqlalchemy/dialects/sybase/__init__.py
+++ b/lib/sqlalchemy/dialects/sybase/__init__.py
@@ -1,5 +1,5 @@
# sybase/__init__.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py
index 369420358..fba13c722 100644
--- a/lib/sqlalchemy/dialects/sybase/base.py
+++ b/lib/sqlalchemy/dialects/sybase/base.py
@@ -1,5 +1,5 @@
# sybase/base.py
-# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
@@ -323,7 +323,7 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
'milliseconds': 'millisecond'
})
- def get_select_precolumns(self, select):
+ def get_select_precolumns(self, select, **kw):
s = select._distinct and "DISTINCT " or ""
# TODO: don't think Sybase supports
# bind params for FIRST / TOP
@@ -336,11 +336,7 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
s += "TOP %s " % (limit,)
offset = select._offset
if offset:
- if not limit:
- # FIXME: sybase doesn't allow an offset without a limit
- # so use a huge value for TOP here
- s += "TOP 1000000 "
- s += "START AT %s " % (offset + 1,)
+ raise NotImplementedError("Sybase ASE does not support OFFSET")
return s
def get_from_hint_text(self, table, text):
@@ -608,8 +604,8 @@ class SybaseDialect(default.DefaultDialect):
FROM sysreferences r JOIN sysobjects o on r.tableid = o.id
WHERE r.tableid = :table_id
""")
- referential_constraints = connection.execute(REFCONSTRAINT_SQL,
- table_id=table_id)
+ referential_constraints = connection.execute(
+ REFCONSTRAINT_SQL, table_id=table_id).fetchall()
REFTABLE_SQL = text("""
SELECT o.name AS name, u.name AS 'schema'
@@ -740,10 +736,13 @@ class SybaseDialect(default.DefaultDialect):
results.close()
constrained_columns = []
- for i in range(1, pks["count"] + 1):
- constrained_columns.append(pks["pk_%i" % (i,)])
- return {"constrained_columns": constrained_columns,
- "name": pks["name"]}
+ if pks:
+ for i in range(1, pks["count"] + 1):
+ constrained_columns.append(pks["pk_%i" % (i,)])
+ return {"constrained_columns": constrained_columns,
+ "name": pks["name"]}
+ else:
+ return {"constrained_columns": [], "name": None}
@reflection.cache
def get_schema_names(self, connection, **kw):
diff --git a/lib/sqlalchemy/dialects/sybase/mxodbc.py b/lib/sqlalchemy/dialects/sybase/mxodbc.py
index 373bea05d..60e6510a5 100644
--- a/lib/sqlalchemy/dialects/sybase/mxodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/mxodbc.py
@@ -1,5 +1,5 @@
# sybase/mxodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py
index cb76d1379..348ca321d 100644
--- a/lib/sqlalchemy/dialects/sybase/pyodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -1,5 +1,5 @@
# sybase/pyodbc.py
-# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
diff --git a/lib/sqlalchemy/dialects/sybase/pysybase.py b/lib/sqlalchemy/dialects/sybase/pysybase.py
index 6843eb480..41ca47fd3 100644
--- a/lib/sqlalchemy/dialects/sybase/pysybase.py
+++ b/lib/sqlalchemy/dialects/sybase/pysybase.py
@@ -1,5 +1,5 @@
# sybase/pysybase.py
-# Copyright (C) 2010-2014 the SQLAlchemy authors and contributors
+# Copyright (C) 2010-2016 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under