summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy/sql
diff options
context:
space:
mode:
authorjonathan vanasco <jonathan@2xlp.com>2015-12-16 11:04:25 -0500
committerjonathan vanasco <jonathan@2xlp.com>2015-12-16 11:04:25 -0500
commitce25ac172d3b1be81025b7b541a9aa32b0286974 (patch)
tree7920084df122b2df19a44b2946ab0e52d4fe5958 /lib/sqlalchemy/sql
parent0a5dcdc2c4112478d87e5cd68c187e302f586834 (diff)
parent03ee22f342bbef9b15bfc989edda6a4ac3910508 (diff)
downloadsqlalchemy-ce25ac172d3b1be81025b7b541a9aa32b0286974.tar.gz
Merge branch 'master' of bitbucket.org:zzzeek/sqlalchemy
Diffstat (limited to 'lib/sqlalchemy/sql')
-rw-r--r--lib/sqlalchemy/sql/__init__.py2
-rw-r--r--lib/sqlalchemy/sql/compiler.py118
-rw-r--r--lib/sqlalchemy/sql/crud.py111
-rw-r--r--lib/sqlalchemy/sql/default_comparator.py35
-rw-r--r--lib/sqlalchemy/sql/dml.py162
-rw-r--r--lib/sqlalchemy/sql/elements.py450
-rw-r--r--lib/sqlalchemy/sql/expression.py14
-rw-r--r--lib/sqlalchemy/sql/functions.py196
-rw-r--r--lib/sqlalchemy/sql/operators.py66
-rw-r--r--lib/sqlalchemy/sql/schema.py232
-rw-r--r--lib/sqlalchemy/sql/selectable.py20
-rw-r--r--lib/sqlalchemy/sql/sqltypes.py306
-rw-r--r--lib/sqlalchemy/sql/type_api.py115
-rw-r--r--lib/sqlalchemy/sql/util.py23
14 files changed, 1558 insertions, 292 deletions
diff --git a/lib/sqlalchemy/sql/__init__.py b/lib/sqlalchemy/sql/__init__.py
index e8b70061d..fa2cf2399 100644
--- a/lib/sqlalchemy/sql/__init__.py
+++ b/lib/sqlalchemy/sql/__init__.py
@@ -21,6 +21,8 @@ from .expression import (
Update,
alias,
and_,
+ any_,
+ all_,
asc,
between,
bindparam,
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index e9c3d0efa..6766c99b7 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -97,6 +97,8 @@ OPERATORS = {
operators.exists: 'EXISTS ',
operators.distinct_op: 'DISTINCT ',
operators.inv: 'NOT ',
+ operators.any_op: 'ANY ',
+ operators.all_op: 'ALL ',
# modifiers
operators.desc_op: ' DESC',
@@ -281,6 +283,8 @@ class _CompileLabel(visitors.Visitable):
def type(self):
return self.element.type
+ def self_group(self, **kw):
+ return self
class SQLCompiler(Compiled):
@@ -761,6 +765,9 @@ class SQLCompiler(Compiled):
x += "END"
return x
+ def visit_type_coerce(self, type_coerce, **kw):
+ return type_coerce.typed_expression._compiler_dispatch(self, **kw)
+
def visit_cast(self, cast, **kwargs):
return "CAST(%s AS %s)" % \
(cast.clause._compiler_dispatch(self, **kwargs),
@@ -768,7 +775,7 @@ class SQLCompiler(Compiled):
def visit_over(self, over, **kwargs):
return "%s OVER (%s)" % (
- over.func._compiler_dispatch(self, **kwargs),
+ over.element._compiler_dispatch(self, **kwargs),
' '.join(
'%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs))
for word, clause in (
@@ -779,6 +786,12 @@ class SQLCompiler(Compiled):
)
)
+ def visit_withingroup(self, withingroup, **kwargs):
+ return "%s WITHIN GROUP (ORDER BY %s)" % (
+ withingroup.element._compiler_dispatch(self, **kwargs),
+ withingroup.order_by._compiler_dispatch(self, **kwargs)
+ )
+
def visit_funcfilter(self, funcfilter, **kwargs):
return "%s FILTER (WHERE %s)" % (
funcfilter.func._compiler_dispatch(self, **kwargs),
@@ -1270,9 +1283,6 @@ class SQLCompiler(Compiled):
return " AS " + alias_name_text
def _add_to_result_map(self, keyname, name, objects, type_):
- if not self.dialect.case_sensitive:
- keyname = keyname.lower()
-
self._result_columns.append((keyname, name, objects, type_))
def _label_select_column(self, select, column,
@@ -1789,9 +1799,9 @@ class SQLCompiler(Compiled):
return text
def visit_table(self, table, asfrom=False, iscrud=False, ashint=False,
- fromhints=None, **kwargs):
+ fromhints=None, use_schema=True, **kwargs):
if asfrom or ashint:
- if getattr(table, "schema", None):
+ if use_schema and getattr(table, "schema", None):
ret = self.preparer.quote_schema(table.schema) + \
"." + self.preparer.quote(table.name)
else:
@@ -1812,6 +1822,22 @@ class SQLCompiler(Compiled):
join.onclause._compiler_dispatch(self, **kwargs)
)
+ def _setup_crud_hints(self, stmt, table_text):
+ dialect_hints = dict([
+ (table, hint_text)
+ for (table, dialect), hint_text in
+ stmt._hints.items()
+ if dialect in ('*', self.dialect.name)
+ ])
+ if stmt.table in dialect_hints:
+ table_text = self.format_from_hint_text(
+ table_text,
+ stmt.table,
+ dialect_hints[stmt.table],
+ True
+ )
+ return dialect_hints, table_text
+
def visit_insert(self, insert_stmt, **kw):
self.stack.append(
{'correlate_froms': set(),
@@ -1853,19 +1879,10 @@ class SQLCompiler(Compiled):
table_text = preparer.format_table(insert_stmt.table)
if insert_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- insert_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if insert_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- insert_stmt.table,
- dialect_hints[insert_stmt.table],
- True
- )
+ dialect_hints, table_text = self._setup_crud_hints(
+ insert_stmt, table_text)
+ else:
+ dialect_hints = None
text += table_text
@@ -1957,19 +1974,8 @@ class SQLCompiler(Compiled):
crud_params = crud._get_crud_params(self, update_stmt, **kw)
if update_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- update_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if update_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- update_stmt.table,
- dialect_hints[update_stmt.table],
- True
- )
+ dialect_hints, table_text = self._setup_crud_hints(
+ update_stmt, table_text)
else:
dialect_hints = None
@@ -2038,22 +2044,8 @@ class SQLCompiler(Compiled):
self, asfrom=True, iscrud=True)
if delete_stmt._hints:
- dialect_hints = dict([
- (table, hint_text)
- for (table, dialect), hint_text in
- delete_stmt._hints.items()
- if dialect in ('*', self.dialect.name)
- ])
- if delete_stmt.table in dialect_hints:
- table_text = self.format_from_hint_text(
- table_text,
- delete_stmt.table,
- dialect_hints[delete_stmt.table],
- True
- )
-
- else:
- dialect_hints = None
+ dialect_hints, table_text = self._setup_crud_hints(
+ delete_stmt, table_text)
text += table_text
@@ -2139,11 +2131,11 @@ class DDLCompiler(Compiled):
table = create.element
preparer = self.dialect.identifier_preparer
- text = "\n" + " ".join(['CREATE'] +
- table._prefixes +
- ['TABLE',
- preparer.format_table(table),
- "("])
+ text = "\nCREATE "
+ if table._prefixes:
+ text += " ".join(table._prefixes) + " "
+ text += "TABLE " + preparer.format_table(table) + " ("
+
separator = "\n"
# if only one primary key, specify it along with the column
@@ -2168,10 +2160,10 @@ class DDLCompiler(Compiled):
))
const = self.create_table_constraints(
- table, _include_foreign_key_constraints=
- create.include_foreign_key_constraints)
+ table, _include_foreign_key_constraints= # noqa
+ create.include_foreign_key_constraints)
if const:
- text += ", \n\t" + const
+ text += separator + "\t" + const
text += "\n)%s\n\n" % self.post_create_table(table)
return text
@@ -2223,7 +2215,7 @@ class DDLCompiler(Compiled):
and (
not self.dialect.supports_alter or
not getattr(constraint, 'use_alter', False)
- )) if p is not None
+ )) if p is not None
)
def visit_drop_table(self, drop):
@@ -2299,6 +2291,16 @@ class DDLCompiler(Compiled):
text += " INCREMENT BY %d" % create.element.increment
if create.element.start is not None:
text += " START WITH %d" % create.element.start
+ if create.element.minvalue is not None:
+ text += " MINVALUE %d" % create.element.minvalue
+ if create.element.maxvalue is not None:
+ text += " MAXVALUE %d" % create.element.maxvalue
+ if create.element.nominvalue is not None:
+ text += " NO MINVALUE"
+ if create.element.nomaxvalue is not None:
+ text += " NO MAXVALUE"
+ if create.element.cycle is not None:
+ text += " CYCLE"
return text
def visit_drop_sequence(self, drop):
@@ -2379,7 +2381,7 @@ class DDLCompiler(Compiled):
text += "CONSTRAINT %s " % formatted_name
text += "PRIMARY KEY "
text += "(%s)" % ', '.join(self.preparer.quote(c.name)
- for c in constraint)
+ for c in constraint.columns_autoinc_first)
text += self.define_constraint_deferrability(constraint)
return text
diff --git a/lib/sqlalchemy/sql/crud.py b/lib/sqlalchemy/sql/crud.py
index 2e39f6b36..c5495ccde 100644
--- a/lib/sqlalchemy/sql/crud.py
+++ b/lib/sqlalchemy/sql/crud.py
@@ -196,8 +196,9 @@ def _scan_insert_from_select_cols(
if add_select_cols:
values.extend(add_select_cols)
compiler._insert_from_select = compiler._insert_from_select._generate()
- compiler._insert_from_select._raw_columns += tuple(
- expr for col, expr in add_select_cols)
+ compiler._insert_from_select._raw_columns = \
+ tuple(compiler._insert_from_select._raw_columns) + tuple(
+ expr for col, expr in add_select_cols)
def _scan_cols(
@@ -208,10 +209,22 @@ def _scan_cols(
implicit_return_defaults, postfetch_lastrowid = \
_get_returning_modifiers(compiler, stmt)
- cols = stmt.table.columns
+ if stmt._parameter_ordering:
+ parameter_ordering = [
+ _column_as_key(key) for key in stmt._parameter_ordering
+ ]
+ ordered_keys = set(parameter_ordering)
+ cols = [
+ stmt.table.c[key] for key in parameter_ordering
+ ] + [
+ c for c in stmt.table.c if c.key not in ordered_keys
+ ]
+ else:
+ cols = stmt.table.columns
for c in cols:
col_key = _getattr_col_key(c)
+
if col_key in parameters and col_key not in check_columns:
_append_param_parameter(
@@ -248,6 +261,10 @@ def _scan_cols(
elif implicit_return_defaults and \
c in implicit_return_defaults:
compiler.returning.append(c)
+ elif c.primary_key and \
+ c is not stmt.table._autoincrement_column and \
+ not c.nullable:
+ _raise_pk_with_no_anticipated_value(c)
elif compiler.isupdate:
_append_param_update(
@@ -285,6 +302,22 @@ def _append_param_parameter(
def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
+ """Create a primary key expression in the INSERT statement and
+ possibly a RETURNING clause for it.
+
+ If the column has a Python-side default, we will create a bound
+ parameter for it and "pre-execute" the Python function. If
+ the column has a SQL expression default, or is a sequence,
+ we will add it directly into the INSERT statement and add a
+ RETURNING element to get the new value. If the column has a
+ server side default or is marked as the "autoincrement" column,
+ we will add a RETRUNING element to get at the value.
+
+ If all the above tests fail, that indicates a primary key column with no
+ noted default generation capabilities that has no parameter passed;
+ raise an exception.
+
+ """
if c.default is not None:
if c.default.is_sequence:
if compiler.dialect.supports_sequences and \
@@ -303,9 +336,12 @@ def _append_param_insert_pk_returning(compiler, stmt, c, values, kw):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
-
- else:
+ elif c is stmt.table._autoincrement_column or c.server_default is not None:
compiler.returning.append(c)
+ elif not c.nullable:
+ # no .default, no .server_default, not autoincrement, we have
+ # no indication this primary key column will have any value
+ _raise_pk_with_no_anticipated_value(c)
def _create_prefetch_bind_param(compiler, c, process=True, name=None):
@@ -319,6 +355,7 @@ class _multiparam_column(elements.ColumnElement):
self.key = "%s_%d" % (original.key, index + 1)
self.original = original
self.default = original.default
+ self.type = original.type
def __eq__(self, other):
return isinstance(other, _multiparam_column) and \
@@ -341,18 +378,46 @@ def _process_multiparam_default_bind(compiler, c, index, kw):
def _append_param_insert_pk(compiler, stmt, c, values, kw):
+ """Create a bound parameter in the INSERT statement to receive a
+ 'prefetched' default value.
+
+ The 'prefetched' value indicates that we are to invoke a Python-side
+ default function or expliclt SQL expression before the INSERT statement
+ proceeds, so that we have a primary key value available.
+
+ if the column has no noted default generation capabilities, it has
+ no value passed in either; raise an exception.
+
+ """
if (
- (c.default is not None and
- (not c.default.is_sequence or
- compiler.dialect.supports_sequences)) or
- c is stmt.table._autoincrement_column and
- (compiler.dialect.supports_sequences or
- compiler.dialect.
- preexecute_autoincrement_sequences)
+ (
+ # column has a Python-side default
+ c.default is not None and
+ (
+ # and it won't be a Sequence
+ not c.default.is_sequence or
+ compiler.dialect.supports_sequences
+ )
+ )
+ or
+ (
+ # column is the "autoincrement column"
+ c is stmt.table._autoincrement_column and
+ (
+ # and it's either a "sequence" or a
+ # pre-executable "autoincrement" sequence
+ compiler.dialect.supports_sequences or
+ compiler.dialect.preexecute_autoincrement_sequences
+ )
+ )
):
values.append(
(c, _create_prefetch_bind_param(compiler, c))
)
+ elif c.default is None and c.server_default is None and not c.nullable:
+ # no .default, no .server_default, not autoincrement, we have
+ # no indication this primary key column will have any value
+ _raise_pk_with_no_anticipated_value(c)
def _append_param_insert_hasdefault(
@@ -428,6 +493,7 @@ def _append_param_update(
else:
compiler.postfetch.append(c)
elif implicit_return_defaults and \
+ stmt._return_defaults is not True and \
c in implicit_return_defaults:
compiler.returning.append(c)
@@ -554,3 +620,24 @@ def _get_returning_modifiers(compiler, stmt):
return need_pks, implicit_returning, \
implicit_return_defaults, postfetch_lastrowid
+
+
+def _raise_pk_with_no_anticipated_value(c):
+ msg = (
+ "Column '%s.%s' is marked as a member of the "
+ "primary key for table '%s', "
+ "but has no Python-side or server-side default generator indicated, "
+ "nor does it indicate 'autoincrement=True' or 'nullable=True', "
+ "and no explicit value is passed. "
+ "Primary key columns typically may not store NULL."
+ %
+ (c.table.fullname, c.name, c.table.fullname))
+ if len(c.table.primary_key.columns) > 1:
+ msg += (
+ " Note that as of SQLAlchemy 1.1, 'autoincrement=True' must be "
+ "indicated explicitly for composite (e.g. multicolumn) primary "
+ "keys if AUTO_INCREMENT/SERIAL/IDENTITY "
+ "behavior is expected for one of the columns in the primary key. "
+ "CREATE TABLE statements are impacted by this change as well on "
+ "most backends.")
+ raise exc.CompileError(msg)
diff --git a/lib/sqlalchemy/sql/default_comparator.py b/lib/sqlalchemy/sql/default_comparator.py
index e77ad765c..68ea5624e 100644
--- a/lib/sqlalchemy/sql/default_comparator.py
+++ b/lib/sqlalchemy/sql/default_comparator.py
@@ -14,7 +14,8 @@ from . import operators
from .elements import BindParameter, True_, False_, BinaryExpression, \
Null, _const_expr, _clause_element_as_expr, \
ClauseList, ColumnElement, TextClause, UnaryExpression, \
- collate, _is_literal, _literal_as_text, ClauseElement, and_, or_
+ collate, _is_literal, _literal_as_text, ClauseElement, and_, or_, \
+ Slice, Visitable, _literal_as_binds
from .selectable import SelectBase, Alias, Selectable, ScalarSelect
@@ -161,6 +162,34 @@ def _in_impl(expr, op, seq_or_selectable, negate_op, **kw):
negate=negate_op)
+def _getitem_impl(expr, op, other, **kw):
+ if isinstance(expr.type, type_api.INDEXABLE):
+ if isinstance(other, slice):
+ if expr.type.zero_indexes:
+ other = slice(
+ other.start + 1,
+ other.stop + 1,
+ other.step
+ )
+ other = Slice(
+ _literal_as_binds(
+ other.start, name=expr.key, type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ other.stop, name=expr.key, type_=type_api.INTEGERTYPE),
+ _literal_as_binds(
+ other.step, name=expr.key, type_=type_api.INTEGERTYPE)
+ )
+ else:
+ if expr.type.zero_indexes:
+ other += 1
+
+ other = _literal_as_binds(
+ other, name=expr.key, type_=type_api.INTEGERTYPE)
+ return _binary_operate(expr, op, other, **kw)
+ else:
+ _unsupported_impl(expr, op, other, **kw)
+
+
def _unsupported_impl(expr, op, *arg, **kw):
raise NotImplementedError("Operator '%s' is not supported on "
"this expression" % op.__name__)
@@ -260,7 +289,7 @@ operator_lookup = {
"between_op": (_between_impl, ),
"notbetween_op": (_between_impl, ),
"neg": (_neg_impl,),
- "getitem": (_unsupported_impl,),
+ "getitem": (_getitem_impl,),
"lshift": (_unsupported_impl,),
"rshift": (_unsupported_impl,),
}
@@ -280,7 +309,7 @@ def _check_literal(expr, operator, other):
if isinstance(other, (SelectBase, Alias)):
return other.as_scalar()
- elif not isinstance(other, (ColumnElement, TextClause)):
+ elif not isinstance(other, Visitable):
return expr._bind_param(operator, other)
else:
return other
diff --git a/lib/sqlalchemy/sql/dml.py b/lib/sqlalchemy/sql/dml.py
index 6756f1554..22c534153 100644
--- a/lib/sqlalchemy/sql/dml.py
+++ b/lib/sqlalchemy/sql/dml.py
@@ -27,6 +27,7 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
_execution_options = \
Executable._execution_options.union({'autocommit': True})
_hints = util.immutabledict()
+ _parameter_ordering = None
_prefixes = ()
def _process_colparams(self, parameters):
@@ -39,6 +40,16 @@ class UpdateBase(DialectKWArgs, HasPrefixes, Executable, ClauseElement):
else:
return p
+ if self._preserve_parameter_order and parameters is not None:
+ if not isinstance(parameters, list) or \
+ (parameters and not isinstance(parameters[0], tuple)):
+ raise ValueError(
+ "When preserve_parameter_order is True, "
+ "values() only accepts a list of 2-tuples")
+ self._parameter_ordering = [key for key, value in parameters]
+
+ return dict(parameters), False
+
if (isinstance(parameters, (list, tuple)) and parameters and
isinstance(parameters[0], (list, tuple, dict))):
@@ -178,6 +189,7 @@ class ValuesBase(UpdateBase):
_supports_multi_parameters = False
_has_multi_parameters = False
+ _preserve_parameter_order = False
select = None
def __init__(self, table, values, prefixes):
@@ -214,23 +226,32 @@ class ValuesBase(UpdateBase):
users.update().where(users.c.id==5).values(name="some name")
- :param \*args: Alternatively, a dictionary, tuple or list
- of dictionaries or tuples can be passed as a single positional
- argument in order to form the VALUES or
- SET clause of the statement. The single dictionary form
- works the same as the kwargs form::
+ :param \*args: As an alternative to passing key/value parameters,
+ a dictionary, tuple, or list of dictionaries or tuples can be passed
+ as a single positional argument in order to form the VALUES or
+ SET clause of the statement. The forms that are accepted vary
+ based on whether this is an :class:`.Insert` or an :class:`.Update`
+ construct.
+
+ For either an :class:`.Insert` or :class:`.Update` construct, a
+ single dictionary can be passed, which works the same as that of
+ the kwargs form::
users.insert().values({"name": "some name"})
- If a tuple is passed, the tuple should contain the same number
- of columns as the target :class:`.Table`::
+ users.update().values({"name": "some new name"})
+
+ Also for either form but more typically for the :class:`.Insert`
+ construct, a tuple that contains an entry for every column in the
+ table is also accepted::
users.insert().values((5, "some name"))
- The :class:`.Insert` construct also supports multiply-rendered VALUES
- construct, for those backends which support this SQL syntax
- (SQLite, Postgresql, MySQL). This mode is indicated by passing a
- list of one or more dictionaries/tuples::
+ The :class:`.Insert` construct also supports being passed a list
+ of dictionaries or full-table-tuples, which on the server will
+ render the less common SQL syntax of "multiple values" - this
+ syntax is supported on backends such as SQLite, Postgresql, MySQL,
+ but not necessarily others::
users.insert().values([
{"name": "some name"},
@@ -238,55 +259,61 @@ class ValuesBase(UpdateBase):
{"name": "yet another name"},
])
- In the case of an :class:`.Update`
- construct, only the single dictionary/tuple form is accepted,
- else an exception is raised. It is also an exception case to
- attempt to mix the single-/multiple- value styles together,
- either through multiple :meth:`.ValuesBase.values` calls
- or by sending a list + kwargs at the same time.
-
- .. note::
-
- Passing a multiple values list is *not* the same
- as passing a multiple values list to the
- :meth:`.Connection.execute` method. Passing a list of parameter
- sets to :meth:`.ValuesBase.values` produces a construct of this
- form::
-
- INSERT INTO table (col1, col2, col3) VALUES
- (col1_0, col2_0, col3_0),
- (col1_1, col2_1, col3_1),
- ...
-
- whereas a multiple list passed to :meth:`.Connection.execute`
- has the effect of using the DBAPI
- `executemany() <http://www.python.org/dev/peps/pep-0249/#id18>`_
- method, which provides a high-performance system of invoking
- a single-row INSERT or single-criteria UPDATE or DELETE statement
- many times against a series
- of parameter sets. The "executemany" style is supported by
- all database backends, and works equally well for INSERT,
- UPDATE, and DELETE, as it does not depend on a special SQL
- syntax. See :ref:`execute_multiple` for an introduction to
- the traditional Core method of multiple parameter set invocation
- using this system.
-
- .. versionadded:: 0.8
- Support for multiple-VALUES INSERT statements.
-
- .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
- clause, even a list of length one,
- implies that the :paramref:`.Insert.inline` flag is set to
- True, indicating that the statement will not attempt to fetch
- the "last inserted primary key" or other defaults. The statement
- deals with an arbitrary number of rows, so the
- :attr:`.ResultProxy.inserted_primary_key` accessor does not apply.
-
- .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
- columns with Python side default values and callables in the
- same way as that of an "executemany" style of invocation; the
- callable is invoked for each row. See :ref:`bug_3288`
- for other details.
+ The above form would render a multiple VALUES statement similar to::
+
+ INSERT INTO users (name) VALUES
+ (:name_1),
+ (:name_2),
+ (:name_3)
+
+ It is essential to note that **passing multiple values is
+ NOT the same as using traditional executemany() form**. The above
+ syntax is a **special** syntax not typically used. To emit an
+ INSERT statement against mutliple rows, the normal method is
+ to pass a mutiple values list to the :meth:`.Connection.execute`
+ method, which is supported by all database backends and is generally
+ more efficient for a very large number of parameters.
+
+ .. seealso::
+
+ :ref:`execute_multiple` - an introduction to
+ the traditional Core method of multiple parameter set
+ invocation for INSERTs and other statements.
+
+ .. versionchanged:: 1.0.0 an INSERT that uses a multiple-VALUES
+ clause, even a list of length one,
+ implies that the :paramref:`.Insert.inline` flag is set to
+ True, indicating that the statement will not attempt to fetch
+ the "last inserted primary key" or other defaults. The
+ statement deals with an arbitrary number of rows, so the
+ :attr:`.ResultProxy.inserted_primary_key` accessor does not
+ apply.
+
+ .. versionchanged:: 1.0.0 A multiple-VALUES INSERT now supports
+ columns with Python side default values and callables in the
+ same way as that of an "executemany" style of invocation; the
+ callable is invoked for each row. See :ref:`bug_3288`
+ for other details.
+
+ The :class:`.Update` construct supports a special form which is a
+ list of 2-tuples, which when provided must be passed in conjunction
+ with the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ parameter.
+ This form causes the UPDATE statement to render the SET clauses
+ using the order of parameters given to :meth:`.Update.values`, rather
+ than the ordering of columns given in the :class:`.Table`.
+
+ .. versionadded:: 1.0.10 - added support for parameter-ordered
+ UPDATE statements via the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag.
+
+ .. seealso::
+
+ :ref:`updates_order_parameters` - full example of the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order`
+ flag
.. seealso::
@@ -582,6 +609,7 @@ class Update(ValuesBase):
prefixes=None,
returning=None,
return_defaults=False,
+ preserve_parameter_order=False,
**dialect_kw):
"""Construct an :class:`.Update` object.
@@ -644,6 +672,19 @@ class Update(ValuesBase):
be available in the dictionary returned from
:meth:`.ResultProxy.last_updated_params`.
+ :param preserve_parameter_order: if True, the update statement is
+ expected to receive parameters **only** via the :meth:`.Update.values`
+ method, and they must be passed as a Python ``list`` of 2-tuples.
+ The rendered UPDATE statement will emit the SET clause for each
+ referenced column maintaining this order.
+
+ .. versionadded:: 1.0.10
+
+ .. seealso::
+
+ :ref:`updates_order_parameters` - full example of the
+ :paramref:`~sqlalchemy.sql.expression.update.preserve_parameter_order` flag
+
If both ``values`` and compile-time bind parameters are present, the
compile-time bind parameters override the information specified
within ``values`` on a per-key basis.
@@ -685,6 +726,7 @@ class Update(ValuesBase):
"""
+ self._preserve_parameter_order = preserve_parameter_order
ValuesBase.__init__(self, table, values, prefixes)
self._bind = bind
self._returning = returning
diff --git a/lib/sqlalchemy/sql/elements.py b/lib/sqlalchemy/sql/elements.py
index 27ecce2b0..70046c66b 100644
--- a/lib/sqlalchemy/sql/elements.py
+++ b/lib/sqlalchemy/sql/elements.py
@@ -124,67 +124,6 @@ def literal(value, type_=None):
return BindParameter(None, value, type_=type_, unique=True)
-def type_coerce(expression, type_):
- """Associate a SQL expression with a particular type, without rendering
- ``CAST``.
-
- E.g.::
-
- from sqlalchemy import type_coerce
-
- stmt = select([type_coerce(log_table.date_string, StringDateTime())])
-
- The above construct will produce SQL that is usually otherwise unaffected
- by the :func:`.type_coerce` call::
-
- SELECT date_string FROM log
-
- However, when result rows are fetched, the ``StringDateTime`` type
- will be applied to result rows on behalf of the ``date_string`` column.
-
- A type that features bound-value handling will also have that behavior
- take effect when literal values or :func:`.bindparam` constructs are
- passed to :func:`.type_coerce` as targets.
- For example, if a type implements the :meth:`.TypeEngine.bind_expression`
- method or :meth:`.TypeEngine.bind_processor` method or equivalent,
- these functions will take effect at statement compilation/execution time
- when a literal value is passed, as in::
-
- # bound-value handling of MyStringType will be applied to the
- # literal value "some string"
- stmt = select([type_coerce("some string", MyStringType)])
-
- :func:`.type_coerce` is similar to the :func:`.cast` function,
- except that it does not render the ``CAST`` expression in the resulting
- statement.
-
- :param expression: A SQL expression, such as a :class:`.ColumnElement`
- expression or a Python string which will be coerced into a bound literal
- value.
-
- :param type_: A :class:`.TypeEngine` class or instance indicating
- the type to which the expression is coerced.
-
- .. seealso::
-
- :func:`.cast`
-
- """
- type_ = type_api.to_instance(type_)
-
- if hasattr(expression, '__clause_element__'):
- return type_coerce(expression.__clause_element__(), type_)
- elif isinstance(expression, BindParameter):
- bp = expression._clone()
- bp.type = type_
- return bp
- elif not isinstance(expression, Visitable):
- if expression is None:
- return Null()
- else:
- return literal(expression, type_=type_)
- else:
- return Label(None, expression, type_=type_)
def outparam(key, type_=None):
@@ -700,6 +639,8 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
self.type._type_affinity
is type_api.BOOLEANTYPE._type_affinity):
return AsBoolean(self, operators.istrue, operators.isfalse)
+ elif (against in (operators.any_op, operators.all_op)):
+ return Grouping(self)
else:
return self
@@ -715,7 +656,14 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
@util.memoized_property
def comparator(self):
- return self.type.comparator_factory(self)
+ try:
+ comparator_factory = self.type.comparator_factory
+ except AttributeError:
+ raise TypeError(
+ "Object %r associated with '.type' attribute "
+ "is not a TypeEngine class or object" % self.type)
+ else:
+ return comparator_factory(self)
def __getattr__(self, key):
try:
@@ -837,6 +785,16 @@ class ColumnElement(operators.ColumnOperators, ClauseElement):
else:
return False
+ def cast(self, type_):
+ """Produce a type cast, i.e. ``CAST(<expression> AS <type>)``.
+
+ This is a shortcut to the :func:`~.expression.cast` function.
+
+ .. versionadded:: 1.0.7
+
+ """
+ return Cast(self, type_)
+
def label(self, name):
"""Produce a column label, i.e. ``<columnname> AS <name>``.
@@ -1128,8 +1086,7 @@ class BindParameter(ColumnElement):
_compared_to_type.coerce_compared_value(
_compared_to_operator, value)
else:
- self.type = type_api._type_map.get(type(value),
- type_api.NULLTYPE)
+ self.type = type_api._resolve_value_to_type(value)
elif isinstance(type_, type):
self.type = type_()
else:
@@ -1144,8 +1101,7 @@ class BindParameter(ColumnElement):
cloned.callable = None
cloned.required = False
if cloned.type is type_api.NULLTYPE:
- cloned.type = type_api._type_map.get(type(value),
- type_api.NULLTYPE)
+ cloned.type = type_api._resolve_value_to_type(value)
return cloned
@property
@@ -1840,9 +1796,12 @@ class BooleanClauseList(ClauseList, ColumnElement):
def _construct(cls, operator, continue_on, skip_on, *clauses, **kw):
convert_clauses = []
- clauses = util.coerce_generator_arg(clauses)
+ clauses = [
+ _expression_literal_as_text(clause)
+ for clause in
+ util.coerce_generator_arg(clauses)
+ ]
for clause in clauses:
- clause = _expression_literal_as_text(clause)
if isinstance(clause, continue_on):
continue
@@ -2327,6 +2286,109 @@ class Cast(ColumnElement):
return self.clause._from_objects
+class TypeCoerce(ColumnElement):
+ """Represent a Python-side type-coercion wrapper.
+
+ :class:`.TypeCoerce` supplies the :func:`.expression.type_coerce`
+ function; see that function for usage details.
+
+ .. versionchanged:: 1.1 The :func:`.type_coerce` function now produces
+ a persistent :class:`.TypeCoerce` wrapper object rather than
+ translating the given object in place.
+
+ .. seealso::
+
+ :func:`.expression.type_coerce`
+
+ """
+
+ __visit_name__ = 'type_coerce'
+
+ def __init__(self, expression, type_):
+ """Associate a SQL expression with a particular type, without rendering
+ ``CAST``.
+
+ E.g.::
+
+ from sqlalchemy import type_coerce
+
+ stmt = select([
+ type_coerce(log_table.date_string, StringDateTime())
+ ])
+
+ The above construct will produce a :class:`.TypeCoerce` object, which
+ renders SQL that labels the expression, but otherwise does not
+ modify its value on the SQL side::
+
+ SELECT date_string AS anon_1 FROM log
+
+ When result rows are fetched, the ``StringDateTime`` type
+ will be applied to result rows on behalf of the ``date_string`` column.
+ The rationale for the "anon_1" label is so that the type-coerced
+ column remains separate in the list of result columns vs. other
+ type-coerced or direct values of the target column. In order to
+ provide a named label for the expression, use
+ :meth:`.ColumnElement.label`::
+
+ stmt = select([
+ type_coerce(
+ log_table.date_string, StringDateTime()).label('date')
+ ])
+
+
+ A type that features bound-value handling will also have that behavior
+ take effect when literal values or :func:`.bindparam` constructs are
+ passed to :func:`.type_coerce` as targets.
+ For example, if a type implements the
+ :meth:`.TypeEngine.bind_expression`
+ method or :meth:`.TypeEngine.bind_processor` method or equivalent,
+ these functions will take effect at statement compilation/execution
+ time when a literal value is passed, as in::
+
+ # bound-value handling of MyStringType will be applied to the
+ # literal value "some string"
+ stmt = select([type_coerce("some string", MyStringType)])
+
+ :func:`.type_coerce` is similar to the :func:`.cast` function,
+ except that it does not render the ``CAST`` expression in the resulting
+ statement.
+
+ :param expression: A SQL expression, such as a :class:`.ColumnElement`
+ expression or a Python string which will be coerced into a bound
+ literal value.
+
+ :param type_: A :class:`.TypeEngine` class or instance indicating
+ the type to which the expression is coerced.
+
+ .. seealso::
+
+ :func:`.cast`
+
+ """
+ self.type = type_api.to_instance(type_)
+ self.clause = _literal_as_binds(expression, type_=self.type)
+
+ def _copy_internals(self, clone=_clone, **kw):
+ self.clause = clone(self.clause, **kw)
+ self.__dict__.pop('typed_expression', None)
+
+ def get_children(self, **kwargs):
+ return self.clause,
+
+ @property
+ def _from_objects(self):
+ return self.clause._from_objects
+
+ @util.memoized_property
+ def typed_expression(self):
+ if isinstance(self.clause, BindParameter):
+ bp = self.clause._clone()
+ bp.type = self.type
+ return bp
+ else:
+ return self.clause
+
+
class Extract(ColumnElement):
"""Represent a SQL EXTRACT clause, ``extract(field FROM expr)``."""
@@ -2668,6 +2730,91 @@ class UnaryExpression(ColumnElement):
return self
+class CollectionAggregate(UnaryExpression):
+ """Forms the basis for right-hand collection operator modifiers
+ ANY and ALL.
+
+ The ANY and ALL keywords are available in different ways on different
+ backends. On Postgresql, they only work for an ARRAY type. On
+ MySQL, they only work for subqueries.
+
+ """
+ @classmethod
+ def _create_any(cls, expr):
+ """Produce an ANY expression.
+
+ This may apply to an array type for some dialects (e.g. postgresql),
+ or to a subquery for others (e.g. mysql). e.g.::
+
+ # postgresql '5 = ANY (somearray)'
+ expr = 5 == any_(mytable.c.somearray)
+
+ # mysql '5 = ANY (SELECT value FROM table)'
+ expr = 5 == any_(select([table.c.value]))
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.expression.all_`
+
+ """
+
+ expr = _literal_as_binds(expr)
+
+ if expr.is_selectable and hasattr(expr, 'as_scalar'):
+ expr = expr.as_scalar()
+ expr = expr.self_group()
+ return CollectionAggregate(
+ expr, operator=operators.any_op,
+ type_=type_api.NULLTYPE, wraps_column_expression=False)
+
+ @classmethod
+ def _create_all(cls, expr):
+ """Produce an ALL expression.
+
+ This may apply to an array type for some dialects (e.g. postgresql),
+ or to a subquery for others (e.g. mysql). e.g.::
+
+ # postgresql '5 = ALL (somearray)'
+ expr = 5 == all_(mytable.c.somearray)
+
+ # mysql '5 = ALL (SELECT value FROM table)'
+ expr = 5 == all_(select([table.c.value]))
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.expression.any_`
+
+ """
+
+ expr = _literal_as_binds(expr)
+ if expr.is_selectable and hasattr(expr, 'as_scalar'):
+ expr = expr.as_scalar()
+ expr = expr.self_group()
+ return CollectionAggregate(
+ expr, operator=operators.all_op,
+ type_=type_api.NULLTYPE, wraps_column_expression=False)
+
+ # operate and reverse_operate are hardwired to
+ # dispatch onto the type comparator directly, so that we can
+ # ensure "reversed" behavior.
+ def operate(self, op, *other, **kwargs):
+ if not operators.is_comparison(op):
+ raise exc.ArgumentError(
+ "Only comparison operators may be used with ANY/ALL")
+ kwargs['reverse'] = True
+ return self.comparator.operate(operators.mirror(op), *other, **kwargs)
+
+ def reverse_operate(self, op, other, **kwargs):
+ # comparison operators should never call reverse_operate
+ assert not operators.is_comparison(op)
+ raise exc.ArgumentError(
+ "Only comparison operators may be used with ANY/ALL")
+
+
class AsBoolean(UnaryExpression):
def __init__(self, element, operator, negate):
@@ -2779,6 +2926,32 @@ class BinaryExpression(ColumnElement):
return super(BinaryExpression, self)._negate()
+class Slice(ColumnElement):
+ """Represent SQL for a Python array-slice object.
+
+ This is not a specific SQL construct at this level, but
+ may be interpreted by specific dialects, e.g. Postgresql.
+
+ """
+ __visit_name__ = 'slice'
+
+ def __init__(self, start, stop, step):
+ self.start = start
+ self.stop = stop
+ self.step = step
+ self.type = type_api.NULLTYPE
+
+ def self_group(self, against=None):
+ assert against is operator.getitem
+ return self
+
+
+class IndexExpression(BinaryExpression):
+ """Represent the class of expressions that are like an "index" operation.
+ """
+ pass
+
+
class Grouping(ColumnElement):
"""Represent a grouping within a column expression"""
@@ -2839,21 +3012,21 @@ class Over(ColumnElement):
order_by = None
partition_by = None
- def __init__(self, func, partition_by=None, order_by=None):
+ def __init__(self, element, partition_by=None, order_by=None):
"""Produce an :class:`.Over` object against a function.
Used against aggregate or so-called "window" functions,
for database backends that support window functions.
- E.g.::
+ :func:`~.expression.over` is usually called using
+ the :meth:`.FunctionElement.over` method, e.g.::
- from sqlalchemy import over
- over(func.row_number(), order_by='x')
+ func.row_number().over(order_by='x')
- Would produce "ROW_NUMBER() OVER(ORDER BY x)".
+ Would produce ``ROW_NUMBER() OVER(ORDER BY x)``.
- :param func: a :class:`.FunctionElement` construct, typically
- generated by :data:`~.expression.func`.
+ :param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
+ or other compatible construct.
:param partition_by: a column element or string, or a list
of such, that will be used as the PARTITION BY clause
of the OVER construct.
@@ -2866,8 +3039,14 @@ class Over(ColumnElement):
.. versionadded:: 0.7
+ .. seealso::
+
+ :data:`.expression.func`
+
+ :func:`.expression.within_group`
+
"""
- self.func = func
+ self.element = element
if order_by is not None:
self.order_by = ClauseList(
*util.to_list(order_by),
@@ -2877,17 +3056,29 @@ class Over(ColumnElement):
*util.to_list(partition_by),
_literal_as_text=_literal_as_label_reference)
+ @property
+ def func(self):
+ """the element referred to by this :class:`.Over`
+ clause.
+
+ .. deprecated:: 1.1 the ``func`` element has been renamed to
+ ``.element``. The two attributes are synonymous though
+ ``.func`` is read-only.
+
+ """
+ return self.element
+
@util.memoized_property
def type(self):
- return self.func.type
+ return self.element.type
def get_children(self, **kwargs):
return [c for c in
- (self.func, self.partition_by, self.order_by)
+ (self.element, self.partition_by, self.order_by)
if c is not None]
def _copy_internals(self, clone=_clone, **kw):
- self.func = clone(self.func, **kw)
+ self.element = clone(self.element, **kw)
if self.partition_by is not None:
self.partition_by = clone(self.partition_by, **kw)
if self.order_by is not None:
@@ -2897,7 +3088,106 @@ class Over(ColumnElement):
def _from_objects(self):
return list(itertools.chain(
*[c._from_objects for c in
- (self.func, self.partition_by, self.order_by)
+ (self.element, self.partition_by, self.order_by)
+ if c is not None]
+ ))
+
+
+class WithinGroup(ColumnElement):
+ """Represent a WITHIN GROUP (ORDER BY) clause.
+
+ This is a special operator against so-called
+ "ordered set aggregate" and "hypothetical
+ set aggregate" functions, including ``percentile_cont()``,
+ ``rank()``, ``dense_rank()``, etc.
+
+ It's supported only by certain database backends, such as PostgreSQL,
+ Oracle and MS SQL Server.
+
+ The :class:`.WithinGroup` construct extracts its type from the
+ method :meth:`.FunctionElement.within_group_type`. If this returns
+ ``None``, the function's ``.type`` is used.
+
+ """
+ __visit_name__ = 'withingroup'
+
+ order_by = None
+
+ def __init__(self, element, *order_by):
+ """Produce a :class:`.WithinGroup` object against a function.
+
+ Used against so-called "ordered set aggregate" and "hypothetical
+ set aggregate" functions, including :class:`.percentile_cont`,
+ :class:`.rank`, :class:`.dense_rank`, etc.
+
+ :func:`~.expression.within_group` is usually called using
+ the :meth:`.FunctionElement.within_group` method, e.g.::
+
+ from sqlalchemy import within_group
+ stmt = select([
+ department.c.id,
+ func.percentile_cont(0.5).within_group(
+ department.c.salary.desc()
+ )
+ ])
+
+ The above statement would produce SQL similar to
+ ``SELECT department.id, percentile_cont(0.5)
+ WITHIN GROUP (ORDER BY department.salary DESC)``.
+
+ :param element: a :class:`.FunctionElement` construct, typically
+ generated by :data:`~.expression.func`.
+ :param \*order_by: one or more column elements that will be used
+ as the ORDER BY clause of the WITHIN GROUP construct.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :data:`.expression.func`
+
+ :func:`.expression.over`
+
+ """
+ self.element = element
+ if order_by is not None:
+ self.order_by = ClauseList(
+ *util.to_list(order_by),
+ _literal_as_text=_literal_as_label_reference)
+
+ def over(self, partition_by=None, order_by=None):
+ """Produce an OVER clause against this :class:`.WithinGroup`
+ construct.
+
+ This function has the same signature as that of
+ :meth:`.FunctionElement.over`.
+
+ """
+ return Over(self, partition_by=partition_by, order_by=order_by)
+
+ @util.memoized_property
+ def type(self):
+ wgt = self.element.within_group_type(self)
+ if wgt is not None:
+ return wgt
+ else:
+ return self.element.type
+
+ def get_children(self, **kwargs):
+ return [c for c in
+ (self.element, self.order_by)
+ if c is not None]
+
+ def _copy_internals(self, clone=_clone, **kw):
+ self.element = clone(self.element, **kw)
+ if self.order_by is not None:
+ self.order_by = clone(self.order_by, **kw)
+
+ @property
+ def _from_objects(self):
+ return list(itertools.chain(
+ *[c._from_objects for c in
+ (self.element, self.order_by)
if c is not None]
))
diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py
index 74b827d7e..27fae8ca4 100644
--- a/lib/sqlalchemy/sql/expression.py
+++ b/lib/sqlalchemy/sql/expression.py
@@ -15,7 +15,7 @@ class.
"""
__all__ = [
- 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
+ 'Alias', 'any_', 'all_', 'ClauseElement', 'ColumnCollection', 'ColumnElement',
'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select',
'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between',
'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct',
@@ -24,19 +24,19 @@ __all__ = [
'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast',
'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery',
'table', 'text',
- 'tuple_', 'type_coerce', 'union', 'union_all', 'update']
+ 'tuple_', 'type_coerce', 'union', 'union_all', 'update', 'within_group']
from .visitors import Visitable
from .functions import func, modifier, FunctionElement, Function
from ..util.langhelpers import public_factory
from .elements import ClauseElement, ColumnElement,\
- BindParameter, UnaryExpression, BooleanClauseList, \
+ BindParameter, CollectionAggregate, UnaryExpression, BooleanClauseList, \
Label, Cast, Case, ColumnClause, TextClause, Over, Null, \
True_, False_, BinaryExpression, Tuple, TypeClause, Extract, \
- Grouping, not_, \
+ Grouping, WithinGroup, not_, \
collate, literal_column, between,\
- literal, outparam, type_coerce, ClauseList, FunctionFilter
+ literal, outparam, TypeCoerce, ClauseList, FunctionFilter
from .elements import SavepointClause, RollbackToSavepointClause, \
ReleaseSavepointClause
@@ -57,6 +57,8 @@ from .dml import Insert, Update, Delete, UpdateBase, ValuesBase
# the functions to be available in the sqlalchemy.sql.* namespace and
# to be auto-cross-documenting from the function to the class itself.
+all_ = public_factory(CollectionAggregate._create_all, ".expression.all_")
+any_ = public_factory(CollectionAggregate._create_any, ".expression.any_")
and_ = public_factory(BooleanClauseList.and_, ".expression.and_")
or_ = public_factory(BooleanClauseList.or_, ".expression.or_")
bindparam = public_factory(BindParameter, ".expression.bindparam")
@@ -65,6 +67,7 @@ text = public_factory(TextClause._create_text, ".expression.text")
table = public_factory(TableClause, ".expression.table")
column = public_factory(ColumnClause, ".expression.column")
over = public_factory(Over, ".expression.over")
+within_group = public_factory(WithinGroup, ".expression.within_group")
label = public_factory(Label, ".expression.label")
case = public_factory(Case, ".expression.case")
cast = public_factory(Cast, ".expression.cast")
@@ -89,6 +92,7 @@ asc = public_factory(UnaryExpression._create_asc, ".expression.asc")
desc = public_factory(UnaryExpression._create_desc, ".expression.desc")
distinct = public_factory(
UnaryExpression._create_distinct, ".expression.distinct")
+type_coerce = public_factory(TypeCoerce, ".expression.type_coerce")
true = public_factory(True_._instance, ".expression.true")
false = public_factory(False_._instance, ".expression.false")
null = public_factory(Null._instance, ".expression.null")
diff --git a/lib/sqlalchemy/sql/functions.py b/lib/sqlalchemy/sql/functions.py
index 538a2c549..6cfbd12b3 100644
--- a/lib/sqlalchemy/sql/functions.py
+++ b/lib/sqlalchemy/sql/functions.py
@@ -12,9 +12,9 @@ from . import sqltypes, schema
from .base import Executable, ColumnCollection
from .elements import ClauseList, Cast, Extract, _literal_as_binds, \
literal_column, _type_from_args, ColumnElement, _clone,\
- Over, BindParameter, FunctionFilter
+ Over, BindParameter, FunctionFilter, Grouping, WithinGroup
from .selectable import FromClause, Select, Alias
-
+from . import util as sqlutil
from . import operators
from .visitors import VisitableType
from .. import util
@@ -116,6 +116,21 @@ class FunctionElement(Executable, ColumnElement, FromClause):
"""
return Over(self, partition_by=partition_by, order_by=order_by)
+ def within_group(self, *order_by):
+ """Produce a WITHIN GROUP (ORDER BY expr) clause against this function.
+
+ Used against so-called "ordered set aggregate" and "hypothetical
+ set aggregate" functions, including :class:`.percentile_cont`,
+ :class:`.rank`, :class:`.dense_rank`, etc.
+
+ See :func:`~.expression.within_group` for a full description.
+
+ .. versionadded:: 1.1
+
+
+ """
+ return WithinGroup(self, *order_by)
+
def filter(self, *criterion):
"""Produce a FILTER clause against this function.
@@ -157,6 +172,18 @@ class FunctionElement(Executable, ColumnElement, FromClause):
self._reset_exported()
FunctionElement.clauses._reset(self)
+ def within_group_type(self, within_group):
+ """For types that define their return type as based on the criteria
+ within a WITHIN GROUP (ORDER BY) expression, called by the
+ :class:`.WithinGroup` construct.
+
+ Returns None by default, in which case the function's normal ``.type``
+ is used.
+
+ """
+
+ return None
+
def alias(self, name=None, flat=False):
"""Produce a :class:`.Alias` construct against this
:class:`.FunctionElement`.
@@ -233,6 +260,16 @@ class FunctionElement(Executable, ColumnElement, FromClause):
return BindParameter(None, obj, _compared_to_operator=operator,
_compared_to_type=self.type, unique=True)
+ def self_group(self, against=None):
+ # for the moment, we are parenthesizing all array-returning
+ # expressions against getitem. This may need to be made
+ # more portable if in the future we support other DBs
+ # besides postgresql.
+ if against is operators.getitem:
+ return Grouping(self)
+ else:
+ return super(FunctionElement, self).self_group(against=against)
+
class _FunctionGenerator(object):
"""Generate :class:`.Function` objects based on getattr calls."""
@@ -483,7 +520,7 @@ class GenericFunction(util.with_metaclass(_GenericMeta, Function)):
def __init__(self, *args, **kwargs):
parsed_args = kwargs.pop('_parsed_args', None)
if parsed_args is None:
- parsed_args = [_literal_as_binds(c) for c in args]
+ parsed_args = [_literal_as_binds(c, self.name) for c in args]
self.packagenames = []
self._bind = kwargs.get('bind', None)
self.clause_expr = ClauseList(
@@ -528,10 +565,10 @@ class ReturnTypeFromArgs(GenericFunction):
"""Define a function whose return type is the same as its arguments."""
def __init__(self, *args, **kwargs):
- args = [_literal_as_binds(c) for c in args]
+ args = [_literal_as_binds(c, self.name) for c in args]
kwargs.setdefault('type_', _type_from_args(args))
kwargs['_parsed_args'] = args
- GenericFunction.__init__(self, *args, **kwargs)
+ super(ReturnTypeFromArgs, self).__init__(*args, **kwargs)
class coalesce(ReturnTypeFromArgs):
@@ -579,7 +616,7 @@ class count(GenericFunction):
def __init__(self, expression=None, **kwargs):
if expression is None:
expression = literal_column('*')
- GenericFunction.__init__(self, expression, **kwargs)
+ super(count, self).__init__(expression, **kwargs)
class current_date(AnsiFunction):
@@ -616,3 +653,150 @@ class sysdate(AnsiFunction):
class user(AnsiFunction):
type = sqltypes.String
+
+
+class array_agg(GenericFunction):
+ """support for the ARRAY_AGG function.
+
+ The ``func.array_agg(expr)`` construct returns an expression of
+ type :class:`.Array`.
+
+ e.g.::
+
+ stmt = select([func.array_agg(table.c.values)[2:5]])
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :func:`.postgresql.array_agg` - PostgreSQL-specific version that
+ returns :class:`.ARRAY`, which has PG-specific operators added.
+
+ """
+
+ type = sqltypes.Array
+
+ def __init__(self, *args, **kwargs):
+ args = [_literal_as_binds(c) for c in args]
+ kwargs.setdefault('type_', self.type(_type_from_args(args)))
+ kwargs['_parsed_args'] = args
+ super(array_agg, self).__init__(*args, **kwargs)
+
+
+class OrderedSetAgg(GenericFunction):
+ """Define a function where the return type is based on the sort
+ expression type as defined by the expression passed to the
+ :meth:`.FunctionElement.within_group` method."""
+
+ array_for_multi_clause = False
+
+ def within_group_type(self, within_group):
+ func_clauses = self.clause_expr.element
+ order_by = sqlutil.unwrap_order_by(within_group.order_by)
+ if self.array_for_multi_clause and len(func_clauses.clauses) > 1:
+ return sqltypes.Array(order_by[0].type)
+ else:
+ return order_by[0].type
+
+
+class mode(OrderedSetAgg):
+ """implement the ``mode`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression.
+
+ .. versionadded:: 1.1
+
+ """
+
+
+class percentile_cont(OrderedSetAgg):
+ """implement the ``percentile_cont`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression,
+ or if the arguments are an array, an :class:`.Array` of the sort
+ expression's type.
+
+ .. versionadded:: 1.1
+
+ """
+
+ array_for_multi_clause = True
+
+
+class percentile_disc(OrderedSetAgg):
+ """implement the ``percentile_disc`` ordered-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is the same as the sort expression,
+ or if the arguments are an array, an :class:`.Array` of the sort
+ expression's type.
+
+ .. versionadded:: 1.1
+
+ """
+
+ array_for_multi_clause = True
+
+
+class rank(GenericFunction):
+ """Implement the ``rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Integer`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Integer()
+
+
+class dense_rank(GenericFunction):
+ """Implement the ``dense_rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Integer`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Integer()
+
+
+class percent_rank(GenericFunction):
+ """Implement the ``percent_rank`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Numeric`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Numeric()
+
+
+class cume_dist(GenericFunction):
+ """Implement the ``cume_dist`` hypothetical-set aggregate function.
+
+ This function must be used with the :meth:`.FunctionElement.within_group`
+ modifier to supply a sort expression to operate upon.
+
+ The return type of this function is :class:`.Numeric`.
+
+ .. versionadded:: 1.1
+
+ """
+ type = sqltypes.Numeric()
diff --git a/lib/sqlalchemy/sql/operators.py b/lib/sqlalchemy/sql/operators.py
index 51f162c98..da3576466 100644
--- a/lib/sqlalchemy/sql/operators.py
+++ b/lib/sqlalchemy/sql/operators.py
@@ -214,10 +214,13 @@ class custom_op(object):
"""
__name__ = 'custom_op'
- def __init__(self, opstring, precedence=0, is_comparison=False):
+ def __init__(
+ self, opstring, precedence=0, is_comparison=False,
+ natural_self_precedent=False):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
+ self.natural_self_precedent = natural_self_precedent
def __eq__(self, other):
return isinstance(other, custom_op) and \
@@ -597,6 +600,14 @@ class ColumnOperators(Operators):
"""
return self.reverse_operate(div, other)
+ def __rmod__(self, other):
+ """Implement the ``%`` operator in reverse.
+
+ See :meth:`.ColumnOperators.__mod__`.
+
+ """
+ return self.reverse_operate(mod, other)
+
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`~.expression.between` clause against
the parent object, given the lower and upper range.
@@ -611,6 +622,24 @@ class ColumnOperators(Operators):
"""
return self.operate(distinct_op)
+ def any_(self):
+ """Produce a :func:`~.expression.any_` clause against the
+ parent object.
+
+ .. versionadded:: 1.1
+
+ """
+ return self.operate(any_op)
+
+ def all_(self):
+ """Produce a :func:`~.expression.all_` clause against the
+ parent object.
+
+ .. versionadded:: 1.1
+
+ """
+ return self.operate(all_op)
+
def __add__(self, other):
"""Implement the ``+`` operator.
@@ -744,6 +773,14 @@ def distinct_op(a):
return a.distinct()
+def any_op(a):
+ return a.any_()
+
+
+def all_op(a):
+ return a.all_()
+
+
def startswith_op(a, b, escape=None):
return a.startswith(b, escape=escape)
@@ -818,6 +855,28 @@ def is_ordering_modifier(op):
return op in (asc_op, desc_op,
nullsfirst_op, nullslast_op)
+
+def is_natural_self_precedent(op):
+ return op in _natural_self_precedent or \
+ isinstance(op, custom_op) and op.natural_self_precedent
+
+_mirror = {
+ gt: lt,
+ ge: le,
+ lt: gt,
+ le: ge
+}
+
+
+def mirror(op):
+ """rotate a comparison operator 180 degrees.
+
+ Note this is not the same as negation.
+
+ """
+ return _mirror.get(op, op)
+
+
_associative = _commutative.union([concat_op, and_, or_])
_natural_self_precedent = _associative.union([getitem])
@@ -826,12 +885,15 @@ parenthesize (a op b).
"""
+
_asbool = util.symbol('_asbool', canonical=-10)
_smallest = util.symbol('_smallest', canonical=-100)
_largest = util.symbol('_largest', canonical=100)
_PRECEDENCE = {
from_: 15,
+ any_op: 15,
+ all_op: 15,
getitem: 15,
mul: 8,
truediv: 8,
@@ -885,7 +947,7 @@ _PRECEDENCE = {
def is_precedent(operator, against):
- if operator is against and operator in _natural_self_precedent:
+ if operator is against and is_natural_self_precedent(operator):
return False
else:
return (_PRECEDENCE.get(operator,
diff --git a/lib/sqlalchemy/sql/schema.py b/lib/sqlalchemy/sql/schema.py
index a8989627d..42dbe72b2 100644
--- a/lib/sqlalchemy/sql/schema.py
+++ b/lib/sqlalchemy/sql/schema.py
@@ -572,18 +572,9 @@ class Table(DialectKWArgs, SchemaItem, TableClause):
def _init_collections(self):
pass
- @util.memoized_property
+ @property
def _autoincrement_column(self):
- for col in self.primary_key:
- if (col.autoincrement and col.type._type_affinity is not None and
- issubclass(col.type._type_affinity,
- type_api.INTEGERTYPE._type_affinity) and
- (not col.foreign_keys or
- col.autoincrement == 'ignore_fk') and
- isinstance(col.default, (type(None), Sequence)) and
- (col.server_default is None or
- col.server_default.reflected)):
- return col
+ return self.primary_key._autoincrement_column
@property
def key(self):
@@ -913,17 +904,40 @@ class Column(SchemaItem, ColumnClause):
argument is available such as ``server_default``, ``default``
and ``unique``.
- :param autoincrement: This flag may be set to ``False`` to
- indicate an integer primary key column that should not be
- considered to be the "autoincrement" column, that is
- the integer primary key column which generates values
- implicitly upon INSERT and whose value is usually returned
- via the DBAPI cursor.lastrowid attribute. It defaults
- to ``True`` to satisfy the common use case of a table
- with a single integer primary key column. If the table
- has a composite primary key consisting of more than one
- integer column, set this flag to True only on the
- column that should be considered "autoincrement".
+ :param autoincrement: Set up "auto increment" semantics for an integer
+ primary key column. The default value is the string ``"auto"``
+ which indicates that a single-column primary key that is of
+ an INTEGER type with no stated client-side or python-side defaults
+ should receive auto increment semantics automatically;
+ all other varieties of primary key columns will not. This
+ includes that :term:`DDL` such as Postgresql SERIAL or MySQL
+ AUTO_INCREMENT will be emitted for this column during a table
+ create, as well as that the column is assumed to generate new
+ integer primary key values when an INSERT statement invokes which
+ will be retrieved by the dialect.
+
+ The flag may be set to ``True`` to indicate that a column which
+ is part of a composite (e.g. multi-column) primary key should
+ have autoincrement semantics, though note that only one column
+ within a primary key may have this setting. It can also
+ be set to ``True`` to indicate autoincrement semantics on a
+ column that has a client-side or server-side default configured,
+ however note that not all dialects can accommodate all styles
+ of default as an "autoincrement". It can also be
+ set to ``False`` on a single-column primary key that has a
+ datatype of INTEGER in order to disable auto increment semantics
+ for that column.
+
+ .. versionchanged:: 1.1 The autoincrement flag now defaults to
+ ``"auto"`` which indicates autoincrement semantics by default
+ for single-column integer primary keys only; for composite
+ (multi-column) primary keys, autoincrement is never implicitly
+ enabled; as always, ``autoincrement=True`` will allow for
+ at most one of those columns to be an "autoincrement" column.
+ ``autoincrement=True`` may also be set on a :class:`.Column`
+ that has an explicit client-side or server-side default,
+ subject to limitations of the backend database and dialect.
+
The setting *only* has an effect for columns which are:
@@ -940,11 +954,8 @@ class Column(SchemaItem, ColumnClause):
primary_key=True, autoincrement='ignore_fk')
It is typically not desirable to have "autoincrement" enabled
- on such a column as its value intends to mirror that of a
- primary key column elsewhere.
-
- * have no server side or client side defaults (with the exception
- of Postgresql SERIAL).
+ on a column that refers to another via foreign key, as such a column
+ is required to refer to a value that originates from elsewhere.
The setting has these two effects on columns that meet the
above criteria:
@@ -961,20 +972,15 @@ class Column(SchemaItem, ColumnClause):
:ref:`sqlite_autoincrement`
- * The column will be considered to be available as
- cursor.lastrowid or equivalent, for those dialects which
- "post fetch" newly inserted identifiers after a row has
- been inserted (SQLite, MySQL, MS-SQL). It does not have
- any effect in this regard for databases that use sequences
- to generate primary key identifiers (i.e. Firebird, Postgresql,
- Oracle).
-
- .. versionchanged:: 0.7.4
- ``autoincrement`` accepts a special value ``'ignore_fk'``
- to indicate that autoincrementing status regardless of foreign
- key references. This applies to certain composite foreign key
- setups, such as the one demonstrated in the ORM documentation
- at :ref:`post_update`.
+ * The column will be considered to be available using an
+ "autoincrement" method specific to the backend database, such
+ as calling upon ``cursor.lastrowid``, using RETURNING in an
+ INSERT statement to get at a sequence-generated value, or using
+ special functions such as "SELECT scope_identity()".
+ These methods are highly specific to the DBAPIs and databases in
+ use and vary greatly, so care should be taken when associating
+ ``autoincrement=True`` with a custom default generation function.
+
:param default: A scalar, Python callable, or
:class:`.ColumnElement` expression representing the
@@ -984,8 +990,12 @@ class Column(SchemaItem, ColumnClause):
a positional argument; see that class for full detail on the
structure of the argument.
- Contrast this argument to ``server_default`` which creates a
- default generator on the database side.
+ Contrast this argument to :paramref:`.Column.server_default`
+ which creates a default generator on the database side.
+
+ .. seealso::
+
+ :ref:`metadata_defaults_toplevel`
:param doc: optional String that can be used by the ORM or similar
to document attributes. This attribute does not render SQL
@@ -1051,6 +1061,10 @@ class Column(SchemaItem, ColumnClause):
construct does not specify any DDL and the implementation is left
to the database, such as via a trigger.
+ .. seealso::
+
+ :ref:`server_defaults`
+
:param server_onupdate: A :class:`.FetchedValue` instance
representing a database-side default generation function. This
indicates to SQLAlchemy that a newly generated value will be
@@ -1128,7 +1142,7 @@ class Column(SchemaItem, ColumnClause):
self.system = kwargs.pop('system', False)
self.doc = kwargs.pop('doc', None)
self.onupdate = kwargs.pop('onupdate', None)
- self.autoincrement = kwargs.pop('autoincrement', True)
+ self.autoincrement = kwargs.pop('autoincrement', "auto")
self.constraints = set()
self.foreign_keys = set()
@@ -1263,12 +1277,12 @@ class Column(SchemaItem, ColumnClause):
if self.primary_key:
table.primary_key._replace(self)
- Table._autoincrement_column._reset(table)
elif self.key in table.primary_key:
raise exc.ArgumentError(
"Trying to redefine primary-key column '%s' as a "
"non-primary-key column on table '%s'" % (
self.key, table.fullname))
+
self.table = table
if self.index:
@@ -1981,13 +1995,14 @@ class ColumnDefault(DefaultGenerator):
try:
argspec = util.get_callable_argspec(fn, no_self=True)
except TypeError:
- return lambda ctx: fn()
+ return util.wrap_callable(lambda ctx: fn(), fn)
defaulted = argspec[3] is not None and len(argspec[3]) or 0
positionals = len(argspec[0]) - defaulted
if positionals == 0:
- return lambda ctx: fn()
+ return util.wrap_callable(lambda ctx: fn(), fn)
+
elif positionals == 1:
return fn
else:
@@ -2040,8 +2055,9 @@ class Sequence(DefaultGenerator):
is_sequence = True
- def __init__(self, name, start=None, increment=None, schema=None,
- optional=False, quote=None, metadata=None,
+ def __init__(self, name, start=None, increment=None, minvalue=None,
+ maxvalue=None, nominvalue=None, nomaxvalue=None, cycle=None,
+ schema=None, optional=False, quote=None, metadata=None,
quote_schema=None,
for_update=False):
"""Construct a :class:`.Sequence` object.
@@ -2057,6 +2073,53 @@ class Sequence(DefaultGenerator):
the database as the value of the "INCREMENT BY" clause. If ``None``,
the clause is omitted, which on most platforms indicates an
increment of 1.
+ :param minvalue: the minimum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "MINVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ minvalue of 1 and -2^63-1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param maxvalue: the maximum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "MAXVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ maxvalue of 2^63-1 and -1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param nominvalue: no minimum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "NO MINVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ minvalue of 1 and -2^63-1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param nomaxvalue: no maximum value of the sequence. This
+ value is used when the CREATE SEQUENCE command is emitted to
+ the database as the value of the "NO MAXVALUE" clause. If ``None``,
+ the clause is omitted, which on most platforms indicates a
+ maxvalue of 2^63-1 and -1 for ascending and descending sequences,
+ respectively.
+
+ .. versionadded:: 1.0.7
+
+ :param cycle: allows the sequence to wrap around when the maxvalue
+ or minvalue has been reached by an ascending or descending sequence
+ respectively. This value is used when the CREATE SEQUENCE command
+ is emitted to the database as the "CYCLE" clause. If the limit is
+ reached, the next number generated will be the minvalue or maxvalue,
+ respectively. If cycle=False (the default) any calls to nextval
+ after the sequence has reached its maximum value will return an
+ error.
+
+ .. versionadded:: 1.0.7
+
:param schema: Optional schema name for the sequence, if located
in a schema other than the default.
:param optional: boolean value, when ``True``, indicates that this
@@ -2101,6 +2164,11 @@ class Sequence(DefaultGenerator):
self.name = quoted_name(name, quote)
self.start = start
self.increment = increment
+ self.minvalue = minvalue
+ self.maxvalue = maxvalue
+ self.nominvalue = nominvalue
+ self.nomaxvalue = nomaxvalue
+ self.cycle = cycle
self.optional = optional
if metadata is not None and schema is None and metadata.schema:
self.schema = schema = metadata.schema
@@ -2972,11 +3040,77 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
self.columns.extend(columns)
+ PrimaryKeyConstraint._autoincrement_column._reset(self)
self._set_parent_with_dispatch(self.table)
def _replace(self, col):
+ PrimaryKeyConstraint._autoincrement_column._reset(self)
self.columns.replace(col)
+ @property
+ def columns_autoinc_first(self):
+ autoinc = self._autoincrement_column
+
+ if autoinc is not None:
+ return [autoinc] + [c for c in self.columns if c is not autoinc]
+ else:
+ return list(self.columns)
+
+ @util.memoized_property
+ def _autoincrement_column(self):
+
+ def _validate_autoinc(col, autoinc_true):
+ if col.type._type_affinity is None or not issubclass(
+ col.type._type_affinity,
+ type_api.INTEGERTYPE._type_affinity):
+ if autoinc_true:
+ raise exc.ArgumentError(
+ "Column type %s on column '%s' is not "
+ "compatible with autoincrement=True" % (
+ col.type,
+ col
+ ))
+ else:
+ return False
+ elif not isinstance(col.default, (type(None), Sequence)) and \
+ not autoinc_true:
+ return False
+ elif col.server_default is not None and not autoinc_true:
+ return False
+ elif (
+ col.foreign_keys and col.autoincrement
+ not in (True, 'ignore_fk')):
+ return False
+ return True
+
+ if len(self.columns) == 1:
+ col = list(self.columns)[0]
+
+ if col.autoincrement is True:
+ _validate_autoinc(col, True)
+ return col
+ elif (
+ col.autoincrement in ('auto', 'ignore_fk') and
+ _validate_autoinc(col, False)
+ ):
+ return col
+
+ else:
+ autoinc = None
+ for col in self.columns:
+ if col.autoincrement is True:
+ _validate_autoinc(col, True)
+ if autoinc is not None:
+ raise exc.ArgumentError(
+ "Only one Column may be marked "
+ "autoincrement=True, found both %s and %s." %
+ (col.name, autoinc.name)
+ )
+ else:
+ autoinc = col
+
+ return autoinc
+
class UniqueConstraint(ColumnCollectionConstraint):
"""A table-level UNIQUE constraint.
diff --git a/lib/sqlalchemy/sql/selectable.py b/lib/sqlalchemy/sql/selectable.py
index 245c54817..73341053d 100644
--- a/lib/sqlalchemy/sql/selectable.py
+++ b/lib/sqlalchemy/sql/selectable.py
@@ -224,7 +224,7 @@ class HasSuffixes(object):
stmt = select([col1, col2]).cte().suffix_with(
"cycle empno set y_cycle to 1 default 0", dialect="oracle")
- Multiple prefixes can be specified by multiple calls
+ Multiple suffixes can be specified by multiple calls
to :meth:`.suffix_with`.
:param \*expr: textual or :class:`.ClauseElement` construct which
@@ -1101,6 +1101,14 @@ class Alias(FromClause):
or 'anon'))
self.name = name
+ def self_group(self, target=None):
+ if isinstance(target, CompoundSelect) and \
+ isinstance(self.original, Select) and \
+ self.original._needs_parens_for_grouping():
+ return FromGrouping(self)
+
+ return super(Alias, self).self_group(target)
+
@property
def description(self):
if util.py3k:
@@ -3208,6 +3216,13 @@ class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
return None
return None
+ def _needs_parens_for_grouping(self):
+ return (
+ self._limit_clause is not None or
+ self._offset_clause is not None or
+ bool(self._order_by_clause.clauses)
+ )
+
def self_group(self, against=None):
"""return a 'grouping' construct as per the ClauseElement
specification.
@@ -3217,7 +3232,8 @@ class Select(HasPrefixes, HasSuffixes, GenerativeSelect):
expressions and should not require explicit use.
"""
- if isinstance(against, CompoundSelect):
+ if isinstance(against, CompoundSelect) and \
+ not self._needs_parens_for_grouping():
return self
return FromGrouping(self)
diff --git a/lib/sqlalchemy/sql/sqltypes.py b/lib/sqlalchemy/sql/sqltypes.py
index 7e2e601e2..4abb9b15a 100644
--- a/lib/sqlalchemy/sql/sqltypes.py
+++ b/lib/sqlalchemy/sql/sqltypes.py
@@ -13,10 +13,11 @@ import datetime as dt
import codecs
from .type_api import TypeEngine, TypeDecorator, to_instance
-from .elements import quoted_name, type_coerce, _defer_name
+from .elements import quoted_name, TypeCoerce as type_coerce, _defer_name
from .. import exc, util, processors
from .base import _bind_or_error, SchemaEventTarget
from . import operators
+from .. import inspection
from .. import event
from ..util import pickle
import decimal
@@ -68,7 +69,39 @@ class Concatenable(object):
)):
return operators.concat_op, self.expr.type
else:
- return op, self.expr.type
+ return super(Concatenable.Comparator, self)._adapt_expression(
+ op, other_comparator)
+
+ comparator_factory = Comparator
+
+
+class Indexable(object):
+ """A mixin that marks a type as supporting indexing operations,
+ such as array or JSON structures.
+
+
+ .. versionadded:: 1.1.0
+
+
+ """
+
+ zero_indexes = False
+ """if True, Python zero-based indexes should be interpreted as one-based
+ on the SQL expression side."""
+
+ class Comparator(TypeEngine.Comparator):
+
+ def _setup_getitem(self, index):
+ raise NotImplementedError()
+
+ def __getitem__(self, index):
+ operator, adjusted_right_expr, result_type = \
+ self._setup_getitem(index)
+ return self.operate(
+ operator,
+ adjusted_right_expr,
+ result_type=result_type
+ )
comparator_factory = Comparator
@@ -215,9 +248,6 @@ class String(Concatenable, TypeEngine):
self.convert_unicode != 'force_nocheck'
)
if needs_convert:
- to_unicode = processors.to_unicode_processor_factory(
- dialect.encoding, self.unicode_error)
-
if needs_isinstance:
return processors.to_conditional_unicode_processor_factory(
dialect.encoding, self.unicode_error)
@@ -1466,6 +1496,246 @@ class Interval(_DateAffinity, TypeDecorator):
return self.impl.coerce_compared_value(op, value)
+class Array(Indexable, Concatenable, TypeEngine):
+ """Represent a SQL Array type.
+
+ .. note:: This type serves as the basis for all ARRAY operations.
+ However, currently **only the Postgresql backend has support
+ for SQL arrays in SQLAlchemy**. It is recommended to use the
+ :class:`.postgresql.ARRAY` type directly when using ARRAY types
+ with PostgreSQL, as it provides additional operators specific
+ to that backend.
+
+ :class:`.Array` is part of the Core in support of various SQL standard
+ functions such as :class:`.array_agg` which explicitly involve arrays;
+ however, with the exception of the PostgreSQL backend and possibly
+ some third-party dialects, no other SQLAlchemy built-in dialect has
+ support for this type.
+
+ An :class:`.Array` type is constructed given the "type"
+ of element::
+
+ mytable = Table("mytable", metadata,
+ Column("data", Array(Integer))
+ )
+
+ The above type represents an N-dimensional array,
+ meaning a supporting backend such as Postgresql will interpret values
+ with any number of dimensions automatically. To produce an INSERT
+ construct that passes in a 1-dimensional array of integers::
+
+ connection.execute(
+ mytable.insert(),
+ data=[1,2,3]
+ )
+
+ The :class:`.Array` type can be constructed given a fixed number
+ of dimensions::
+
+ mytable = Table("mytable", metadata,
+ Column("data", Array(Integer, dimensions=2))
+ )
+
+ Sending a number of dimensions is optional, but recommended if the
+ datatype is to represent arrays of more than one dimension. This number
+ is used:
+
+ * When emitting the type declaration itself to the database, e.g.
+ ``INTEGER[][]``
+
+ * When translating Python values to database values, and vice versa, e.g.
+ an ARRAY of :class:`.Unicode` objects uses this number to efficiently
+ access the string values inside of array structures without resorting
+ to per-row type inspection
+
+ * When used with the Python ``getitem`` accessor, the number of dimensions
+ serves to define the kind of type that the ``[]`` operator should
+ return, e.g. for an ARRAY of INTEGER with two dimensions::
+
+ >>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
+ >>> expr = expr[6] # returns Integer
+
+ For 1-dimensional arrays, an :class:`.Array` instance with no
+ dimension parameter will generally assume single-dimensional behaviors.
+
+ SQL expressions of type :class:`.Array` have support for "index" and
+ "slice" behavior. The Python ``[]`` operator works normally here, given
+ integer indexes or slices. Arrays default to 1-based indexing.
+ The operator produces binary expression
+ constructs which will produce the appropriate SQL, both for
+ SELECT statements::
+
+ select([mytable.c.data[5], mytable.c.data[2:7]])
+
+ as well as UPDATE statements when the :meth:`.Update.values` method
+ is used::
+
+ mytable.update().values({
+ mytable.c.data[5]: 7,
+ mytable.c.data[2:7]: [1, 2, 3]
+ })
+
+ The :class:`.Array` type also provides for the operators
+ :meth:`.Array.Comparator.any` and :meth:`.Array.Comparator.all`.
+ The PostgreSQL-specific version of :class:`.Array` also provides additional
+ operators.
+
+ .. versionadded:: 1.1.0
+
+ .. seealso::
+
+ :class:`.postgresql.ARRAY`
+
+ """
+ __visit_name__ = 'ARRAY'
+
+ class Comparator(Indexable.Comparator, Concatenable.Comparator):
+
+ """Define comparison operations for :class:`.Array`.
+
+ More operators are available on the dialect-specific form
+ of this type. See :class:`.postgresql.ARRAY.Comparator`.
+
+ """
+
+ def _setup_getitem(self, index):
+ if isinstance(index, slice):
+ return_type = self.type
+ elif self.type.dimensions is None or self.type.dimensions == 1:
+ return_type = self.type.item_type
+ else:
+ adapt_kw = {'dimensions': self.type.dimensions - 1}
+ return_type = self.type.adapt(self.type.__class__, **adapt_kw)
+
+ return operators.getitem, index, return_type
+
+ @util.dependencies("sqlalchemy.sql.elements")
+ def any(self, elements, other, operator=None):
+ """Return ``other operator ANY (array)`` clause.
+
+ Argument places are switched, because ANY requires array
+ expression to be on the right hand-side.
+
+ E.g.::
+
+ from sqlalchemy.sql import operators
+
+ conn.execute(
+ select([table.c.data]).where(
+ table.c.data.any(7, operator=operators.lt)
+ )
+ )
+
+ :param other: expression to be compared
+ :param operator: an operator object from the
+ :mod:`sqlalchemy.sql.operators`
+ package, defaults to :func:`.operators.eq`.
+
+ .. seealso::
+
+ :func:`.sql.expression.any_`
+
+ :meth:`.Array.Comparator.all`
+
+ """
+ operator = operator if operator else operators.eq
+ return operator(
+ elements._literal_as_binds(other),
+ elements.CollectionAggregate._create_any(self.expr)
+ )
+
+ @util.dependencies("sqlalchemy.sql.elements")
+ def all(self, elements, other, operator=None):
+ """Return ``other operator ALL (array)`` clause.
+
+ Argument places are switched, because ALL requires array
+ expression to be on the right hand-side.
+
+ E.g.::
+
+ from sqlalchemy.sql import operators
+
+ conn.execute(
+ select([table.c.data]).where(
+ table.c.data.all(7, operator=operators.lt)
+ )
+ )
+
+ :param other: expression to be compared
+ :param operator: an operator object from the
+ :mod:`sqlalchemy.sql.operators`
+ package, defaults to :func:`.operators.eq`.
+
+ .. seealso::
+
+ :func:`.sql.expression.all_`
+
+ :meth:`.Array.Comparator.any`
+
+ """
+ operator = operator if operator else operators.eq
+ return operator(
+ elements._literal_as_binds(other),
+ elements.CollectionAggregate._create_all(self.expr)
+ )
+
+ comparator_factory = Comparator
+
+ def __init__(self, item_type, as_tuple=False, dimensions=None,
+ zero_indexes=False):
+ """Construct an :class:`.Array`.
+
+ E.g.::
+
+ Column('myarray', Array(Integer))
+
+ Arguments are:
+
+ :param item_type: The data type of items of this array. Note that
+ dimensionality is irrelevant here, so multi-dimensional arrays like
+ ``INTEGER[][]``, are constructed as ``Array(Integer)``, not as
+ ``Array(Array(Integer))`` or such.
+
+ :param as_tuple=False: Specify whether return results
+ should be converted to tuples from lists. This parameter is
+ not generally needed as a Python list corresponds well
+ to a SQL array.
+
+ :param dimensions: if non-None, the ARRAY will assume a fixed
+ number of dimensions. This impacts how the array is declared
+ on the database, how it goes about interpreting Python and
+ result values, as well as how expression behavior in conjunction
+ with the "getitem" operator works. See the description at
+ :class:`.Array` for additional detail.
+
+ :param zero_indexes=False: when True, index values will be converted
+ between Python zero-based and SQL one-based indexes, e.g.
+ a value of one will be added to all index values before passing
+ to the database.
+
+ """
+ if isinstance(item_type, Array):
+ raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
+ "handles multi-dimensional arrays of basetype")
+ if isinstance(item_type, type):
+ item_type = item_type()
+ self.item_type = item_type
+ self.as_tuple = as_tuple
+ self.dimensions = dimensions
+ self.zero_indexes = zero_indexes
+
+ @property
+ def hashable(self):
+ return self.as_tuple
+
+ @property
+ def python_type(self):
+ return list
+
+ def compare_values(self, x, y):
+ return x == y
+
+
class REAL(Float):
"""The SQL REAL type."""
@@ -1648,6 +1918,8 @@ class NullType(TypeEngine):
_isnull = True
+ hashable = False
+
def literal_processor(self, dialect):
def process(value):
return "NULL"
@@ -1704,6 +1976,26 @@ else:
_type_map[unicode] = Unicode()
_type_map[str] = String()
+_type_map_get = _type_map.get
+
+
+def _resolve_value_to_type(value):
+ _result_type = _type_map_get(type(value), False)
+ if _result_type is False:
+ # use inspect() to detect SQLAlchemy built-in
+ # objects.
+ insp = inspection.inspect(value, False)
+ if (
+ insp is not None and
+ # foil mock.Mock() and other impostors by ensuring
+ # the inspection target itself self-inspects
+ insp.__class__ in inspection._registrars
+ ):
+ raise exc.ArgumentError(
+ "Object %r is not legal as a SQL literal value" % value)
+ return NULLTYPE
+ else:
+ return _result_type
# back-assign to type_api
from . import type_api
@@ -1712,6 +2004,6 @@ type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
-type_api._type_map = _type_map
-
+type_api.INDEXABLE = Indexable
+type_api._resolve_value_to_type = _resolve_value_to_type
TypeEngine.Comparator.BOOLEANTYPE = BOOLEANTYPE
diff --git a/lib/sqlalchemy/sql/type_api.py b/lib/sqlalchemy/sql/type_api.py
index a55eed981..c367bc73e 100644
--- a/lib/sqlalchemy/sql/type_api.py
+++ b/lib/sqlalchemy/sql/type_api.py
@@ -13,6 +13,7 @@
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
+from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
@@ -20,6 +21,8 @@ INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
+INDEXABLE = None
+_resolve_value_to_type = None
class TypeEngine(Visitable):
@@ -90,7 +93,7 @@ class TypeEngine(Visitable):
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
- return op, other_comparator.type
+ return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
@@ -128,6 +131,76 @@ class TypeEngine(Visitable):
"""
+ should_evaluate_none = False
+ """If True, the Python constant ``None`` is considered to be handled
+ explicitly by this type.
+
+ The ORM uses this flag to indicate that a positive value of ``None``
+ is passed to the column in an INSERT statement, rather than omitting
+ the column from the INSERT statement which has the effect of firing
+ off column-level defaults. It also allows types which have special
+ behavior for Python None, such as a JSON type, to indicate that
+ they'd like to handle the None value explicitly.
+
+ To set this flag on an existing type, use the
+ :meth:`.TypeEngine.evaluates_none` method.
+
+ .. seealso::
+
+ :meth:`.TypeEngine.evaluates_none`
+
+ .. versionadded:: 1.1
+
+
+ """
+
+ def evaluates_none(self):
+ """Return a copy of this type which has the :attr:`.should_evaluate_none`
+ flag set to True.
+
+ E.g.::
+
+ Table(
+ 'some_table', metadata,
+ Column(
+ String(50).evaluates_none(),
+ nullable=True,
+ server_default='no value')
+ )
+
+ The ORM uses this flag to indicate that a positive value of ``None``
+ is passed to the column in an INSERT statement, rather than omitting
+ the column from the INSERT statement which has the effect of firing
+ off column-level defaults. It also allows for types which have
+ special behavior associated with the Python None value to indicate
+ that the value doesn't necessarily translate into SQL NULL; a
+ prime example of this is a JSON type which may wish to persist the
+ JSON value ``'null'``.
+
+ In all cases, the actual NULL SQL value can be always be
+ persisted in any column by using
+ the :obj:`~.expression.null` SQL construct in an INSERT statement
+ or associated with an ORM-mapped attribute.
+
+ .. versionadded:: 1.1
+
+ .. seealso::
+
+ :ref:`session_forcing_null` - in the ORM documentation
+
+ :paramref:`.postgresql.JSON.none_as_null` - Postgresql JSON
+ interaction with this flag.
+
+ :attr:`.TypeEngine.should_evaluate_none` - class-level flag
+
+ """
+ typ = self.copy()
+ typ.should_evaluate_none = True
+ return typ
+
+ def copy(self, **kw):
+ return self.adapt(self.__class__)
+
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
@@ -440,7 +513,7 @@ class TypeEngine(Visitable):
end-user customization of this behavior.
"""
- _coerced_type = _type_map.get(type(value), NULLTYPE)
+ _coerced_type = _resolve_value_to_type(value)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
@@ -577,7 +650,7 @@ class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
return self
-class TypeDecorator(TypeEngine):
+class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
@@ -602,7 +675,7 @@ class TypeDecorator(TypeEngine):
def process_result_value(self, value, dialect):
return value[7:]
- def copy(self):
+ def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
@@ -656,6 +729,26 @@ class TypeDecorator(TypeEngine):
else:
return self
+ .. warning::
+
+ Note that the **behavior of coerce_compared_value is not inherited
+ by default from that of the base type**.
+ If the :class:`.TypeDecorator` is augmenting a
+ type that requires special logic for certain types of operators,
+ this method **must** be overridden. A key example is when decorating
+ the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
+ the default rules of :meth:`.TypeEngine.coerce_compared_value` should
+ be used in order to deal with operators like index operations::
+
+ class MyJsonType(TypeDecorator):
+ impl = postgresql.JSON
+
+ def coerce_compared_value(self, op, value):
+ return self.impl.coerce_compared_value(op, value)
+
+ Without the above step, index operations such as ``mycol['foo']``
+ will cause the index value ``'foo'`` to be JSON encoded.
+
"""
__visit_name__ = "type_decorator"
@@ -757,6 +850,18 @@ class TypeDecorator(TypeEngine):
"""
return self.impl._type_affinity
+ def _set_parent(self, column):
+        """Support SchemaEventTarget"""
+
+ if isinstance(self.impl, SchemaEventTarget):
+ self.impl._set_parent(column)
+
+ def _set_parent_with_dispatch(self, parent):
+        """Support SchemaEventTarget"""
+
+ if isinstance(self.impl, SchemaEventTarget):
+ self.impl._set_parent_with_dispatch(parent)
+
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
@@ -1031,7 +1136,7 @@ class TypeDecorator(TypeEngine):
"""
return self
- def copy(self):
+ def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py
index 8f502fc86..f5aa9f228 100644
--- a/lib/sqlalchemy/sql/util.py
+++ b/lib/sqlalchemy/sql/util.py
@@ -154,6 +154,7 @@ def unwrap_order_by(clause):
without DESC/ASC/NULLS FIRST/NULLS LAST"""
cols = util.column_set()
+ result = []
stack = deque([clause])
while stack:
t = stack.popleft()
@@ -166,11 +167,13 @@ def unwrap_order_by(clause):
t = t.element
if isinstance(t, (_textual_label_reference)):
continue
- cols.add(t)
+ if t not in cols:
+ cols.add(t)
+ result.append(t)
else:
for c in t.get_children():
stack.append(c)
- return cols
+ return result
def clause_is_present(clause, search):
@@ -200,6 +203,21 @@ def surface_selectables(clause):
stack.append(elem.element)
+def surface_column_elements(clause):
+ """traverse and yield only outer-exposed column elements, such as would
+ be addressable in the WHERE clause of a SELECT if this element were
+ in the columns clause."""
+
+ stack = deque([clause])
+ while stack:
+ elem = stack.popleft()
+ yield elem
+ for sub in elem.get_children():
+ if isinstance(sub, FromGrouping):
+ continue
+ stack.append(sub)
+
+
def selectables_overlap(left, right):
"""Return True if left/right have some overlapping selectable"""
@@ -433,7 +451,6 @@ def criterion_as_pairs(expression, consider_as_foreign_keys=None,
return pairs
-
class ClauseAdapter(visitors.ReplacingCloningVisitor):
"""Clones and modifies clauses based on column correspondence.