summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorHajime Nakagami <nakagami@gmail.com>2013-04-06 17:45:42 +0900
committerHajime Nakagami <nakagami@gmail.com>2013-04-06 17:45:42 +0900
commitb4da894d02f2e99f0a604ad45f6c0e5058392d81 (patch)
tree1eb9887480bec5224924e379b2e0f03d10ce8ebb
parent7a4945ebdd1358502e1872560b97f1344b8c53db (diff)
parent6bdd3bb93fd18a4aec54ee2a836875a922dcaab3 (diff)
downloadsqlalchemy-b4da894d02f2e99f0a604ad45f6c0e5058392d81.tar.gz
merge from default
-rw-r--r--.hgtags1
-rw-r--r--README.dialects.rst8
-rw-r--r--doc/build/changelog/changelog_02.rst2
-rw-r--r--doc/build/changelog/changelog_03.rst2
-rw-r--r--doc/build/changelog/changelog_04.rst6
-rw-r--r--doc/build/changelog/changelog_06.rst4
-rw-r--r--doc/build/changelog/changelog_07.rst18
-rw-r--r--doc/build/changelog/changelog_08.rst97
-rw-r--r--doc/build/changelog/migration_08.rst56
-rw-r--r--doc/build/conf.py4
-rw-r--r--doc/build/core/tutorial.rst972
-rw-r--r--doc/build/core/types.rst2
-rw-r--r--doc/build/dialects/index.rst3
-rw-r--r--doc/build/glossary.rst64
-rw-r--r--doc/build/intro.rst2
-rw-r--r--doc/build/orm/session.rst2
-rw-r--r--doc/build/testdocs.py3
-rw-r--r--lib/sqlalchemy/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/__init__.py1
-rw-r--r--lib/sqlalchemy/dialects/firebird/kinterbasdb.py2
-rw-r--r--lib/sqlalchemy/dialects/mssql/pymssql.py4
-rw-r--r--lib/sqlalchemy/dialects/mssql/pyodbc.py2
-rw-r--r--lib/sqlalchemy/dialects/mysql/base.py1
-rw-r--r--lib/sqlalchemy/dialects/oracle/cx_oracle.py2
-rw-r--r--lib/sqlalchemy/dialects/postgresql/base.py12
-rw-r--r--lib/sqlalchemy/dialects/postgresql/hstore.py5
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pg8000.py2
-rw-r--r--lib/sqlalchemy/dialects/postgresql/psycopg2.py2
-rw-r--r--lib/sqlalchemy/dialects/sybase/pyodbc.py2
-rw-r--r--lib/sqlalchemy/engine/default.py6
-rw-r--r--lib/sqlalchemy/orm/attributes.py43
-rw-r--r--lib/sqlalchemy/orm/interfaces.py10
-rw-r--r--lib/sqlalchemy/orm/mapper.py18
-rw-r--r--lib/sqlalchemy/orm/persistence.py2
-rw-r--r--lib/sqlalchemy/orm/properties.py8
-rw-r--r--lib/sqlalchemy/orm/relationships.py27
-rw-r--r--lib/sqlalchemy/orm/util.py37
-rw-r--r--lib/sqlalchemy/pool.py4
-rw-r--r--lib/sqlalchemy/schema.py19
-rw-r--r--lib/sqlalchemy/sql/compiler.py17
-rw-r--r--lib/sqlalchemy/sql/expression.py139
-rw-r--r--lib/sqlalchemy/sql/util.py7
-rw-r--r--lib/sqlalchemy/testing/assertsql.py2
-rw-r--r--lib/sqlalchemy/testing/plugin/noseplugin.py8
-rw-r--r--lib/sqlalchemy/testing/profiling.py4
-rw-r--r--lib/sqlalchemy/testing/runner.py10
-rw-r--r--lib/sqlalchemy/testing/schema.py27
-rw-r--r--lib/sqlalchemy/testing/suite/test_insert.py4
-rw-r--r--lib/sqlalchemy/testing/suite/test_reflection.py10
-rw-r--r--lib/sqlalchemy/testing/util.py3
-rw-r--r--lib/sqlalchemy/types.py7
-rw-r--r--lib/sqlalchemy/util/__init__.py16
-rw-r--r--lib/sqlalchemy/util/_collections.py39
-rw-r--r--lib/sqlalchemy/util/compat.py159
-rw-r--r--lib/sqlalchemy/util/langhelpers.py28
-rw-r--r--lib/sqlalchemy/util/topological.py3
-rw-r--r--setup.cfg4
-rw-r--r--test/aaa_profiling/test_memusage.py2
-rw-r--r--test/dialect/test_mssql.py55
-rw-r--r--test/dialect/test_oracle.py5
-rw-r--r--test/dialect/test_postgresql.py19
-rw-r--r--test/orm/inheritance/test_basic.py67
-rw-r--r--test/orm/test_default_strategies.py1
-rw-r--r--test/orm/test_froms.py6
-rw-r--r--test/orm/test_mapper.py31
-rw-r--r--test/orm/test_query.py29
-rw-r--r--test/orm/test_rel_fn.py104
-rw-r--r--test/perf/stress_all.py2
-rw-r--r--test/profiles.txt2
-rw-r--r--test/sql/test_compiler.py645
-rw-r--r--test/sql/test_constraints.py13
-rw-r--r--test/sql/test_delete.py86
-rw-r--r--test/sql/test_functions.py2
-rw-r--r--test/sql/test_generative.py293
-rw-r--r--test/sql/test_insert.py312
-rw-r--r--test/sql/test_labels.py102
-rw-r--r--test/sql/test_query.py19
-rw-r--r--test/sql/test_returning.py20
-rw-r--r--test/sql/test_types.py1
-rw-r--r--test/sql/test_update.py646
80 files changed, 2701 insertions, 1705 deletions
diff --git a/.hgtags b/.hgtags
index 85921a7e2..402707e37 100644
--- a/.hgtags
+++ b/.hgtags
@@ -89,3 +89,4 @@ ebe9514a69a4b4ec6209f0e9aa43053ba28d080b rel_0_7_5
d557287431986274a796348750f1c6ce885b196c rel_0_7_6
6495bcf87e10461675d8905d62f5632e634ec33c rel_0_8_0b1
8d82961d34643c8d53d865ddf76911807a36fde6 rel_0_8_8b2
+662aaaa7bc65c47db7ecd2e0269f8a8fbb613acd rel_0_8_0
diff --git a/README.dialects.rst b/README.dialects.rst
index 2e1d20db8..26bc1edaf 100644
--- a/README.dialects.rst
+++ b/README.dialects.rst
@@ -112,7 +112,13 @@ Key aspects of this file layout include:
from sqlalchemy.testing import runner
- runner.main()
+ # use this in setup.py 'test_suite':
+ # test_suite="run_tests.setup_py_test"
+ def setup_py_test():
+ runner.setup_py_test()
+
+ if __name__ == '__main__':
+ runner.main()
Where above, the ``registry`` module, introduced in SQLAlchemy 0.8, provides
an in-Python means of installing the dialect entrypoints without the use
diff --git a/doc/build/changelog/changelog_02.rst b/doc/build/changelog/changelog_02.rst
index 3053659a8..600dcc6eb 100644
--- a/doc/build/changelog/changelog_02.rst
+++ b/doc/build/changelog/changelog_02.rst
@@ -678,7 +678,7 @@
modified version (works in py2.3/2.4!) that uses a threading.RLock
for a mutex. this is to fix a reported case where a ConnectionFairy's
__del__() method got called within the Queue's get() method, which
- then returns its connection to the Queue via the the put() method,
+ then returns its connection to the Queue via the put() method,
causing a reentrant hang unless threading.RLock is used.
.. change::
diff --git a/doc/build/changelog/changelog_03.rst b/doc/build/changelog/changelog_03.rst
index c1944c705..e47da340a 100644
--- a/doc/build/changelog/changelog_03.rst
+++ b/doc/build/changelog/changelog_03.rst
@@ -2092,7 +2092,7 @@
:tickets:
added a mutex to the mapper compilation step. ive been reluctant to add any
- kind of threading anything to SA but this is one spot that its its really
+ kind of threading anything to SA but this is one spot that its really
needed since mappers are typically "global", and while their state does not
change during normal operation, the initial compilation step does modify
internal state significantly, and this step usually occurs not at
diff --git a/doc/build/changelog/changelog_04.rst b/doc/build/changelog/changelog_04.rst
index 37b424df7..e1acfe4c0 100644
--- a/doc/build/changelog/changelog_04.rst
+++ b/doc/build/changelog/changelog_04.rst
@@ -2036,7 +2036,7 @@
new synonym() behavior: an attribute will be placed on the mapped
class, if one does not exist already, in all cases. if a property
already exists on the class, the synonym will decorate the property
- with the appropriate comparison operators so that it can be used in in
+ with the appropriate comparison operators so that it can be used in
column expressions just like any other mapped attribute (i.e. usable in
filter(), etc.) the "proxy=True" flag is deprecated and no longer means
anything. Additionally, the flag "map_column=True" will automatically
@@ -2872,7 +2872,7 @@
:tickets:
PG reflection, upon seeing the default schema name being used explicitly
- as the "schema" argument in a Table, will assume that this is the the
+ as the "schema" argument in a Table, will assume that this is the
user's desired convention, and will explicitly set the "schema" argument
in foreign-key-related reflected tables, thus making them match only
with Table constructors that also use the explicit "schema" argument
@@ -2929,7 +2929,7 @@
:tickets: 810
Fixed breakage with postgres and multiple two-phase transactions. Two-phase
- commits and and rollbacks didn't automatically end up with a new transaction
+ commits and rollbacks didn't automatically end up with a new transaction
as the usual dbapi commits/rollbacks do.
.. change::
diff --git a/doc/build/changelog/changelog_06.rst b/doc/build/changelog/changelog_06.rst
index 0ec7027ce..c7f4dcdea 100644
--- a/doc/build/changelog/changelog_06.rst
+++ b/doc/build/changelog/changelog_06.rst
@@ -943,7 +943,7 @@
:tickets: 1953
The cx_oracle "decimal detection" logic, which takes place
- for for result set columns with ambiguous numeric characteristics,
+ for result set columns with ambiguous numeric characteristics,
now uses the decimal point character determined by the locale/
NLS_LANG setting, using an on-first-connect detection of
this character. cx_oracle 5.0.3 or greater is also required
@@ -2851,7 +2851,7 @@
:tickets: 1071
Postgresql now reflects sequence names associated with
- SERIAL columns correctly, after the name of of the sequence
+ SERIAL columns correctly, after the name of the sequence
has been changed. Thanks to Kumar McMillan for the patch.
.. change::
diff --git a/doc/build/changelog/changelog_07.rst b/doc/build/changelog/changelog_07.rst
index df919dc3d..df63654a4 100644
--- a/doc/build/changelog/changelog_07.rst
+++ b/doc/build/changelog/changelog_07.rst
@@ -7,6 +7,24 @@
:version: 0.7.11
.. change::
+ :tags: bug, orm
+ :tickets: 2689
+
+ Fixed bug in unit of work whereby a joined-inheritance
+ subclass could insert the row for the "sub" table
+ before the parent table, if the two tables had no
+ ForeignKey constraints set up between them.
+
+ .. change::
+ :tags: feature, postgresql
+ :tickets: 2676
+
+ Added support for Postgresql's traditional SUBSTRING
+ function syntax, renders as "SUBSTRING(x FROM y FOR z)"
+ when regular ``func.substring()`` is used.
+ Courtesy Gunnlaugur Þór Briem.
+
+ .. change::
:tags: bug, tests
:tickets: 2669
:pullreq: 41
diff --git a/doc/build/changelog/changelog_08.rst b/doc/build/changelog/changelog_08.rst
index 59f2b3ad1..4ef11590f 100644
--- a/doc/build/changelog/changelog_08.rst
+++ b/doc/build/changelog/changelog_08.rst
@@ -4,18 +4,111 @@
==============
.. changelog::
+ :version: 0.8.1
+
+ .. change::
+ :tags: bug, orm
+ :tickets: 2689
+
+ Fixed bug in unit of work whereby a joined-inheritance
+ subclass could insert the row for the "sub" table
+ before the parent table, if the two tables had no
+ ForeignKey constraints set up between them.
+ Also in 0.7.11.
+
+ .. change::
+ :tags: bug, mssql
+ :pullreq: 47
+
+ Added support for additional "disconnect" messages
+ to the pymssql dialect. Courtesy John Anderson.
+
+ .. change::
+ :tags: feature, sql
+
+ Loosened the check on dialect-specific argument names
+ passed to Table(); since we want to support external dialects
+ and also want to support args without a certain dialect
+ being installed, it only checks the format of the arg now,
+ rather than looking for that dialect in sqlalchemy.dialects.
+
+ .. change::
+ :tags: bug, sql
+
+ Fixed bug whereby a DBAPI that can return "0"
+ for cursor.lastrowid would not function correctly
+ in conjunction with :attr:`.ResultProxy.inserted_primary_key`.
+
+ .. change::
+ :tags: bug, mssql
+ :tickets: 2683
+ :pullreq: 46
+
+ Fixed Py3K bug regarding "binary" types and
+ pymssql. Courtesy Marc Abramowitz.
+
+ .. change::
+ :tags: bug, postgresql
+ :tickets: 2680
+
+ Added missing HSTORE type to postgresql type names
+ so that the type can be reflected.
+
+.. changelog::
:version: 0.8.0
+ :released: March 9, 2013
.. note::
- Be sure to *re-read* :doc:`migration_08` for this release.
There are some new behavioral changes as of 0.8.0
- not present in 0.8.0b2, including:
+ not present in 0.8.0b2. They are present in the
+ migration document as follows:
* :ref:`legacy_is_orphan_addition`
* :ref:`metadata_create_drop_tables`
+ * :ref:`correlation_context_specific`
+
+ .. change::
+ :tags: feature, postgresql
+ :tickets: 2676
+
+ Added support for Postgresql's traditional SUBSTRING
+ function syntax, renders as "SUBSTRING(x FROM y FOR z)"
+ when regular ``func.substring()`` is used.
+ Also in 0.7.11. Courtesy Gunnlaugur Þór Briem.
+
+ .. change::
+ :tags: feature, orm
+ :tickets: 2675
+
+ A meaningful :attr:`.QueryableAttribute.info` attribute is
+ added, which proxies down to the ``.info`` attribute on either
+ the :class:`.schema.Column` object if directly present, or
+ the :class:`.MapperProperty` otherwise. The full behavior
+ is documented and ensured by tests to remain stable.
+
+ .. change::
+ :tags: bug, sql
+ :tickets: 2668
+
+ The behavior of SELECT correlation has been improved such that
+ the :meth:`.Select.correlate` and :meth:`.Select.correlate_except`
+ methods, as well as their ORM analogues, will still retain
+ "auto-correlation" behavior in that the FROM clause is modified
+ only if the output would be legal SQL; that is, the FROM clause
+ is left intact if the correlated SELECT is not used in the context
+ of an enclosing SELECT inside of the WHERE, columns, or HAVING clause.
+ The two methods now only specify conditions to the default
+ "auto correlation", rather than absolute FROM lists.
+
+ .. change::
+ :tags: feature, mysql
+ :pullreq: 42
+
+ New dialect for CyMySQL added, courtesy Hajime Nakagami.
+
.. change::
:tags: bug, orm
:tickets: 2674
diff --git a/doc/build/changelog/migration_08.rst b/doc/build/changelog/migration_08.rst
index bb6d85e58..971dd2f51 100644
--- a/doc/build/changelog/migration_08.rst
+++ b/doc/build/changelog/migration_08.rst
@@ -7,9 +7,10 @@ What's New in SQLAlchemy 0.8?
This document describes changes between SQLAlchemy version 0.7,
undergoing maintenance releases as of October, 2012,
and SQLAlchemy version 0.8, which is expected for release
- in late 2012.
+ in early 2013.
Document date: October 25, 2012
+ Updated: March 9, 2013
Introduction
============
@@ -1145,6 +1146,59 @@ entity, ``query.correlate(someentity)``.
:ticket:`2179`
+.. _correlation_context_specific:
+
+Correlation is now always context-specific
+------------------------------------------
+
+To allow a wider variety of correlation scenarios, the behavior of
+:meth:`.Select.correlate` and :meth:`.Query.correlate` has changed slightly
+such that the SELECT statement will omit the "correlated" target from the
+FROM clause only if the statement is actually used in that context. Additionally,
+it's no longer possible for a SELECT statement that's placed as a FROM
+in an enclosing SELECT statement to "correlate" (i.e. omit) a FROM clause.
+
+This change only makes things better as far as rendering SQL, in that it's no
+longer possible to render illegal SQL where there are insufficient FROM
+objects relative to what's being selected::
+
+ from sqlalchemy.sql import table, column, select
+
+ t1 = table('t1', column('x'))
+ t2 = table('t2', column('y'))
+ s = select([t1, t2]).correlate(t1)
+
+ print(s)
+
+Prior to this change, the above would return::
+
+ SELECT t1.x, t2.y FROM t2
+
+which is invalid SQL as "t1" is not referred to in any FROM clause.
+
+Now, in the absense of an enclosing SELECT, it returns::
+
+ SELECT t1.x, t2.y FROM t1, t2
+
+Within a SELECT, the correlation takes effect as expected::
+
+ s2 = select([t1, t2]).where(t1.c.x == t2.c.y).where(t1.c.x == s)
+
+ print (s2)
+
+ SELECT t1.x, t2.y FROM t1, t2
+ WHERE t1.x = t2.y AND t1.x =
+ (SELECT t1.x, t2.y FROM t2)
+
+This change is not expected to impact any existing applications, as
+the correlation behavior remains identical for properly constructed
+expressions. Only an application that relies, most likely within a
+testing scenario, on the invalid string output of a correlated
+SELECT used in a non-correlating context would see any change.
+
+:ticket:`2668`
+
+
.. _metadata_create_drop_tables:
create_all() and drop_all() will now honor an empty list as such
diff --git a/doc/build/conf.py b/doc/build/conf.py
index 8bfe2d2bf..34caedc22 100644
--- a/doc/build/conf.py
+++ b/doc/build/conf.py
@@ -83,9 +83,9 @@ copyright = u'2007-2013, the SQLAlchemy authors and contributors'
# The short X.Y version.
version = "0.8"
# The full version, including alpha/beta/rc tags.
-release = "0.8.0b2"
+release = "0.8.0"
-release_date = "December 14, 2012"
+release_date = "March 9, 2013"
site_base = "http://www.sqlalchemy.org"
diff --git a/doc/build/core/tutorial.rst b/doc/build/core/tutorial.rst
index a05e6ccdf..fd6c69bff 100644
--- a/doc/build/core/tutorial.rst
+++ b/doc/build/core/tutorial.rst
@@ -236,9 +236,9 @@ we use the ``connect()`` method::
>>> conn = engine.connect()
>>> conn #doctest: +ELLIPSIS
- <sqlalchemy.engine.Connection object at 0x...>
+ <sqlalchemy.engine.base.Connection object at 0x...>
-The :class:`~sqlalchemy.engine.Connection` object represents an actively
+The :class:`~sqlalchemy.engine.base.Connection` object represents an actively
checked out DBAPI connection resource. Lets feed it our
:class:`~sqlalchemy.sql.expression.Insert` object and see what happens:
@@ -252,7 +252,7 @@ checked out DBAPI connection resource. Lets feed it our
So the INSERT statement was now issued to the database. Although we got
positional "qmark" bind parameters instead of "named" bind parameters in the
output. How come ? Because when executed, the
-:class:`~sqlalchemy.engine.Connection` used the SQLite **dialect** to
+:class:`~sqlalchemy.engine.base.Connection` used the SQLite **dialect** to
help generate the statement; when we use the ``str()`` function, the statement
isn't aware of this dialect, and falls back onto a default which uses named
parameters. We can view this manually as follows:
@@ -264,9 +264,9 @@ parameters. We can view this manually as follows:
'INSERT INTO users (name, fullname) VALUES (?, ?)'
What about the ``result`` variable we got when we called ``execute()`` ? As
-the SQLAlchemy :class:`~sqlalchemy.engine.Connection` object references a
+the SQLAlchemy :class:`~sqlalchemy.engine.base.Connection` object references a
DBAPI connection, the result, known as a
-:class:`~sqlalchemy.engine.ResultProxy` object, is analogous to the DBAPI
+:class:`~sqlalchemy.engine.result.ResultProxy` object, is analogous to the DBAPI
cursor object. In the case of an INSERT, we can get important information from
it, such as the primary key values which were generated from our statement:
@@ -292,7 +292,7 @@ Our insert example above was intentionally a little drawn out to show some
various behaviors of expression language constructs. In the usual case, an
:class:`~sqlalchemy.sql.expression.Insert` statement is usually compiled
against the parameters sent to the ``execute()`` method on
-:class:`~sqlalchemy.engine.Connection`, so that there's no need to use
+:class:`~sqlalchemy.engine.base.Connection`, so that there's no need to use
the ``values`` keyword with :class:`~sqlalchemy.sql.expression.Insert`. Lets
create a generic :class:`~sqlalchemy.sql.expression.Insert` statement again
and use it in the "normal" way:
@@ -304,13 +304,13 @@ and use it in the "normal" way:
{opensql}INSERT INTO users (id, name, fullname) VALUES (?, ?, ?)
(2, 'wendy', 'Wendy Williams')
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
-Above, because we specified all three columns in the the ``execute()`` method,
-the compiled :class:`~sqlalchemy.sql.expression.Insert` included all three
-columns. The :class:`~sqlalchemy.sql.expression.Insert` statement is compiled
+Above, because we specified all three columns in the ``execute()`` method,
+the compiled :class:`~.expression.Insert` included all three
+columns. The :class:`~.expression.Insert` statement is compiled
at execution time based on the parameters we specified; if we specified fewer
-parameters, the :class:`~sqlalchemy.sql.expression.Insert` would have fewer
+parameters, the :class:`~.expression.Insert` would have fewer
entries in its VALUES clause.
To issue many inserts using DBAPI's ``executemany()`` method, we can send in a
@@ -328,7 +328,7 @@ inserted, as we do here to add some email addresses:
{opensql}INSERT INTO addresses (user_id, email_address) VALUES (?, ?)
((1, 'jack@yahoo.com'), (1, 'jack@msn.com'), (2, 'www@www.org'), (2, 'wendy@aol.com'))
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
Above, we again relied upon SQLite's automatic generation of primary key
identifiers for each ``addresses`` row.
@@ -363,10 +363,10 @@ Above, we issued a basic :func:`.select` call, placing the ``users`` table
within the COLUMNS clause of the select, and then executing. SQLAlchemy
expanded the ``users`` table into the set of each of its columns, and also
generated a FROM clause for us. The result returned is again a
-:class:`~sqlalchemy.engine.ResultProxy` object, which acts much like a
+:class:`~sqlalchemy.engine.result.ResultProxy` object, which acts much like a
DBAPI cursor, including methods such as
-:func:`~sqlalchemy.engine.ResultProxy.fetchone` and
-:func:`~sqlalchemy.engine.ResultProxy.fetchall`. The easiest way to get
+:func:`~sqlalchemy.engine.result.ResultProxy.fetchone` and
+:func:`~sqlalchemy.engine.result.ResultProxy.fetchall`. The easiest way to get
rows from it is to just iterate:
.. sourcecode:: pycon+sql
@@ -375,8 +375,6 @@ rows from it is to just iterate:
... print row
(1, u'jack', u'Jack Jones')
(2, u'wendy', u'Wendy Williams')
- (3, u'fred', u'Fred Flintstone')
- (4, u'mary', u'Mary Contrary')
Above, we see that printing each row produces a simple tuple-like result. We
have more options at accessing the data in each row. One very common way is
@@ -413,12 +411,10 @@ But another way, whose usefulness will become apparent later on, is to use the
()
{stop}name: jack ; fullname: Jack Jones
name: wendy ; fullname: Wendy Williams
- name: fred ; fullname: Fred Flintstone
- name: mary ; fullname: Mary Contrary
Result sets which have pending rows remaining should be explicitly closed
before discarding. While the cursor and connection resources referenced by the
-:class:`~sqlalchemy.engine.ResultProxy` will be respectively closed and
+:class:`~sqlalchemy.engine.result.ResultProxy` will be respectively closed and
returned to the connection pool when the object is garbage collected, it's
better to make it explicit as some database APIs are very picky about such
things:
@@ -444,8 +440,6 @@ the ``c`` attribute of the :class:`~sqlalchemy.schema.Table` object:
... print row
(u'jack', u'Jack Jones')
(u'wendy', u'Wendy Williams')
- (u'fred', u'Fred Flintstone')
- (u'mary', u'Mary Contrary')
Lets observe something interesting about the FROM clause. Whereas the
generated statement contains two distinct sections, a "SELECT columns" part
@@ -468,27 +462,20 @@ our :func:`.select` statement:
(2, u'wendy', u'Wendy Williams', 2, 1, u'jack@msn.com')
(2, u'wendy', u'Wendy Williams', 3, 2, u'www@www.org')
(2, u'wendy', u'Wendy Williams', 4, 2, u'wendy@aol.com')
- (3, u'fred', u'Fred Flintstone', 1, 1, u'jack@yahoo.com')
- (3, u'fred', u'Fred Flintstone', 2, 1, u'jack@msn.com')
- (3, u'fred', u'Fred Flintstone', 3, 2, u'www@www.org')
- (3, u'fred', u'Fred Flintstone', 4, 2, u'wendy@aol.com')
- (4, u'mary', u'Mary Contrary', 1, 1, u'jack@yahoo.com')
- (4, u'mary', u'Mary Contrary', 2, 1, u'jack@msn.com')
- (4, u'mary', u'Mary Contrary', 3, 2, u'www@www.org')
- (4, u'mary', u'Mary Contrary', 4, 2, u'wendy@aol.com')
It placed **both** tables into the FROM clause. But also, it made a real mess.
Those who are familiar with SQL joins know that this is a **Cartesian
product**; each row from the ``users`` table is produced against each row from
the ``addresses`` table. So to put some sanity into this statement, we need a
-WHERE clause. Which brings us to the second argument of :func:`.select`:
+WHERE clause. We do that using :meth:`.Select.where`:
.. sourcecode:: pycon+sql
- >>> s = select([users, addresses], users.c.id==addresses.c.user_id)
+ >>> s = select([users, addresses]).where(users.c.id == addresses.c.user_id)
{sql}>>> for row in conn.execute(s):
... print row # doctest: +NORMALIZE_WHITESPACE
- SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address
+ SELECT users.id, users.name, users.fullname, addresses.id,
+ addresses.user_id, addresses.email_address
FROM users, addresses
WHERE users.id = addresses.user_id
()
@@ -503,27 +490,27 @@ statement, and our results were managed down so that the join of ``users`` and
``addresses`` rows made sense. But let's look at that expression? It's using
just a Python equality operator between two different
:class:`~sqlalchemy.schema.Column` objects. It should be clear that something
-is up. Saying ``1==1`` produces ``True``, and ``1==2`` produces ``False``, not
+is up. Saying ``1 == 1`` produces ``True``, and ``1 == 2`` produces ``False``, not
a WHERE clause. So lets see exactly what that expression is doing:
.. sourcecode:: pycon+sql
- >>> users.c.id==addresses.c.user_id #doctest: +ELLIPSIS
+ >>> users.c.id == addresses.c.user_id #doctest: +ELLIPSIS
<sqlalchemy.sql.expression.BinaryExpression object at 0x...>
Wow, surprise ! This is neither a ``True`` nor a ``False``. Well what is it ?
.. sourcecode:: pycon+sql
- >>> str(users.c.id==addresses.c.user_id)
+ >>> str(users.c.id == addresses.c.user_id)
'users.id = addresses.user_id'
As you can see, the ``==`` operator is producing an object that is very much
-like the :class:`~sqlalchemy.sql.expression.Insert` and :func:`.select`
+like the :class:`~.expression.Insert` and :func:`.select`
objects we've made so far, thanks to Python's ``__eq__()`` builtin; you call
``str()`` on it and it produces SQL. By now, one can see that everything we
are working with is ultimately the same type of object. SQLAlchemy terms the
-base class of all of these expressions as ``sqlalchemy.sql.ClauseElement``.
+base class of all of these expressions as :class:`~.expression.ColumnElement`.
Operators
==========
@@ -533,7 +520,7 @@ some of its capabilities. We've seen how to equate two columns to each other:
.. sourcecode:: pycon+sql
- >>> print users.c.id==addresses.c.user_id
+ >>> print users.c.id == addresses.c.user_id
users.id = addresses.user_id
If we use a literal value (a literal meaning, not a SQLAlchemy clause object),
@@ -541,16 +528,16 @@ we get a bind parameter:
.. sourcecode:: pycon+sql
- >>> print users.c.id==7
+ >>> print users.c.id == 7
users.id = :id_1
-The ``7`` literal is embedded in
-:class:`~sqlalchemy.sql.expression.ClauseElement`; we can use the same trick
+The ``7`` literal is embedded the resulting
+:class:`~.expression.ColumnElement`; we can use the same trick
we did with the :class:`~sqlalchemy.sql.expression.Insert` object to see it:
.. sourcecode:: pycon+sql
- >>> (users.c.id==7).compile().params
+ >>> (users.c.id == 7).compile().params
{u'id_1': 7}
Most Python operators, as it turns out, produce a SQL expression here, like
@@ -576,8 +563,8 @@ If we add two integer columns together, we get an addition expression:
>>> print users.c.id + addresses.c.id
users.id + addresses.id
-Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important
-! If we use ``+`` with two string based columns (recall we put types like
+Interestingly, the type of the :class:`~sqlalchemy.schema.Column` is important!
+If we use ``+`` with two string based columns (recall we put types like
:class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String` on
our :class:`~sqlalchemy.schema.Column` objects at the beginning), we get
something different:
@@ -592,7 +579,8 @@ not all of them. MySQL users, fear not:
.. sourcecode:: pycon+sql
- >>> print (users.c.name + users.c.fullname).compile(bind=create_engine('mysql://'))
+ >>> print (users.c.name + users.c.fullname).\
+ ... compile(bind=create_engine('mysql://'))
concat(users.name, users.fullname)
The above illustrates the SQL that's generated for an
@@ -632,18 +620,26 @@ Conjunctions
We'd like to show off some of our operators inside of :func:`.select`
constructs. But we need to lump them together a little more, so let's first
introduce some conjunctions. Conjunctions are those little words like AND and
-OR that put things together. We'll also hit upon NOT. AND, OR and NOT can work
+OR that put things together. We'll also hit upon NOT. :func:`.and_`, :func:`.or_`,
+and :func:`.not_` can work
from the corresponding functions SQLAlchemy provides (notice we also throw in
-a LIKE):
+a :meth:`~.ColumnOperators.like`):
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import and_, or_, not_
- >>> print and_(users.c.name.like('j%'), users.c.id==addresses.c.user_id, #doctest: +NORMALIZE_WHITESPACE
- ... or_(addresses.c.email_address=='wendy@aol.com', addresses.c.email_address=='jack@yahoo.com'),
- ... not_(users.c.id>5))
+ >>> print and_(
+ ... users.c.name.like('j%'),
+ ... users.c.id == addresses.c.user_id, #doctest: +NORMALIZE_WHITESPACE
+ ... or_(
+ ... addresses.c.email_address == 'wendy@aol.com',
+ ... addresses.c.email_address == 'jack@yahoo.com'
+ ... ),
+ ... not_(users.c.id > 5)
+ ... )
users.name LIKE :name_1 AND users.id = addresses.user_id AND
- (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2)
+ (addresses.email_address = :email_address_1
+ OR addresses.email_address = :email_address_2)
AND users.id <= :id_1
And you can also use the re-jiggered bitwise AND, OR and NOT operators,
@@ -652,35 +648,43 @@ parenthesis:
.. sourcecode:: pycon+sql
- >>> print users.c.name.like('j%') & (users.c.id==addresses.c.user_id) & \
- ... ((addresses.c.email_address=='wendy@aol.com') | (addresses.c.email_address=='jack@yahoo.com')) \
+ >>> print users.c.name.like('j%') & (users.c.id == addresses.c.user_id) & \
+ ... (
+ ... (addresses.c.email_address == 'wendy@aol.com') | \
+ ... (addresses.c.email_address == 'jack@yahoo.com')
+ ... ) \
... & ~(users.c.id>5) # doctest: +NORMALIZE_WHITESPACE
users.name LIKE :name_1 AND users.id = addresses.user_id AND
- (addresses.email_address = :email_address_1 OR addresses.email_address = :email_address_2)
+ (addresses.email_address = :email_address_1
+ OR addresses.email_address = :email_address_2)
AND users.id <= :id_1
So with all of this vocabulary, let's select all users who have an email
address at AOL or MSN, whose name starts with a letter between "m" and "z",
and we'll also generate a column containing their full name combined with
their email address. We will add two new constructs to this statement,
-``between()`` and ``label()``. ``between()`` produces a BETWEEN clause, and
-``label()`` is used in a column expression to produce labels using the ``AS``
+:meth:`~.ColumnOperators.between` and :meth:`~.ColumnElement.label`.
+:meth:`~.ColumnOperators.between` produces a BETWEEN clause, and
+:meth:`~.ColumnElement.label` is used in a column expression to produce labels using the ``AS``
keyword; it's recommended when selecting from expressions that otherwise would
not have a name:
.. sourcecode:: pycon+sql
- >>> s = select([(users.c.fullname + ", " + addresses.c.email_address).label('title')],
- ... and_(
- ... users.c.id==addresses.c.user_id,
- ... users.c.name.between('m', 'z'),
- ... or_(
- ... addresses.c.email_address.like('%@aol.com'),
- ... addresses.c.email_address.like('%@msn.com')
+ >>> s = select([(users.c.fullname +
+ ... ", " + addresses.c.email_address).
+ ... label('title')]).\
+ ... where(
+ ... and_(
+ ... users.c.id == addresses.c.user_id,
+ ... users.c.name.between('m', 'z'),
+ ... or_(
+ ... addresses.c.email_address.like('%@aol.com'),
+ ... addresses.c.email_address.like('%@msn.com')
+ ... )
... )
... )
- ... )
- >>> print conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE
SELECT users.fullname || ? || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
@@ -693,6 +697,33 @@ it will determine the FROM clause based on all of its other bits; the columns
clause, the where clause, and also some other elements which we haven't
covered yet, which include ORDER BY, GROUP BY, and HAVING.
+A shortcut to using :func:`.and_` is to chain together multiple
+:meth:`~.Select.where` clauses. The above can also be written as:
+
+.. sourcecode:: pycon+sql
+
+ >>> s = select([(users.c.fullname +
+ ... ", " + addresses.c.email_address).
+ ... label('title')]).\
+ ... where(users.c.id == addresses.c.user_id).\
+ ... where(users.c.name.between('m', 'z')).\
+ ... where(
+ ... or_(
+ ... addresses.c.email_address.like('%@aol.com'),
+ ... addresses.c.email_address.like('%@msn.com')
+ ... )
+ ... )
+ >>> conn.execute(s).fetchall() #doctest: +NORMALIZE_WHITESPACE
+ SELECT users.fullname || ? || addresses.email_address AS title
+ FROM users, addresses
+ WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
+ (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
+ (', ', 'm', 'z', '%@aol.com', '%@msn.com')
+ [(u'Wendy Williams, wendy@aol.com',)]
+
+The way that we can build up a :func:`.select` construct through successive
+method calls is called :term:`method chaining`.
+
.. _sqlexpression_text:
Using Text
@@ -701,20 +732,23 @@ Using Text
Our last example really became a handful to type. Going from what one
understands to be a textual SQL expression into a Python construct which
groups components together in a programmatic style can be hard. That's why
-SQLAlchemy lets you just use strings too. The ``text()`` construct represents
-any textual statement. To use bind parameters with ``text()``, always use the
-named colon format. Such as below, we create a ``text()`` and execute it,
-feeding in the bind parameters to the ``execute()`` method:
+SQLAlchemy lets you just use strings too. The :func:`~.expression.text` construct represents
+any textual statement, in a backend-agnostic way.
+To use bind parameters with :func:`~.expression.text`, always use the
+named colon format. For example, below we create a :func:`~.expression.text` and execute it,
+feeding in the bind parameters to the :meth:`~.Connection.execute` method:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import text
- >>> s = text("""SELECT users.fullname || ', ' || addresses.email_address AS title
- ... FROM users, addresses
- ... WHERE users.id = addresses.user_id AND users.name BETWEEN :x AND :y AND
- ... (addresses.email_address LIKE :e1 OR addresses.email_address LIKE :e2)
- ... """)
- {sql}>>> print conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() # doctest:+NORMALIZE_WHITESPACE
+ >>> s = text(
+ ... "SELECT users.fullname || ', ' || addresses.email_address AS title "
+ ... "FROM users, addresses "
+ ... "WHERE users.id = addresses.user_id "
+ ... "AND users.name BETWEEN :x AND :y "
+ ... "AND (addresses.email_address LIKE :e1 "
+ ... "OR addresses.email_address LIKE :e2)")
+ {sql}>>> conn.execute(s, x='m', y='z', e1='%@aol.com', e2='%@msn.com').fetchall() # doctest:+NORMALIZE_WHITESPACE
SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
WHERE users.id = addresses.user_id AND users.name BETWEEN ? AND ? AND
@@ -722,30 +756,33 @@ feeding in the bind parameters to the ``execute()`` method:
('m', 'z', '%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
-To gain a "hybrid" approach, the `select()` construct accepts strings for most
+To gain a "hybrid" approach, the :func:`.select` construct accepts strings for most
of its arguments. Below we combine the usage of strings with our constructed
:func:`.select` object, by using the :func:`.select` object to structure the
statement, and strings to provide all the content within the structure. For
this example, SQLAlchemy is not given any :class:`~sqlalchemy.schema.Column`
or :class:`~sqlalchemy.schema.Table` objects in any of its expressions, so it
-cannot generate a FROM clause. So we also give it the ``from_obj`` keyword
-argument, which is a list of ``ClauseElements`` (or strings) to be placed
-within the FROM clause:
-
-.. sourcecode:: pycon+sql
-
- >>> s = select(["users.fullname || ', ' || addresses.email_address AS title"],
- ... and_(
- ... "users.id = addresses.user_id",
- ... "users.name BETWEEN 'm' AND 'z'",
- ... "(addresses.email_address LIKE :x OR addresses.email_address LIKE :y)"
- ... ),
- ... from_obj=['users', 'addresses']
- ... )
- {sql}>>> print conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE
+cannot generate a FROM clause. So we also use the :meth:`~.Select.select_from`
+method, which accepts a :class:`.FromClause` or string expression
+to be placed within the FROM clause:
+
+.. sourcecode:: pycon+sql
+
+ >>> s = select([
+ ... "users.fullname || ', ' || addresses.email_address AS title"
+ ... ]).\
+ ... where(
+ ... and_(
+ ... "users.id = addresses.user_id",
+ ... "users.name BETWEEN 'm' AND 'z'",
+ ... "(addresses.email_address LIKE :x OR addresses.email_address LIKE :y)"
+ ... )
+ ... ).select_from('users, addresses')
+ {sql}>>> conn.execute(s, x='%@aol.com', y='%@msn.com').fetchall() #doctest: +NORMALIZE_WHITESPACE
SELECT users.fullname || ', ' || addresses.email_address AS title
FROM users, addresses
- WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z' AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
+ WHERE users.id = addresses.user_id AND users.name BETWEEN 'm' AND 'z'
+ AND (addresses.email_address LIKE ? OR addresses.email_address LIKE ?)
('%@aol.com', '%@msn.com')
{stop}[(u'Wendy Williams, wendy@aol.com',)]
@@ -756,9 +793,9 @@ construct. It also becomes more tedious for SQLAlchemy to be made aware of the
datatypes in use; for example, if our bind parameters required UTF-8 encoding
before going in, or conversion from a Python ``datetime`` into a string (as is
required with SQLite), we would have to add extra information to our
-``text()`` construct. Similar issues arise on the result set side, where
+:func:`~.expression.text` construct. Similar issues arise on the result set side, where
SQLAlchemy also performs type-specific data conversion in some cases; still
-more information can be added to ``text()`` to work around this. But what we
+more information can be added to :func:`~.expression.text` to work around this. But what we
really lose from our statement is the ability to manipulate it, transform it,
and analyze it. These features are critical when using the ORM, which makes
heavy usage of relational transformations. To show off what we mean, we'll
@@ -789,16 +826,20 @@ once for each address. We create two :class:`.Alias` constructs against
>>> a1 = addresses.alias()
>>> a2 = addresses.alias()
- >>> s = select([users], and_(
- ... users.c.id==a1.c.user_id,
- ... users.c.id==a2.c.user_id,
- ... a1.c.email_address=='jack@msn.com',
- ... a2.c.email_address=='jack@yahoo.com'
- ... ))
- {sql}>>> print conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> s = select([users]).\
+ ... where(and_(
+ ... users.c.id == a1.c.user_id,
+ ... users.c.id == a2.c.user_id,
+ ... a1.c.email_address == 'jack@msn.com',
+ ... a2.c.email_address == 'jack@yahoo.com'
+ ... ))
+ {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, users.name, users.fullname
FROM users, addresses AS addresses_1, addresses AS addresses_2
- WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ?
+ WHERE users.id = addresses_1.user_id
+ AND users.id = addresses_2.user_id
+ AND addresses_1.email_address = ?
+ AND addresses_2.email_address = ?
('jack@msn.com', 'jack@yahoo.com')
{stop}[(1, u'jack', u'Jack Jones')]
@@ -826,12 +867,15 @@ to "correlate" the inner ``users`` table with the outer one:
.. sourcecode:: pycon+sql
>>> a1 = s.correlate(None).alias()
- >>> s = select([users.c.name], users.c.id==a1.c.id)
- {sql}>>> print conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> s = select([users.c.name]).where(users.c.id == a1.c.id)
+ {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT users.name
- FROM users, (SELECT users.id AS id, users.name AS name, users.fullname AS fullname
- FROM users, addresses AS addresses_1, addresses AS addresses_2
- WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id AND addresses_1.email_address = ? AND addresses_2.email_address = ?) AS anon_1
+ FROM users,
+ (SELECT users.id AS id, users.name AS name, users.fullname AS fullname
+ FROM users, addresses AS addresses_1, addresses AS addresses_2
+ WHERE users.id = addresses_1.user_id AND users.id = addresses_2.user_id
+ AND addresses_1.email_address = ?
+ AND addresses_2.email_address = ?) AS anon_1
WHERE users.id = anon_1.id
('jack@msn.com', 'jack@yahoo.com')
{stop}[(u'jack',)]
@@ -844,8 +888,8 @@ We're halfway along to being able to construct any SELECT expression. The next
cornerstone of the SELECT is the JOIN expression. We've already been doing
joins in our examples, by just placing two tables in either the columns clause
or the where clause of the :func:`.select` construct. But if we want to make a
-real "JOIN" or "OUTERJOIN" construct, we use the ``join()`` and
-``outerjoin()`` methods, most commonly accessed from the left table in the
+real "JOIN" or "OUTERJOIN" construct, we use the :meth:`~.FromClause.join` and
+:meth:`~.FromClause.outerjoin` methods, most commonly accessed from the left table in the
join:
.. sourcecode:: pycon+sql
@@ -866,34 +910,38 @@ username:
.. sourcecode:: pycon+sql
- >>> print users.join(addresses, addresses.c.email_address.like(users.c.name + '%'))
- users JOIN addresses ON addresses.email_address LIKE users.name || :name_1
+ >>> print users.join(addresses,
+ ... addresses.c.email_address.like(users.c.name + '%')
+ ... )
+ users JOIN addresses ON addresses.email_address LIKE (users.name || :name_1)
When we create a :func:`.select` construct, SQLAlchemy looks around at the
tables we've mentioned and then places them in the FROM clause of the
statement. When we use JOINs however, we know what FROM clause we want, so
-here we make usage of the ``from_obj`` keyword argument:
+here we make use of the :meth:`~.Select.select_from` method:
.. sourcecode:: pycon+sql
- >>> s = select([users.c.fullname], from_obj=[
- ... users.join(addresses, addresses.c.email_address.like(users.c.name + '%'))
- ... ])
- {sql}>>> print conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ >>> s = select([users.c.fullname]).select_from(
+ ... users.join(addresses,
+ ... addresses.c.email_address.like(users.c.name + '%'))
+ ... )
+ {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT users.fullname
- FROM users JOIN addresses ON addresses.email_address LIKE users.name || ?
+ FROM users JOIN addresses ON addresses.email_address LIKE (users.name || ?)
('%',)
{stop}[(u'Jack Jones',), (u'Jack Jones',), (u'Wendy Williams',)]
-The ``outerjoin()`` function just creates ``LEFT OUTER JOIN`` constructs. It's
-used just like ``join()``:
+The :meth:`~.FromClause.outerjoin` method creates ``LEFT OUTER JOIN`` constructs,
+and is used in the same way as :meth:`~.FromClause.join`:
.. sourcecode:: pycon+sql
- >>> s = select([users.c.fullname], from_obj=[users.outerjoin(addresses)])
+ >>> s = select([users.c.fullname]).select_from(users.outerjoin(addresses))
>>> print s # doctest: +NORMALIZE_WHITESPACE
SELECT users.fullname
- FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id
+ FROM users
+ LEFT OUTER JOIN addresses ON users.id = addresses.user_id
That's the output ``outerjoin()`` produces, unless, of course, you're stuck in
a gig using Oracle prior to version 9, and you've set up your engine (which
@@ -910,132 +958,6 @@ would be using ``OracleDialect``) to use Oracle-specific SQL:
If you don't know what that SQL means, don't worry! The secret tribe of
Oracle DBAs don't want their black magic being found out ;).
-Intro to Generative Selects
-================================================
-
-We've now gained the ability to construct very sophisticated statements. We
-can use all kinds of operators, table constructs, text, joins, and aliases.
-The point of all of this, as mentioned earlier, is not that it's an "easier"
-or "better" way to write SQL than just writing a SQL statement yourself; the
-point is that it's better for writing *programmatically generated* SQL which
-can be morphed and adapted as needed in automated scenarios.
-
-To support this, the :func:`.select` construct we've been working with
-supports piecemeal construction, in addition to the "all at once" method we've
-been doing. Suppose you're writing a search function, which receives criterion
-and then must construct a select from it. To accomplish this, upon each
-criterion encountered, you apply "generative" criterion to an existing
-:func:`.select` construct with new elements, one at a time. We start with a
-basic :func:`.select` constructed with the shortcut method available on the
-``users`` table:
-
-.. sourcecode:: pycon+sql
-
- >>> query = users.select()
- >>> print query # doctest: +NORMALIZE_WHITESPACE
- SELECT users.id, users.name, users.fullname
- FROM users
-
-We encounter search criterion of "name='jack'". So we apply WHERE criterion
-stating such:
-
-.. sourcecode:: pycon+sql
-
- >>> query = query.where(users.c.name=='jack')
-
-Next, we encounter that they'd like the results in descending order by full
-name. We apply ORDER BY, using an extra modifier ``desc``:
-
-.. sourcecode:: pycon+sql
-
- >>> query = query.order_by(users.c.fullname.desc())
-
-We also come across that they'd like only users who have an address at MSN. A
-quick way to tack this on is by using an EXISTS clause, which we correlate to
-the ``users`` table in the enclosing SELECT:
-
-.. sourcecode:: pycon+sql
-
- >>> from sqlalchemy.sql import exists
- >>> query = query.where(
- ... exists([addresses.c.id],
- ... and_(addresses.c.user_id==users.c.id, addresses.c.email_address.like('%@msn.com'))
- ... ).correlate(users))
-
-And finally, the application also wants to see the listing of email addresses
-at once; so to save queries, we outerjoin the ``addresses`` table (using an
-outer join so that users with no addresses come back as well; since we're
-programmatic, we might not have kept track that we used an EXISTS clause
-against the ``addresses`` table too...). Additionally, since the ``users`` and
-``addresses`` table both have a column named ``id``, let's isolate their names
-from each other in the COLUMNS clause by using labels:
-
-.. sourcecode:: pycon+sql
-
- >>> query = query.column(addresses).select_from(users.outerjoin(addresses)).apply_labels()
-
-Let's bake for .0001 seconds and see what rises:
-
-.. sourcecode:: pycon+sql
-
- >>> conn.execute(query).fetchall() # doctest: +NORMALIZE_WHITESPACE
- {opensql}SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, addresses.id AS addresses_id, addresses.user_id AS addresses_user_id, addresses.email_address AS addresses_email_address
- FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id
- WHERE users.name = ? AND (EXISTS (SELECT addresses.id
- FROM addresses
- WHERE addresses.user_id = users.id AND addresses.email_address LIKE ?)) ORDER BY users.fullname DESC
- ('jack', '%@msn.com')
- {stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')]
-
-The generative approach is about starting small, adding one thing at a time,
-to arrive with a full statement.
-
-Transforming a Statement
-------------------------
-
-We've seen how methods like :meth:`.Select.where` and :meth:`.SelectBase.order_by` are
-part of the so-called *Generative* family of methods on the :func:`.select` construct,
-where one :func:`.select` copies itself to return a new one with modifications.
-SQL constructs also support another form of generative behavior which is
-the *transformation*. This is an advanced technique that most core applications
-won't use directly; however, it is a system which the ORM relies on heavily,
-and can be useful for any system that deals with generalized behavior of Core SQL
-constructs.
-
-Using a transformation we can take our ``users``/``addresses`` query and replace
-all occurrences of ``addresses`` with an alias of itself. That is, anywhere
-that ``addresses`` is referred to in the original query, the new query will
-refer to ``addresses_1``, which is selected as ``addresses AS addresses_1``.
-The :meth:`.FromClause.replace_selectable` method can achieve this:
-
-.. sourcecode:: pycon+sql
-
- >>> a1 = addresses.alias()
- >>> query = query.replace_selectable(addresses, a1)
- >>> print query # doctest: +NORMALIZE_WHITESPACE
- {opensql}SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address
- FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
- WHERE users.name = :name_1 AND (EXISTS (SELECT addresses_1.id
- FROM addresses AS addresses_1
- WHERE addresses_1.user_id = users.id AND addresses_1.email_address LIKE :email_address_1)) ORDER BY users.fullname DESC
-
-For a query such as the above, we can access the columns referred
-to by the ``a1`` alias in a result set using the :class:`.Column` objects
-present directly on ``a1``:
-
-.. sourcecode:: pycon+sql
-
- {sql}>>> for row in conn.execute(query):
- ... print "Name:", row[users.c.name], "; Email Address", row[a1.c.email_address] # doctest: +NORMALIZE_WHITESPACE
- SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname, addresses_1.id AS addresses_1_id, addresses_1.user_id AS addresses_1_user_id, addresses_1.email_address AS addresses_1_email_address
- FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
- WHERE users.name = ? AND (EXISTS (SELECT addresses_1.id
- FROM addresses AS addresses_1
- WHERE addresses_1.user_id = users.id AND addresses_1.email_address LIKE ?)) ORDER BY users.fullname DESC
- ('jack', '%@msn.com')
- {stop}Name: jack ; Email Address jack@yahoo.com
- Name: jack ; Email Address jack@msn.com
-
Everything Else
================
@@ -1055,7 +977,7 @@ here where it converts to positional for SQLite:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import bindparam
- >>> s = users.select(users.c.name==bindparam('username'))
+ >>> s = users.select(users.c.name == bindparam('username'))
{sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, users.name, users.fullname
FROM users
@@ -1074,7 +996,7 @@ off to the database:
{sql}>>> conn.execute(s, username='wendy').fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, users.name, users.fullname
FROM users
- WHERE users.name LIKE ? || '%'
+ WHERE users.name LIKE (? || '%')
('wendy',)
{stop}[(2, u'wendy', u'Wendy Williams')]
@@ -1084,14 +1006,23 @@ single named value is needed in the execute parameters:
.. sourcecode:: pycon+sql
- >>> s = select([users, addresses],
- ... users.c.name.like(bindparam('name', type_=String) + text("'%'")) |
- ... addresses.c.email_address.like(bindparam('name', type_=String) + text("'@%'")),
- ... from_obj=[users.outerjoin(addresses)])
+ >>> s = select([users, addresses]).\
+ ... where(
+ ... or_(
+ ... users.c.name.like(
+ ... bindparam('name', type_=String) + text("'%'")),
+ ... addresses.c.email_address.like(
+ ... bindparam('name', type_=String) + text("'@%'"))
+ ... )
+ ... ).\
+ ... select_from(users.outerjoin(addresses)).\
+ ... order_by(addresses.c.id)
{sql}>>> conn.execute(s, name='jack').fetchall() # doctest: +NORMALIZE_WHITESPACE
- SELECT users.id, users.name, users.fullname, addresses.id, addresses.user_id, addresses.email_address
+ SELECT users.id, users.name, users.fullname, addresses.id,
+ addresses.user_id, addresses.email_address
FROM users LEFT OUTER JOIN addresses ON users.id = addresses.user_id
- WHERE users.name LIKE ? || '%' OR addresses.email_address LIKE ? || '@%'
+ WHERE users.name LIKE (? || '%') OR addresses.email_address LIKE (? || '@%')
+ ORDER BY addresses.id
('jack', 'jack')
{stop}[(1, u'jack', u'Jack Jones', 1, 1, u'jack@yahoo.com'), (1, u'jack', u'Jack Jones', 2, 1, u'jack@msn.com')]
@@ -1136,13 +1067,16 @@ not important in this case:
.. sourcecode:: pycon+sql
- >>> print conn.execute(
- ... select([func.max(addresses.c.email_address, type_=String).label('maxemail')])
- ... ).scalar() # doctest: +NORMALIZE_WHITESPACE
+ >>> conn.execute(
+ ... select([
+ ... func.max(addresses.c.email_address, type_=String).
+ ... label('maxemail')
+ ... ])
+ ... ).scalar() # doctest: +NORMALIZE_WHITESPACE
{opensql}SELECT max(addresses.email_address) AS maxemail
FROM addresses
()
- {stop}www@www.org
+ {stop}u'www@www.org'
Databases such as PostgreSQL and Oracle which support functions that return
whole result sets can be assembled into selectable units, which can be used in
@@ -1154,14 +1088,19 @@ well as bind parameters:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import column
- >>> calculate = select([column('q'), column('z'), column('r')],
- ... from_obj=[func.calculate(bindparam('x'), bindparam('y'))])
-
- >>> print select([users], users.c.id > calculate.c.z) # doctest: +NORMALIZE_WHITESPACE
+ >>> calculate = select([column('q'), column('z'), column('r')]).\
+ ... select_from(
+ ... func.calculate(
+ ... bindparam('x'),
+ ... bindparam('y')
+ ... )
+ ... )
+ >>> calc = calculate.alias()
+ >>> print select([users]).where(users.c.id > calc.c.z) # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, users.name, users.fullname
FROM users, (SELECT q, z, r
- FROM calculate(:x, :y))
- WHERE users.id > z
+ FROM calculate(:x, :y)) AS anon_1
+ WHERE users.id > anon_1.z
If we wanted to use our ``calculate`` statement twice with different bind
parameters, the :func:`~sqlalchemy.sql.expression.ClauseElement.unique_params`
@@ -1171,21 +1110,20 @@ of our selectable:
.. sourcecode:: pycon+sql
- >>> s = select([users], users.c.id.between(
- ... calculate.alias('c1').unique_params(x=17, y=45).c.z,
- ... calculate.alias('c2').unique_params(x=5, y=12).c.z))
-
+ >>> calc1 = calculate.alias('c1').unique_params(x=17, y=45)
+ >>> calc2 = calculate.alias('c2').unique_params(x=5, y=12)
+ >>> s = select([users]).\
+ ... where(users.c.id.between(calc1.c.z, calc2.c.z))
>>> print s # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, users.name, users.fullname
- FROM users, (SELECT q, z, r
- FROM calculate(:x_1, :y_1)) AS c1, (SELECT q, z, r
- FROM calculate(:x_2, :y_2)) AS c2
+ FROM users,
+ (SELECT q, z, r FROM calculate(:x_1, :y_1)) AS c1,
+ (SELECT q, z, r FROM calculate(:x_2, :y_2)) AS c2
WHERE users.id BETWEEN c1.z AND c2.z
>>> s.compile().params
{u'x_2': 5, u'y_2': 12, u'y_1': 45, u'x_1': 17}
-See also :data:`~.expression.func`.
Window Functions
-----------------
@@ -1196,7 +1134,10 @@ OVER clause, using the :meth:`~.FunctionElement.over` method:
.. sourcecode:: pycon+sql
- >>> s = select([users.c.id, func.row_number().over(order_by=users.c.name)])
+ >>> s = select([
+ ... users.c.id,
+ ... func.row_number().over(order_by=users.c.name)
+ ... ])
>>> print s # doctest: +NORMALIZE_WHITESPACE
SELECT users.id, row_number() OVER (ORDER BY users.name) AS anon_1
FROM users
@@ -1205,40 +1146,51 @@ Unions and Other Set Operations
-------------------------------
Unions come in two flavors, UNION and UNION ALL, which are available via
-module level functions:
+module level functions :func:`~.expression.union` and
+:func:`~.expression.union_all`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import union
>>> u = union(
- ... addresses.select(addresses.c.email_address=='foo@bar.com'),
- ... addresses.select(addresses.c.email_address.like('%@yahoo.com')),
+ ... addresses.select().
+ ... where(addresses.c.email_address == 'foo@bar.com'),
+ ... addresses.select().
+ ... where(addresses.c.email_address.like('%@yahoo.com')),
... ).order_by(addresses.c.email_address)
- {sql}>>> print conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
- WHERE addresses.email_address = ? UNION SELECT addresses.id, addresses.user_id, addresses.email_address
+ WHERE addresses.email_address = ?
+ UNION
+ SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ? ORDER BY addresses.email_address
('foo@bar.com', '%@yahoo.com')
{stop}[(1, 1, u'jack@yahoo.com')]
-Also available, though not supported on all databases, are ``intersect()``,
-``intersect_all()``, ``except_()``, and ``except_all()``:
+Also available, though not supported on all databases, are
+:func:`~.expression.intersect`,
+:func:`~.expression.intersect_all`,
+:func:`~.expression.except_`, and :func:`~.expression.except_all`:
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import except_
>>> u = except_(
- ... addresses.select(addresses.c.email_address.like('%@%.com')),
- ... addresses.select(addresses.c.email_address.like('%@msn.com'))
+ ... addresses.select().
+ ... where(addresses.c.email_address.like('%@%.com')),
+ ... addresses.select().
+ ... where(addresses.c.email_address.like('%@msn.com'))
... )
- {sql}>>> print conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
- WHERE addresses.email_address LIKE ? EXCEPT SELECT addresses.id, addresses.user_id, addresses.email_address
+ WHERE addresses.email_address LIKE ?
+ EXCEPT
+ SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ?
('%@%.com', '%@msn.com')
@@ -1256,209 +1208,347 @@ want the "union" to be stated as a subquery:
>>> u = except_(
... union(
- ... addresses.select(addresses.c.email_address.like('%@yahoo.com')),
- ... addresses.select(addresses.c.email_address.like('%@msn.com'))
+ ... addresses.select().
+ ... where(addresses.c.email_address.like('%@yahoo.com')),
+ ... addresses.select().
+ ... where(addresses.c.email_address.like('%@msn.com'))
... ).alias().select(), # apply subquery here
... addresses.select(addresses.c.email_address.like('%@msn.com'))
... )
- {sql}>>> print conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {sql}>>> conn.execute(u).fetchall() # doctest: +NORMALIZE_WHITESPACE
SELECT anon_1.id, anon_1.user_id, anon_1.email_address
FROM (SELECT addresses.id AS id, addresses.user_id AS user_id,
- addresses.email_address AS email_address FROM addresses
- WHERE addresses.email_address LIKE ? UNION SELECT addresses.id AS id,
- addresses.user_id AS user_id, addresses.email_address AS email_address
- FROM addresses WHERE addresses.email_address LIKE ?) AS anon_1 EXCEPT
+ addresses.email_address AS email_address
+ FROM addresses
+ WHERE addresses.email_address LIKE ?
+ UNION
+ SELECT addresses.id AS id,
+ addresses.user_id AS user_id,
+ addresses.email_address AS email_address
+ FROM addresses
+ WHERE addresses.email_address LIKE ?) AS anon_1
+ EXCEPT
SELECT addresses.id, addresses.user_id, addresses.email_address
FROM addresses
WHERE addresses.email_address LIKE ?
('%@yahoo.com', '%@msn.com', '%@msn.com')
{stop}[(1, 1, u'jack@yahoo.com')]
+.. _scalar_selects:
Scalar Selects
--------------
-To embed a SELECT in a column expression, use
-:func:`~sqlalchemy.sql.expression.SelectBase.as_scalar`:
+A scalar select is a SELECT that returns exactly one row and one
+column. It can then be used as a column expression. A scalar select
+is often a :term:`correlated subquery`, which relies upon the enclosing
+SELECT statement in order to acquire at least one of its FROM clauses.
+
+The :func:`.select` construct can be modified to act as a
+column expression by calling either the :meth:`~.SelectBase.as_scalar`
+or :meth:`~.SelectBase.label` method:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([func.count(addresses.c.id)]).\
+ ... where(users.c.id == addresses.c.user_id).\
+ ... as_scalar()
+
+The above construct is now a :class:`~.expression.ScalarSelect` object,
+and is no longer part of the :class:`~.expression.FromClause` hierarchy;
+it instead is within the :class:`~.expression.ColumnElement` family of
+expression constructs. We can place this construct the same as any
+other column within another :func:`.select`:
.. sourcecode:: pycon+sql
- {sql}>>> print conn.execute(select([ # doctest: +NORMALIZE_WHITESPACE
- ... users.c.name,
- ... select([func.count(addresses.c.id)], users.c.id==addresses.c.user_id).as_scalar()
- ... ])).fetchall()
- SELECT users.name, (SELECT count(addresses.id) AS count_1
+ >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1
FROM addresses
WHERE users.id = addresses.user_id) AS anon_1
FROM users
()
- {stop}[(u'jack', 2), (u'wendy', 2), (u'fred', 0), (u'mary', 0)]
+ {stop}[(u'jack', 2), (u'wendy', 2)]
-Alternatively, applying a ``label()`` to a select evaluates it as a scalar as
-well:
+To apply a non-anonymous column name to our scalar select, we create
+it using :meth:`.SelectBase.label` instead:
.. sourcecode:: pycon+sql
- {sql}>>> print conn.execute(select([ # doctest: +NORMALIZE_WHITESPACE
- ... users.c.name,
- ... select([func.count(addresses.c.id)], users.c.id==addresses.c.user_id).label('address_count')
- ... ])).fetchall()
- SELECT users.name, (SELECT count(addresses.id) AS count_1
+ >>> stmt = select([func.count(addresses.c.id)]).\
+ ... where(users.c.id == addresses.c.user_id).\
+ ... label("address_count")
+ >>> conn.execute(select([users.c.name, stmt])).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, (SELECT count(addresses.id) AS count_1
FROM addresses
WHERE users.id = addresses.user_id) AS address_count
FROM users
()
- {stop}[(u'jack', 2), (u'wendy', 2), (u'fred', 0), (u'mary', 0)]
+ {stop}[(u'jack', 2), (u'wendy', 2)]
.. _correlated_subqueries:
Correlated Subqueries
---------------------
-Notice in the examples on "scalar selects", the FROM clause of each embedded
+Notice in the examples on :ref:`scalar_selects`, the FROM clause of each embedded
select did not contain the ``users`` table in its FROM clause. This is because
-SQLAlchemy automatically attempts to correlate embedded FROM objects to that
-of an enclosing query. To disable this, or to specify explicit FROM clauses to
-be correlated, use ``correlate()``::
+SQLAlchemy automatically :term:`correlates` embedded FROM objects to that
+of an enclosing query, if present, and if the inner SELECT statement would
+still have at least one FROM clause of its own. For example:
- >>> s = select([users.c.name], users.c.id==select([users.c.id]).correlate(None))
- >>> print s # doctest: +NORMALIZE_WHITESPACE
- SELECT users.name
- FROM users
- WHERE users.id = (SELECT users.id
- FROM users)
+.. sourcecode:: pycon+sql
- >>> s = select([users.c.name, addresses.c.email_address], users.c.id==
- ... select([users.c.id], users.c.id==addresses.c.user_id).correlate(addresses)
- ... )
- >>> print s # doctest: +NORMALIZE_WHITESPACE
- SELECT users.name, addresses.email_address
- FROM users, addresses
- WHERE users.id = (SELECT users.id
+ >>> stmt = select([addresses.c.user_id]).\
+ ... where(addresses.c.user_id == users.c.id).\
+ ... where(addresses.c.email_address == 'jack@yahoo.com')
+ >>> enclosing_stmt = select([users.c.name]).where(users.c.id == stmt)
+ >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name
FROM users
- WHERE users.id = addresses.user_id)
+ WHERE users.id = (SELECT addresses.user_id
+ FROM addresses
+ WHERE addresses.user_id = users.id
+ AND addresses.email_address = ?)
+ ('jack@yahoo.com',)
+ {stop}[(u'jack',)]
+
+Auto-correlation will usually do what's expected, however it can also be controlled.
+For example, if we wanted a statement to correlate only to the ``addresses`` table
+but not the ``users`` table, even if both were present in the enclosing SELECT,
+we use the :meth:`~.Select.correlate` method to specify those FROM clauses that
+may be correlated:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.id]).\
+ ... where(users.c.id == addresses.c.user_id).\
+ ... where(users.c.name == 'jack').\
+ ... correlate(addresses)
+ >>> enclosing_stmt = select(
+ ... [users.c.name, addresses.c.email_address]).\
+ ... select_from(users.join(addresses)).\
+ ... where(users.c.id == stmt)
+ >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, addresses.email_address
+ FROM users JOIN addresses ON users.id = addresses.user_id
+ WHERE users.id = (SELECT users.id
+ FROM users
+ WHERE users.id = addresses.user_id AND users.name = ?)
+ ('jack',)
+ {stop}[(u'jack', u'jack@yahoo.com'), (u'jack', u'jack@msn.com')]
+
+To entirely disable a statement from correlating, we can pass ``None``
+as the argument:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.id]).\
+ ... where(users.c.name == 'wendy').\
+ ... correlate(None)
+ >>> enclosing_stmt = select([users.c.name]).\
+ ... where(users.c.id == stmt)
+ >>> conn.execute(enclosing_stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name
+ FROM users
+ WHERE users.id = (SELECT users.id
+ FROM users
+ WHERE users.name = ?)
+ ('wendy',)
+ {stop}[(u'wendy',)]
Ordering, Grouping, Limiting, Offset...ing...
---------------------------------------------
+Ordering is done by passing column expressions to the
+:meth:`~.SelectBase.order_by` method:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.name]).order_by(users.c.name)
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name
+ FROM users ORDER BY users.name
+ ()
+ {stop}[(u'jack',), (u'wendy',)]
-The :func:`.select` function can take keyword arguments ``order_by``,
-``group_by`` (as well as ``having``), ``limit``, and ``offset``. There's also
-``distinct=True``. These are all also available as generative functions.
-``order_by()`` expressions can use the modifiers ``asc()`` or ``desc()`` to
-indicate ascending or descending.
+Ascending or descending can be controlled using the :meth:`~.ColumnElement.asc`
+and :meth:`~.ColumnElement.desc` modifiers:
.. sourcecode:: pycon+sql
- >>> s = select([addresses.c.user_id, func.count(addresses.c.id)]).\
- ... group_by(addresses.c.user_id).having(func.count(addresses.c.id)>1)
- {sql}>>> print conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
- SELECT addresses.user_id, count(addresses.id) AS count_1
- FROM addresses GROUP BY addresses.user_id
- HAVING count(addresses.id) > ?
- (1,)
- {stop}[(1, 2), (2, 2)]
+ >>> stmt = select([users.c.name]).order_by(users.c.name.desc())
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name
+ FROM users ORDER BY users.name DESC
+ ()
+ {stop}[(u'wendy',), (u'jack',)]
+
+Grouping refers to the GROUP BY clause, and is usually used in conjunction
+with aggregate functions to establish groups of rows to be aggregated.
+This is provided via the :meth:`~.SelectBase.group_by` method:
- >>> s = select([addresses.c.email_address, addresses.c.id]).distinct().\
- ... order_by(addresses.c.email_address.desc(), addresses.c.id)
- {sql}>>> conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
- SELECT DISTINCT addresses.email_address, addresses.id
- FROM addresses ORDER BY addresses.email_address DESC, addresses.id
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\
+ ... select_from(users.join(addresses)).\
+ ... group_by(users.c.name)
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, count(addresses.id) AS count_1
+ FROM users JOIN addresses
+ ON users.id = addresses.user_id
+ GROUP BY users.name
()
- {stop}[(u'www@www.org', 3), (u'wendy@aol.com', 4), (u'jack@yahoo.com', 1), (u'jack@msn.com', 2)]
+ {stop}[(u'jack', 2), (u'wendy', 2)]
- >>> s = select([addresses]).offset(1).limit(1)
- {sql}>>> print conn.execute(s).fetchall() # doctest: +NORMALIZE_WHITESPACE
- SELECT addresses.id, addresses.user_id, addresses.email_address
- FROM addresses
- LIMIT 1 OFFSET 1
+HAVING can be used to filter results on an aggregate value, after GROUP BY has
+been applied. It's available here via the :meth:`~.Select.having`
+method:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.name, func.count(addresses.c.id)]).\
+ ... select_from(users.join(addresses)).\
+ ... group_by(users.c.name).\
+ ... having(func.length(users.c.name) > 4)
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, count(addresses.id) AS count_1
+ FROM users JOIN addresses
+ ON users.id = addresses.user_id
+ GROUP BY users.name
+ HAVING length(users.name) > ?
+ (4,)
+ {stop}[(u'wendy', 2)]
+
+A common system of dealing with duplicates in composed SELECT statements
+is the DISTINCT modifier. A simple DISTINCT clause can be added using the
+:meth:`.Select.distinct` method:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = select([users.c.name]).\
+ ... where(addresses.c.email_address.
+ ... contains(users.c.name)).\
+ ... distinct()
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT DISTINCT users.name
+ FROM users, addresses
+ WHERE addresses.email_address LIKE '%%' || users.name || '%%'
()
- {stop}[(2, 1, u'jack@msn.com')]
+ {stop}[(u'jack',), (u'wendy',)]
-.. _inserts_and_updates:
+Most database backends support a system of limiting how many rows
+are returned, and the majority also feature a means of starting to return
+rows after a given "offset". While common backends like Postgresql,
+MySQL and SQLite support LIMIT and OFFSET keywords, other backends
+need to refer to more esoteric features such as "window functions"
+and row ids to achieve the same effect. The :meth:`~.Select.limit`
+and :meth:`~.Select.offset` methods provide an easy abstraction
+into the current backend's methodology:
-Inserts and Updates
-===================
+.. sourcecode:: pycon+sql
-Finally, we're back to INSERT for some more detail. The
-:func:`~sqlalchemy.sql.expression.insert` construct provides a :meth:`~.ValuesBase.values`
-method which can be used to send any value or clause expression to the VALUES
-portion of the INSERT::
+ >>> stmt = select([users.c.name, addresses.c.email_address]).\
+ ... select_from(users.join(addresses)).\
+ ... limit(1).offset(1)
+ >>> conn.execute(stmt).fetchall() # doctest: +NORMALIZE_WHITESPACE
+ {opensql}SELECT users.name, addresses.email_address
+ FROM users JOIN addresses ON users.id = addresses.user_id
+ LIMIT ? OFFSET ?
+ (1, 1)
+ {stop}[(u'jack', u'jack@msn.com')]
- # insert from a function
- users.insert().values(id=12, name=func.upper('jack'))
- # insert from a concatenation expression
- addresses.insert().values(email_address = name + '@' + host)
+.. _inserts_and_updates:
-``values()`` can be mixed with per-execution values::
+Inserts, Updates and Deletes
+============================
- conn.execute(
- users.insert().values(name=func.upper('jack')),
- fullname='Jack Jones'
- )
+We've seen :meth:`~.TableClause.insert` demonstrated
+earlier in this tutorial. Where :meth:`~.TableClause.insert`
+produces INSERT, the :meth:`~.TableClause.update`
+method produces UPDATE. Both of these constructs feature
+a method called :meth:`~.ValuesBase.values` which specifies
+the VALUES or SET clause of the statement.
-:func:`~sqlalchemy.sql.expression.bindparam` constructs can be passed, however
-the names of the table's columns are reserved for the "automatic" generation
-of bind names::
+The :meth:`~.ValuesBase.values` method accommodates any column expression
+as a value:
- users.insert().values(id=bindparam('_id'), name=bindparam('_name'))
+.. sourcecode:: pycon+sql
- # insert many rows at once:
- conn.execute(
- users.insert().values(id=bindparam('_id'), name=bindparam('_name')),
- [
- {'_id':1, '_name':'name1'},
- {'_id':2, '_name':'name2'},
- {'_id':3, '_name':'name3'},
- ]
- )
+ >>> stmt = users.update().\
+ ... values(fullname="Fullname: " + users.c.name)
+ >>> conn.execute(stmt) #doctest: +ELLIPSIS
+ {opensql}UPDATE users SET fullname=(? || users.name)
+ ('Fullname: ',)
+ COMMIT
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
+
+When using :meth:`~.TableClause.insert` or :meth:`~.TableClause.update`
+in an "execute many" context, we may also want to specify named
+bound parameters which we can refer to in the argument list.
+The two constructs will automatically generate bound placeholders
+for any column names passed in the dictionaries sent to
+:meth:`~.Connection.execute` at execution time. However, if we
+wish to use explicitly targeted named parameters with composed expressions,
+we need to use the :func:`~.expression.bindparam` construct.
+When using :func:`~.expression.bindparam` with
+:meth:`~.TableClause.insert` or :meth:`~.TableClause.update`,
+the names of the table's columns themselves are reserved for the
+"automatic" generation of bind names. We can combine the usage
+of implicitly available bind names and explicitly named parameters
+as in the example below:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = users.insert().\
+ ... values(name=bindparam('_name') + " .. name")
+ >>> conn.execute(stmt, [ # doctest: +ELLIPSIS
+ ... {'id':4, '_name':'name1'},
+ ... {'id':5, '_name':'name2'},
+ ... {'id':6, '_name':'name3'},
+ ... ])
+ {opensql}INSERT INTO users (id, name) VALUES (?, (? || ?))
+ ((4, 'name1', ' .. name'), (5, 'name2', ' .. name'), (6, 'name3', ' .. name'))
+ COMMIT
+ <sqlalchemy.engine.result.ResultProxy object at 0x...>
-An UPDATE statement is emitted using the :func:`.update` construct. These
-work much like an INSERT, except there is an additional WHERE clause
+An UPDATE statement is emitted using the :meth:`~.TableClause.update` construct. This
+works much like an INSERT, except there is an additional WHERE clause
that can be specified:
.. sourcecode:: pycon+sql
- >>> # change 'jack' to 'ed'
- {sql}>>> conn.execute(users.update().
- ... where(users.c.name=='jack').
- ... values(name='ed')
- ... ) #doctest: +ELLIPSIS
- UPDATE users SET name=? WHERE users.name = ?
- ('ed', 'jack')
- COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ >>> stmt = users.update().\
+ ... where(users.c.name == 'jack').\
+ ... values(name='ed')
- >>> # use bind parameters
- >>> u = users.update().\
- ... where(users.c.name==bindparam('oldname')).\
- ... values(name=bindparam('newname'))
- {sql}>>> conn.execute(u, oldname='jack', newname='ed') #doctest: +ELLIPSIS
- UPDATE users SET name=? WHERE users.name = ?
+ >>> conn.execute(stmt) #doctest: +ELLIPSIS
+ {opensql}UPDATE users SET name=? WHERE users.name = ?
('ed', 'jack')
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
- >>> # with binds, you can also update many rows at once
- {sql}>>> conn.execute(u,
+When using :meth:`~.TableClause.update` in an "execute many" context,
+we may wish to also use explicitly named bound parameters in the
+WHERE clause. Again, :func:`~.expression.bindparam` is the construct
+used to achieve this:
+
+.. sourcecode:: pycon+sql
+
+ >>> stmt = users.update().\
+ ... where(users.c.name == bindparam('oldname')).\
+ ... values(name=bindparam('newname'))
+ >>> conn.execute(stmt, [
... {'oldname':'jack', 'newname':'ed'},
... {'oldname':'wendy', 'newname':'mary'},
... {'oldname':'jim', 'newname':'jake'},
- ... ) #doctest: +ELLIPSIS
- UPDATE users SET name=? WHERE users.name = ?
- [('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim')]
+ ... ]) #doctest: +ELLIPSIS
+ {opensql}UPDATE users SET name=? WHERE users.name = ?
+ (('ed', 'jack'), ('mary', 'wendy'), ('jake', 'jim'))
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
- >>> # update a column to an expression.:
- {sql}>>> conn.execute(users.update().
- ... values(fullname="Fullname: " + users.c.name)
- ... ) #doctest: +ELLIPSIS
- UPDATE users SET fullname=(? || users.name)
- ('Fullname: ',)
- COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
Correlated Updates
------------------
@@ -1468,15 +1558,17 @@ table, or the same table:
.. sourcecode:: pycon+sql
- >>> s = select([addresses.c.email_address], addresses.c.user_id==users.c.id).limit(1)
- {sql}>>> conn.execute(users.update().values(fullname=s)) #doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
- UPDATE users SET fullname=(SELECT addresses.email_address
- FROM addresses
- WHERE addresses.user_id = users.id
- LIMIT 1 OFFSET 0)
- ()
+ >>> stmt = select([addresses.c.email_address]).\
+ ... where(addresses.c.user_id == users.c.id).\
+ ... limit(1)
+ >>> conn.execute(users.update().values(fullname=stmt)) #doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
+ {opensql}UPDATE users SET fullname=(SELECT addresses.email_address
+ FROM addresses
+ WHERE addresses.user_id = users.id
+ LIMIT ? OFFSET ?)
+ (1, 0)
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
Multiple Table Updates
----------------------
@@ -1493,7 +1585,7 @@ implicitly, by specifying multiple tables in the WHERE clause::
stmt = users.update().\
values(name='ed wood').\
- where(users.c.id==addresses.c.id).\
+ where(users.c.id == addresses.c.id).\
where(addresses.c.email_address.startswith('ed%'))
conn.execute(stmt)
@@ -1511,7 +1603,7 @@ SET clause directly, using the dictionary form passed to :meth:`.Update.values`:
users.c.name:'ed wood',
addresses.c.email_address:'ed.wood@foo.com'
}).\
- where(users.c.id==addresses.c.id).\
+ where(users.c.id == addresses.c.id).\
where(addresses.c.email_address.startswith('ed%'))
The tables are referenced explicitly in the SET clause::
@@ -1528,24 +1620,42 @@ by the database if this syntax is not supported.
.. _deletes:
Deletes
-========
+-------
Finally, a delete. This is accomplished easily enough using the
-:func:`~.expression.delete` construct:
+:meth:`~.TableClause.delete` construct:
.. sourcecode:: pycon+sql
- {sql}>>> conn.execute(addresses.delete()) #doctest: +ELLIPSIS
- DELETE FROM addresses
+ >>> conn.execute(addresses.delete()) #doctest: +ELLIPSIS
+ {opensql}DELETE FROM addresses
()
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
- {sql}>>> conn.execute(users.delete().where(users.c.name > 'm')) #doctest: +ELLIPSIS
- DELETE FROM users WHERE users.name > ?
+ >>> conn.execute(users.delete().where(users.c.name > 'm')) #doctest: +ELLIPSIS
+ {opensql}DELETE FROM users WHERE users.name > ?
('m',)
COMMIT
- {stop}<sqlalchemy.engine.ResultProxy object at 0x...>
+ {stop}<sqlalchemy.engine.result.ResultProxy object at 0x...>
+
+Matched Row Counts
+------------------
+
+Both of :meth:`~.TableClause.update` and
+:meth:`~.TableClause.delete` are associated with *matched row counts*. This is a
+number indicating the number of rows that were matched by the WHERE clause.
+Note that by "matched", this includes rows where no UPDATE actually took place.
+The value is available as :attr:`~.ResultProxy.rowcount`:
+
+.. sourcecode:: pycon+sql
+
+ >>> result = conn.execute(users.delete()) #doctest: +ELLIPSIS
+ {opensql}DELETE FROM users
+ ()
+ COMMIT
+ {stop}>>> result.rowcount
+ 1
Further Reference
==================
diff --git a/doc/build/core/types.rst b/doc/build/core/types.rst
index e59c81fc0..131e8e64d 100644
--- a/doc/build/core/types.rst
+++ b/doc/build/core/types.rst
@@ -672,7 +672,7 @@ Usage::
The implementation for :meth:`.ColumnOperators.__add__` is consulted
by an owning SQL expression, by instantiating the :class:`.TypeEngine.Comparator` with
-itself as as the ``expr`` attribute. The mechanics of the expression
+itself as the ``expr`` attribute. The mechanics of the expression
system are such that operations continue recursively until an
expression object produces a new SQL expression construct. Above, we
could just as well have said ``self.expr.op("goofy")(other)`` instead
diff --git a/doc/build/dialects/index.rst b/doc/build/dialects/index.rst
index 46628eed7..fc1210994 100644
--- a/doc/build/dialects/index.rst
+++ b/doc/build/dialects/index.rst
@@ -38,10 +38,11 @@ External Dialects
SQLAlchemy install and test suite from growing inordinately large.
The "classic" dialects such as SQLite, MySQL, Postgresql, Oracle,
- SQL Server, Firebird will remain in the Core for the time being.
+ SQL Server, and Firebird will remain in the Core for the time being.
Current external dialect projects for SQLAlchemy include:
+* `ibm_db_sa <http://code.google.com/p/ibm-db/wiki/README>`_ - driver for IBM DB2, developed jointly by IBM and SQLAlchemy developers.
* `sqlalchemy-access <https://bitbucket.org/zzzeek/sqlalchemy-access>`_ - driver for Microsoft Access.
* `sqlalchemy-akiban <https://github.com/zzzeek/sqlalchemy_akiban>`_ - driver and ORM extensions for the `Akiban <http://www.akiban.com>`_ database.
* `sqlalchemy-cubrid <https://bitbucket.org/zzzeek/sqlalchemy-cubrid>`_ - driver for the CUBRID database.
diff --git a/doc/build/glossary.rst b/doc/build/glossary.rst
index 8a473fda4..afdc35eda 100644
--- a/doc/build/glossary.rst
+++ b/doc/build/glossary.rst
@@ -158,6 +158,37 @@ Glossary
of classes; "joined", "single", and "concrete". The section
:ref:`inheritance_toplevel` describes inheritance mapping fully.
+ generative
+ A term that SQLAlchemy uses to refer what's normally known
+ as :term:`method chaining`; see that term for details.
+
+ method chaining
+ An object-oriented technique whereby the state of an object
+ is constructed by calling methods on the object. The
+ object features any number of methods, each of which return
+ a new object (or in some cases the same object) with
+ additional state added to the object.
+
+ The two SQLAlchemy objects that make the most use of
+ method chaining are the :class:`~.expression.Select`
+ object and the :class:`~.orm.query.Query` object.
+ For example, a :class:`~.expression.Select` object can
+ be assigned two expressions to its WHERE clause as well
+ as an ORDER BY clause by calling upon the :meth:`~.Select.where`
+ and :meth:`~.Select.order_by` methods::
+
+ stmt = select([user.c.name]).\
+ where(user.c.id > 5).\
+ ...  where(user.c.name.like('e%')).\
+ order_by(user.c.name)
+
+ Each method call above returns a copy of the original
+ :class:`~.expression.Select` object with additional qualifiers
+ added.
+
+ .. seealso::
+
+ :term:`generative`
release
releases
@@ -231,3 +262,36 @@ Glossary
`Unit of Work by Martin Fowler <http://martinfowler.com/eaaCatalog/unitOfWork.html>`_
:doc:`orm/session`
+
+ correlates
+ correlated subquery
+ correlated subqueries
+ A :term:`subquery` is correlated if it depends on data in the
+ enclosing ``SELECT``.
+
+ Below, a subquery selects the aggregate value ``MIN(a.id)``
+ from the ``email_address`` table, such that
+ it will be invoked for each value of ``user_account.id``, correlating
+ the value of this column against the ``email_address.user_account_id``
+ column:
+
+ .. sourcecode:: sql
+
+ SELECT user_account.name, email_address.email
+ FROM user_account
+ JOIN email_address ON user_account.id=email_address.user_account_id
+ WHERE email_address.id = (
+ SELECT MIN(a.id) FROM email_address AS a
+ WHERE a.user_account_id=user_account.id
+ )
+
+ The above subquery refers to the ``user_account`` table, which is not itself
+ in the ``FROM`` clause of this nested query. Instead, the ``user_account``
+ ...  table is received from the enclosing query, where each row selected from
+ ``user_account`` results in a distinct execution of the subquery.
+
+ A correlated subquery is nearly always present in the :term:`WHERE clause`
+ or :term:`columns clause` of the enclosing ``SELECT`` statement, and never
+ in the :term:`FROM clause`; this is because
+ the correlation can only proceed once the original source rows from the enclosing
+ statement's FROM clause are available.
diff --git a/doc/build/intro.rst b/doc/build/intro.rst
index fc7e1142e..c5e7f7425 100644
--- a/doc/build/intro.rst
+++ b/doc/build/intro.rst
@@ -94,7 +94,7 @@ SQLAlchemy supports installation using standard Python "distutils" or
* **Standard Setuptools** - When using `setuptools <http://pypi.python.org/pypi/setuptools/>`_,
SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C
extensions are supported. setuptools is not supported on Python 3 at the time
- of of this writing.
+ of this writing.
* **Distribute** - With `distribute <http://pypi.python.org/pypi/distribute/>`_,
SQLAlchemy can be installed via ``setup.py`` or ``easy_install``, and the C
extensions as well as Python 3 builds are supported.
diff --git a/doc/build/orm/session.rst b/doc/build/orm/session.rst
index 97d6f15a0..6774af2d9 100644
--- a/doc/build/orm/session.rst
+++ b/doc/build/orm/session.rst
@@ -1372,7 +1372,7 @@ Using Subtransactions with Autocommit
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A subtransaction indicates usage of the :meth:`.Session.begin` method in conjunction with
-the ``subtransactions=True`` flag. This produces a a non-transactional, delimiting construct that
+the ``subtransactions=True`` flag. This produces a non-transactional, delimiting construct that
allows nesting of calls to :meth:`~.Session.begin` and :meth:`~.Session.commit`.
It's purpose is to allow the construction of code that can function within a transaction
both independently of any external code that starts a transaction,
diff --git a/doc/build/testdocs.py b/doc/build/testdocs.py
index 815aa8669..9d84808e5 100644
--- a/doc/build/testdocs.py
+++ b/doc/build/testdocs.py
@@ -60,7 +60,8 @@ def replace_file(s, newfile):
raise ValueError("Couldn't find suitable create_engine call to replace '%s' in it" % oldfile)
return s
-for filename in 'orm/tutorial','core/tutorial',:
+#for filename in 'orm/tutorial','core/tutorial',:
+for filename in 'core/tutorial',:
filename = '%s.rst' % filename
s = open(filename).read()
#s = replace_file(s, ':memory:')
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
index 2352f1308..6e924ea9d 100644
--- a/lib/sqlalchemy/__init__.py
+++ b/lib/sqlalchemy/__init__.py
@@ -120,7 +120,7 @@ from .engine import create_engine, engine_from_config
__all__ = sorted(name for name, obj in locals().items()
if not (name.startswith('_') or _inspect.ismodule(obj)))
-__version__ = '0.8.0'
+__version__ = '0.8.1'
del _inspect, sys
diff --git a/lib/sqlalchemy/dialects/__init__.py b/lib/sqlalchemy/dialects/__init__.py
index fbbff153c..7f5d34707 100644
--- a/lib/sqlalchemy/dialects/__init__.py
+++ b/lib/sqlalchemy/dialects/__init__.py
@@ -18,7 +18,6 @@ __all__ = (
from .. import util
-
def _auto_fn(name):
"""default dialect importer.
diff --git a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
index 206dbf38b..d581f799a 100644
--- a/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
+++ b/lib/sqlalchemy/dialects/firebird/kinterbasdb.py
@@ -50,8 +50,8 @@ __ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurr
from .base import FBDialect, FBExecutionContext
from ... import util, types as sqltypes
-from ...util.compat import decimal
from re import match
+import decimal
class _FBNumeric_kinterbasdb(sqltypes.Numeric):
diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py
index 6f6d3b01f..b916612fb 100644
--- a/lib/sqlalchemy/dialects/mssql/pymssql.py
+++ b/lib/sqlalchemy/dialects/mssql/pymssql.py
@@ -54,7 +54,7 @@ class MSDialect_pymssql(MSDialect):
module = __import__('pymssql')
# pymmsql doesn't have a Binary method. we use string
# TODO: monkeypatching here is less than ideal
- module.Binary = str
+ module.Binary = lambda x: x if hasattr(x, 'decode') else str(x)
client_ver = tuple(int(x) for x in module.__version__.split("."))
if client_ver < (1, ):
@@ -85,6 +85,8 @@ class MSDialect_pymssql(MSDialect):
def is_disconnect(self, e, connection, cursor):
for msg in (
+ "Adaptive Server connection timed out",
+ "message 20003", # connection timeout
"Error 10054",
"Not connected to any MS SQL server",
"Connection is closed"
diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py
index 8854d1caa..beb6066f5 100644
--- a/lib/sqlalchemy/dialects/mssql/pyodbc.py
+++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py
@@ -114,7 +114,7 @@ for unix + PyODBC.
from .base import MSExecutionContext, MSDialect
from ...connectors.pyodbc import PyODBCConnector
from ... import types as sqltypes, util
-from ...util.compat import decimal
+import decimal
class _MSNumeric_pyodbc(sqltypes.Numeric):
diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py
index 62598ad00..f83020d93 100644
--- a/lib/sqlalchemy/dialects/mysql/base.py
+++ b/lib/sqlalchemy/dialects/mysql/base.py
@@ -2409,7 +2409,6 @@ class MySQLTableDefinitionParser(object):
state.constraints.append(spec)
else:
pass
-
return state
def _parse_constraints(self, line):
diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
index fd9fea878..b8f7439f5 100644
--- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py
+++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py
@@ -190,7 +190,7 @@ from ...engine import result as _result
from sqlalchemy import types as sqltypes, util, exc, processors
import random
import collections
-from sqlalchemy.util.compat import decimal
+import decimal
import re
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index a7a9e65ce..c59caff8d 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -1030,6 +1030,15 @@ class PGCompiler(compiler.SQLCompiler):
field, self.process(expr))
+ def visit_substring_func(self, func, **kw):
+ s = self.process(func.clauses.clauses[0], **kw)
+ start = self.process(func.clauses.clauses[1], **kw)
+ if len(func.clauses.clauses) > 2:
+ length = self.process(func.clauses.clauses[2], **kw)
+ return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length)
+ else:
+ return "SUBSTRING(%s FROM %s)" % (s, start)
+
class PGDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
@@ -1042,8 +1051,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
(
isinstance(column.default, schema.Sequence) and
column.default.optional
- )
- ):
+ )):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
else:
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index 157e03fd5..e555a1afd 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -6,7 +6,7 @@
import re
-from .base import ARRAY
+from .base import ARRAY, ischema_names
from ... import types as sqltypes
from ...sql import functions as sqlfunc
from ...sql.operators import custom_op
@@ -276,6 +276,9 @@ class HSTORE(sqltypes.Concatenable, sqltypes.TypeEngine):
return process
+ischema_names['hstore'] = HSTORE
+
+
class hstore(sqlfunc.GenericFunction):
"""Construct an hstore value within a SQL expression using the
Postgresql ``hstore()`` function.
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index 214db348c..0e503746c 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -27,7 +27,7 @@ yet.
"""
from ... import util, exc
-from ...util.compat import decimal
+import decimal
from ... import processors
from ... import types as sqltypes
from .base import PGDialect, \
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index 649a95ee6..1f118067f 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -147,7 +147,7 @@ import re
import logging
from ... import util, exc
-from ...util.compat import decimal
+import decimal
from ... import processors
from ...engine import result as _result
from ...sql import expression
diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py
index 644f4edb1..283c60da3 100644
--- a/lib/sqlalchemy/dialects/sybase/pyodbc.py
+++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py
@@ -36,7 +36,7 @@ from sqlalchemy.dialects.sybase.base import SybaseDialect,\
SybaseExecutionContext
from sqlalchemy.connectors.pyodbc import PyODBCConnector
from sqlalchemy import types as sqltypes, processors
-from sqlalchemy.util.compat import decimal
+import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py
index 1db0f2ce4..abb9f0fc3 100644
--- a/lib/sqlalchemy/engine/default.py
+++ b/lib/sqlalchemy/engine/default.py
@@ -34,6 +34,10 @@ class DefaultDialect(interfaces.Dialect):
preparer = compiler.IdentifierPreparer
supports_alter = True
+ # the first value we'd get for an autoincrement
+ # column.
+ default_sequence_base = 1
+
# most DBAPIs happy with this for execute().
# not cx_oracle.
execute_sequence_format = tuple
@@ -679,7 +683,7 @@ class DefaultExecutionContext(interfaces.ExecutionContext):
lastrowid = proc(lastrowid)
self.inserted_primary_key = [
- c is autoinc_col and lastrowid or v
+ lastrowid if c is autoinc_col else v
for c, v in zip(
table.primary_key,
self.inserted_primary_key)
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
index 891aef862..3eda127fd 100644
--- a/lib/sqlalchemy/orm/attributes.py
+++ b/lib/sqlalchemy/orm/attributes.py
@@ -174,6 +174,49 @@ class QueryableAttribute(interfaces._MappedAttribute,
# TODO: conditionally attach this method based on clause_element ?
return self
+
+ @util.memoized_property
+ def info(self):
+ """Return the 'info' dictionary for the underlying SQL element.
+
+ The behavior here is as follows:
+
+ * If the attribute is a column-mapped property, i.e.
+ :class:`.ColumnProperty`, which is mapped directly
+ to a schema-level :class:`.Column` object, this attribute
+ will return the :attr:`.SchemaItem.info` dictionary associated
+ with the core-level :class:`.Column` object.
+
+ * If the attribute is a :class:`.ColumnProperty` but is mapped to
+ any other kind of SQL expression other than a :class:`.Column`,
+ the attribute will refer to the :attr:`.MapperProperty.info` dictionary
+ associated directly with the :class:`.ColumnProperty`, assuming the SQL
+ expression itself does not have its own ``.info`` attribute
+ (which should be the case, unless a user-defined SQL construct
+ has defined one).
+
+ * If the attribute refers to any other kind of :class:`.MapperProperty`,
+ including :class:`.RelationshipProperty`, the attribute will refer
+ to the :attr:`.MapperProperty.info` dictionary associated with
+ that :class:`.MapperProperty`.
+
+ * To access the :attr:`.MapperProperty.info` dictionary of the :class:`.MapperProperty`
+ unconditionally, including for a :class:`.ColumnProperty` that's
+ associated directly with a :class:`.schema.Column`, the attribute
+ can be referred to using :attr:`.QueryableAttribute.property`
+ attribute, as ``MyClass.someattribute.property.info``.
+
+ .. versionadded:: 0.8.0
+
+ .. seealso::
+
+ :attr:`.SchemaItem.info`
+
+ :attr:`.MapperProperty.info`
+
+ """
+ return self.comparator.info
+
@util.memoized_property
def parent(self):
"""Return an inspection instance representing the parent.
diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py
index b3f7baf0f..62cdb2710 100644
--- a/lib/sqlalchemy/orm/interfaces.py
+++ b/lib/sqlalchemy/orm/interfaces.py
@@ -209,6 +209,12 @@ class MapperProperty(_MappedAttribute, _InspectionAttr):
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
+ .. seealso::
+
+ :attr:`.QueryableAttribute.info`
+
+ :attr:`.SchemaItem.info`
+
"""
return {}
@@ -390,6 +396,10 @@ class PropComparator(operators.ColumnOperators):
return self.__class__(self.prop, self._parentmapper, adapter)
+ @util.memoized_property
+ def info(self):
+ return self.property.info
+
@staticmethod
def any_op(a, b, **kwargs):
return a.any(b, **kwargs)
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 4e7b4d272..d258a20b6 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -2002,10 +2002,20 @@ class Mapper(_InspectionAttr):
@_memoized_configured_property
def _sorted_tables(self):
table_to_mapper = {}
+
for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper.setdefault(t, mapper)
+ extra_dependencies = []
+ for table, mapper in table_to_mapper.items():
+ super_ = mapper.inherits
+ if super_:
+ extra_dependencies.extend([
+ (super_table, table)
+ for super_table in super_.tables
+ ])
+
def skip(fk):
# attempt to skip dependencies that are not
# significant to the inheritance chain
@@ -2017,7 +2027,7 @@ class Mapper(_InspectionAttr):
if parent is not None and \
dep is not None and \
dep is not parent and \
- dep.inherit_condition is not None:
+ dep.inherit_condition is not None:
cols = set(sql_util.find_columns(dep.inherit_condition))
if parent.inherit_condition is not None:
cols = cols.union(sql_util.find_columns(
@@ -2028,7 +2038,9 @@ class Mapper(_InspectionAttr):
return False
sorted_ = sql_util.sort_tables(table_to_mapper.iterkeys(),
- skip_fn=skip)
+ skip_fn=skip,
+ extra_dependencies=extra_dependencies)
+
ret = util.OrderedDict()
for t in sorted_:
ret[t] = table_to_mapper[t]
@@ -2224,7 +2236,7 @@ def _event_on_resurrect(state):
state, state.dict, col, val)
-class _ColumnMapping(util.py25_dict):
+class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
def __init__(self, mapper):
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index d9dfff77d..e225a7c83 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -154,7 +154,7 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
- instance_key in uowtransaction.session.identity_map:
+ instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
index 9d977b221..9f8721de9 100644
--- a/lib/sqlalchemy/orm/properties.py
+++ b/lib/sqlalchemy/orm/properties.py
@@ -193,6 +193,14 @@ class ColumnProperty(StrategizedProperty):
"parententity": self._parentmapper,
"parentmapper": self._parentmapper})
+ @util.memoized_property
+ def info(self):
+ ce = self.__clause_element__()
+ try:
+ return ce.info
+ except AttributeError:
+ return self.prop.info
+
def __getattr__(self, key):
"""proxy attribute access down to the mapped column.
diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py
index fd0c54d88..9e44e01f7 100644
--- a/lib/sqlalchemy/orm/relationships.py
+++ b/lib/sqlalchemy/orm/relationships.py
@@ -352,7 +352,7 @@ class JoinCondition(object):
return
if "foreign" not in binary.left._annotations and \
- "foreign" not in binary.right._annotations:
+ "foreign" not in binary.right._annotations:
col = is_foreign(binary.left, binary.right)
if col is not None:
if col.compare(binary.left):
@@ -451,12 +451,11 @@ class JoinCondition(object):
def visit_binary(binary):
equated = binary.left.compare(binary.right)
if isinstance(binary.left, expression.ColumnClause) and \
- isinstance(binary.right, expression.ColumnClause):
+ isinstance(binary.right, expression.ColumnClause):
# assume one to many - FKs are "remote"
if fn(binary.left):
binary.left = binary.left._annotate({"remote": True})
- if fn(binary.right) and \
- not equated:
+ if fn(binary.right) and not equated:
binary.right = binary.right._annotate(
{"remote": True})
else:
@@ -507,9 +506,9 @@ class JoinCondition(object):
def proc_left_right(left, right):
if isinstance(left, expression.ColumnClause) and \
- isinstance(right, expression.ColumnClause):
+ isinstance(right, expression.ColumnClause):
if self.child_selectable.c.contains_column(right) and \
- self.parent_selectable.c.contains_column(left):
+ self.parent_selectable.c.contains_column(left):
right = right._annotate({"remote": True})
else:
self._warn_non_column_elements()
@@ -532,8 +531,7 @@ class JoinCondition(object):
not self.parent_local_selectable.c.\
contains_column(element)
or self.child_local_selectable.c.\
- contains_column(element)
- ):
+ contains_column(element)):
return element._annotate({"remote": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, repl)
@@ -568,7 +566,7 @@ class JoinCondition(object):
def locals_(elem):
if "remote" not in elem._annotations and \
- elem in local_side:
+ elem in local_side:
return elem._annotate({"local": True})
self.primaryjoin = visitors.replacement_traverse(
self.primaryjoin, {}, locals_
@@ -603,7 +601,7 @@ class JoinCondition(object):
can_sync = bool(self.secondary_synchronize_pairs)
if self.support_sync and can_sync or \
- (not self.support_sync and has_foreign):
+ (not self.support_sync and has_foreign):
return
# from here below is just determining the best error message
@@ -685,8 +683,7 @@ class JoinCondition(object):
"Ensure that only those columns referring "
"to a parent column are marked as foreign, "
"either via the foreign() annotation or "
- "via the foreign_keys argument."
- % self.prop)
+ "via the foreign_keys argument." % self.prop)
elif onetomany_fk:
self.direction = ONETOMANY
elif manytoone_fk:
@@ -716,14 +713,14 @@ class JoinCondition(object):
def visit_binary(binary, left, right):
if "remote" in right._annotations and \
"remote" not in left._annotations and \
- self.can_be_synced_fn(left):
+ self.can_be_synced_fn(left):
lrp.add((left, right))
elif "remote" in left._annotations and \
"remote" not in right._annotations and \
- self.can_be_synced_fn(right):
+ self.can_be_synced_fn(right):
lrp.add((right, left))
if binary.operator is operators.eq and \
- self.can_be_synced_fn(left, right):
+ self.can_be_synced_fn(left, right):
if "foreign" in right._annotations:
collection.append((left, right))
elif "foreign" in left._annotations:
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
index cc9dd6ba5..f3b8e271d 100644
--- a/lib/sqlalchemy/orm/util.py
+++ b/lib/sqlalchemy/orm/util.py
@@ -1265,3 +1265,40 @@ def attribute_str(instance, attribute):
def state_attribute_str(state, attribute):
return state_str(state) + "." + attribute
+
+
+def randomize_unitofwork():
+ """Use random-ordering sets within the unit of work in order
+ to detect unit of work sorting issues.
+
+ This is a utility function that can be used to help reproduce
+ inconsistent unit of work sorting issues. For example,
+ if two kinds of objects A and B are being inserted, and
+ B has a foreign key reference to A - the A must be inserted first.
+ However, if there is no relationship between A and B, the unit of work
+ won't know to perform this sorting, and an operation may or may not
+ fail, depending on how the ordering works out. Since Python sets
+ and dictionaries have non-deterministic ordering, such an issue may
+ occur on some runs and not on others, and in practice it tends to
+ have a great dependence on the state of the interpreter. This leads
+ to so-called "heisenbugs" where changing entirely irrelevant aspects
+ of the test program still cause the failure behavior to change.
+
+ By calling ``randomize_unitofwork()`` when a script first runs, the
+ ordering of a key series of sets within the unit of work implementation
+ are randomized, so that the script can be minimized down to the fundamental
+ mapping and operation that's failing, while still reproducing the issue
+ on at least some runs.
+
+ This utility is also available when running the test suite via the
+ ``--reversetop`` flag.
+
+ .. versionadded:: 0.8.1 created a standalone version of the
+ ``--reversetop`` feature.
+
+ """
+ from sqlalchemy.orm import unitofwork, session, mapper, dependency
+ from sqlalchemy.util import topological
+ from sqlalchemy.testing.util import RandomSet
+ topological.set = unitofwork.set = session.set = mapper.set = \
+ dependency.set = RandomSet
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index f72082ceb..501b6d2a0 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -736,8 +736,8 @@ class QueuePool(Pool):
self._overflow = 0 - pool_size
self._max_overflow = max_overflow
self._timeout = timeout
- self._overflow_lock = self._max_overflow > -1 and \
- threading.Lock() or DummyLock()
+ self._overflow_lock = threading.Lock() if self._max_overflow > -1 \
+ else DummyLock()
def _do_return_conn(self, conn):
try:
diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py
index 9d14bd3ca..9a07b9de4 100644
--- a/lib/sqlalchemy/schema.py
+++ b/lib/sqlalchemy/schema.py
@@ -93,14 +93,13 @@ def _get_table_key(name, schema):
def _validate_dialect_kwargs(kwargs, name):
# validate remaining kwargs that they all specify DB prefixes
- if len([k for k in kwargs
- if not re.match(
- r'^(?:%s)_' %
- '|'.join(dialects.__all__), k
- )
- ]):
- raise TypeError(
- "Invalid argument(s) for %s: %r" % (name, kwargs.keys()))
+
+ for k in kwargs:
+ m = re.match('^(.+?)_.*', k)
+ if m is None:
+ raise TypeError("Additional arguments should be "
+ "named <dialectname>_<argument>, got '%s'" % k)
+
inspection._self_inspects(SchemaItem)
@@ -2025,7 +2024,7 @@ class ColumnCollectionMixin(object):
for c in columns]
if self._pending_colargs and \
isinstance(self._pending_colargs[0], Column) and \
- self._pending_colargs[0].table is not None:
+ isinstance(self._pending_colargs[0].table, Table):
self._set_parent_with_dispatch(self._pending_colargs[0].table)
def _set_parent(self, table):
@@ -2121,7 +2120,7 @@ class CheckConstraint(Constraint):
elif _autoattach:
cols = sqlutil.find_columns(self.sqltext)
tables = set([c.table for c in cols
- if c.table is not None])
+ if isinstance(c.table, Table)])
if len(tables) == 1:
self._set_parent_with_dispatch(
tables.pop())
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index 59e46de12..5a3a92a3e 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -423,7 +423,7 @@ class SQLCompiler(engine.Compiled):
name = orig_name = column.name
if name is None:
raise exc.CompileError("Cannot compile Column object until "
- "it's 'name' is assigned.")
+ "its 'name' is assigned.")
is_literal = column.is_literal
if not is_literal and isinstance(name, sql._truncated_label):
@@ -787,14 +787,14 @@ class SQLCompiler(engine.Compiled):
existing = self.binds[name]
if existing is not bindparam:
if (existing.unique or bindparam.unique) and \
- not existing.proxy_set.intersection(bindparam.proxy_set):
+ not existing.proxy_set.intersection(
+ bindparam.proxy_set):
raise exc.CompileError(
"Bind parameter '%s' conflicts with "
"unique bind parameter of the same name" %
bindparam.key
)
- elif getattr(existing, '_is_crud', False) or \
- getattr(bindparam, '_is_crud', False):
+ elif existing._is_crud or bindparam._is_crud:
raise exc.CompileError(
"bindparam() name '%s' is reserved "
"for automatic usage in the VALUES or SET "
@@ -1086,14 +1086,9 @@ class SQLCompiler(engine.Compiled):
positional_names=None, **kwargs):
entry = self.stack and self.stack[-1] or {}
- if not asfrom:
- existingfroms = entry.get('from', None)
- else:
- # don't render correlations if we're rendering a FROM list
- # entry
- existingfroms = []
+ existingfroms = entry.get('from', None)
- froms = select._get_display_froms(existingfroms)
+ froms = select._get_display_froms(existingfroms, asfrom=asfrom)
correlate_froms = set(sql._from_objects(*froms))
diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py
index 490004e39..5cef778bb 100644
--- a/lib/sqlalchemy/sql/expression.py
+++ b/lib/sqlalchemy/sql/expression.py
@@ -181,10 +181,10 @@ def select(columns=None, whereclause=None, from_obj=[], **kwargs):
string arguments, which will be converted as appropriate into
either :func:`text()` or :func:`literal_column()` constructs.
- See also:
+ .. seealso::
- :ref:`coretutorial_selecting` - Core Tutorial description of
- :func:`.select`.
+ :ref:`coretutorial_selecting` - Core Tutorial description of
+ :func:`.select`.
:param columns:
A list of :class:`.ClauseElement` objects, typically
@@ -464,7 +464,7 @@ def update(table, whereclause=None, values=None, inline=False, **kwargs):
as_scalar()
)
- See also:
+ .. seealso::
:ref:`inserts_and_updates` - SQL Expression
Language Tutorial
@@ -493,7 +493,7 @@ def delete(table, whereclause=None, **kwargs):
condition of the ``UPDATE`` statement. Note that the
:meth:`~Delete.where()` generative method may be used instead.
- See also:
+ .. seealso::
:ref:`deletes` - SQL Expression Tutorial
@@ -2873,6 +2873,8 @@ class BindParameter(ColumnElement):
__visit_name__ = 'bindparam'
quote = None
+ _is_crud = False
+
def __init__(self, key, value, type_=None, unique=False,
callable_=None,
isoutparam=False, required=False,
@@ -3073,7 +3075,7 @@ class Executable(Generative):
See :meth:`.Connection.execution_options` for a full list of
possible options.
- See also:
+ .. seealso::
:meth:`.Connection.execution_options()`
@@ -3444,15 +3446,15 @@ class Case(ColumnElement):
class FunctionElement(Executable, ColumnElement, FromClause):
"""Base for SQL function-oriented constructs.
- See also:
+ .. seealso::
- :class:`.Function` - named SQL function.
+ :class:`.Function` - named SQL function.
- :data:`.func` - namespace which produces registered or ad-hoc
- :class:`.Function` instances.
+ :data:`.func` - namespace which produces registered or ad-hoc
+ :class:`.Function` instances.
- :class:`.GenericFunction` - allows creation of registered function
- types.
+ :class:`.GenericFunction` - allows creation of registered function
+ types.
"""
@@ -3571,15 +3573,13 @@ class Function(FunctionElement):
See the superclass :class:`.FunctionElement` for a description
of public methods.
- See also:
-
- See also:
+ .. seealso::
- :data:`.func` - namespace which produces registered or ad-hoc
- :class:`.Function` instances.
+ :data:`.func` - namespace which produces registered or ad-hoc
+ :class:`.Function` instances.
- :class:`.GenericFunction` - allows creation of registered function
- types.
+ :class:`.GenericFunction` - allows creation of registered function
+ types.
"""
@@ -4725,7 +4725,9 @@ class SelectBase(Executable, FromClause):
"""return a 'scalar' representation of this selectable, embedded as a
subquery with a label.
- See also :meth:`~.SelectBase.as_scalar`.
+ .. seealso::
+
+ :meth:`~.SelectBase.as_scalar`.
"""
return self.as_scalar().label(name)
@@ -4843,9 +4845,9 @@ class SelectBase(Executable, FromClause):
result = conn.execute(statement).fetchall()
- See also:
+ .. seealso::
- :meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`.
+ :meth:`.orm.query.Query.cte` - ORM version of :meth:`.SelectBase.cte`.
"""
return CTE(self, name=name, recursive=recursive)
@@ -4914,6 +4916,10 @@ class SelectBase(Executable, FromClause):
The criterion will be appended to any pre-existing ORDER BY criterion.
+ This is an **in-place** mutation method; the
+ :meth:`~.SelectBase.order_by` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
if len(clauses) == 1 and clauses[0] is None:
self._order_by_clause = ClauseList()
@@ -4927,6 +4933,10 @@ class SelectBase(Executable, FromClause):
The criterion will be appended to any pre-existing GROUP BY criterion.
+ This is an **in-place** mutation method; the
+ :meth:`~.SelectBase.group_by` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
if len(clauses) == 1 and clauses[0] is None:
self._group_by_clause = ClauseList()
@@ -4980,7 +4990,7 @@ class CompoundSelect(SelectBase):
INTERSECT_ALL = util.symbol('INTERSECT ALL')
def __init__(self, keyword, *selects, **kwargs):
- self._should_correlate = kwargs.pop('correlate', False)
+ self._auto_correlate = kwargs.pop('correlate', False)
self.keyword = keyword
self.selects = []
@@ -5120,13 +5130,13 @@ class HasPrefixes(object):
class Select(HasPrefixes, SelectBase):
"""Represents a ``SELECT`` statement.
- See also:
+ .. seealso::
- :func:`~.expression.select` - the function which creates
- a :class:`.Select` object.
+ :func:`~.expression.select` - the function which creates
+ a :class:`.Select` object.
- :ref:`coretutorial_selecting` - Core Tutorial description
- of :func:`.select`.
+ :ref:`coretutorial_selecting` - Core Tutorial description
+ of :func:`.select`.
"""
@@ -5159,7 +5169,7 @@ class Select(HasPrefixes, SelectBase):
:class:`SelectBase` superclass.
"""
- self._should_correlate = correlate
+ self._auto_correlate = correlate
if distinct is not False:
if distinct is True:
self._distinct = True
@@ -5232,7 +5242,7 @@ class Select(HasPrefixes, SelectBase):
return froms
- def _get_display_froms(self, existing_froms=None):
+ def _get_display_froms(self, existing_froms=None, asfrom=False):
"""Return the full list of 'from' clauses to be displayed.
Takes into account a set of existing froms which may be
@@ -5258,18 +5268,29 @@ class Select(HasPrefixes, SelectBase):
# using a list to maintain ordering
froms = [f for f in froms if f not in toremove]
- if len(froms) > 1 or self._correlate or self._correlate_except:
+ if not asfrom:
if self._correlate:
- froms = [f for f in froms if f not in
- _cloned_intersection(froms,
- self._correlate)]
+ froms = [
+ f for f in froms if f not in
+ _cloned_intersection(
+ _cloned_intersection(froms, existing_froms or ()),
+ self._correlate
+ )
+ ]
if self._correlate_except:
- froms = [f for f in froms if f in _cloned_intersection(froms,
- self._correlate_except)]
- if self._should_correlate and existing_froms:
- froms = [f for f in froms if f not in
- _cloned_intersection(froms,
- existing_froms)]
+ froms = [
+ f for f in froms if f in
+ _cloned_intersection(
+ froms,
+ self._correlate_except
+ )
+ ]
+
+ if self._auto_correlate and existing_froms and len(froms) > 1:
+ froms = [
+ f for f in froms if f not in
+ _cloned_intersection(froms, existing_froms)
+ ]
if not len(froms):
raise exc.InvalidRequestError("Select statement '%s"
@@ -5642,7 +5663,7 @@ class Select(HasPrefixes, SelectBase):
:ref:`correlated_subqueries`
"""
- self._should_correlate = False
+ self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate = ()
else:
@@ -5662,7 +5683,7 @@ class Select(HasPrefixes, SelectBase):
:ref:`correlated_subqueries`
"""
- self._should_correlate = False
+ self._auto_correlate = False
if fromclauses and fromclauses[0] is None:
self._correlate_except = ()
else:
@@ -5671,9 +5692,15 @@ class Select(HasPrefixes, SelectBase):
def append_correlation(self, fromclause):
"""append the given correlation expression to this select()
- construct."""
+ construct.
+
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.correlate` method is preferred, as it provides standard
+ :term:`method chaining`.
+
+ """
- self._should_correlate = False
+ self._auto_correlate = False
self._correlate = set(self._correlate).union(
_interpret_as_from(f) for f in fromclause)
@@ -5681,6 +5708,10 @@ class Select(HasPrefixes, SelectBase):
"""append the given column expression to the columns clause of this
select() construct.
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.column` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
self._reset_exported()
column = _interpret_as_column_or_from(column)
@@ -5694,6 +5725,10 @@ class Select(HasPrefixes, SelectBase):
"""append the given columns clause prefix expression to this select()
construct.
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.prefix_with` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
clause = _literal_as_text(clause)
self._prefixes = self._prefixes + (clause,)
@@ -5704,6 +5739,10 @@ class Select(HasPrefixes, SelectBase):
The expression will be joined to existing WHERE criterion via AND.
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.where` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
self._reset_exported()
whereclause = _literal_as_text(whereclause)
@@ -5719,6 +5758,10 @@ class Select(HasPrefixes, SelectBase):
The expression will be joined to existing HAVING criterion via AND.
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.having` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
if self._having is not None:
self._having = and_(self._having, _literal_as_text(having))
@@ -5729,6 +5772,10 @@ class Select(HasPrefixes, SelectBase):
"""append the given FromClause expression to this select() construct's
FROM clause.
+ This is an **in-place** mutation method; the
+ :meth:`~.Select.select_from` method is preferred, as it provides standard
+ :term:`method chaining`.
+
"""
self._reset_exported()
fromclause = _interpret_as_from(fromclause)
@@ -6124,9 +6171,9 @@ class Insert(ValuesBase):
The :class:`.Insert` object is created using the
:func:`~.expression.insert()` function.
- See also:
+ .. seealso::
- :ref:`coretutorial_insert_expressions`
+ :ref:`coretutorial_insert_expressions`
"""
__visit_name__ = 'insert'
diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py
index fd138cfec..520c90f99 100644
--- a/lib/sqlalchemy/sql/util.py
+++ b/lib/sqlalchemy/sql/util.py
@@ -13,12 +13,14 @@ from collections import deque
"""Utility functions that build upon SQL and Schema constructs."""
-def sort_tables(tables, skip_fn=None):
+def sort_tables(tables, skip_fn=None, extra_dependencies=None):
"""sort a collection of Table objects in order of
their foreign-key dependency."""
tables = list(tables)
tuples = []
+ if extra_dependencies is not None:
+ tuples.extend(extra_dependencies)
def visit_foreign_key(fkey):
if fkey.use_alter:
@@ -507,6 +509,9 @@ class AnnotatedColumnElement(Annotated):
"""pull 'key' from parent, if not present"""
return self._Annotated__element.key
+ @util.memoized_property
+ def info(self):
+ return self._Annotated__element.info
# hard-generate Annotated subclasses. this technique
# is used instead of on-the-fly types (i.e. type.__new__())
diff --git a/lib/sqlalchemy/testing/assertsql.py b/lib/sqlalchemy/testing/assertsql.py
index 864ce5b4d..0e250f356 100644
--- a/lib/sqlalchemy/testing/assertsql.py
+++ b/lib/sqlalchemy/testing/assertsql.py
@@ -174,6 +174,8 @@ class CompiledSQL(SQLMatchRule):
params = self.params
if not isinstance(params, list):
params = [params]
+ else:
+ params = list(params)
all_params = list(params)
all_received = list(_received_parameters)
while params:
diff --git a/lib/sqlalchemy/testing/plugin/noseplugin.py b/lib/sqlalchemy/testing/plugin/noseplugin.py
index 4ce76363e..5bd7ff3cd 100644
--- a/lib/sqlalchemy/testing/plugin/noseplugin.py
+++ b/lib/sqlalchemy/testing/plugin/noseplugin.py
@@ -215,11 +215,8 @@ def _set_table_options(options, file_config):
@post
def _reverse_topological(options, file_config):
if options.reversetop:
- from sqlalchemy.orm import unitofwork, session, mapper, dependency
- from sqlalchemy.util import topological
- from sqlalchemy.testing.util import RandomSet
- topological.set = unitofwork.set = session.set = mapper.set = \
- dependency.set = RandomSet
+ from sqlalchemy.orm.util import randomize_unitofwork
+ randomize_unitofwork()
def _requirements_opt(options, opt_str, value, parser):
@@ -361,7 +358,6 @@ class NoseSQLAlchemy(Plugin):
The class being examined by the selector
"""
-
if not issubclass(cls, fixtures.TestBase):
return False
elif cls.__name__.startswith('_'):
diff --git a/lib/sqlalchemy/testing/profiling.py b/lib/sqlalchemy/testing/profiling.py
index ae9d176b7..19a9731be 100644
--- a/lib/sqlalchemy/testing/profiling.py
+++ b/lib/sqlalchemy/testing/profiling.py
@@ -14,11 +14,12 @@ import pstats
import time
import collections
from .. import util
+
try:
import cProfile
except ImportError:
cProfile = None
-from ..util.compat import jython, pypy, win32
+from ..util import jython, pypy, win32, update_wrapper
_current_test = None
@@ -210,7 +211,6 @@ class ProfileStatsFile(object):
profile_f.write("%s %s %s\n" % (test_key, platform_key, c))
profile_f.close()
-from sqlalchemy.util.compat import update_wrapper
def function_call_count(variance=0.05):
diff --git a/lib/sqlalchemy/testing/runner.py b/lib/sqlalchemy/testing/runner.py
index 6ec73d7c8..2bdbaebd1 100644
--- a/lib/sqlalchemy/testing/runner.py
+++ b/lib/sqlalchemy/testing/runner.py
@@ -31,3 +31,13 @@ import nose
def main():
nose.main(addplugins=[NoseSQLAlchemy()])
+
+def setup_py_test():
+ """Runner to use for the 'test_suite' entry of your setup.py.
+
+ Prevents any name clash shenanigans from the command line
+ argument "test" that the "setup.py test" command sends
+ to nose.
+
+ """
+ nose.main(addplugins=[NoseSQLAlchemy()], argv=['runner'])
diff --git a/lib/sqlalchemy/testing/schema.py b/lib/sqlalchemy/testing/schema.py
index ad233ec22..325d74f1e 100644
--- a/lib/sqlalchemy/testing/schema.py
+++ b/lib/sqlalchemy/testing/schema.py
@@ -66,18 +66,27 @@ def Column(*args, **kw):
col = schema.Column(*args, **kw)
if 'test_needs_autoincrement' in test_opts and \
- kw.get('primary_key', False) and \
- exclusions.against('firebird', 'oracle'):
- def add_seq(c, tbl):
- c._init_items(
- schema.Sequence(_truncate_name(
- config.db.dialect, tbl.name + '_' + c.name + '_seq'),
- optional=True)
- )
- event.listen(col, 'after_parent_attach', add_seq, propagate=True)
+ kw.get('primary_key', False):
+
+ # allow any test suite to pick up on this
+ col.info['test_needs_autoincrement'] = True
+
+ # hardcoded rule for firebird, oracle; this should
+ # be moved out
+ if exclusions.against('firebird', 'oracle'):
+ def add_seq(c, tbl):
+ c._init_items(
+ schema.Sequence(_truncate_name(
+ config.db.dialect, tbl.name + '_' + c.name + '_seq'),
+ optional=True)
+ )
+ event.listen(col, 'after_parent_attach', add_seq, propagate=True)
return col
+
+
+
def _truncate_name(dialect, name):
if len(name) > dialect.max_identifier_length:
return name[0:max(dialect.max_identifier_length - 6, 0)] + \
diff --git a/lib/sqlalchemy/testing/suite/test_insert.py b/lib/sqlalchemy/testing/suite/test_insert.py
index 66aa1ecfa..a00fde312 100644
--- a/lib/sqlalchemy/testing/suite/test_insert.py
+++ b/lib/sqlalchemy/testing/suite/test_insert.py
@@ -33,7 +33,7 @@ class LastrowidTest(fixtures.TablesTest):
row = conn.execute(table.select()).first()
eq_(
row,
- (1, "some data")
+ (config.db.dialect.default_sequence_base, "some data")
)
def test_autoincrement_on_insert(self):
@@ -132,7 +132,7 @@ class ReturningTest(fixtures.TablesTest):
row = conn.execute(table.select()).first()
eq_(
row,
- (1, "some data")
+ (config.db.dialect.default_sequence_base, "some data")
)
@classmethod
diff --git a/lib/sqlalchemy/testing/suite/test_reflection.py b/lib/sqlalchemy/testing/suite/test_reflection.py
index 5beed6aad..7cae48572 100644
--- a/lib/sqlalchemy/testing/suite/test_reflection.py
+++ b/lib/sqlalchemy/testing/suite/test_reflection.py
@@ -87,8 +87,10 @@ class ComponentReflectionTest(fixtures.TablesTest):
test_needs_fk=True,
)
- cls.define_index(metadata, users)
- cls.define_views(metadata, schema)
+ if testing.requires.index_reflection.enabled:
+ cls.define_index(metadata, users)
+ if testing.requires.view_reflection.enabled:
+ cls.define_views(metadata, schema)
@classmethod
def define_index(cls, metadata, users):
@@ -121,12 +123,14 @@ class ComponentReflectionTest(fixtures.TablesTest):
self.assert_('test_schema' in insp.get_schema_names())
+ @testing.requires.schema_reflection
def test_dialect_initialize(self):
engine = engines.testing_engine()
assert not hasattr(engine.dialect, 'default_schema_name')
inspect(engine)
assert hasattr(engine.dialect, 'default_schema_name')
+ @testing.requires.schema_reflection
def test_get_default_schema_name(self):
insp = inspect(testing.db)
eq_(insp.default_schema_name, testing.db.dialect.default_schema_name)
@@ -157,6 +161,7 @@ class ComponentReflectionTest(fixtures.TablesTest):
self._test_get_table_names()
@testing.requires.table_reflection
+ @testing.requires.foreign_key_constraint_reflection
def test_get_table_names_fks(self):
self._test_get_table_names(order_by='foreign_key')
@@ -261,6 +266,7 @@ class ComponentReflectionTest(fixtures.TablesTest):
self._test_get_pk_constraint()
@testing.requires.table_reflection
+ @testing.requires.primary_key_constraint_reflection
@testing.requires.schemas
def test_get_pk_constraint_with_schema(self):
self._test_get_pk_constraint(schema='test_schema')
diff --git a/lib/sqlalchemy/testing/util.py b/lib/sqlalchemy/testing/util.py
index 2592c341e..d9ff14eaf 100644
--- a/lib/sqlalchemy/testing/util.py
+++ b/lib/sqlalchemy/testing/util.py
@@ -1,6 +1,5 @@
from ..util import jython, pypy, defaultdict, decorator
-from ..util.compat import decimal
-
+import decimal
import gc
import time
import random
diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py
index b9f7b9444..08aba4b56 100644
--- a/lib/sqlalchemy/types.py
+++ b/lib/sqlalchemy/types.py
@@ -25,10 +25,10 @@ import codecs
from . import exc, schema, util, processors, events, event
from .sql import operators
-from .sql.expression import _DefaultColumnComparator, column, bindparam
+from .sql.expression import _DefaultColumnComparator
from .util import pickle
-from .util.compat import decimal
from .sql.visitors import Visitable
+import decimal
default = util.importlater("sqlalchemy.engine", "default")
NoneType = type(None)
@@ -1372,8 +1372,7 @@ class Numeric(_DateAffinity, TypeEngine):
implementations however, most of which contain an import for plain
``decimal`` in their source code, even though some such as psycopg2
provide hooks for alternate adapters. SQLAlchemy imports ``decimal``
- globally as well. While the alternate ``Decimal`` class can be patched
- into SQLA's ``decimal`` module, overall the most straightforward and
+ globally as well. The most straightforward and
foolproof way to use "cdecimal" given current DBAPI and Python support
is to patch it directly into sys.modules before anything else is
imported::
diff --git a/lib/sqlalchemy/util/__init__.py b/lib/sqlalchemy/util/__init__.py
index 249c46ead..57bbdca85 100644
--- a/lib/sqlalchemy/util/__init__.py
+++ b/lib/sqlalchemy/util/__init__.py
@@ -4,10 +4,9 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from .compat import callable, cmp, reduce, defaultdict, py25_dict, \
+from .compat import callable, cmp, reduce, \
threading, py3k, py3k_warning, jython, pypy, cpython, win32, set_types, \
- buffer, pickle, update_wrapper, partial, md5_hex, decode_slice, \
- dottedgetter, parse_qsl, any, contextmanager, namedtuple, next, WeakSet
+ pickle, dottedgetter, parse_qsl, namedtuple, next, WeakSet
from ._collections import KeyedTuple, ImmutableContainer, immutabledict, \
Properties, OrderedProperties, ImmutableProperties, OrderedDict, \
@@ -21,8 +20,8 @@ from .langhelpers import iterate_attributes, class_hierarchy, \
portable_instancemethod, unbound_method_to_callable, \
getargspec_init, format_argspec_init, format_argspec_plus, \
get_func_kwargs, get_cls_kwargs, decorator, as_interface, \
- memoized_property, memoized_instancemethod, \
- group_expirable_memoized_property, importlater, \
+ memoized_property, memoized_instancemethod, md5_hex, \
+ group_expirable_memoized_property, importlater, decode_slice, \
monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\
duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\
classproperty, set_creation_order, warn_exception, warn, NoneType,\
@@ -31,3 +30,10 @@ from .langhelpers import iterate_attributes, class_hierarchy, \
from .deprecations import warn_deprecated, warn_pending_deprecation, \
deprecated, pending_deprecation
+
+# things that used to be not always available,
+# but are now as of current support Python versions
+from collections import defaultdict
+from functools import partial
+from functools import update_wrapper
+from contextlib import contextmanager
diff --git a/lib/sqlalchemy/util/_collections.py b/lib/sqlalchemy/util/_collections.py
index ca77103b2..2c9c982fb 100644
--- a/lib/sqlalchemy/util/_collections.py
+++ b/lib/sqlalchemy/util/_collections.py
@@ -6,7 +6,6 @@
"""Collection classes and helpers."""
-import sys
import itertools
import weakref
import operator
@@ -649,37 +648,25 @@ class OrderedIdentitySet(IdentitySet):
self.add(o)
-if sys.version_info >= (2, 5):
- class PopulateDict(dict):
- """A dict which populates missing values via a creation function.
+class PopulateDict(dict):
+ """A dict which populates missing values via a creation function.
- Note the creation function takes a key, unlike
- collections.defaultdict.
+ Note the creation function takes a key, unlike
+ collections.defaultdict.
- """
-
- def __init__(self, creator):
- self.creator = creator
-
- def __missing__(self, key):
- self[key] = val = self.creator(key)
- return val
-else:
- class PopulateDict(dict):
- """A dict which populates missing values via a creation function."""
+ """
- def __init__(self, creator):
- self.creator = creator
+ def __init__(self, creator):
+ self.creator = creator
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- self[key] = value = self.creator(key)
- return value
+ def __missing__(self, key):
+ self[key] = val = self.creator(key)
+ return val
-# define collections that are capable of storing
+# Define collections that are capable of storing
# ColumnElement objects as hashable keys/elements.
+# At this point, these are mostly historical, things
+# used to be more complicated.
column_set = set
column_dict = dict
ordered_column_set = OrderedSet
diff --git a/lib/sqlalchemy/util/compat.py b/lib/sqlalchemy/util/compat.py
index 3725a8491..2a0f06f8e 100644
--- a/lib/sqlalchemy/util/compat.py
+++ b/lib/sqlalchemy/util/compat.py
@@ -54,44 +54,6 @@ else:
except ImportError:
import pickle
-
-# a controversial feature, required by MySQLdb currently
-def buffer(x):
- return x
-
-# Py2K
-buffer = buffer
-# end Py2K
-
-try:
- from contextlib import contextmanager
-except ImportError:
- def contextmanager(fn):
- return fn
-
-try:
- from functools import update_wrapper
-except ImportError:
- def update_wrapper(wrapper, wrapped,
- assigned=('__doc__', '__module__', '__name__'),
- updated=('__dict__',)):
- for attr in assigned:
- setattr(wrapper, attr, getattr(wrapped, attr))
- for attr in updated:
- getattr(wrapper, attr).update(getattr(wrapped, attr, ()))
- return wrapper
-
-try:
- from functools import partial
-except ImportError:
- def partial(func, *args, **keywords):
- def newfunc(*fargs, **fkeywords):
- newkeywords = keywords.copy()
- newkeywords.update(fkeywords)
- return func(*(args + fargs), **newkeywords)
- return newfunc
-
-
if sys.version_info < (2, 6):
# emits a nasty deprecation warning
# in newer pythons
@@ -132,52 +94,6 @@ except ImportError:
return tuptype
try:
- from collections import defaultdict
-except ImportError:
- class defaultdict(dict):
-
- def __init__(self, default_factory=None, *a, **kw):
- if (default_factory is not None and
- not hasattr(default_factory, '__call__')):
- raise TypeError('first argument must be callable')
- dict.__init__(self, *a, **kw)
- self.default_factory = default_factory
-
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- return self.__missing__(key)
-
- def __missing__(self, key):
- if self.default_factory is None:
- raise KeyError(key)
- self[key] = value = self.default_factory()
- return value
-
- def __reduce__(self):
- if self.default_factory is None:
- args = tuple()
- else:
- args = self.default_factory,
- return type(self), args, None, None, self.iteritems()
-
- def copy(self):
- return self.__copy__()
-
- def __copy__(self):
- return type(self)(self.default_factory, self)
-
- def __deepcopy__(self, memo):
- import copy
- return type(self)(self.default_factory,
- copy.deepcopy(self.items()))
-
- def __repr__(self):
- return 'defaultdict(%s, %s)' % (self.default_factory,
- dict.__repr__(self))
-
-try:
from weakref import WeakSet
except:
import weakref
@@ -199,79 +115,12 @@ except:
def add(self, other):
self._storage[other] = True
-
-# find or create a dict implementation that supports __missing__
-class _probe(dict):
- def __missing__(self, key):
- return 1
-
-try:
- try:
- _probe()['missing']
- py25_dict = dict
- except KeyError:
- class py25_dict(dict):
- def __getitem__(self, key):
- try:
- return dict.__getitem__(self, key)
- except KeyError:
- try:
- missing = self.__missing__
- except AttributeError:
- raise KeyError(key)
- else:
- return missing(key)
-finally:
- del _probe
-
-
-try:
- import hashlib
- _md5 = hashlib.md5
-except ImportError:
- import md5
- _md5 = md5.new
-
-
-def md5_hex(x):
- # Py3K
- #x = x.encode('utf-8')
- m = _md5()
- m.update(x)
- return m.hexdigest()
-
import time
if win32 or jython:
time_func = time.clock
else:
time_func = time.time
-if sys.version_info >= (2, 5):
- any = any
-else:
- def any(iterator):
- for item in iterator:
- if bool(item):
- return True
- else:
- return False
-
-if sys.version_info >= (2, 5):
- def decode_slice(slc):
- """decode a slice object as sent to __getitem__.
-
- takes into account the 2.5 __index__() method, basically.
-
- """
- ret = []
- for x in slc.start, slc.stop, slc.step:
- if hasattr(x, '__index__'):
- x = x.__index__()
- ret.append(x)
- return tuple(ret)
-else:
- def decode_slice(slc):
- return (slc.start, slc.stop, slc.step)
if sys.version_info >= (2, 6):
from operator import attrgetter as dottedgetter
@@ -283,5 +132,11 @@ else:
return obj
return g
+# Adapted from six.py
+if py3k:
+ def b(s):
+ return s.encode("latin-1")
+else:
+ def b(s):
+ return s
-import decimal
diff --git a/lib/sqlalchemy/util/langhelpers.py b/lib/sqlalchemy/util/langhelpers.py
index a9b791234..e3aed24d8 100644
--- a/lib/sqlalchemy/util/langhelpers.py
+++ b/lib/sqlalchemy/util/langhelpers.py
@@ -15,10 +15,31 @@ import re
import sys
import types
import warnings
-from .compat import update_wrapper, set_types, threading, \
+from .compat import set_types, threading, \
callable, inspect_getfullargspec
+from functools import update_wrapper
from .. import exc
+import hashlib
+def md5_hex(x):
+ # Py3K
+ #x = x.encode('utf-8')
+ m = hashlib.md5()
+ m.update(x)
+ return m.hexdigest()
+
+def decode_slice(slc):
+ """decode a slice object as sent to __getitem__.
+
+ takes into account the 2.5 __index__() method, basically.
+
+ """
+ ret = []
+ for x in slc.start, slc.stop, slc.step:
+ if hasattr(x, '__index__'):
+ x = x.__index__()
+ ret.append(x)
+ return tuple(ret)
def _unique_symbols(used, *bases):
used = set(used)
@@ -123,7 +144,7 @@ def get_cls_kwargs(cls):
ctr = class_.__dict__.get('__init__', False)
if (not ctr or
not isinstance(ctr, types.FunctionType) or
- not isinstance(ctr.func_code, types.CodeType)):
+ not isinstance(ctr.func_code, types.CodeType)):
stack.update(class_.__bases__)
continue
@@ -256,7 +277,6 @@ def format_argspec_init(method, grouped=True):
try:
return format_argspec_plus(method, grouped=grouped)
except TypeError:
- self_arg = 'self'
if method is object.__init__:
args = grouped and '(self)' or 'self'
else:
@@ -784,7 +804,7 @@ def duck_type_collection(specimen, default=None):
if hasattr(specimen, '__emulates__'):
# canonicalize set vs sets.Set to a standard: the builtin set
if (specimen.__emulates__ is not None and
- issubclass(specimen.__emulates__, set_types)):
+ issubclass(specimen.__emulates__, set_types)):
return set
else:
return specimen.__emulates__
diff --git a/lib/sqlalchemy/util/topological.py b/lib/sqlalchemy/util/topological.py
index 6f895e7b7..de3dfd0ae 100644
--- a/lib/sqlalchemy/util/topological.py
+++ b/lib/sqlalchemy/util/topological.py
@@ -49,7 +49,8 @@ def sort(tuples, allitems):
def find_cycles(tuples, allitems):
- # straight from gvr with some mods
+ # adapted from:
+ # http://neopythonic.blogspot.com/2009/01/detecting-cycles-in-directed-graph.html
edges = util.defaultdict(set)
for parent, child in tuples:
diff --git a/setup.cfg b/setup.cfg
index a3894cd4e..92bdbc40f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -7,6 +7,10 @@ exclude = ^examples
first-package-wins = true
where = test
+[upload]
+sign = 1
+identity = C4DAFEE1
+
[sqla_testing]
requirement_cls=test.requirements:DefaultRequirements
profile_file=test/profiles.txt
diff --git a/test/aaa_profiling/test_memusage.py b/test/aaa_profiling/test_memusage.py
index aabc0a2bc..05be39002 100644
--- a/test/aaa_profiling/test_memusage.py
+++ b/test/aaa_profiling/test_memusage.py
@@ -14,7 +14,7 @@ from sqlalchemy.sql import column
from sqlalchemy.processors import to_decimal_processor_factory, \
to_unicode_processor_factory
from sqlalchemy.testing.util import gc_collect
-from sqlalchemy.util.compat import decimal
+import decimal
import gc
from sqlalchemy.testing import fixtures
import weakref
diff --git a/test/dialect/test_mssql.py b/test/dialect/test_mssql.py
index 52ba77310..0dfda9015 100644
--- a/test/dialect/test_mssql.py
+++ b/test/dialect/test_mssql.py
@@ -13,10 +13,10 @@ from sqlalchemy.engine import url
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, \
AssertsExecutionResults, ComparesTables
from sqlalchemy import testing
-from sqlalchemy.testing import eq_, emits_warning_on, \
- assert_raises_message
-from sqlalchemy.util.compat import decimal
+from sqlalchemy.testing import emits_warning_on, assert_raises_message
+import decimal
from sqlalchemy.engine.reflection import Inspector
+from sqlalchemy.util.compat import b
class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
@@ -1210,28 +1210,28 @@ class MatchTest(fixtures.TestBase, AssertsCompiledSQL):
eq_([1, 3, 5], [r.id for r in results])
-class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
- @classmethod
- def setup_class(cls):
- global dialect
- dialect = pyodbc.dialect()
+class ParseConnectTest(fixtures.TestBase):
def test_pyodbc_connect_dsn_trusted(self):
+ dialect = pyodbc.dialect()
u = url.make_url('mssql://mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_old_style_dsn_trusted(self):
+ dialect = pyodbc.dialect()
u = url.make_url('mssql:///?dsn=mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;Trusted_Connection=Yes'], {}], connection)
def test_pyodbc_connect_dsn_non_trusted(self):
+ dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@mydsn')
connection = dialect.create_connect_args(u)
eq_([['dsn=mydsn;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_dsn_extra(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@mydsn/?LANGUAGE=us_'
'english&foo=bar')
@@ -1241,12 +1241,14 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
assert ";foo=bar" in dsn_string
def test_pyodbc_connect(self):
+ dialect = pyodbc.dialect()
u = url.make_url('mssql://username:password@hostspec/database')
connection = dialect.create_connect_args(u)
eq_([['DRIVER={SQL Server};Server=hostspec;Database=database;UI'
'D=username;PWD=password'], {}], connection)
def test_pyodbc_connect_comma_port(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec:12345/data'
'base')
@@ -1255,6 +1257,7 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
'ase;UID=username;PWD=password'], {}], connection)
def test_pyodbc_connect_config_port(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?p'
'ort=12345')
@@ -1263,6 +1266,7 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
'D=username;PWD=password;port=12345'], {}], connection)
def test_pyodbc_extra_connect(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql://username:password@hostspec/database?L'
'ANGUAGE=us_english&foo=bar')
@@ -1275,6 +1279,7 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
'username;PWD=password;LANGUAGE=us_english;foo=bar'), True)
def test_pyodbc_odbc_connect(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=DRIVER%3D%7BSQL+Server'
'%7D%3BServer%3Dhostspec%3BDatabase%3Ddatabase'
@@ -1284,6 +1289,7 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
'D=username;PWD=password'], {}], connection)
def test_pyodbc_odbc_connect_with_dsn(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql:///?odbc_connect=dsn%3Dmydsn%3BDatabase'
'%3Ddatabase%3BUID%3Dusername%3BPWD%3Dpassword'
@@ -1293,6 +1299,7 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
{}], connection)
def test_pyodbc_odbc_connect_ignores_other_values(self):
+ dialect = pyodbc.dialect()
u = \
url.make_url('mssql://userdiff:passdiff@localhost/dbdiff?od'
'bc_connect=DRIVER%3D%7BSQL+Server%7D%3BServer'
@@ -1321,7 +1328,22 @@ class ParseConnectTest(fixtures.TestBase, AssertsCompiledSQL):
'user': 'scott', 'database': 'test'}], connection
)
- @testing.only_on(['mssql+pyodbc', 'mssql+pymssql'], "FreeTDS specific test")
+ def test_pymssql_disconnect(self):
+ dialect = pymssql.dialect()
+
+ for error in [
+ 'Adaptive Server connection timed out',
+ 'message 20003',
+ "Error 10054",
+ "Not connected to any MS SQL server",
+ "Connection is closed"
+ ]:
+ eq_(dialect.is_disconnect(error, None, None), True)
+
+ eq_(dialect.is_disconnect("not an error", None, None), False)
+
+ @testing.only_on(['mssql+pyodbc', 'mssql+pymssql'],
+ "FreeTDS specific test")
def test_bad_freetds_warning(self):
engine = engines.testing_engine()
@@ -1926,6 +1948,21 @@ class TypeRoundTripTest(fixtures.TestBase, AssertsExecutionResults, ComparesTabl
not in list(engine.execute(tbl.select()).first())
engine.execute(tbl.delete())
+class MonkeyPatchedBinaryTest(fixtures.TestBase):
+ __only_on__ = 'mssql'
+
+ def test_unicode(self):
+ module = __import__('pymssql')
+ result = module.Binary(u'foo')
+ eq_(result, u'foo')
+
+ def test_bytes(self):
+ module = __import__('pymssql')
+ input = b('\x80\x03]q\x00X\x03\x00\x00\x00oneq\x01a.')
+ expected_result = input
+ result = module.Binary(input)
+ eq_(result, expected_result)
+
class BinaryTest(fixtures.TestBase, AssertsExecutionResults):
"""Test the Binary and VarBinary types"""
diff --git a/test/dialect/test_oracle.py b/test/dialect/test_oracle.py
index 7384d7bb4..861b28c5f 100644
--- a/test/dialect/test_oracle.py
+++ b/test/dialect/test_oracle.py
@@ -7,12 +7,11 @@ from sqlalchemy import types as sqltypes, exc, schema
from sqlalchemy.sql import table, column
from sqlalchemy.testing import fixtures, AssertsExecutionResults, AssertsCompiledSQL
from sqlalchemy import testing
-from sqlalchemy.testing import eq_, assert_raises, assert_raises_message
+from sqlalchemy.testing import assert_raises, assert_raises_message
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.dialects.oracle import cx_oracle, base as oracle
from sqlalchemy.engine import default
-from sqlalchemy.util import jython
-from sqlalchemy.util.compat import decimal
+import decimal
from sqlalchemy.testing.schema import Table, Column
import datetime
import os
diff --git a/test/dialect/test_postgresql.py b/test/dialect/test_postgresql.py
index 3337fa6ab..005aed1ce 100644
--- a/test/dialect/test_postgresql.py
+++ b/test/dialect/test_postgresql.py
@@ -17,8 +17,8 @@ from sqlalchemy import Table, Column, select, MetaData, text, Integer, \
from sqlalchemy.orm import Session, mapper, aliased
from sqlalchemy import exc, schema, types
from sqlalchemy.dialects.postgresql import base as postgresql
-from sqlalchemy.dialects.postgresql import HSTORE, hstore, array, ARRAY
-from sqlalchemy.util.compat import decimal
+from sqlalchemy.dialects.postgresql import HSTORE, hstore, array
+import decimal
from sqlalchemy.testing.util import round_decimal
from sqlalchemy.sql import table, column, operators
import logging
@@ -180,6 +180,14 @@ class CompileTest(fixtures.TestBase, AssertsCompiledSQL):
'USING hash (data)',
dialect=postgresql.dialect())
+ def test_substring(self):
+ self.assert_compile(func.substring('abc', 1, 2),
+ 'SUBSTRING(%(substring_1)s FROM %(substring_2)s '
+ 'FOR %(substring_3)s)')
+ self.assert_compile(func.substring('abc', 1),
+ 'SUBSTRING(%(substring_1)s FROM %(substring_2)s)')
+
+
def test_extract(self):
t = table('t', column('col1', DateTime), column('col2', Date),
@@ -734,7 +742,6 @@ class NumericInterpretationTest(fixtures.TestBase):
def test_numeric_codes(self):
from sqlalchemy.dialects.postgresql import pg8000, psycopg2, base
- from sqlalchemy.util.compat import decimal
for dialect in (pg8000.dialect(), psycopg2.dialect()):
@@ -3094,6 +3101,12 @@ class HStoreRoundTripTest(fixtures.TablesTest):
engine.connect()
return engine
+ def test_reflect(self):
+ from sqlalchemy import inspect
+ insp = inspect(testing.db)
+ cols = insp.get_columns('data_table')
+ assert isinstance(cols[2]['type'], HSTORE)
+
@testing.only_on("postgresql+psycopg2")
def test_insert_native(self):
engine = testing.db
diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py
index 66991e922..3cd3db928 100644
--- a/test/orm/inheritance/test_basic.py
+++ b/test/orm/inheritance/test_basic.py
@@ -1055,6 +1055,73 @@ class FlushTest(fixtures.MappedTest):
sess.flush()
assert user_roles.count().scalar() == 1
+class JoinedNoFKSortingTest(fixtures.MappedTest):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table("a", metadata,
+ Column('id', Integer, primary_key=True,
+ test_needs_autoincrement=True)
+ )
+ Table("b", metadata,
+ Column('id', Integer, primary_key=True)
+ )
+ Table("c", metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class A(cls.Basic):
+ pass
+ class B(A):
+ pass
+ class C(A):
+ pass
+
+ @classmethod
+ def setup_mappers(cls):
+ A, B, C = cls.classes.A, cls.classes.B, cls.classes.C
+ mapper(A, cls.tables.a)
+ mapper(B, cls.tables.b, inherits=A,
+ inherit_condition=cls.tables.a.c.id == cls.tables.b.c.id)
+ mapper(C, cls.tables.c, inherits=A,
+ inherit_condition=cls.tables.a.c.id == cls.tables.c.c.id)
+
+ def test_ordering(self):
+ B, C = self.classes.B, self.classes.C
+ sess = Session()
+ sess.add_all([B(), C(), B(), C()])
+ self.assert_sql_execution(
+ testing.db,
+ sess.flush,
+ CompiledSQL(
+ "INSERT INTO a () VALUES ()",
+ {}
+ ),
+ CompiledSQL(
+ "INSERT INTO a () VALUES ()",
+ {}
+ ),
+ CompiledSQL(
+ "INSERT INTO a () VALUES ()",
+ {}
+ ),
+ CompiledSQL(
+ "INSERT INTO a () VALUES ()",
+ {}
+ ),
+ AllOf(
+ CompiledSQL(
+ "INSERT INTO b (id) VALUES (:id)",
+ [{"id": 1}, {"id": 3}]
+ ),
+ CompiledSQL(
+ "INSERT INTO c (id) VALUES (:id)",
+ [{"id": 2}, {"id": 4}]
+ )
+ )
+ )
+
class VersioningTest(fixtures.MappedTest):
@classmethod
def define_tables(cls, metadata):
diff --git a/test/orm/test_default_strategies.py b/test/orm/test_default_strategies.py
index b986ac568..c1668cdd4 100644
--- a/test/orm/test_default_strategies.py
+++ b/test/orm/test_default_strategies.py
@@ -2,7 +2,6 @@ from test.orm import _fixtures
from sqlalchemy import testing
from sqlalchemy.orm import mapper, relationship, create_session
from sqlalchemy import util
-from sqlalchemy.util import any
import sqlalchemy as sa
from sqlalchemy.testing import eq_, assert_raises_message
diff --git a/test/orm/test_froms.py b/test/orm/test_froms.py
index 4c566948a..b98333e3d 100644
--- a/test/orm/test_froms.py
+++ b/test/orm/test_froms.py
@@ -174,9 +174,7 @@ class RawSelectTest(QueryTest, AssertsCompiledSQL):
)
# a little tedious here, adding labels to work around Query's
- # auto-labelling. TODO: can we detect only one table in the
- # "froms" and then turn off use_labels ? note: this query is
- # incorrect SQL with the correlate of users in the FROM list.
+ # auto-labelling.
s = sess.query(addresses.c.id.label('id'),
addresses.c.email_address.label('email')).\
filter(addresses.c.user_id == users.c.id).correlate(users).\
@@ -188,7 +186,7 @@ class RawSelectTest(QueryTest, AssertsCompiledSQL):
"SELECT users.id AS users_id, users.name AS users_name, "
"anon_1.email AS anon_1_email "
"FROM users JOIN (SELECT addresses.id AS id, "
- "addresses.email_address AS email FROM addresses "
+ "addresses.email_address AS email FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 "
"ON anon_1.id = users.id",
)
diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py
index 66082b549..6b97fb135 100644
--- a/test/orm/test_mapper.py
+++ b/test/orm/test_mapper.py
@@ -407,6 +407,37 @@ class MapperTest(_fixtures.FixtureTest, AssertsCompiledSQL):
obj.info["q"] = "p"
eq_(obj.info, {"q": "p"})
+ def test_info_via_instrumented(self):
+ m = MetaData()
+ # create specific tables here as we don't want
+ # users.c.id.info to be pre-initialized
+ users = Table('u', m, Column('id', Integer, primary_key=True),
+ Column('name', String))
+ addresses = Table('a', m, Column('id', Integer, primary_key=True),
+ Column('name', String),
+ Column('user_id', Integer, ForeignKey('u.id')))
+ Address = self.classes.Address
+ User = self.classes.User
+
+ mapper(User, users, properties={
+ "name_lower": column_property(func.lower(users.c.name)),
+ "addresses": relationship(Address)
+ })
+ mapper(Address, addresses)
+
+ # attr.info goes down to the original Column object
+ # for the dictionary. The annotated element needs to pass
+ # this on.
+ assert 'info' not in users.c.id.__dict__
+ is_(User.id.info, users.c.id.info)
+ assert 'info' in users.c.id.__dict__
+
+ # for SQL expressions, ORM-level .info
+ is_(User.name_lower.info, User.name_lower.property.info)
+
+ # same for relationships
+ is_(User.addresses.info, User.addresses.property.info)
+
def test_add_property(self):
users, addresses, Address = (self.tables.users,
diff --git a/test/orm/test_query.py b/test/orm/test_query.py
index f418d2581..ac9c95f41 100644
--- a/test/orm/test_query.py
+++ b/test/orm/test_query.py
@@ -194,22 +194,33 @@ class RawSelectTest(QueryTest, AssertsCompiledSQL):
Address = self.classes.Address
self.assert_compile(
- select([User]).where(User.id == Address.user_id).
- correlate(Address),
- "SELECT users.id, users.name FROM users "
- "WHERE users.id = addresses.user_id"
+ select([User.name, Address.id,
+ select([func.count(Address.id)]).\
+ where(User.id == Address.user_id).\
+ correlate(User).as_scalar()
+ ]),
+ "SELECT users.name, addresses.id, "
+ "(SELECT count(addresses.id) AS count_1 "
+ "FROM addresses WHERE users.id = addresses.user_id) AS anon_1 "
+ "FROM users, addresses"
)
def test_correlate_aliased_entity(self):
User = self.classes.User
Address = self.classes.Address
- aa = aliased(Address, name="aa")
+ uu = aliased(User, name="uu")
self.assert_compile(
- select([User]).where(User.id == aa.user_id).
- correlate(aa),
- "SELECT users.id, users.name FROM users "
- "WHERE users.id = aa.user_id"
+ select([uu.name, Address.id,
+ select([func.count(Address.id)]).\
+ where(uu.id == Address.user_id).\
+ correlate(uu).as_scalar()
+ ]),
+ # curious, "address.user_id = uu.id" is reversed here
+ "SELECT uu.name, addresses.id, "
+ "(SELECT count(addresses.id) AS count_1 "
+ "FROM addresses WHERE addresses.user_id = uu.id) AS anon_1 "
+ "FROM users AS uu, addresses"
)
def test_columns_clause_entity(self):
diff --git a/test/orm/test_rel_fn.py b/test/orm/test_rel_fn.py
index bad3a0dd7..10ba41429 100644
--- a/test/orm/test_rel_fn.py
+++ b/test/orm/test_rel_fn.py
@@ -1,4 +1,4 @@
-from sqlalchemy.testing import assert_raises, assert_raises_message, eq_, \
+from sqlalchemy.testing import assert_raises_message, eq_, \
AssertsCompiledSQL, is_
from sqlalchemy.testing import fixtures
from sqlalchemy.orm import relationships, foreign, remote
@@ -119,9 +119,9 @@ class _JoinFixtures(object):
support_sync=False,
can_be_synced_fn=_can_sync,
primaryjoin=and_(
- self.three_tab_a.c.id==self.three_tab_b.c.aid,
- self.three_tab_c.c.bid==self.three_tab_b.c.id,
- self.three_tab_c.c.aid==self.three_tab_a.c.id
+ self.three_tab_a.c.id == self.three_tab_b.c.aid,
+ self.three_tab_c.c.bid == self.three_tab_b.c.id,
+ self.three_tab_c.c.aid == self.three_tab_a.c.id
)
)
@@ -215,9 +215,9 @@ class _JoinFixtures(object):
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
- self.composite_selfref.c.group_id==
+ self.composite_selfref.c.group_id ==
func.foo(self.composite_selfref.c.group_id),
- self.composite_selfref.c.parent_id==
+ self.composite_selfref.c.parent_id ==
self.composite_selfref.c.id
),
**kw
@@ -230,9 +230,9 @@ class _JoinFixtures(object):
self.composite_selfref,
self.composite_selfref,
primaryjoin=and_(
- remote(self.composite_selfref.c.group_id)==
+ remote(self.composite_selfref.c.group_id) ==
func.foo(self.composite_selfref.c.group_id),
- remote(self.composite_selfref.c.parent_id)==
+ remote(self.composite_selfref.c.parent_id) ==
self.composite_selfref.c.id
),
**kw
@@ -281,58 +281,60 @@ class _JoinFixtures(object):
# see test/orm/inheritance/test_abc_inheritance:TestaTobM2O
# and others there
right = self.base_w_sub_rel.join(self.rel_sub,
- self.base_w_sub_rel.c.id==self.rel_sub.c.id
+ self.base_w_sub_rel.c.id == self.rel_sub.c.id
)
return relationships.JoinCondition(
self.base_w_sub_rel,
right,
self.base_w_sub_rel,
self.rel_sub,
- primaryjoin=self.base_w_sub_rel.c.sub_id==\
+ primaryjoin=self.base_w_sub_rel.c.sub_id == \
self.rel_sub.c.id,
**kw
)
def _join_fixture_o2m_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub_w_base_rel,
- self.base.c.id==self.sub_w_base_rel.c.id)
+ self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
self.base,
self.sub_w_base_rel,
self.base,
- primaryjoin=self.sub_w_base_rel.c.base_id==self.base.c.id
+ primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id
)
def _join_fixture_m2o_joined_sub_to_sub_on_base(self, **kw):
# this is a late add - a variant of the test case
# in #2491 where we join on the base cols instead. only
# m2o has a problem at the time of this test.
- left = self.base.join(self.sub, self.base.c.id==self.sub.c.id)
- right = self.base.join(self.sub_w_base_rel, self.base.c.id==self.sub_w_base_rel.c.id)
+ left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
+ right = self.base.join(self.sub_w_base_rel,
+ self.base.c.id == self.sub_w_base_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_base_rel,
- primaryjoin=self.sub_w_base_rel.c.base_id==self.base.c.id,
+ primaryjoin=self.sub_w_base_rel.c.base_id == self.base.c.id,
)
def _join_fixture_o2m_joined_sub_to_sub(self, **kw):
- left = self.base.join(self.sub, self.base.c.id==self.sub.c.id)
- right = self.base.join(self.sub_w_sub_rel, self.base.c.id==self.sub_w_sub_rel.c.id)
+ left = self.base.join(self.sub, self.base.c.id == self.sub.c.id)
+ right = self.base.join(self.sub_w_sub_rel,
+ self.base.c.id == self.sub_w_sub_rel.c.id)
return relationships.JoinCondition(
left,
right,
self.sub,
self.sub_w_sub_rel,
- primaryjoin=self.sub.c.id==self.sub_w_sub_rel.c.sub_id
+ primaryjoin=self.sub.c.id == self.sub_w_sub_rel.c.sub_id
)
def _join_fixture_m2o_sub_to_joined_sub(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
- self.base.c.id==self.right_w_base_rel.c.id)
+ self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
@@ -343,19 +345,19 @@ class _JoinFixtures(object):
def _join_fixture_m2o_sub_to_joined_sub_func(self, **kw):
# see test.orm.test_mapper:MapperTest.test_add_column_prop_deannotate,
right = self.base.join(self.right_w_base_rel,
- self.base.c.id==self.right_w_base_rel.c.id)
+ self.base.c.id == self.right_w_base_rel.c.id)
return relationships.JoinCondition(
self.right_w_base_rel,
right,
self.right_w_base_rel,
self.right_w_base_rel,
- primaryjoin=self.right_w_base_rel.c.base_id==\
+ primaryjoin=self.right_w_base_rel.c.base_id == \
func.foo(self.base.c.id)
)
def _join_fixture_o2o_joined_sub_to_base(self, **kw):
left = self.base.join(self.sub,
- self.base.c.id==self.sub.c.id)
+ self.base.c.id == self.sub.c.id)
# see test_relationships->AmbiguousJoinInterpretedAsSelfRef
return relationships.JoinCondition(
@@ -371,7 +373,7 @@ class _JoinFixtures(object):
self.right,
self.left,
self.right,
- primaryjoin=self.left.c.id==
+ primaryjoin=self.left.c.id ==
foreign(func.foo(self.right.c.lid)),
**kw
)
@@ -382,7 +384,7 @@ class _JoinFixtures(object):
self.right,
self.left,
self.right,
- primaryjoin=self.left.c.id==
+ primaryjoin=self.left.c.id ==
func.foo(self.right.c.lid),
consider_as_foreign_keys=[self.right.c.lid],
**kw
@@ -399,7 +401,7 @@ class _JoinFixtures(object):
)
def _assert_raises_no_relevant_fks(self, fn, expr, relname,
- primary, *arg, **kw):
+ primary, *arg, **kw):
assert_raises_message(
exc.ArgumentError,
r"Could not locate any relevant foreign key columns "
@@ -414,9 +416,9 @@ class _JoinFixtures(object):
)
def _assert_raises_no_equality(self, fn, expr, relname,
- primary, *arg, **kw):
+ primary, *arg, **kw):
assert_raises_message(
- sa.exc.ArgumentError,
+ exc.ArgumentError,
"Could not locate any simple equality expressions "
"involving locally mapped foreign key columns for %s join "
"condition '%s' on relationship %s. "
@@ -431,7 +433,7 @@ class _JoinFixtures(object):
)
def _assert_raises_ambig_join(self, fn, relname, secondary_arg,
- *arg, **kw):
+ *arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.AmbiguousForeignKeysError,
@@ -455,7 +457,7 @@ class _JoinFixtures(object):
fn, *arg, **kw)
def _assert_raises_no_join(self, fn, relname, secondary_arg,
- *arg, **kw):
+ *arg, **kw):
if secondary_arg is not None:
assert_raises_message(
exc.NoForeignKeysError,
@@ -463,7 +465,8 @@ class _JoinFixtures(object):
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables "
"via secondary table '%s'. "
- "Ensure that referencing columns are associated with a ForeignKey "
+ "Ensure that referencing columns are associated "
+ "with a ForeignKey "
"or ForeignKeyConstraint, or specify 'primaryjoin' and "
"'secondaryjoin' expressions"
% (relname, secondary_arg),
@@ -474,14 +477,16 @@ class _JoinFixtures(object):
"Could not determine join condition between "
"parent/child tables on relationship %s - "
"there are no foreign keys linking these tables. "
- "Ensure that referencing columns are associated with a ForeignKey "
+ "Ensure that referencing columns are associated "
+ "with a ForeignKey "
"or ForeignKeyConstraint, or specify a 'primaryjoin' "
"expression."
% (relname,),
fn, *arg, **kw)
-class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
+class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase,
+ AssertsCompiledSQL):
def test_determine_local_remote_pairs_o2o_joined_sub_to_base(self):
joincond = self._join_fixture_o2o_joined_sub_to_base()
eq_(
@@ -580,7 +585,7 @@ class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL
]
)
- def test_determine_local_remote_compound_1(self):
+ def test_determine_local_remote_compound_3(self):
joincond = self._join_fixture_compound_expression_1()
eq_(
joincond.local_remote_pairs,
@@ -627,8 +632,10 @@ class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL
eq_(
joincond.local_remote_pairs,
[
- (self.composite_selfref.c.group_id, self.composite_selfref.c.group_id),
- (self.composite_selfref.c.id, self.composite_selfref.c.parent_id),
+ (self.composite_selfref.c.group_id,
+ self.composite_selfref.c.group_id),
+ (self.composite_selfref.c.id,
+ self.composite_selfref.c.parent_id),
]
)
@@ -647,8 +654,10 @@ class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL
eq_(
joincond.local_remote_pairs,
[
- (self.composite_selfref.c.group_id, self.composite_selfref.c.group_id),
- (self.composite_selfref.c.id, self.composite_selfref.c.parent_id),
+ (self.composite_selfref.c.group_id,
+ self.composite_selfref.c.group_id),
+ (self.composite_selfref.c.id,
+ self.composite_selfref.c.parent_id),
]
)
@@ -713,8 +722,8 @@ class ColumnCollectionsTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL
eq_(
j2.local_remote_pairs,
[
- (self.m2mright.c.id, self.m2msecondary.c.rid),
- (self.m2mleft.c.id, self.m2msecondary.c.lid),
+ (self.m2mright.c.id, self.m2msecondary.c.rid),
+ (self.m2mleft.c.id, self.m2msecondary.c.lid),
]
)
@@ -997,19 +1006,22 @@ class AdaptedJoinTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
)
class LazyClauseTest(_JoinFixtures, fixtures.TestBase, AssertsCompiledSQL):
+ __dialect__ = 'default'
- def _test_lazy_clause_o2m(self):
+ def test_lazy_clause_o2m(self):
joincond = self._join_fixture_o2m()
+ lazywhere, bind_to_col, equated_columns = joincond.create_lazy_clause()
self.assert_compile(
- relationships.create_lazy_clause(joincond),
- ""
+ lazywhere,
+ ":param_1 = rgt.lid"
)
- def _test_lazy_clause_o2m_reverse(self):
+ def test_lazy_clause_o2m_reverse(self):
joincond = self._join_fixture_o2m()
+ lazywhere, bind_to_col, equated_columns =\
+ joincond.create_lazy_clause(reverse_direction=True)
self.assert_compile(
- relationships.create_lazy_clause(joincond,
- reverse_direction=True),
- ""
+ lazywhere,
+ "lft.id = :param_1"
)
diff --git a/test/perf/stress_all.py b/test/perf/stress_all.py
index d17028530..890ef24a3 100644
--- a/test/perf/stress_all.py
+++ b/test/perf/stress_all.py
@@ -1,6 +1,6 @@
# -*- encoding: utf8 -*-
from datetime import *
-from sqlalchemy.util.compat import decimal
+import decimal
#from fastdec import mpd as Decimal
from cPickle import dumps, loads
diff --git a/test/profiles.txt b/test/profiles.txt
index d83280c2c..b119886bb 100644
--- a/test/profiles.txt
+++ b/test/profiles.txt
@@ -107,6 +107,7 @@ test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_mysql_mysqldb_nocex
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_cextensions 122,18
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_postgresql_psycopg2_nocextensions 122,18
test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_cextensions 122,18
+test.aaa_profiling.test_orm.MergeTest.test_merge_no_load 2.7_sqlite_pysqlite_nocextensions 122,18
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect
@@ -116,6 +117,7 @@ test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_mysql_mysqldb_
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_cextensions 82
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_postgresql_psycopg2_nocextensions 82
test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_cextensions 82
+test.aaa_profiling.test_pool.QueuePoolTest.test_first_connect 2.7_sqlite_pysqlite_nocextensions 82
# TEST: test.aaa_profiling.test_pool.QueuePoolTest.test_second_connect
diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py
index 3b8aed23f..9cd893c1a 100644
--- a/test/sql/test_compiler.py
+++ b/test/sql/test_compiler.py
@@ -14,8 +14,8 @@ from sqlalchemy.testing import eq_, is_, assert_raises, assert_raises_message
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL
from sqlalchemy import Integer, String, MetaData, Table, Column, select, \
- func, not_, cast, text, tuple_, exists, delete, update, bindparam,\
- insert, literal, and_, null, type_coerce, alias, or_, literal_column,\
+ func, not_, cast, text, tuple_, exists, update, bindparam,\
+ literal, and_, null, type_coerce, alias, or_, literal_column,\
Float, TIMESTAMP, Numeric, Date, Text, collate, union, except_,\
intersect, union_all, Boolean, distinct, join, outerjoin, asc, desc,\
over, subquery, case
@@ -87,6 +87,7 @@ keyed = Table('keyed', metadata,
Column('z', Integer),
)
+
class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -424,35 +425,6 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
"AS z FROM keyed) AS anon_2) AS anon_1"
)
- def test_dont_overcorrelate(self):
- self.assert_compile(select([table1], from_obj=[table1,
- table1.select()]),
- "SELECT mytable.myid, mytable.name, "
- "mytable.description FROM mytable, (SELECT "
- "mytable.myid AS myid, mytable.name AS "
- "name, mytable.description AS description "
- "FROM mytable)")
-
- def test_full_correlate(self):
- # intentional
- t = table('t', column('a'), column('b'))
- s = select([t.c.a]).where(t.c.a == 1).correlate(t).as_scalar()
-
- s2 = select([t.c.a, s])
- self.assert_compile(s2,
- "SELECT t.a, (SELECT t.a WHERE t.a = :a_1) AS anon_1 FROM t")
-
- # unintentional
- t2 = table('t2', column('c'), column('d'))
- s = select([t.c.a]).where(t.c.a == t2.c.d).as_scalar()
- s2 = select([t, t2, s])
- assert_raises(exc.InvalidRequestError, str, s2)
-
- # intentional again
- s = s.correlate(t, t2)
- s2 = select([t, t2, s])
- self.assert_compile(s, "SELECT t.a WHERE t.a = t2.d")
-
def test_exists(self):
s = select([table1.c.myid]).where(table1.c.myid == 5)
@@ -2239,14 +2211,14 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
assert_raises_message(
exc.CompileError,
- "Cannot compile Column object until it's 'name' is assigned.",
+ "Cannot compile Column object until its 'name' is assigned.",
str, sel2
)
sel3 = select([my_str]).as_scalar()
assert_raises_message(
exc.CompileError,
- "Cannot compile Column object until it's 'name' is assigned.",
+ "Cannot compile Column object until its 'name' is assigned.",
str, sel3
)
@@ -2488,326 +2460,6 @@ class KwargPropagationTest(fixtures.TestBase):
class CRUDTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
- def test_insert(self):
- # generic insert, will create bind params for all columns
- self.assert_compile(insert(table1),
- "INSERT INTO mytable (myid, name, description) "
- "VALUES (:myid, :name, :description)")
-
- # insert with user-supplied bind params for specific columns,
- # cols provided literally
- self.assert_compile(
- insert(table1, {
- table1.c.myid: bindparam('userid'),
- table1.c.name: bindparam('username')}),
- "INSERT INTO mytable (myid, name) VALUES (:userid, :username)")
-
- # insert with user-supplied bind params for specific columns, cols
- # provided as strings
- self.assert_compile(
- insert(table1, dict(myid=3, name='jack')),
- "INSERT INTO mytable (myid, name) VALUES (:myid, :name)"
- )
-
- # test with a tuple of params instead of named
- self.assert_compile(
- insert(table1, (3, 'jack', 'mydescription')),
- "INSERT INTO mytable (myid, name, description) VALUES "
- "(:myid, :name, :description)",
- checkparams={
- 'myid': 3, 'name': 'jack', 'description': 'mydescription'}
- )
-
- self.assert_compile(
- insert(table1, values={
- table1.c.myid: bindparam('userid')
- }).values(
- {table1.c.name: bindparam('username')}),
- "INSERT INTO mytable (myid, name) VALUES (:userid, :username)"
- )
-
- self.assert_compile(
- insert(table1, values=dict(myid=func.lala())),
- "INSERT INTO mytable (myid) VALUES (lala())")
-
- def test_insert_prefix(self):
- stmt = table1.insert().prefix_with("A", "B", dialect="mysql").\
- prefix_with("C", "D")
- self.assert_compile(stmt,
- "INSERT A B C D INTO mytable (myid, name, description) "
- "VALUES (%s, %s, %s)", dialect=mysql.dialect()
- )
- self.assert_compile(stmt,
- "INSERT C D INTO mytable (myid, name, description) "
- "VALUES (:myid, :name, :description)")
-
- def test_inline_default_insert(self):
- metadata = MetaData()
- table = Table('sometable', metadata,
- Column('id', Integer, primary_key=True),
- Column('foo', Integer, default=func.foobar()))
- self.assert_compile(
- table.insert(values={}, inline=True),
- "INSERT INTO sometable (foo) VALUES (foobar())")
- self.assert_compile(
- table.insert(inline=True),
- "INSERT INTO sometable (foo) VALUES (foobar())", params={})
-
- def test_insert_returning_not_in_default(self):
- stmt = table1.insert().returning(table1.c.myid)
- assert_raises_message(
- exc.CompileError,
- "RETURNING is not supported by this dialect's statement compiler.",
- stmt.compile
- )
-
- def test_empty_insert_default(self):
- stmt = table1.insert().values({}) # hide from 2to3
- self.assert_compile(stmt, "INSERT INTO mytable () VALUES ()")
-
- def test_empty_insert_default_values(self):
- stmt = table1.insert().values({}) # hide from 2to3
- dialect = default.DefaultDialect()
- dialect.supports_empty_insert = dialect.supports_default_values = True
- self.assert_compile(stmt, "INSERT INTO mytable DEFAULT VALUES",
- dialect=dialect)
-
- def test_empty_insert_not_supported(self):
- stmt = table1.insert().values({}) # hide from 2to3
- dialect = default.DefaultDialect()
- dialect.supports_empty_insert = dialect.supports_default_values = False
- assert_raises_message(
- exc.CompileError,
- "The 'default' dialect with current database version "
- "settings does not support empty inserts.",
- stmt.compile, dialect=dialect
- )
-
- def test_multivalues_insert_not_supported(self):
- stmt = table1.insert().values([{"myid": 1}, {"myid": 2}])
- dialect = default.DefaultDialect()
- assert_raises_message(
- exc.CompileError,
- "The 'default' dialect with current database version settings "
- "does not support in-place multirow inserts.",
- stmt.compile, dialect=dialect
- )
-
- def test_multivalues_insert_named(self):
- stmt = table1.insert().\
- values([{"myid": 1, "name": 'a', "description": 'b'},
- {"myid": 2, "name": 'c', "description": 'd'},
- {"myid": 3, "name": 'e', "description": 'f'}
- ])
-
- result = "INSERT INTO mytable (myid, name, description) VALUES " \
- "(:myid_0, :name_0, :description_0), " \
- "(:myid_1, :name_1, :description_1), " \
- "(:myid_2, :name_2, :description_2)"
-
- dialect = default.DefaultDialect()
- dialect.supports_multivalues_insert = True
- self.assert_compile(stmt, result,
- checkparams={
- 'description_2': 'f', 'name_2': 'e',
- 'name_0': 'a', 'name_1': 'c', 'myid_2': 3,
- 'description_0': 'b', 'myid_0': 1,
- 'myid_1': 2, 'description_1': 'd'
- },
- dialect=dialect)
-
- def test_multivalues_insert_positional(self):
- stmt = table1.insert().\
- values([{"myid": 1, "name": 'a', "description": 'b'},
- {"myid": 2, "name": 'c', "description": 'd'},
- {"myid": 3, "name": 'e', "description": 'f'}
- ])
-
- result = "INSERT INTO mytable (myid, name, description) VALUES " \
- "(%s, %s, %s), " \
- "(%s, %s, %s), " \
- "(%s, %s, %s)" \
-
- dialect = default.DefaultDialect()
- dialect.supports_multivalues_insert = True
- dialect.paramstyle = "format"
- dialect.positional = True
- self.assert_compile(stmt, result,
- checkpositional=(1, 'a', 'b', 2, 'c', 'd', 3, 'e', 'f'),
- dialect=dialect)
-
- def test_multirow_inline_default_insert(self):
- metadata = MetaData()
- table = Table('sometable', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String),
- Column('foo', Integer, default=func.foobar()))
-
- stmt = table.insert().\
- values([
- {"id": 1, "data": "data1"},
- {"id": 2, "data": "data2", "foo": "plainfoo"},
- {"id": 3, "data": "data3"},
- ])
- result = "INSERT INTO sometable (id, data, foo) VALUES "\
- "(%(id_0)s, %(data_0)s, foobar()), "\
- "(%(id_1)s, %(data_1)s, %(foo_1)s), "\
- "(%(id_2)s, %(data_2)s, foobar())"
-
- self.assert_compile(stmt, result,
- checkparams={'data_2': 'data3', 'id_0': 1, 'id_2': 3,
- 'foo_1': 'plainfoo', 'data_1': 'data2',
- 'id_1': 2, 'data_0': 'data1'},
- dialect=postgresql.dialect())
-
- def test_multirow_server_default_insert(self):
- metadata = MetaData()
- table = Table('sometable', metadata,
- Column('id', Integer, primary_key=True),
- Column('data', String),
- Column('foo', Integer, server_default=func.foobar()))
-
- stmt = table.insert().\
- values([
- {"id": 1, "data": "data1"},
- {"id": 2, "data": "data2", "foo": "plainfoo"},
- {"id": 3, "data": "data3"},
- ])
- result = "INSERT INTO sometable (id, data) VALUES "\
- "(%(id_0)s, %(data_0)s), "\
- "(%(id_1)s, %(data_1)s), "\
- "(%(id_2)s, %(data_2)s)"
-
- self.assert_compile(stmt, result,
- checkparams={'data_2': 'data3', 'id_0': 1, 'id_2': 3,
- 'data_1': 'data2',
- 'id_1': 2, 'data_0': 'data1'},
- dialect=postgresql.dialect())
-
- stmt = table.insert().\
- values([
- {"id": 1, "data": "data1", "foo": "plainfoo"},
- {"id": 2, "data": "data2"},
- {"id": 3, "data": "data3", "foo": "otherfoo"},
- ])
-
- # note the effect here is that the first set of params
- # takes effect for the rest of them, when one is absent
- result = "INSERT INTO sometable (id, data, foo) VALUES "\
- "(%(id_0)s, %(data_0)s, %(foo_0)s), "\
- "(%(id_1)s, %(data_1)s, %(foo_0)s), "\
- "(%(id_2)s, %(data_2)s, %(foo_2)s)"
-
- self.assert_compile(stmt, result,
- checkparams={'data_2': 'data3', 'id_0': 1, 'id_2': 3,
- 'data_1': 'data2',
- "foo_0": "plainfoo",
- "foo_2": "otherfoo",
- 'id_1': 2, 'data_0': 'data1'},
- dialect=postgresql.dialect())
-
- def test_update(self):
- self.assert_compile(
- update(table1, table1.c.myid == 7),
- "UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
- params={table1.c.name: 'fred'})
- self.assert_compile(
- table1.update().where(table1.c.myid == 7).
- values({table1.c.myid: 5}),
- "UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
- checkparams={'myid': 5, 'myid_1': 7})
- self.assert_compile(
- update(table1, table1.c.myid == 7),
- "UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1",
- params={'name': 'fred'})
- self.assert_compile(
- update(table1, values={table1.c.name: table1.c.myid}),
- "UPDATE mytable SET name=mytable.myid")
- self.assert_compile(
- update(table1,
- whereclause=table1.c.name == bindparam('crit'),
- values={table1.c.name: 'hi'}),
- "UPDATE mytable SET name=:name WHERE mytable.name = :crit",
- params={'crit': 'notthere'},
- checkparams={'crit': 'notthere', 'name': 'hi'})
- self.assert_compile(
- update(table1, table1.c.myid == 12,
- values={table1.c.name: table1.c.myid}),
- "UPDATE mytable SET name=mytable.myid, description="
- ":description WHERE mytable.myid = :myid_1",
- params={'description': 'test'},
- checkparams={'description': 'test', 'myid_1': 12})
- self.assert_compile(
- update(table1, table1.c.myid == 12,
- values={table1.c.myid: 9}),
- "UPDATE mytable SET myid=:myid, description=:description "
- "WHERE mytable.myid = :myid_1",
- params={'myid_1': 12, 'myid': 9, 'description': 'test'})
- self.assert_compile(
- update(table1, table1.c.myid == 12),
- "UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1",
- params={'myid': 18}, checkparams={'myid': 18, 'myid_1': 12})
- s = table1.update(table1.c.myid == 12, values={table1.c.name: 'lala'})
- c = s.compile(column_keys=['id', 'name'])
- self.assert_compile(
- update(table1, table1.c.myid == 12,
- values={table1.c.name: table1.c.myid}
- ).values({table1.c.name: table1.c.name + 'foo'}),
- "UPDATE mytable SET name=(mytable.name || :name_1), "
- "description=:description WHERE mytable.myid = :myid_1",
- params={'description': 'test'})
- eq_(str(s), str(c))
-
- self.assert_compile(update(table1,
- (table1.c.myid == func.hoho(4)) &
- (table1.c.name == literal('foo') +
- table1.c.name + literal('lala')),
- values={
- table1.c.name: table1.c.name + "lala",
- table1.c.myid: func.do_stuff(table1.c.myid, literal('hoho'))
- }), "UPDATE mytable SET myid=do_stuff(mytable.myid, :param_1), "
- "name=(mytable.name || :name_1) "
- "WHERE mytable.myid = hoho(:hoho_1) "
- "AND mytable.name = :param_2 || "
- "mytable.name || :param_3")
-
- def test_update_prefix(self):
- stmt = table1.update().prefix_with("A", "B", dialect="mysql").\
- prefix_with("C", "D")
- self.assert_compile(stmt,
- "UPDATE A B C D mytable SET myid=%s, name=%s, description=%s",
- dialect=mysql.dialect()
- )
- self.assert_compile(stmt,
- "UPDATE C D mytable SET myid=:myid, name=:name, "
- "description=:description")
-
- def test_aliased_update(self):
- talias1 = table1.alias('t1')
- self.assert_compile(
- update(talias1, talias1.c.myid == 7),
- "UPDATE mytable AS t1 SET name=:name WHERE t1.myid = :myid_1",
- params={table1.c.name: 'fred'})
- self.assert_compile(
- update(talias1, table1.c.myid == 7),
- "UPDATE mytable AS t1 SET name=:name FROM "
- "mytable WHERE mytable.myid = :myid_1",
- params={table1.c.name: 'fred'})
-
- def test_update_to_expression(self):
- """test update from an expression.
-
- this logic is triggered currently by a left side that doesn't
- have a key. The current supported use case is updating the index
- of a Postgresql ARRAY type.
-
- """
- expr = func.foo(table1.c.myid)
- assert not hasattr(expr, "key")
- self.assert_compile(
- table1.update().values({expr: 'bar'}),
- "UPDATE mytable SET foo(myid)=:param_1"
- )
def test_correlated_update(self):
# test against a straight text subquery
@@ -2880,51 +2532,6 @@ class CRUDTest(fixtures.TestBase, AssertsCompiledSQL):
"AND myothertable.othername = mytable_1.name",
dialect=mssql.dialect())
- def test_delete(self):
- self.assert_compile(
- delete(table1, table1.c.myid == 7),
- "DELETE FROM mytable WHERE mytable.myid = :myid_1")
- self.assert_compile(
- table1.delete().where(table1.c.myid == 7),
- "DELETE FROM mytable WHERE mytable.myid = :myid_1")
- self.assert_compile(
- table1.delete().where(table1.c.myid == 7).\
- where(table1.c.name == 'somename'),
- "DELETE FROM mytable WHERE mytable.myid = :myid_1 "
- "AND mytable.name = :name_1")
-
- def test_delete_prefix(self):
- stmt = table1.delete().prefix_with("A", "B", dialect="mysql").\
- prefix_with("C", "D")
- self.assert_compile(stmt,
- "DELETE A B C D FROM mytable",
- dialect=mysql.dialect()
- )
- self.assert_compile(stmt,
- "DELETE C D FROM mytable")
-
- def test_aliased_delete(self):
- talias1 = table1.alias('t1')
- self.assert_compile(
- delete(talias1).where(talias1.c.myid == 7),
- "DELETE FROM mytable AS t1 WHERE t1.myid = :myid_1")
-
- def test_correlated_delete(self):
- # test a non-correlated WHERE clause
- s = select([table2.c.othername], table2.c.otherid == 7)
- u = delete(table1, table1.c.name == s)
- self.assert_compile(u, "DELETE FROM mytable WHERE mytable.name = "
- "(SELECT myothertable.othername FROM myothertable "
- "WHERE myothertable.otherid = :otherid_1)")
-
- # test one that is actually correlated...
- s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
- u = table1.delete(table1.c.name == s)
- self.assert_compile(u,
- "DELETE FROM mytable WHERE mytable.name = (SELECT "
- "myothertable.othername FROM myothertable WHERE "
- "myothertable.otherid = mytable.myid)")
-
def test_binds_that_match_columns(self):
"""test bind params named after column names
replace the normal SET/VALUES generation."""
@@ -3189,6 +2796,246 @@ class SchemaTest(fixtures.TestBase, AssertsCompiledSQL):
"(:rem_id, :datatype_id, :value)")
+class CorrelateTest(fixtures.TestBase, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_dont_overcorrelate(self):
+ self.assert_compile(select([table1], from_obj=[table1,
+ table1.select()]),
+ "SELECT mytable.myid, mytable.name, "
+ "mytable.description FROM mytable, (SELECT "
+ "mytable.myid AS myid, mytable.name AS "
+ "name, mytable.description AS description "
+ "FROM mytable)")
+
+ def _fixture(self):
+ t1 = table('t1', column('a'))
+ t2 = table('t2', column('a'))
+ return t1, t2, select([t1]).where(t1.c.a == t2.c.a)
+
+ def _assert_where_correlated(self, stmt):
+ self.assert_compile(
+ stmt,
+ "SELECT t2.a FROM t2 WHERE t2.a = "
+ "(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
+
+ def _assert_where_all_correlated(self, stmt):
+ self.assert_compile(
+ stmt,
+ "SELECT t1.a, t2.a FROM t1, t2 WHERE t2.a = "
+ "(SELECT t1.a WHERE t1.a = t2.a)")
+
+ def _assert_where_backwards_correlated(self, stmt):
+ self.assert_compile(
+ stmt,
+ "SELECT t2.a FROM t2 WHERE t2.a = "
+ "(SELECT t1.a FROM t2 WHERE t1.a = t2.a)")
+
+ def _assert_column_correlated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a, (SELECT t1.a FROM t1 WHERE t1.a = t2.a) "
+ "AS anon_1 FROM t2")
+
+ def _assert_column_all_correlated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t1.a, t2.a, "
+ "(SELECT t1.a WHERE t1.a = t2.a) AS anon_1 FROM t1, t2")
+
+ def _assert_column_backwards_correlated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a, (SELECT t1.a FROM t2 WHERE t1.a = t2.a) "
+ "AS anon_1 FROM t2")
+
+ def _assert_having_correlated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a FROM t2 HAVING t2.a = "
+ "(SELECT t1.a FROM t1 WHERE t1.a = t2.a)")
+
+ def _assert_from_uncorrelated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a, anon_1.a FROM t2, "
+ "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
+
+ def _assert_from_all_uncorrelated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t1.a, t2.a, anon_1.a FROM t1, t2, "
+ "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a) AS anon_1")
+
+ def _assert_where_uncorrelated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a FROM t2 WHERE t2.a = "
+ "(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
+
+ def _assert_column_uncorrelated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a, (SELECT t1.a FROM t1, t2 "
+ "WHERE t1.a = t2.a) AS anon_1 FROM t2")
+
+ def _assert_having_uncorrelated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t2.a FROM t2 HAVING t2.a = "
+ "(SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a)")
+
+ def _assert_where_single_full_correlated(self, stmt):
+ self.assert_compile(stmt,
+ "SELECT t1.a FROM t1 WHERE t1.a = (SELECT t1.a)")
+
+ def test_correlate_semiauto_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_correlated(
+ select([t2]).where(t2.c.a == s1.correlate(t2)))
+
+ def test_correlate_semiauto_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_correlated(
+ select([t2, s1.correlate(t2).as_scalar()]))
+
+ def test_correlate_semiauto_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_uncorrelated(
+ select([t2, s1.correlate(t2).alias()]))
+
+ def test_correlate_semiauto_having(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_having_correlated(
+ select([t2]).having(t2.c.a == s1.correlate(t2)))
+
+ def test_correlate_except_inclusion_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_correlated(
+ select([t2]).where(t2.c.a == s1.correlate_except(t1)))
+
+ def test_correlate_except_exclusion_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_backwards_correlated(
+ select([t2]).where(t2.c.a == s1.correlate_except(t2)))
+
+ def test_correlate_except_inclusion_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_correlated(
+ select([t2, s1.correlate_except(t1).as_scalar()]))
+
+ def test_correlate_except_exclusion_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_backwards_correlated(
+ select([t2, s1.correlate_except(t2).as_scalar()]))
+
+ def test_correlate_except_inclusion_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_uncorrelated(
+ select([t2, s1.correlate_except(t1).alias()]))
+
+ def test_correlate_except_exclusion_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_uncorrelated(
+ select([t2, s1.correlate_except(t2).alias()]))
+
+ def test_correlate_except_having(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_having_correlated(
+ select([t2]).having(t2.c.a == s1.correlate_except(t1)))
+
+ def test_correlate_auto_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_correlated(
+ select([t2]).where(t2.c.a == s1))
+
+ def test_correlate_auto_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_correlated(
+ select([t2, s1.as_scalar()]))
+
+ def test_correlate_auto_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_uncorrelated(
+ select([t2, s1.alias()]))
+
+ def test_correlate_auto_having(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_having_correlated(
+ select([t2]).having(t2.c.a == s1))
+
+ def test_correlate_disabled_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_uncorrelated(
+ select([t2]).where(t2.c.a == s1.correlate(None)))
+
+ def test_correlate_disabled_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_uncorrelated(
+ select([t2, s1.correlate(None).as_scalar()]))
+
+ def test_correlate_disabled_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_uncorrelated(
+ select([t2, s1.correlate(None).alias()]))
+
+ def test_correlate_disabled_having(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_having_uncorrelated(
+ select([t2]).having(t2.c.a == s1.correlate(None)))
+
+ def test_correlate_all_where(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_where_all_correlated(
+ select([t1, t2]).where(t2.c.a == s1.correlate(t1, t2)))
+
+ def test_correlate_all_column(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_column_all_correlated(
+ select([t1, t2, s1.correlate(t1, t2).as_scalar()]))
+
+ def test_correlate_all_from(self):
+ t1, t2, s1 = self._fixture()
+ self._assert_from_all_uncorrelated(
+ select([t1, t2, s1.correlate(t1, t2).alias()]))
+
+ def test_correlate_where_all_unintentional(self):
+ t1, t2, s1 = self._fixture()
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "returned no FROM clauses due to auto-correlation",
+ select([t1, t2]).where(t2.c.a == s1).compile
+ )
+
+ def test_correlate_from_all_ok(self):
+ t1, t2, s1 = self._fixture()
+ self.assert_compile(
+ select([t1, t2, s1]),
+ "SELECT t1.a, t2.a, a FROM t1, t2, "
+ "(SELECT t1.a AS a FROM t1, t2 WHERE t1.a = t2.a)"
+ )
+
+ def test_correlate_auto_where_singlefrom(self):
+ t1, t2, s1 = self._fixture()
+ s = select([t1.c.a])
+ s2 = select([t1]).where(t1.c.a == s)
+ self.assert_compile(s2,
+ "SELECT t1.a FROM t1 WHERE t1.a = "
+ "(SELECT t1.a FROM t1)")
+
+ def test_correlate_semiauto_where_singlefrom(self):
+ t1, t2, s1 = self._fixture()
+
+ s = select([t1.c.a])
+
+ s2 = select([t1]).where(t1.c.a == s.correlate(t1))
+ self._assert_where_single_full_correlated(s2)
+
+ def test_correlate_except_semiauto_where_singlefrom(self):
+ t1, t2, s1 = self._fixture()
+
+ s = select([t1.c.a])
+
+ s2 = select([t1]).where(t1.c.a == s.correlate_except(t2))
+ self._assert_where_single_full_correlated(s2)
+
+ def test_correlate_alone_noeffect(self):
+ # new as of #2668
+ t1, t2, s1 = self._fixture()
+ self.assert_compile(s1.correlate(t1, t2),
+ "SELECT t1.a FROM t1, t2 WHERE t1.a = t2.a")
+
class CoercionTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -3315,4 +3162,4 @@ class ResultMapTest(fixtures.TestBase):
)
is_(
comp.result_map['t1_a'][1][2], t1.c.a
- ) \ No newline at end of file
+ )
diff --git a/test/sql/test_constraints.py b/test/sql/test_constraints.py
index ab294e1eb..026095c3b 100644
--- a/test/sql/test_constraints.py
+++ b/test/sql/test_constraints.py
@@ -7,6 +7,7 @@ from sqlalchemy import testing
from sqlalchemy.testing import engines
from sqlalchemy.testing import eq_
from sqlalchemy.testing.assertsql import AllOf, RegexSQL, ExactSQL, CompiledSQL
+from sqlalchemy.sql import table, column
class ConstraintGenTest(fixtures.TestBase, AssertsExecutionResults):
__dialect__ = 'default'
@@ -753,6 +754,18 @@ class ConstraintAPITest(fixtures.TestBase):
c = Index('foo', t.c.a)
assert c in t.indexes
+ def test_auto_append_lowercase_table(self):
+ t = table('t', column('a'))
+ t2 = table('t2', column('a'))
+ for c in (
+ UniqueConstraint(t.c.a),
+ CheckConstraint(t.c.a > 5),
+ ForeignKeyConstraint([t.c.a], [t2.c.a]),
+ PrimaryKeyConstraint(t.c.a),
+ Index('foo', t.c.a)
+ ):
+ assert True
+
def test_tometadata_ok(self):
m = MetaData()
diff --git a/test/sql/test_delete.py b/test/sql/test_delete.py
new file mode 100644
index 000000000..b56731515
--- /dev/null
+++ b/test/sql/test_delete.py
@@ -0,0 +1,86 @@
+#! coding:utf-8
+
+from sqlalchemy import Column, Integer, String, Table, delete, select
+from sqlalchemy.dialects import mysql
+from sqlalchemy.testing import AssertsCompiledSQL, fixtures
+
+
+class _DeleteTestBase(object):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table('mytable', metadata,
+ Column('myid', Integer),
+ Column('name', String(30)),
+ Column('description', String(50)))
+ Table('myothertable', metadata,
+ Column('otherid', Integer),
+ Column('othername', String(30)))
+
+
+class DeleteTest(_DeleteTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_delete(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ delete(table1, table1.c.myid == 7),
+ 'DELETE FROM mytable WHERE mytable.myid = :myid_1')
+
+ self.assert_compile(
+ table1.delete().where(table1.c.myid == 7),
+ 'DELETE FROM mytable WHERE mytable.myid = :myid_1')
+
+ self.assert_compile(
+ table1.delete().
+ where(table1.c.myid == 7).
+ where(table1.c.name == 'somename'),
+ 'DELETE FROM mytable '
+ 'WHERE mytable.myid = :myid_1 '
+ 'AND mytable.name = :name_1')
+
+ def test_prefix_with(self):
+ table1 = self.tables.mytable
+
+ stmt = table1.delete().\
+ prefix_with('A', 'B', dialect='mysql').\
+ prefix_with('C', 'D')
+
+ self.assert_compile(stmt,
+ 'DELETE C D FROM mytable')
+
+ self.assert_compile(stmt,
+ 'DELETE A B C D FROM mytable',
+ dialect=mysql.dialect())
+
+ def test_alias(self):
+ table1 = self.tables.mytable
+
+ talias1 = table1.alias('t1')
+ stmt = delete(talias1).where(talias1.c.myid == 7)
+
+ self.assert_compile(stmt,
+ 'DELETE FROM mytable AS t1 WHERE t1.myid = :myid_1')
+
+ def test_correlated(self):
+ table1, table2 = self.tables.mytable, self.tables.myothertable
+
+ # test a non-correlated WHERE clause
+ s = select([table2.c.othername], table2.c.otherid == 7)
+ self.assert_compile(delete(table1, table1.c.name == s),
+ 'DELETE FROM mytable '
+ 'WHERE mytable.name = ('
+ 'SELECT myothertable.othername '
+ 'FROM myothertable '
+ 'WHERE myothertable.otherid = :otherid_1'
+ ')')
+
+ # test one that is actually correlated...
+ s = select([table2.c.othername], table2.c.otherid == table1.c.myid)
+ self.assert_compile(table1.delete(table1.c.name == s),
+ 'DELETE FROM mytable '
+ 'WHERE mytable.name = ('
+ 'SELECT myothertable.othername '
+ 'FROM myothertable '
+ 'WHERE myothertable.otherid = mytable.myid'
+ ')')
diff --git a/test/sql/test_functions.py b/test/sql/test_functions.py
index ae8e28e24..b325b7763 100644
--- a/test/sql/test_functions.py
+++ b/test/sql/test_functions.py
@@ -8,7 +8,7 @@ from sqlalchemy.testing.engines import all_dialects
from sqlalchemy import types as sqltypes
from sqlalchemy.sql import functions
from sqlalchemy.sql.functions import GenericFunction
-from sqlalchemy.util.compat import decimal
+import decimal
from sqlalchemy import testing
from sqlalchemy.testing import fixtures, AssertsCompiledSQL, engines
from sqlalchemy.dialects import sqlite, postgresql, mysql, oracle
diff --git a/test/sql/test_generative.py b/test/sql/test_generative.py
index e868cbe88..8b2abef0e 100644
--- a/test/sql/test_generative.py
+++ b/test/sql/test_generative.py
@@ -590,13 +590,18 @@ class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
def test_correlated_select(self):
s = select(['*'], t1.c.col1 == t2.c.col1,
from_obj=[t1, t2]).correlate(t2)
+
class Vis(CloningVisitor):
def visit_select(self, select):
select.append_whereclause(t1.c.col2 == 7)
- self.assert_compile(Vis().traverse(s),
- "SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
- "AND table1.col2 = :col2_1")
+ self.assert_compile(
+ select([t2]).where(t2.c.col1 == Vis().traverse(s)),
+ "SELECT table2.col1, table2.col2, table2.col3 "
+ "FROM table2 WHERE table2.col1 = "
+ "(SELECT * FROM table1 WHERE table1.col1 = table2.col1 "
+ "AND table1.col2 = :col2_1)"
+ )
def test_this_thing(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
@@ -616,35 +621,49 @@ class ClauseTest(fixtures.TestBase, AssertsCompiledSQL):
'AS table1_1 WHERE table1_1.col1 = '
':col1_1) AS anon_1')
- def test_select_fromtwice(self):
+ def test_select_fromtwice_one(self):
t1a = t1.alias()
- s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1)
+ s = select([1], t1.c.col1 == t1a.c.col1, from_obj=t1a).correlate(t1a)
+ s = select([t1]).where(t1.c.col1 == s)
self.assert_compile(s,
- 'SELECT 1 FROM table1 AS table1_1 WHERE '
- 'table1.col1 = table1_1.col1')
-
+ "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
+ "WHERE table1.col1 = "
+ "(SELECT 1 FROM table1, table1 AS table1_1 "
+ "WHERE table1.col1 = table1_1.col1)"
+ )
s = CloningVisitor().traverse(s)
self.assert_compile(s,
- 'SELECT 1 FROM table1 AS table1_1 WHERE '
- 'table1.col1 = table1_1.col1')
+ "SELECT table1.col1, table1.col2, table1.col3 FROM table1 "
+ "WHERE table1.col1 = "
+ "(SELECT 1 FROM table1, table1 AS table1_1 "
+ "WHERE table1.col1 = table1_1.col1)")
+ def test_select_fromtwice_two(self):
s = select([t1]).where(t1.c.col1 == 'foo').alias()
s2 = select([1], t1.c.col1 == s.c.col1, from_obj=s).correlate(t1)
- self.assert_compile(s2,
- 'SELECT 1 FROM (SELECT table1.col1 AS '
- 'col1, table1.col2 AS col2, table1.col3 AS '
- 'col3 FROM table1 WHERE table1.col1 = '
- ':col1_1) AS anon_1 WHERE table1.col1 = '
- 'anon_1.col1')
- s2 = ReplacingCloningVisitor().traverse(s2)
- self.assert_compile(s2,
- 'SELECT 1 FROM (SELECT table1.col1 AS '
- 'col1, table1.col2 AS col2, table1.col3 AS '
- 'col3 FROM table1 WHERE table1.col1 = '
- ':col1_1) AS anon_1 WHERE table1.col1 = '
- 'anon_1.col1')
+ s3 = select([t1]).where(t1.c.col1 == s2)
+ self.assert_compile(s3,
+ "SELECT table1.col1, table1.col2, table1.col3 "
+ "FROM table1 WHERE table1.col1 = "
+ "(SELECT 1 FROM "
+ "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
+ "table1.col3 AS col3 FROM table1 "
+ "WHERE table1.col1 = :col1_1) "
+ "AS anon_1 WHERE table1.col1 = anon_1.col1)"
+ )
+
+ s4 = ReplacingCloningVisitor().traverse(s3)
+ self.assert_compile(s4,
+ "SELECT table1.col1, table1.col2, table1.col3 "
+ "FROM table1 WHERE table1.col1 = "
+ "(SELECT 1 FROM "
+ "(SELECT table1.col1 AS col1, table1.col2 AS col2, "
+ "table1.col3 AS col3 FROM table1 "
+ "WHERE table1.col1 = :col1_1) "
+ "AS anon_1 WHERE table1.col1 = anon_1.col1)"
+ )
class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = 'default'
@@ -763,67 +782,125 @@ class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
'FROM addresses WHERE users_1.id = '
'addresses.user_id')
- def test_table_to_alias(self):
-
+ def test_table_to_alias_1(self):
t1alias = t1.alias('t1alias')
vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
assert list(_from_objects(ff)) == [t1alias]
+ def test_table_to_alias_2(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], from_obj=[t1])),
'SELECT * FROM table1 AS t1alias')
+
+ def test_table_to_alias_3(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(select(['*'], t1.c.col1 == t2.c.col2),
'SELECT * FROM table1, table2 WHERE '
'table1.col1 = table2.col2')
+
+ def test_table_to_alias_4(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2)),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
+
+ def test_table_to_alias_5(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2, from_obj=[t1, t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'WHERE t1alias.col1 = table2.col2')
- self.assert_compile(vis.traverse(select(['*'], t1.c.col1
- == t2.c.col2, from_obj=[t1,
- t2]).correlate(t1)),
- 'SELECT * FROM table2 WHERE t1alias.col1 = '
- 'table2.col2')
- self.assert_compile(vis.traverse(select(['*'], t1.c.col1
- == t2.c.col2, from_obj=[t1,
- t2]).correlate(t2)),
- 'SELECT * FROM table1 AS t1alias WHERE '
- 't1alias.col1 = table2.col2')
+
+ def test_table_to_alias_6(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
+ self.assert_compile(
+ select([t1alias, t2]).where(t1alias.c.col1 ==
+ vis.traverse(select(['*'],
+ t1.c.col1 == t2.c.col2,
+ from_obj=[t1, t2]).correlate(t1))),
+ "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
+ "table2.col1, table2.col2, table2.col3 "
+ "FROM table1 AS t1alias, table2 WHERE t1alias.col1 = "
+ "(SELECT * FROM table2 WHERE t1alias.col1 = table2.col2)"
+ )
+
+ def test_table_to_alias_7(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
+ self.assert_compile(
+ select([t1alias, t2]).where(t1alias.c.col1 ==
+ vis.traverse(select(['*'],
+ t1.c.col1 == t2.c.col2,
+ from_obj=[t1, t2]).correlate(t2))),
+ "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
+ "table2.col1, table2.col2, table2.col3 "
+ "FROM table1 AS t1alias, table2 "
+ "WHERE t1alias.col1 = "
+ "(SELECT * FROM table1 AS t1alias "
+ "WHERE t1alias.col1 = table2.col2)")
+
+ def test_table_to_alias_8(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(case([(t1.c.col1 == 5,
t1.c.col2)], else_=t1.c.col1)),
'CASE WHEN (t1alias.col1 = :col1_1) THEN '
't1alias.col2 ELSE t1alias.col1 END')
+
+ def test_table_to_alias_9(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(case([(5, t1.c.col2)],
value=t1.c.col1, else_=t1.c.col1)),
'CASE t1alias.col1 WHEN :param_1 THEN '
't1alias.col2 ELSE t1alias.col1 END')
+ def test_table_to_alias_10(self):
s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
+
+ def test_table_to_alias_11(self):
+ s = select(['*'], from_obj=[t1]).alias('foo')
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
self.assert_compile(vis.traverse(s.select()),
'SELECT foo.* FROM (SELECT * FROM table1 '
'AS t1alias) AS foo')
+
+ def test_table_to_alias_12(self):
+ s = select(['*'], from_obj=[t1]).alias('foo')
self.assert_compile(s.select(),
'SELECT foo.* FROM (SELECT * FROM table1) '
'AS foo')
+
+ def test_table_to_alias_13(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
ff = vis.traverse(func.count(t1.c.col1).label('foo'))
self.assert_compile(select([ff]),
'SELECT count(t1alias.col1) AS foo FROM '
'table1 AS t1alias')
assert list(_from_objects(ff)) == [t1alias]
+ #def test_table_to_alias_2(self):
# TODO: self.assert_compile(vis.traverse(select([func.count(t1.c
# .col1).l abel('foo')]), clone=True), "SELECT
# count(t1alias.col1) AS foo FROM table1 AS t1alias")
+ def test_table_to_alias_14(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
t2alias = t2.alias('t2alias')
vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
@@ -831,28 +908,59 @@ class ClauseAdapterTest(fixtures.TestBase, AssertsCompiledSQL):
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
+
+ def test_table_to_alias_15(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
+ t2alias = t2.alias('t2alias')
+ vis.chain(sql_util.ClauseAdapter(t2alias))
self.assert_compile(vis.traverse(select(['*'], t1.c.col1
== t2.c.col2, from_obj=[t1, t2])),
'SELECT * FROM table1 AS t1alias, table2 '
'AS t2alias WHERE t1alias.col1 = '
't2alias.col2')
- self.assert_compile(vis.traverse(select(['*'], t1.c.col1
- == t2.c.col2, from_obj=[t1,
- t2]).correlate(t1)),
- 'SELECT * FROM table2 AS t2alias WHERE '
- 't1alias.col1 = t2alias.col2')
- self.assert_compile(vis.traverse(select(['*'], t1.c.col1
- == t2.c.col2, from_obj=[t1,
- t2]).correlate(t2)),
- 'SELECT * FROM table1 AS t1alias WHERE '
- 't1alias.col1 = t2alias.col2')
+
+ def test_table_to_alias_16(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
+ t2alias = t2.alias('t2alias')
+ vis.chain(sql_util.ClauseAdapter(t2alias))
+ self.assert_compile(
+ select([t1alias, t2alias]).where(
+ t1alias.c.col1 ==
+ vis.traverse(select(['*'],
+ t1.c.col1 == t2.c.col2,
+ from_obj=[t1, t2]).correlate(t1))
+ ),
+ "SELECT t1alias.col1, t1alias.col2, t1alias.col3, "
+ "t2alias.col1, t2alias.col2, t2alias.col3 "
+ "FROM table1 AS t1alias, table2 AS t2alias "
+ "WHERE t1alias.col1 = "
+ "(SELECT * FROM table2 AS t2alias "
+ "WHERE t1alias.col1 = t2alias.col2)"
+ )
+
+ def test_table_to_alias_17(self):
+ t1alias = t1.alias('t1alias')
+ vis = sql_util.ClauseAdapter(t1alias)
+ t2alias = t2.alias('t2alias')
+ vis.chain(sql_util.ClauseAdapter(t2alias))
+ self.assert_compile(
+ t2alias.select().where(t2alias.c.col2 ==
+ vis.traverse(select(['*'],
+ t1.c.col1 == t2.c.col2,
+ from_obj=[t1, t2]).correlate(t2))),
+ 'SELECT t2alias.col1, t2alias.col2, t2alias.col3 '
+ 'FROM table2 AS t2alias WHERE t2alias.col2 = '
+ '(SELECT * FROM table1 AS t1alias WHERE '
+ 't1alias.col1 = t2alias.col2)')
def test_include_exclude(self):
m = MetaData()
- a=Table( 'a',m,
- Column( 'id', Integer, primary_key=True),
- Column( 'xxx_id', Integer,
- ForeignKey( 'a.id', name='adf',use_alter=True )
+ a = Table('a', m,
+ Column('id', Integer, primary_key=True),
+ Column('xxx_id', Integer,
+ ForeignKey('a.id', name='adf', use_alter=True)
)
)
@@ -1167,93 +1275,6 @@ class SelectTest(fixtures.TestBase, AssertsCompiledSQL):
'SELECT table1.col1, table1.col2, '
'table1.col3 FROM table1')
- def test_correlation(self):
- s = select([t2], t1.c.col1 == t2.c.col1)
- self.assert_compile(s,
- 'SELECT table2.col1, table2.col2, '
- 'table2.col3 FROM table2, table1 WHERE '
- 'table1.col1 = table2.col1')
- s2 = select([t1], t1.c.col2 == s.c.col2)
- # dont correlate in a FROM entry
- self.assert_compile(s2,
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2, table1 WHERE '
- 'table1.col1 = table2.col1) WHERE '
- 'table1.col2 = col2')
- s3 = s.correlate(None)
- self.assert_compile(select([t1], t1.c.col2 == s3.c.col2),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2, table1 '
- 'WHERE table1.col1 = table2.col1) WHERE '
- 'table1.col2 = col2')
- # dont correlate in a FROM entry
- self.assert_compile(select([t1], t1.c.col2 == s.c.col2),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2, table1 WHERE '
- 'table1.col1 = table2.col1) WHERE '
- 'table1.col2 = col2')
-
- # but correlate in a WHERE entry
- s_w = select([t2.c.col1]).where(t1.c.col1 == t2.c.col1)
- self.assert_compile(select([t1], t1.c.col2 == s_w),
- 'SELECT table1.col1, table1.col2, table1.col3 '
- 'FROM table1 WHERE table1.col2 = '
- '(SELECT table2.col1 FROM table2 '
- 'WHERE table1.col1 = table2.col1)'
- )
-
-
- s4 = s3.correlate(t1)
- self.assert_compile(select([t1], t1.c.col2 == s4.c.col2),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2 WHERE '
- 'table1.col1 = table2.col1) WHERE '
- 'table1.col2 = col2')
-
- self.assert_compile(select([t1], t1.c.col2 == s3.c.col2),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2, table1 '
- 'WHERE table1.col1 = table2.col1) WHERE '
- 'table1.col2 = col2')
-
- self.assert_compile(t1.select().where(t1.c.col1
- == 5).order_by(t1.c.col3),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1 WHERE table1.col1 '
- '= :col1_1 ORDER BY table1.col3')
-
- # dont correlate in FROM
- self.assert_compile(t1.select().select_from(select([t2],
- t2.c.col1
- == t1.c.col1)).order_by(t1.c.col3),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2, table1 WHERE '
- 'table2.col1 = table1.col1) ORDER BY '
- 'table1.col3')
-
- # still works if you actually add that table to correlate()
- s = select([t2], t2.c.col1 == t1.c.col1)
- s = s.correlate(t1).order_by(t2.c.col3)
-
- self.assert_compile(t1.select().select_from(s).order_by(t1.c.col3),
- 'SELECT table1.col1, table1.col2, '
- 'table1.col3 FROM table1, (SELECT '
- 'table2.col1 AS col1, table2.col2 AS col2, '
- 'table2.col3 AS col3 FROM table2 WHERE '
- 'table2.col1 = table1.col1 ORDER BY '
- 'table2.col3) ORDER BY table1.col3')
def test_prefixes(self):
s = t1.select()
diff --git a/test/sql/test_insert.py b/test/sql/test_insert.py
new file mode 100644
index 000000000..cd040538f
--- /dev/null
+++ b/test/sql/test_insert.py
@@ -0,0 +1,312 @@
+#! coding:utf-8
+
+from sqlalchemy import Column, Integer, MetaData, String, Table,\
+ bindparam, exc, func, insert
+from sqlalchemy.dialects import mysql, postgresql
+from sqlalchemy.engine import default
+from sqlalchemy.testing import AssertsCompiledSQL,\
+ assert_raises_message, fixtures
+
+
+class _InsertTestBase(object):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table('mytable', metadata,
+ Column('myid', Integer),
+ Column('name', String(30)),
+ Column('description', String(30)))
+ Table('myothertable', metadata,
+ Column('otherid', Integer),
+ Column('othername', String(30)))
+
+
+class InsertTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_generic_insert_bind_params_all_columns(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(insert(table1),
+ 'INSERT INTO mytable (myid, name, description) '
+ 'VALUES (:myid, :name, :description)')
+
+ def test_insert_with_values_dict(self):
+ table1 = self.tables.mytable
+
+ checkparams = {
+ 'myid': 3,
+ 'name': 'jack'
+ }
+
+ self.assert_compile(insert(table1, dict(myid=3, name='jack')),
+ 'INSERT INTO mytable (myid, name) VALUES (:myid, :name)',
+ checkparams=checkparams)
+
+ def test_insert_with_values_tuple(self):
+ table1 = self.tables.mytable
+
+ checkparams = {
+ 'myid': 3,
+ 'name': 'jack',
+ 'description': 'mydescription'
+ }
+
+ self.assert_compile(insert(table1, (3, 'jack', 'mydescription')),
+ 'INSERT INTO mytable (myid, name, description) '
+ 'VALUES (:myid, :name, :description)',
+ checkparams=checkparams)
+
+ def test_insert_with_values_func(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(insert(table1, values=dict(myid=func.lala())),
+ 'INSERT INTO mytable (myid) VALUES (lala())')
+
+ def test_insert_with_user_supplied_bind_params(self):
+ table1 = self.tables.mytable
+
+ values = {
+ table1.c.myid: bindparam('userid'),
+ table1.c.name: bindparam('username')
+ }
+
+ self.assert_compile(insert(table1, values),
+ 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)')
+
+ def test_insert_values(self):
+ table1 = self.tables.mytable
+
+ values1 = {table1.c.myid: bindparam('userid')}
+ values2 = {table1.c.name: bindparam('username')}
+
+ self.assert_compile(insert(table1, values=values1).values(values2),
+ 'INSERT INTO mytable (myid, name) VALUES (:userid, :username)')
+
+ def test_prefix_with(self):
+ table1 = self.tables.mytable
+
+ stmt = table1.insert().\
+ prefix_with('A', 'B', dialect='mysql').\
+ prefix_with('C', 'D')
+
+ self.assert_compile(stmt,
+ 'INSERT C D INTO mytable (myid, name, description) '
+ 'VALUES (:myid, :name, :description)')
+
+ self.assert_compile(stmt,
+ 'INSERT A B C D INTO mytable (myid, name, description) '
+ 'VALUES (%s, %s, %s)', dialect=mysql.dialect())
+
+ def test_inline_default(self):
+ metadata = MetaData()
+ table = Table('sometable', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('foo', Integer, default=func.foobar()))
+
+ self.assert_compile(table.insert(values={}, inline=True),
+ 'INSERT INTO sometable (foo) VALUES (foobar())')
+
+ self.assert_compile(table.insert(inline=True),
+ 'INSERT INTO sometable (foo) VALUES (foobar())', params={})
+
+ def test_insert_returning_not_in_default(self):
+ table1 = self.tables.mytable
+
+ stmt = table1.insert().returning(table1.c.myid)
+ assert_raises_message(
+ exc.CompileError,
+ "RETURNING is not supported by this dialect's statement compiler.",
+ stmt.compile,
+ dialect=default.DefaultDialect()
+ )
+
+class EmptyTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_empty_insert_default(self):
+ table1 = self.tables.mytable
+
+ stmt = table1.insert().values({}) # hide from 2to3
+ self.assert_compile(stmt, 'INSERT INTO mytable () VALUES ()')
+
+ def test_supports_empty_insert_true(self):
+ table1 = self.tables.mytable
+
+ dialect = default.DefaultDialect()
+ dialect.supports_empty_insert = dialect.supports_default_values = True
+
+ stmt = table1.insert().values({}) # hide from 2to3
+ self.assert_compile(stmt,
+ 'INSERT INTO mytable DEFAULT VALUES',
+ dialect=dialect)
+
+ def test_supports_empty_insert_false(self):
+ table1 = self.tables.mytable
+
+ dialect = default.DefaultDialect()
+ dialect.supports_empty_insert = dialect.supports_default_values = False
+
+ stmt = table1.insert().values({}) # hide from 2to3
+ assert_raises_message(exc.CompileError,
+ "The 'default' dialect with current database version "
+ "settings does not support empty inserts.",
+ stmt.compile, dialect=dialect)
+
+
+class MultirowTest(_InsertTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_not_supported(self):
+ table1 = self.tables.mytable
+
+ dialect = default.DefaultDialect()
+ stmt = table1.insert().values([{'myid': 1}, {'myid': 2}])
+ assert_raises_message(
+ exc.CompileError,
+ "The 'default' dialect with current database version settings "
+ "does not support in-place multirow inserts.",
+ stmt.compile, dialect=dialect)
+
+ def test_named(self):
+ table1 = self.tables.mytable
+
+ values = [
+ {'myid': 1, 'name': 'a', 'description': 'b'},
+ {'myid': 2, 'name': 'c', 'description': 'd'},
+ {'myid': 3, 'name': 'e', 'description': 'f'}
+ ]
+
+ checkparams = {
+ 'myid_0': 1,
+ 'myid_1': 2,
+ 'myid_2': 3,
+ 'name_0': 'a',
+ 'name_1': 'c',
+ 'name_2': 'e',
+ 'description_0': 'b',
+ 'description_1': 'd',
+ 'description_2': 'f',
+ }
+
+ dialect = default.DefaultDialect()
+ dialect.supports_multivalues_insert = True
+
+ self.assert_compile(table1.insert().values(values),
+ 'INSERT INTO mytable (myid, name, description) VALUES '
+ '(:myid_0, :name_0, :description_0), '
+ '(:myid_1, :name_1, :description_1), '
+ '(:myid_2, :name_2, :description_2)',
+ checkparams=checkparams, dialect=dialect)
+
+ def test_positional(self):
+ table1 = self.tables.mytable
+
+ values = [
+ {'myid': 1, 'name': 'a', 'description': 'b'},
+ {'myid': 2, 'name': 'c', 'description': 'd'},
+ {'myid': 3, 'name': 'e', 'description': 'f'}
+ ]
+
+ checkpositional = (1, 'a', 'b', 2, 'c', 'd', 3, 'e', 'f')
+
+ dialect = default.DefaultDialect()
+ dialect.supports_multivalues_insert = True
+ dialect.paramstyle = 'format'
+ dialect.positional = True
+
+ self.assert_compile(table1.insert().values(values),
+ 'INSERT INTO mytable (myid, name, description) VALUES '
+ '(%s, %s, %s), (%s, %s, %s), (%s, %s, %s)',
+ checkpositional=checkpositional, dialect=dialect)
+
+ def test_inline_default(self):
+ metadata = MetaData()
+ table = Table('sometable', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', String),
+ Column('foo', Integer, default=func.foobar()))
+
+ values = [
+ {'id': 1, 'data': 'data1'},
+ {'id': 2, 'data': 'data2', 'foo': 'plainfoo'},
+ {'id': 3, 'data': 'data3'},
+ ]
+
+ checkparams = {
+ 'id_0': 1,
+ 'id_1': 2,
+ 'id_2': 3,
+ 'data_0': 'data1',
+ 'data_1': 'data2',
+ 'data_2': 'data3',
+ 'foo_1': 'plainfoo',
+ }
+
+ self.assert_compile(table.insert().values(values),
+ 'INSERT INTO sometable (id, data, foo) VALUES '
+ '(%(id_0)s, %(data_0)s, foobar()), '
+ '(%(id_1)s, %(data_1)s, %(foo_1)s), '
+ '(%(id_2)s, %(data_2)s, foobar())',
+ checkparams=checkparams, dialect=postgresql.dialect())
+
+ def test_server_default(self):
+ metadata = MetaData()
+ table = Table('sometable', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', String),
+ Column('foo', Integer, server_default=func.foobar()))
+
+ values = [
+ {'id': 1, 'data': 'data1'},
+ {'id': 2, 'data': 'data2', 'foo': 'plainfoo'},
+ {'id': 3, 'data': 'data3'},
+ ]
+
+ checkparams = {
+ 'id_0': 1,
+ 'id_1': 2,
+ 'id_2': 3,
+ 'data_0': 'data1',
+ 'data_1': 'data2',
+ 'data_2': 'data3',
+ }
+
+ self.assert_compile(table.insert().values(values),
+ 'INSERT INTO sometable (id, data) VALUES '
+ '(%(id_0)s, %(data_0)s), '
+ '(%(id_1)s, %(data_1)s), '
+ '(%(id_2)s, %(data_2)s)',
+ checkparams=checkparams, dialect=postgresql.dialect())
+
+ def test_server_default_absent_value(self):
+ metadata = MetaData()
+ table = Table('sometable', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', String),
+ Column('foo', Integer, server_default=func.foobar()))
+
+ values = [
+ {'id': 1, 'data': 'data1', 'foo': 'plainfoo'},
+ {'id': 2, 'data': 'data2'},
+ {'id': 3, 'data': 'data3', 'foo': 'otherfoo'},
+ ]
+
+ checkparams = {
+ 'id_0': 1,
+ 'id_1': 2,
+ 'id_2': 3,
+ 'data_0': 'data1',
+ 'data_1': 'data2',
+ 'data_2': 'data3',
+ 'foo_0': 'plainfoo',
+ 'foo_2': 'otherfoo',
+ }
+
+ # note the effect here is that the first set of params
+ # takes effect for the rest of them, when one is absent
+ self.assert_compile(table.insert().values(values),
+ 'INSERT INTO sometable (id, data, foo) VALUES '
+ '(%(id_0)s, %(data_0)s, %(foo_0)s), '
+ '(%(id_1)s, %(data_1)s, %(foo_0)s), '
+ '(%(id_2)s, %(data_2)s, %(foo_2)s)',
+ checkparams=checkparams, dialect=postgresql.dialect())
diff --git a/test/sql/test_labels.py b/test/sql/test_labels.py
index d7cb8db4a..fd45d303f 100644
--- a/test/sql/test_labels.py
+++ b/test/sql/test_labels.py
@@ -1,19 +1,15 @@
-
-from sqlalchemy import exc as exceptions
-from sqlalchemy import testing
-from sqlalchemy.testing import engines
-from sqlalchemy import select, MetaData, Integer, or_
+from sqlalchemy import exc as exceptions, select, MetaData, Integer, or_
from sqlalchemy.engine import default
from sqlalchemy.sql import table, column
-from sqlalchemy.testing import assert_raises, eq_
-from sqlalchemy.testing import fixtures, AssertsCompiledSQL
-from sqlalchemy.testing.engines import testing_engine
+from sqlalchemy.testing import AssertsCompiledSQL, assert_raises, engines,\
+ fixtures
from sqlalchemy.testing.schema import Table, Column
IDENT_LENGTH = 29
class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
+ __dialect__ = 'DefaultDialect'
table1 = table('some_large_named_table',
column('this_is_the_primarykey_column'),
@@ -25,9 +21,6 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
column('this_is_the_data_column')
)
- __dialect__ = 'DefaultDialect'
-
-
def _length_fixture(self, length=IDENT_LENGTH, positional=False):
dialect = default.DefaultDialect()
dialect.max_identifier_length = length
@@ -60,7 +53,7 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
ta = table2.alias()
on = table1.c.this_is_the_data_column == ta.c.this_is_the_data_column
self.assert_compile(
- select([table1, ta]).select_from(table1.join(ta, on)).\
+ select([table1, ta]).select_from(table1.join(ta, on)).
where(ta.c.this_is_the_data_column == 'data3'),
'SELECT '
'some_large_named_table.this_is_the_primarykey_column, '
@@ -87,16 +80,9 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
t = Table('this_name_is_too_long_for_what_were_doing_in_this_test',
m, Column('foo', Integer))
eng = self._engine_fixture()
- for meth in (
- t.create,
- t.drop,
- m.create_all,
- m.drop_all
- ):
- assert_raises(
- exceptions.IdentifierError,
- meth, eng
- )
+ methods = (t.create, t.drop, m.create_all, m.drop_all)
+ for meth in methods:
+ assert_raises(exceptions.IdentifierError, meth, eng)
def _assert_labeled_table1_select(self, s):
table1 = self.table1
@@ -263,7 +249,9 @@ class MaxIdentTest(fixtures.TestBase, AssertsCompiledSQL):
dialect=self._length_fixture(positional=True)
)
+
class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
+ __dialect__ = 'DefaultDialect'
table1 = table('some_large_named_table',
column('this_is_the_primarykey_column'),
@@ -275,8 +263,6 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
column('this_is_the_data_column')
)
- __dialect__ = 'DefaultDialect'
-
def test_adjustable_1(self):
table1 = self.table1
q = table1.select(
@@ -404,27 +390,27 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
'AS _1',
dialect=compile_dialect)
-
def test_adjustable_result_schema_column_1(self):
table1 = self.table1
+
q = table1.select(
table1.c.this_is_the_primarykey_column == 4).apply_labels().\
alias('foo')
- dialect = default.DefaultDialect(label_length=10)
+ dialect = default.DefaultDialect(label_length=10)
compiled = q.compile(dialect=dialect)
+
assert set(compiled.result_map['some_2'][1]).issuperset([
- table1.c.this_is_the_data_column,
- 'some_large_named_table_this_is_the_data_column',
- 'some_2'
+ table1.c.this_is_the_data_column,
+ 'some_large_named_table_this_is_the_data_column',
+ 'some_2'
+ ])
- ])
assert set(compiled.result_map['some_1'][1]).issuperset([
- table1.c.this_is_the_primarykey_column,
- 'some_large_named_table_this_is_the_primarykey_column',
- 'some_1'
-
- ])
+ table1.c.this_is_the_primarykey_column,
+ 'some_large_named_table_this_is_the_primarykey_column',
+ 'some_1'
+ ])
def test_adjustable_result_schema_column_2(self):
table1 = self.table1
@@ -434,20 +420,17 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
x = select([q])
dialect = default.DefaultDialect(label_length=10)
-
compiled = x.compile(dialect=dialect)
+
assert set(compiled.result_map['this_2'][1]).issuperset([
- q.corresponding_column(table1.c.this_is_the_data_column),
- 'this_is_the_data_column',
- 'this_2'
+ q.corresponding_column(table1.c.this_is_the_data_column),
+ 'this_is_the_data_column',
+ 'this_2'])
- ])
assert set(compiled.result_map['this_1'][1]).issuperset([
- q.corresponding_column(table1.c.this_is_the_primarykey_column),
- 'this_is_the_primarykey_column',
- 'this_1'
-
- ])
+ q.corresponding_column(table1.c.this_is_the_primarykey_column),
+ 'this_is_the_primarykey_column',
+ 'this_1'])
def test_table_plus_column_exceeds_length(self):
"""test that the truncation only occurs when tablename + colname are
@@ -490,7 +473,6 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
'other_thirty_characters_table_.thirty_characters_table_id',
dialect=compile_dialect)
-
def test_colnames_longer_than_labels_lowercase(self):
t1 = table('a', column('abcde'))
self._test_colnames_longer_than_labels(t1)
@@ -507,30 +489,18 @@ class LabelLengthTest(fixtures.TestBase, AssertsCompiledSQL):
# 'abcde' is longer than 4, but rendered as itself
# needs to have all characters
s = select([a1])
- self.assert_compile(
- select([a1]),
- "SELECT asdf.abcde FROM a AS asdf",
- dialect=dialect
- )
+ self.assert_compile(select([a1]),
+ 'SELECT asdf.abcde FROM a AS asdf',
+ dialect=dialect)
compiled = s.compile(dialect=dialect)
assert set(compiled.result_map['abcde'][1]).issuperset([
- 'abcde',
- a1.c.abcde,
- 'abcde'
- ])
+ 'abcde', a1.c.abcde, 'abcde'])
# column still there, but short label
s = select([a1]).apply_labels()
- self.assert_compile(
- s,
- "SELECT asdf.abcde AS _1 FROM a AS asdf",
- dialect=dialect
- )
+ self.assert_compile(s,
+ 'SELECT asdf.abcde AS _1 FROM a AS asdf',
+ dialect=dialect)
compiled = s.compile(dialect=dialect)
assert set(compiled.result_map['_1'][1]).issuperset([
- 'asdf_abcde',
- a1.c.abcde,
- '_1'
- ])
-
-
+ 'asdf_abcde', a1.c.abcde, '_1'])
diff --git a/test/sql/test_query.py b/test/sql/test_query.py
index b5f50aeea..a61363378 100644
--- a/test/sql/test_query.py
+++ b/test/sql/test_query.py
@@ -190,10 +190,27 @@ class QueryTest(fixtures.TestBase):
try:
table.create(bind=engine, checkfirst=True)
i = insert_values(engine, table, values)
- assert i == assertvalues, "tablename: %s %r %r" % (table.name, repr(i), repr(assertvalues))
+ assert i == assertvalues, "tablename: %s %r %r" % \
+ (table.name, repr(i), repr(assertvalues))
finally:
table.drop(bind=engine)
+ @testing.only_on('sqlite+pysqlite')
+ @testing.provide_metadata
+ def test_lastrowid_zero(self):
+ from sqlalchemy.dialects import sqlite
+ eng = engines.testing_engine()
+ class ExcCtx(sqlite.base.SQLiteExecutionContext):
+ def get_lastrowid(self):
+ return 0
+ eng.dialect.execution_ctx_cls = ExcCtx
+ t = Table('t', MetaData(), Column('x', Integer, primary_key=True),
+ Column('y', Integer))
+ t.create(eng)
+ r = eng.execute(t.insert().values(y=5))
+ eq_(r.inserted_primary_key, [0])
+
+
@testing.fails_on('sqlite', "sqlite autoincremnt doesn't work with composite pks")
def test_misordered_lastrow(self):
related = Table('related', metadata,
diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py
index a182444e9..6a42b0625 100644
--- a/test/sql/test_returning.py
+++ b/test/sql/test_returning.py
@@ -88,26 +88,6 @@ class ReturningTest(fixtures.TestBase, AssertsExecutionResults):
eq_(result.fetchall(), [(1,)])
- @testing.fails_on('postgresql', 'undefined behavior')
- @testing.fails_on('oracle+cx_oracle', 'undefined behavior')
- @testing.crashes('mssql+mxodbc', 'Raises an error')
- def test_insert_returning_execmany(self):
-
- # return value is documented as failing with psycopg2/executemany
- result2 = table.insert().returning(table).execute(
- [{'persons': 2, 'full': False}, {'persons': 3, 'full': True}])
-
- if testing.against('mssql+zxjdbc'):
- # jtds apparently returns only the first row
- eq_(result2.fetchall(), [(2, 2, False, None)])
- elif testing.against('firebird', 'mssql', 'oracle'):
- # Multiple inserts only return the last row
- eq_(result2.fetchall(), [(3, 3, True, None)])
- else:
- # nobody does this as far as we know (pg8000?)
- eq_(result2.fetchall(), [(2, 2, False, None), (3, 3, True, None)])
-
-
@testing.requires.multivalues_inserts
def test_multirow_returning(self):
ins = table.insert().returning(table.c.id, table.c.persons).values(
diff --git a/test/sql/test_types.py b/test/sql/test_types.py
index 3c981e539..fac22a205 100644
--- a/test/sql/test_types.py
+++ b/test/sql/test_types.py
@@ -15,7 +15,6 @@ from sqlalchemy import testing
from sqlalchemy.testing import AssertsCompiledSQL, AssertsExecutionResults, \
engines, pickleable
from sqlalchemy.testing.util import picklers
-from sqlalchemy.util.compat import decimal
from sqlalchemy.testing.util import round_decimal
from sqlalchemy.testing import fixtures
diff --git a/test/sql/test_update.py b/test/sql/test_update.py
index b46489cd2..a8df86cd2 100644
--- a/test/sql/test_update.py
+++ b/test/sql/test_update.py
@@ -1,55 +1,53 @@
-from sqlalchemy.testing import eq_, assert_raises_message, assert_raises, AssertsCompiledSQL
-import datetime
from sqlalchemy import *
-from sqlalchemy import exc, sql, util
-from sqlalchemy.engine import default, base
from sqlalchemy import testing
-from sqlalchemy.testing import fixtures
-from sqlalchemy.testing.schema import Table, Column
from sqlalchemy.dialects import mysql
+from sqlalchemy.testing import AssertsCompiledSQL, eq_, fixtures
+from sqlalchemy.testing.schema import Table, Column
+
class _UpdateFromTestBase(object):
@classmethod
def define_tables(cls, metadata):
+ Table('mytable', metadata,
+ Column('myid', Integer),
+ Column('name', String(30)),
+ Column('description', String(50)))
+ Table('myothertable', metadata,
+ Column('otherid', Integer),
+ Column('othername', String(30)))
Table('users', metadata,
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
- Column('name', String(30), nullable=False),
- )
-
+ test_needs_autoincrement=True),
+ Column('name', String(30), nullable=False))
Table('addresses', metadata,
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=True),
Column('user_id', None, ForeignKey('users.id')),
Column('name', String(30), nullable=False),
- Column('email_address', String(50), nullable=False),
- )
-
- Table("dingalings", metadata,
+ Column('email_address', String(50), nullable=False))
+ Table('dingalings', metadata,
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=True),
Column('address_id', None, ForeignKey('addresses.id')),
- Column('data', String(30)),
- )
+ Column('data', String(30)))
@classmethod
def fixtures(cls):
return dict(
- users = (
+ users=(
('id', 'name'),
(7, 'jack'),
(8, 'ed'),
(9, 'fred'),
(10, 'chuck')
),
-
addresses = (
('id', 'user_id', 'name', 'email_address'),
- (1, 7, 'x', "jack@bean.com"),
- (2, 8, 'x', "ed@wood.com"),
- (3, 8, 'x', "ed@bettyboop.com"),
- (4, 8, 'x', "ed@lala.com"),
- (5, 9, 'x', "fred@fred.com")
+ (1, 7, 'x', 'jack@bean.com'),
+ (2, 8, 'x', 'ed@wood.com'),
+ (3, 8, 'x', 'ed@bettyboop.com'),
+ (4, 8, 'x', 'ed@lala.com'),
+ (5, 9, 'x', 'fred@fred.com')
),
dingalings = (
('id', 'address_id', 'data'),
@@ -59,288 +57,462 @@ class _UpdateFromTestBase(object):
)
-class UpdateFromCompileTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+class UpdateTest(_UpdateFromTestBase, fixtures.TablesTest, AssertsCompiledSQL):
+ __dialect__ = 'default'
+
+ def test_update_1(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1, table1.c.myid == 7),
+ 'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1',
+ params={table1.c.name: 'fred'})
+
+ def test_update_2(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ table1.update().
+ where(table1.c.myid == 7).
+ values({table1.c.myid: 5}),
+ 'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1',
+ checkparams={'myid': 5, 'myid_1': 7})
+
+ def test_update_3(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1, table1.c.myid == 7),
+ 'UPDATE mytable SET name=:name WHERE mytable.myid = :myid_1',
+ params={'name': 'fred'})
+
+ def test_update_4(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1, values={table1.c.name: table1.c.myid}),
+ 'UPDATE mytable SET name=mytable.myid')
+
+ def test_update_5(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1,
+ whereclause=table1.c.name == bindparam('crit'),
+ values={table1.c.name: 'hi'}),
+ 'UPDATE mytable SET name=:name WHERE mytable.name = :crit',
+ params={'crit': 'notthere'},
+ checkparams={'crit': 'notthere', 'name': 'hi'})
+
+ def test_update_6(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1,
+ table1.c.myid == 12,
+ values={table1.c.name: table1.c.myid}),
+ 'UPDATE mytable '
+ 'SET name=mytable.myid, description=:description '
+ 'WHERE mytable.myid = :myid_1',
+ params={'description': 'test'},
+ checkparams={'description': 'test', 'myid_1': 12})
+
+ def test_update_7(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1, table1.c.myid == 12, values={table1.c.myid: 9}),
+ 'UPDATE mytable '
+ 'SET myid=:myid, description=:description '
+ 'WHERE mytable.myid = :myid_1',
+ params={'myid_1': 12, 'myid': 9, 'description': 'test'})
+
+ def test_update_8(self):
+ table1 = self.tables.mytable
+
+ self.assert_compile(
+ update(table1, table1.c.myid == 12),
+ 'UPDATE mytable SET myid=:myid WHERE mytable.myid = :myid_1',
+ params={'myid': 18}, checkparams={'myid': 18, 'myid_1': 12})
+
+ def test_update_9(self):
+ table1 = self.tables.mytable
+
+ s = table1.update(table1.c.myid == 12, values={table1.c.name: 'lala'})
+ c = s.compile(column_keys=['id', 'name'])
+ eq_(str(s), str(c))
+
+ def test_update_10(self):
+ table1 = self.tables.mytable
+
+ v1 = {table1.c.name: table1.c.myid}
+ v2 = {table1.c.name: table1.c.name + 'foo'}
+ self.assert_compile(
+ update(table1, table1.c.myid == 12, values=v1).values(v2),
+ 'UPDATE mytable '
+ 'SET '
+ 'name=(mytable.name || :name_1), '
+ 'description=:description '
+ 'WHERE mytable.myid = :myid_1',
+ params={'description': 'test'})
+
+ def test_update_11(self):
+ table1 = self.tables.mytable
+
+ values = {
+ table1.c.name: table1.c.name + 'lala',
+ table1.c.myid: func.do_stuff(table1.c.myid, literal('hoho'))
+ }
+ self.assert_compile(update(table1,
+ (table1.c.myid == func.hoho(4)) &
+ (table1.c.name == literal('foo') +
+ table1.c.name + literal('lala')),
+ values=values),
+ 'UPDATE mytable '
+ 'SET '
+ 'myid=do_stuff(mytable.myid, :param_1), '
+ 'name=(mytable.name || :name_1) '
+ 'WHERE '
+ 'mytable.myid = hoho(:hoho_1) AND '
+ 'mytable.name = :param_2 || mytable.name || :param_3')
+
+ def test_prefix_with(self):
+ table1 = self.tables.mytable
+
+ stmt = table1.update().\
+ prefix_with('A', 'B', dialect='mysql').\
+ prefix_with('C', 'D')
+
+ self.assert_compile(stmt,
+ 'UPDATE C D mytable SET myid=:myid, name=:name, '
+ 'description=:description')
+
+ self.assert_compile(stmt,
+ 'UPDATE A B C D mytable SET myid=%s, name=%s, description=%s',
+ dialect=mysql.dialect())
+
+ def test_alias(self):
+ table1 = self.tables.mytable
+ talias1 = table1.alias('t1')
+
+ self.assert_compile(update(talias1, talias1.c.myid == 7),
+ 'UPDATE mytable AS t1 '
+ 'SET name=:name '
+ 'WHERE t1.myid = :myid_1',
+ params={table1.c.name: 'fred'})
+
+ self.assert_compile(update(talias1, table1.c.myid == 7),
+ 'UPDATE mytable AS t1 '
+ 'SET name=:name '
+ 'FROM mytable '
+ 'WHERE mytable.myid = :myid_1',
+ params={table1.c.name: 'fred'})
+
+ def test_update_to_expression(self):
+ """test update from an expression.
+
+ this logic is triggered currently by a left side that doesn't
+ have a key. The current supported use case is updating the index
+ of a Postgresql ARRAY type.
+
+ """
+ table1 = self.tables.mytable
+ expr = func.foo(table1.c.myid)
+ assert not hasattr(expr, 'key')
+ self.assert_compile(table1.update().values({expr: 'bar'}),
+ 'UPDATE mytable SET foo(myid)=:param_1')
+
+
+class UpdateFromCompileTest(_UpdateFromTestBase, fixtures.TablesTest,
+ AssertsCompiledSQL):
__dialect__ = 'default'
run_create_tables = run_inserts = run_deletes = None
def test_render_table(self):
users, addresses = self.tables.users, self.tables.addresses
+
self.assert_compile(
- users.update().\
- values(name='newname').\
- where(users.c.id==addresses.c.user_id).\
- where(addresses.c.email_address=='e1'),
- "UPDATE users SET name=:name FROM addresses "
- "WHERE users.id = addresses.user_id AND "
- "addresses.email_address = :email_address_1",
- checkparams={u'email_address_1': 'e1', 'name': 'newname'}
- )
+ users.update().
+ values(name='newname').
+ where(users.c.id == addresses.c.user_id).
+ where(addresses.c.email_address == 'e1'),
+ 'UPDATE users '
+ 'SET name=:name FROM addresses '
+ 'WHERE '
+ 'users.id = addresses.user_id AND '
+ 'addresses.email_address = :email_address_1',
+ checkparams={u'email_address_1': 'e1', 'name': 'newname'})
def test_render_multi_table(self):
- users, addresses, dingalings = \
- self.tables.users, \
- self.tables.addresses, \
- self.tables.dingalings
+ users = self.tables.users
+ addresses = self.tables.addresses
+ dingalings = self.tables.dingalings
+
+ checkparams = {
+ u'email_address_1': 'e1',
+ u'id_1': 2,
+ 'name': 'newname'
+ }
+
self.assert_compile(
- users.update().\
- values(name='newname').\
- where(users.c.id==addresses.c.user_id).\
- where(addresses.c.email_address=='e1').\
- where(addresses.c.id==dingalings.c.address_id).\
- where(dingalings.c.id==2),
- "UPDATE users SET name=:name FROM addresses, "
- "dingalings WHERE users.id = addresses.user_id "
- "AND addresses.email_address = :email_address_1 "
- "AND addresses.id = dingalings.address_id AND "
- "dingalings.id = :id_1",
- checkparams={u'email_address_1': 'e1', u'id_1': 2,
- 'name': 'newname'}
- )
+ users.update().
+ values(name='newname').
+ where(users.c.id == addresses.c.user_id).
+ where(addresses.c.email_address == 'e1').
+ where(addresses.c.id == dingalings.c.address_id).
+ where(dingalings.c.id == 2),
+ 'UPDATE users '
+ 'SET name=:name '
+ 'FROM addresses, dingalings '
+ 'WHERE '
+ 'users.id = addresses.user_id AND '
+ 'addresses.email_address = :email_address_1 AND '
+ 'addresses.id = dingalings.address_id AND '
+ 'dingalings.id = :id_1',
+ checkparams=checkparams)
def test_render_table_mysql(self):
users, addresses = self.tables.users, self.tables.addresses
+
self.assert_compile(
- users.update().\
- values(name='newname').\
- where(users.c.id==addresses.c.user_id).\
- where(addresses.c.email_address=='e1'),
- "UPDATE users, addresses SET users.name=%s "
- "WHERE users.id = addresses.user_id AND "
- "addresses.email_address = %s",
+ users.update().
+ values(name='newname').
+ where(users.c.id == addresses.c.user_id).
+ where(addresses.c.email_address == 'e1'),
+ 'UPDATE users, addresses '
+ 'SET users.name=%s '
+ 'WHERE '
+ 'users.id = addresses.user_id AND '
+ 'addresses.email_address = %s',
checkparams={u'email_address_1': 'e1', 'name': 'newname'},
- dialect=mysql.dialect()
- )
+ dialect=mysql.dialect())
def test_render_subquery(self):
users, addresses = self.tables.users, self.tables.addresses
- subq = select([addresses.c.id,
- addresses.c.user_id,
- addresses.c.email_address]).\
- where(addresses.c.id==7).alias()
+
+ checkparams = {
+ u'email_address_1': 'e1',
+ u'id_1': 7,
+ 'name': 'newname'
+ }
+
+ cols = [
+ addresses.c.id,
+ addresses.c.user_id,
+ addresses.c.email_address
+ ]
+
+ subq = select(cols).where(addresses.c.id == 7).alias()
self.assert_compile(
- users.update().\
- values(name='newname').\
- where(users.c.id==subq.c.user_id).\
- where(subq.c.email_address=='e1'),
- "UPDATE users SET name=:name FROM "
- "(SELECT addresses.id AS id, addresses.user_id "
- "AS user_id, addresses.email_address AS "
- "email_address FROM addresses WHERE addresses.id = "
- ":id_1) AS anon_1 WHERE users.id = anon_1.user_id "
- "AND anon_1.email_address = :email_address_1",
- checkparams={u'email_address_1': 'e1',
- u'id_1': 7, 'name': 'newname'}
- )
+ users.update().
+ values(name='newname').
+ where(users.c.id == subq.c.user_id).
+ where(subq.c.email_address == 'e1'),
+ 'UPDATE users '
+ 'SET name=:name FROM ('
+ 'SELECT '
+ 'addresses.id AS id, '
+ 'addresses.user_id AS user_id, '
+ 'addresses.email_address AS email_address '
+ 'FROM addresses '
+ 'WHERE addresses.id = :id_1'
+ ') AS anon_1 '
+ 'WHERE users.id = anon_1.user_id '
+ 'AND anon_1.email_address = :email_address_1',
+ checkparams=checkparams)
+
class UpdateFromRoundTripTest(_UpdateFromTestBase, fixtures.TablesTest):
@testing.requires.update_from
def test_exec_two_table(self):
users, addresses = self.tables.users, self.tables.addresses
+
testing.db.execute(
- addresses.update().\
- values(email_address=users.c.name).\
- where(users.c.id==addresses.c.user_id).\
- where(users.c.name=='ed')
- )
- eq_(
- testing.db.execute(
- addresses.select().\
- order_by(addresses.c.id)).fetchall(),
- [
- (1, 7, 'x', "jack@bean.com"),
- (2, 8, 'x', "ed"),
- (3, 8, 'x', "ed"),
- (4, 8, 'x', "ed"),
- (5, 9, 'x', "fred@fred.com")
- ]
- )
+ addresses.update().
+ values(email_address=users.c.name).
+ where(users.c.id == addresses.c.user_id).
+ where(users.c.name == 'ed'))
+
+ expected = [
+ (1, 7, 'x', 'jack@bean.com'),
+ (2, 8, 'x', 'ed'),
+ (3, 8, 'x', 'ed'),
+ (4, 8, 'x', 'ed'),
+ (5, 9, 'x', 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
@testing.requires.update_from
def test_exec_two_table_plus_alias(self):
users, addresses = self.tables.users, self.tables.addresses
- a1 = addresses.alias()
+ a1 = addresses.alias()
testing.db.execute(
- addresses.update().\
- values(email_address=users.c.name).\
- where(users.c.id==a1.c.user_id).\
- where(users.c.name=='ed').\
- where(a1.c.id==addresses.c.id)
- )
- eq_(
- testing.db.execute(
- addresses.select().\
- order_by(addresses.c.id)).fetchall(),
- [
- (1, 7, 'x', "jack@bean.com"),
- (2, 8, 'x', "ed"),
- (3, 8, 'x', "ed"),
- (4, 8, 'x', "ed"),
- (5, 9, 'x', "fred@fred.com")
- ]
+ addresses.update().
+ values(email_address=users.c.name).
+ where(users.c.id == a1.c.user_id).
+ where(users.c.name == 'ed').
+ where(a1.c.id == addresses.c.id)
)
+ expected = [
+ (1, 7, 'x', 'jack@bean.com'),
+ (2, 8, 'x', 'ed'),
+ (3, 8, 'x', 'ed'),
+ (4, 8, 'x', 'ed'),
+ (5, 9, 'x', 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
+
@testing.requires.update_from
def test_exec_three_table(self):
- users, addresses, dingalings = \
- self.tables.users, \
- self.tables.addresses, \
- self.tables.dingalings
+ users = self.tables.users
+ addresses = self.tables.addresses
+ dingalings = self.tables.dingalings
+
testing.db.execute(
- addresses.update().\
- values(email_address=users.c.name).\
- where(users.c.id==addresses.c.user_id).\
- where(users.c.name=='ed').
- where(addresses.c.id==dingalings.c.address_id).\
- where(dingalings.c.id==1),
- )
- eq_(
- testing.db.execute(
- addresses.select().order_by(addresses.c.id)
- ).fetchall(),
- [
- (1, 7, 'x', "jack@bean.com"),
- (2, 8, 'x', "ed"),
- (3, 8, 'x', "ed@bettyboop.com"),
- (4, 8, 'x', "ed@lala.com"),
- (5, 9, 'x', "fred@fred.com")
- ]
- )
+ addresses.update().
+ values(email_address=users.c.name).
+ where(users.c.id == addresses.c.user_id).
+ where(users.c.name == 'ed').
+ where(addresses.c.id == dingalings.c.address_id).
+ where(dingalings.c.id == 1))
+
+ expected = [
+ (1, 7, 'x', 'jack@bean.com'),
+ (2, 8, 'x', 'ed'),
+ (3, 8, 'x', 'ed@bettyboop.com'),
+ (4, 8, 'x', 'ed@lala.com'),
+ (5, 9, 'x', 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
@testing.only_on('mysql', 'Multi table update')
def test_exec_multitable(self):
users, addresses = self.tables.users, self.tables.addresses
+
+ values = {
+ addresses.c.email_address: users.c.name,
+ users.c.name: 'ed2'
+ }
+
testing.db.execute(
- addresses.update().\
- values({
- addresses.c.email_address:users.c.name,
- users.c.name:'ed2'
- }).\
- where(users.c.id==addresses.c.user_id).\
- where(users.c.name=='ed')
- )
- eq_(
- testing.db.execute(
- addresses.select().order_by(addresses.c.id)).fetchall(),
- [
- (1, 7, 'x', "jack@bean.com"),
- (2, 8, 'x', "ed"),
- (3, 8, 'x', "ed"),
- (4, 8, 'x', "ed"),
- (5, 9, 'x', "fred@fred.com")
- ]
- )
- eq_(
- testing.db.execute(
- users.select().order_by(users.c.id)).fetchall(),
- [
- (7, 'jack'),
- (8, 'ed2'),
- (9, 'fred'),
- (10, 'chuck')
- ]
- )
+ addresses.update().
+ values(values).
+ where(users.c.id == addresses.c.user_id).
+ where(users.c.name == 'ed'))
+
+ expected = [
+ (1, 7, 'x', 'jack@bean.com'),
+ (2, 8, 'x', 'ed'),
+ (3, 8, 'x', 'ed'),
+ (4, 8, 'x', 'ed'),
+ (5, 9, 'x', 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
+
+ expected = [
+ (7, 'jack'),
+ (8, 'ed2'),
+ (9, 'fred'),
+ (10, 'chuck')]
+ self._assert_users(users, expected)
-class UpdateFromMultiTableUpdateDefaultsTest(_UpdateFromTestBase, fixtures.TablesTest):
+ def _assert_addresses(self, addresses, expected):
+ stmt = addresses.select().order_by(addresses.c.id)
+ eq_(testing.db.execute(stmt).fetchall(), expected)
+
+ def _assert_users(self, users, expected):
+ stmt = users.select().order_by(users.c.id)
+ eq_(testing.db.execute(stmt).fetchall(), expected)
+
+
+class UpdateFromMultiTableUpdateDefaultsTest(_UpdateFromTestBase,
+ fixtures.TablesTest):
@classmethod
def define_tables(cls, metadata):
Table('users', metadata,
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=True),
Column('name', String(30), nullable=False),
- Column('some_update', String(30), onupdate="im the update")
- )
+ Column('some_update', String(30), onupdate='im the update'))
Table('addresses', metadata,
Column('id', Integer, primary_key=True,
- test_needs_autoincrement=True),
+ test_needs_autoincrement=True),
Column('user_id', None, ForeignKey('users.id')),
- Column('email_address', String(50), nullable=False),
- )
+ Column('email_address', String(50), nullable=False))
@classmethod
def fixtures(cls):
return dict(
- users = (
+ users=(
('id', 'name', 'some_update'),
(8, 'ed', 'value'),
(9, 'fred', 'value'),
),
-
- addresses = (
+ addresses=(
('id', 'user_id', 'email_address'),
- (2, 8, "ed@wood.com"),
- (3, 8, "ed@bettyboop.com"),
- (4, 9, "fred@fred.com")
+ (2, 8, 'ed@wood.com'),
+ (3, 8, 'ed@bettyboop.com'),
+ (4, 9, 'fred@fred.com')
),
)
@testing.only_on('mysql', 'Multi table update')
def test_defaults_second_table(self):
users, addresses = self.tables.users, self.tables.addresses
+
+ values = {
+ addresses.c.email_address: users.c.name,
+ users.c.name: 'ed2'
+ }
+
ret = testing.db.execute(
- addresses.update().\
- values({
- addresses.c.email_address:users.c.name,
- users.c.name:'ed2'
- }).\
- where(users.c.id==addresses.c.user_id).\
- where(users.c.name=='ed')
- )
- eq_(
- set(ret.prefetch_cols()),
- set([users.c.some_update])
- )
- eq_(
- testing.db.execute(
- addresses.select().order_by(addresses.c.id)).fetchall(),
- [
- (2, 8, "ed"),
- (3, 8, "ed"),
- (4, 9, "fred@fred.com")
- ]
- )
- eq_(
- testing.db.execute(
- users.select().order_by(users.c.id)).fetchall(),
- [
- (8, 'ed2', 'im the update'),
- (9, 'fred', 'value'),
- ]
- )
+ addresses.update().
+ values(values).
+ where(users.c.id == addresses.c.user_id).
+ where(users.c.name == 'ed'))
+
+ eq_(set(ret.prefetch_cols()), set([users.c.some_update]))
+
+ expected = [
+ (2, 8, 'ed'),
+ (3, 8, 'ed'),
+ (4, 9, 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
+
+ expected = [
+ (8, 'ed2', 'im the update'),
+ (9, 'fred', 'value')]
+ self._assert_users(users, expected)
@testing.only_on('mysql', 'Multi table update')
def test_no_defaults_second_table(self):
users, addresses = self.tables.users, self.tables.addresses
+
ret = testing.db.execute(
- addresses.update().\
- values({
- 'email_address':users.c.name,
- }).\
- where(users.c.id==addresses.c.user_id).\
- where(users.c.name=='ed')
- )
- eq_(
- ret.prefetch_cols(),[]
- )
- eq_(
- testing.db.execute(
- addresses.select().order_by(addresses.c.id)).fetchall(),
- [
- (2, 8, "ed"),
- (3, 8, "ed"),
- (4, 9, "fred@fred.com")
- ]
- )
- # users table not actually updated,
- # so no onupdate
- eq_(
- testing.db.execute(
- users.select().order_by(users.c.id)).fetchall(),
- [
- (8, 'ed', 'value'),
- (9, 'fred', 'value'),
- ]
- )
+ addresses.update().
+ values({'email_address': users.c.name}).
+ where(users.c.id == addresses.c.user_id).
+ where(users.c.name == 'ed'))
+
+ eq_(ret.prefetch_cols(), [])
+
+ expected = [
+ (2, 8, 'ed'),
+ (3, 8, 'ed'),
+ (4, 9, 'fred@fred.com')]
+ self._assert_addresses(addresses, expected)
+
+ # users table not actually updated, so no onupdate
+ expected = [
+ (8, 'ed', 'value'),
+ (9, 'fred', 'value')]
+ self._assert_users(users, expected)
+
+ def _assert_addresses(self, addresses, expected):
+ stmt = addresses.select().order_by(addresses.c.id)
+ eq_(testing.db.execute(stmt).fetchall(), expected)
+
+ def _assert_users(self, users, expected):
+ stmt = users.select().order_by(users.c.id)
+ eq_(testing.db.execute(stmt).fetchall(), expected)