summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2010-09-19 16:11:16 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2010-09-19 16:11:16 -0400
commit90c8d8e0c9e2d0a9eeace7fa326df26a5f28465a (patch)
tree48b1e7fa44d5368f56be00c78c0e3d647186c497
parente4bc7d289477e22815f4c6ab86b3f0c1bf356e08 (diff)
parentc5c8cdf3b4d7dc456cfef29ea04b2b7300060c7a (diff)
downloadsqlalchemy-90c8d8e0c9e2d0a9eeace7fa326df26a5f28465a.tar.gz
merge tip
-rw-r--r--.hgtags1
-rw-r--r--CHANGES222
-rw-r--r--doc/build/conf.py2
-rw-r--r--doc/build/core/compiler.rst7
-rw-r--r--doc/build/core/connections.rst369
-rw-r--r--doc/build/core/engines.rst311
-rw-r--r--doc/build/core/exceptions.rst6
-rw-r--r--doc/build/core/expression_api.rst (renamed from doc/build/reference/sqlalchemy/expressions.rst)28
-rw-r--r--doc/build/core/index.rst21
-rw-r--r--doc/build/core/interfaces.rst30
-rw-r--r--doc/build/core/pooling.rst226
-rw-r--r--doc/build/core/schema.rst1358
-rw-r--r--doc/build/core/serializer.rst (renamed from doc/build/reference/ext/serializer.rst)6
-rw-r--r--doc/build/core/sqla_engine_arch.pngbin0 -> 28189 bytes
-rw-r--r--doc/build/core/tutorial.rst (renamed from doc/build/sqlexpression.rst)0
-rw-r--r--doc/build/core/types.rst (renamed from doc/build/reference/sqlalchemy/types.rst)38
-rw-r--r--doc/build/dbengine.rst516
-rw-r--r--doc/build/dialects/access.rst (renamed from doc/build/reference/dialects/access.rst)0
-rw-r--r--doc/build/dialects/firebird.rst (renamed from doc/build/reference/dialects/firebird.rst)0
-rw-r--r--doc/build/dialects/index.rst (renamed from doc/build/reference/dialects/index.rst)13
-rw-r--r--doc/build/dialects/informix.rst (renamed from doc/build/reference/dialects/informix.rst)0
-rw-r--r--doc/build/dialects/maxdb.rst (renamed from doc/build/reference/dialects/maxdb.rst)0
-rw-r--r--doc/build/dialects/mssql.rst (renamed from doc/build/reference/dialects/mssql.rst)0
-rw-r--r--doc/build/dialects/mysql.rst (renamed from doc/build/reference/dialects/mysql.rst)0
-rw-r--r--doc/build/dialects/oracle.rst (renamed from doc/build/reference/dialects/oracle.rst)0
-rw-r--r--doc/build/dialects/postgresql.rst (renamed from doc/build/reference/dialects/postgresql.rst)0
-rw-r--r--doc/build/dialects/sqlite.rst (renamed from doc/build/reference/dialects/sqlite.rst)0
-rw-r--r--doc/build/dialects/sybase.rst (renamed from doc/build/reference/dialects/sybase.rst)0
-rw-r--r--doc/build/index.rst14
-rw-r--r--doc/build/intro.rst82
-rw-r--r--doc/build/mappers.rst2068
-rw-r--r--doc/build/metadata.rst859
-rw-r--r--doc/build/orm/collections.rst410
-rw-r--r--doc/build/orm/deprecated.rst1
-rw-r--r--doc/build/orm/examples.rst (renamed from doc/build/examples.rst)3
-rw-r--r--doc/build/orm/exceptions.rst6
-rw-r--r--doc/build/orm/extensions/associationproxy.rst (renamed from doc/build/reference/ext/associationproxy.rst)4
-rw-r--r--doc/build/orm/extensions/declarative.rst (renamed from doc/build/reference/ext/declarative.rst)2
-rw-r--r--doc/build/orm/extensions/horizontal_shard.rst (renamed from doc/build/reference/ext/horizontal_shard.rst)4
-rw-r--r--doc/build/orm/extensions/index.rst18
-rw-r--r--doc/build/orm/extensions/orderinglist.rst (renamed from doc/build/reference/ext/orderinglist.rst)4
-rw-r--r--doc/build/orm/extensions/sqlsoup.rst (renamed from doc/build/reference/ext/sqlsoup.rst)0
-rw-r--r--doc/build/orm/index.rst22
-rw-r--r--doc/build/orm/inheritance.rst579
-rw-r--r--doc/build/orm/interfaces.rst107
-rw-r--r--doc/build/orm/loading.rst356
-rw-r--r--doc/build/orm/mapper_config.rst722
-rw-r--r--doc/build/orm/query.rst (renamed from doc/build/reference/orm/query.rst)36
-rw-r--r--doc/build/orm/relationships.rst792
-rw-r--r--doc/build/orm/session.rst (renamed from doc/build/session.rst)275
-rw-r--r--doc/build/orm/tutorial.rst (renamed from doc/build/ormtutorial.rst)0
-rw-r--r--doc/build/reference/ext/compiler.rst7
-rw-r--r--doc/build/reference/ext/index.rst21
-rw-r--r--doc/build/reference/index.rst13
-rw-r--r--doc/build/reference/orm/collections.rst20
-rw-r--r--doc/build/reference/orm/index.rst16
-rw-r--r--doc/build/reference/orm/interfaces.rst7
-rw-r--r--doc/build/reference/orm/mapping.rst96
-rw-r--r--doc/build/reference/orm/sessions.rst18
-rw-r--r--doc/build/reference/orm/utilities.rst6
-rw-r--r--doc/build/reference/sqlalchemy/connections.rst65
-rw-r--r--doc/build/reference/sqlalchemy/index.rst15
-rw-r--r--doc/build/reference/sqlalchemy/inspector.rst36
-rw-r--r--doc/build/reference/sqlalchemy/interfaces.rst8
-rw-r--r--doc/build/reference/sqlalchemy/pooling.rst153
-rw-r--r--doc/build/reference/sqlalchemy/schema.rst169
-rwxr-xr-xdoc/build/reference/sqlalchemy/util.rst5
-rw-r--r--doc/build/sqla_arch_small.jpgbin41140 -> 0 bytes
-rw-r--r--doc/build/sqla_arch_small.pngbin0 -> 48834 bytes
-rw-r--r--doc/build/static/docs.css3
-rw-r--r--doc/build/templates/layout.mako2
-rw-r--r--doc/build/templates/site_base.mako1
-rw-r--r--doc/build/testdocs.py2
-rw-r--r--doc/build/texinputs/sphinx.sty353
-rw-r--r--examples/derived_attributes/attributes.py134
-rw-r--r--examples/inheritance/polymorph.py8
-rw-r--r--examples/versioning/history_meta.py6
-rw-r--r--examples/versioning/test_versioning.py19
-rw-r--r--lib/sqlalchemy/__init__.py2
-rw-r--r--lib/sqlalchemy/dialects/firebird/base.py3
-rw-r--r--lib/sqlalchemy/dialects/informix/base.py27
-rw-r--r--lib/sqlalchemy/dialects/informix/informixdb.py11
-rw-r--r--lib/sqlalchemy/dialects/mssql/base.py2
-rw-r--r--lib/sqlalchemy/dialects/oracle/base.py35
-rw-r--r--lib/sqlalchemy/engine/__init__.py36
-rw-r--r--lib/sqlalchemy/engine/base.py108
-rw-r--r--lib/sqlalchemy/engine/reflection.py14
-rw-r--r--lib/sqlalchemy/exc.py4
-rw-r--r--lib/sqlalchemy/ext/compiler.py2
-rwxr-xr-xlib/sqlalchemy/ext/declarative.py7
-rw-r--r--lib/sqlalchemy/ext/sqlsoup.py10
-rw-r--r--lib/sqlalchemy/orm/__init__.py118
-rw-r--r--lib/sqlalchemy/orm/attributes.py98
-rw-r--r--lib/sqlalchemy/orm/collections.py14
-rw-r--r--lib/sqlalchemy/orm/dependency.py15
-rw-r--r--lib/sqlalchemy/orm/dynamic.py2
-rw-r--r--lib/sqlalchemy/orm/evaluator.py3
-rw-r--r--lib/sqlalchemy/orm/exc.py2
-rw-r--r--lib/sqlalchemy/orm/identity.py77
-rw-r--r--lib/sqlalchemy/orm/interfaces.py39
-rw-r--r--lib/sqlalchemy/orm/mapper.py222
-rw-r--r--lib/sqlalchemy/orm/properties.py39
-rw-r--r--lib/sqlalchemy/orm/query.py94
-rw-r--r--lib/sqlalchemy/orm/scoping.py9
-rw-r--r--lib/sqlalchemy/orm/session.py118
-rw-r--r--lib/sqlalchemy/orm/state.py2
-rw-r--r--lib/sqlalchemy/orm/strategies.py122
-rw-r--r--lib/sqlalchemy/orm/sync.py2
-rw-r--r--lib/sqlalchemy/orm/unitofwork.py6
-rw-r--r--lib/sqlalchemy/orm/util.py40
-rw-r--r--lib/sqlalchemy/pool.py43
-rw-r--r--lib/sqlalchemy/schema.py118
-rw-r--r--lib/sqlalchemy/sql/compiler.py4
-rw-r--r--lib/sqlalchemy/sql/expression.py75
-rw-r--r--lib/sqlalchemy/sql/util.py25
-rw-r--r--lib/sqlalchemy/test/requires.py8
-rw-r--r--lib/sqlalchemy/test/testing.py3
-rw-r--r--lib/sqlalchemy/types.py346
-rw-r--r--lib/sqlalchemy/util.py75
-rw-r--r--setup.py136
-rw-r--r--test/aaa_profiling/test_zoomark_orm.py2
-rw-r--r--test/dialect/test_firebird.py18
-rw-r--r--test/dialect/test_oracle.py75
-rw-r--r--test/dialect/test_postgresql.py2
-rw-r--r--test/engine/test_metadata.py63
-rw-r--r--test/ext/test_declarative.py48
-rw-r--r--test/orm/inheritance/test_basic.py7
-rw-r--r--test/orm/inheritance/test_single.py78
-rw-r--r--test/orm/test_assorted_eager.py4
-rw-r--r--test/orm/test_attributes.py120
-rw-r--r--test/orm/test_dynamic.py1
-rw-r--r--test/orm/test_load_on_fks.py273
-rw-r--r--test/orm/test_mapper.py96
-rw-r--r--test/orm/test_naturalpks.py45
-rw-r--r--test/orm/test_query.py51
-rw-r--r--test/orm/test_session.py51
-rw-r--r--test/orm/test_unitofwork.py84
-rw-r--r--test/sql/test_case_statement.py8
-rw-r--r--test/sql/test_compiler.py423
-rw-r--r--test/sql/test_query.py18
-rw-r--r--test/sql/test_types.py39
-rw-r--r--test/zblog/test_zblog.py24
142 files changed, 8783 insertions, 5564 deletions
diff --git a/.hgtags b/.hgtags
index 79c9206b7..85b12076c 100644
--- a/.hgtags
+++ b/.hgtags
@@ -74,3 +74,4 @@ a147e7959727d17b52fa2219ab6130ee443ed9ff rel_0_6_0
bb99158e5821d56344df21ed46128c2ea79bf2bd rel_0_6_1
30b7ef7a9a9c24abdc86bbde332ad3e6213bff2d rel_0_6_2
1db7766705b70f5326b614699b7c06d46168d19d rel_0_6_3
+2db46b7f51c1e64f06d9c31c49ff6e15be98e9ca rel_0_6_4
diff --git a/CHANGES b/CHANGES
index 056f69519..7d72f8769 100644
--- a/CHANGES
+++ b/CHANGES
@@ -3,6 +3,116 @@
=======
CHANGES
=======
+0.6.5
+=====
+- orm
+ - Fixed recursion bug which could occur when moving
+ an object from one reference to another, with
+ backrefs involved, where the initiating parent
+ was a subclass (with its own mapper) of the
+ previous parent.
+
+ - Fixed a regression in 0.6.4 which occurred if you
+ passed an empty list to "include_properties" on
+ mapper() [ticket:1918]
+
+ - The exception raised by Session when it is used
+ subsequent to a subtransaction rollback (which is what
+ happens when a flush fails in autocommit=False mode) has
+ now been reworded (this is the "inactive due to a
+ rollback in a subtransaction" message). In particular,
+ if the rollback was due to an exception during flush(),
+ the message states this is the case, and reiterates the
+ string form of the original exception that occurred
+ during flush. If the session is closed due to explicit
+ usage of subtransactions (not very common), the message
+ just states this is the case.
+
+ - The exception raised by Mapper when repeated requests to
+ its initialization are made after initialization already
+ failed no longer assumes the "hasattr" case, since
+ there's other scenarios in which this message gets
+ emitted, and the message also does not compound onto
+ itself multiple times - you get the same message for
+ each attempt at usage. The misnomer "compiles" is being
+ traded out for "initialize".
+
+ - Added an assertion during flush which ensures
+ that no NULL-holding identity keys were generated
+ on "newly persistent" objects.
+ This can occur when user defined code inadvertently
+ triggers flushes on not-fully-loaded objects.
+
+ - lazy loads for relationship attributes now use
+ the current state, not the "committed" state,
+ of foreign and primary key attributes
+ when issuing SQL, if a flush is not in process.
+ Previously, only the database-committed state would
+ be used. In particular, this would cause a many-to-one
+ get()-on-lazyload operation to fail, as autoflush
+ is not triggered on these loads when the attributes are
+ determined and the "committed" state may not be
+ available. [ticket:1910]
+
+ - A new flag on relationship(), load_on_pending, allows
+ the lazy loader to fire off on pending objects without a
+ flush taking place, as well as a transient object that's
+ been manually "attached" to the session. Note that this
+ flag blocks attribute events from taking place when an
+ object is loaded, so backrefs aren't available until
+ after a flush. The flag is only intended for very
+ specific use cases.
+
+ - Slight improvement to the behavior of
+ "passive_updates=False" when placed only on the
+ many-to-one side of a relationship; documentation has
+ been clarified that passive_updates=False should really
+ be on the one-to-many side.
+
+ - Placing passive_deletes=True on a many-to-one emits
+ a warning, since you probably intended to put it on
+ the one-to-many side.
+
+ - Fixed bug that would prevent "subqueryload" from
+ working correctly with single table inheritance
+ for a relationship from a subclass - the "where
+ type in (x, y, z)" only gets placed on the inside,
+ instead of repeatedly.
+
+ - When using from_self() with single table inheritance,
+ the "where type in (x, y, z)" is placed on the outside
+ of the query only, instead of repeatedly. May make
+ some more adjustments to this.
+
+- sql
+ - Table.tometadata() now copies Index objects associated
+ with the Table as well.
+
+ - Table.tometadata() issues a warning if the given Table
+ is already present in the target MetaData - the existing
+ Table object is returned.
+
+ - An informative error message is raised if a Column
+ which has not yet been assigned a name, i.e. as in
+ declarative, is used in a context where it is
+ exported to the columns collection of an enclosing
+ select() construct, or if any construct involving
+ that column is compiled before its name is
+ assigned.
+
+ - as_scalar(), label() can be called on a selectable
+ which contains a Column that is not yet named.
+ [ticket:1862]
+
+- engine
+
+ - Fixed a regression in 0.6.4 whereby the change that
+ allowed cursor errors to be raised consistently broke
+ the result.lastrowid accessor. Test coverage has
+ been added for result.lastrowid. Note that lastrowid
+ is only supported by Pysqlite and some MySQL drivers,
+ so isn't super-useful in the general case.
+
0.6.4
=====
- orm
@@ -14,6 +124,52 @@ CHANGES
for schemes that may be specifying
ConcurrentModificationError in an "except:"
clause.
+
+ - Added a mutex to the identity map which mutexes
+ remove operations against iteration methods,
+ which now pre-buffer before returning an
+      iterable. This is because asynchronous gc
+ can remove items via the gc thread at any time.
+ [ticket:1891]
+
+ - The Session class is now present in sqlalchemy.orm.*.
+ We're moving away from the usage of create_session(),
+ which has non-standard defaults, for those situations
+ where a one-step Session constructor is desired. Most
+ users should stick with sessionmaker() for general use,
+ however.
+
+ - query.with_parent() now accepts transient objects
+ and will use the non-persistent values of their pk/fk
+ attributes in order to formulate the criterion.
+ Docs are also clarified as to the purpose of with_parent().
+
+ - The include_properties and exclude_properties arguments
+ to mapper() now accept Column objects as members in
+ addition to strings. This so that same-named Column
+ objects, such as those within a join(), can be
+ disambiguated.
+
+ - A warning is now emitted if a mapper is created against a
+ join or other single selectable that includes multiple
+ columns with the same name in its .c. collection,
+      and those columns aren't explicitly named as part of
+ the same or separate attributes (or excluded).
+ In 0.7 this warning will be an exception. Note that
+ this warning is not emitted when the combination occurs
+ as a result of inheritance, so that attributes
+ still allow being overridden naturally.
+ [ticket:1896]. In 0.7 this will be improved further.
+
+ - The primary_key argument to mapper() can now specify
+ a series of columns that are only a subset of
+ the calculated "primary key" columns of the mapped
+ selectable, without an error being raised. This
+ helps for situations where a selectable's effective
+ primary key is simpler than the number of columns
+ in the selectable that are actually marked as
+ "primary_key", such as a join against two
+ tables on their primary key columns [ticket:1896].
- An object that's been deleted now gets a flag
'deleted', which prohibits the object from
@@ -121,8 +277,36 @@ CHANGES
- object_session() raises the proper
UnmappedInstanceError when presented with an
unmapped instance. [ticket:1881]
+
+ - Applied further memoizations to calculated Mapper
+ properties, with significant (~90%) runtime mapper.py
+ call count reduction in heavily polymorphic mapping
+ configurations.
+
+ - mapper _get_col_to_prop private method used
+ by the versioning example is deprecated;
+ now use mapper.get_property_by_column() which
+ will remain the public method for this.
+
+ - the versioning example works correctly now
+ if versioning on a col that was formerly
+ NULL.
- sql
+ - Calling execute() on an alias() construct is pending
+ deprecation for 0.7, as it is not itself an
+ "executable" construct. It currently "proxies" its
+ inner element and is conditionally "executable" but
+ this is not the kind of ambiguity we like these days.
+
+ - The execute() and scalar() methods of ClauseElement
+ are now moved appropriately to the Executable
+ subclass. ClauseElement.execute()/ scalar() are still
+ present and are pending deprecation in 0.7, but note
+ these would always raise an error anyway if you were
+ not an Executable (unless you were an alias(), see
+ previous note).
+
- Added basic math expression coercion for
Numeric->Integer,
so that resulting type is Numeric regardless
@@ -222,10 +406,46 @@ CHANGES
- Fixed "default schema" query to work with
pymssql backend.
+- firebird
+ - Fixed bug whereby a column default would fail to
+ reflect if the "default" keyword were lower case.
+
- oracle
- Added ROWID type to the Oracle dialect, for those
cases where an explicit CAST might be needed.
[ticket:1879]
+
+ - Oracle reflection of indexes has been tuned so
+ that indexes which include some or all primary
+ key columns, but not the same set of columns
+ as that of the primary key, are reflected.
+ Indexes which contain the identical columns
+ as that of the primary key are skipped within
+ reflection, as the index in that case is assumed
+ to be the auto-generated primary key index.
+ Previously, any index with PK columns present
+ would be skipped. Thanks to Kent Bower
+ for the patch. [ticket:1867]
+
+ - Oracle now reflects the names of primary key
+ constraints - also thanks to Kent Bower.
+ [ticket:1868]
+
+- informix
+ - Applied patches from [ticket:1904] to get
+ basic Informix functionality up again. We
+ rely upon end-user testing to ensure that
+ Informix is working to some degree.
+
+- documentation
+ - The docs have been reorganized such that the "API
+ Reference" section is gone - all the docstrings from
+ there which were public API are moved into the
+ context of the main doc section that talks about it.
+ Main docs divided into "SQLAlchemy Core" and
+ "SQLAlchemy ORM" sections, mapper/relationship docs
+ have been broken out. Lots of sections rewritten
+ and/or reorganized.
- examples
- The beaker_caching example has been reorganized
@@ -305,7 +525,7 @@ CHANGES
mode and must run isinstance() on every value
to check if its Decimal already. Reopen of
[ticket:1840]
-
+
0.6.2
=====
- orm
diff --git a/doc/build/conf.py b/doc/build/conf.py
index 175330437..e44651685 100644
--- a/doc/build/conf.py
+++ b/doc/build/conf.py
@@ -73,7 +73,7 @@ release = sqlalchemy.__version__
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
-unused_docs = ['copyright']
+unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
diff --git a/doc/build/core/compiler.rst b/doc/build/core/compiler.rst
new file mode 100644
index 000000000..73c9e3995
--- /dev/null
+++ b/doc/build/core/compiler.rst
@@ -0,0 +1,7 @@
+.. _sqlalchemy.ext.compiler_toplevel:
+
+Custom SQL Constructs and Compilation Extension
+===============================================
+
+.. automodule:: sqlalchemy.ext.compiler
+ :members: \ No newline at end of file
diff --git a/doc/build/core/connections.rst b/doc/build/core/connections.rst
new file mode 100644
index 000000000..91e1d698b
--- /dev/null
+++ b/doc/build/core/connections.rst
@@ -0,0 +1,369 @@
+.. _connections_toplevel:
+
+=====================================
+Working with Engines and Connections
+=====================================
+
+.. module:: sqlalchemy.engine.base
+
+This section details direct usage of the :class:`.Engine`,
+:class:`.Connection`, and related objects. It's important to note that when
+using the SQLAlchemy ORM, these objects are not generally accessed; instead,
+the :class:`.Session` object is used as the interface to the database.
+However, for applications that are built around direct usage of textual SQL
+statements and/or SQL expression constructs without involvement by the ORM's
+higher level management services, the :class:`.Engine` and
+:class:`.Connection` are king (and queen?) - read on.
+
+Basic Usage
+===========
+
+Recall from :ref:`engines_toplevel` that an :class:`.Engine` is created via
+the :func:`.create_engine` call::
+
+ engine = create_engine('mysql://scott:tiger@localhost/test')
+
+The typical usage of :func:`.create_engine()` is once per particular database
+URL, held globally for the lifetime of a single application process. A single
+:class:`.Engine` manages many individual DBAPI connections on behalf of the
+process and is intended to be called upon in a concurrent fashion. The
+:class:`.Engine` is **not** synonymous to the DBAPI ``connect`` function,
+which represents just one connection resource - the :class:`.Engine` is most
+efficient when created just once at the module level of an application, not
+per-object or per-function call.
+
+For a multiple-process application that uses the ``os.fork`` system call, or
+for example the Python ``multiprocessing`` module, it's usually required that a
+separate :class:`.Engine` be used for each child process. This is because the
+:class:`.Engine` maintains a reference to a connection pool that ultimately
+references DBAPI connections - these tend to not be portable across process
+boundaries. An :class:`.Engine` that is configured not to use pooling (which
+is achieved via the usage of :class:`.NullPool`) does not have this
+requirement.
+
+The engine can be used directly to issue SQL to the database. The most generic
+way is to first procure a connection resource, which you get via the :meth:`~.Engine.connect` method::
+
+ connection = engine.connect()
+ result = connection.execute("select username from users")
+ for row in result:
+ print "username:", row['username']
+ connection.close()
+
+The connection is an instance of :class:`.Connection`,
+which is a **proxy** object for an actual DBAPI connection. The DBAPI
+connection is retrieved from the connection pool at the point at which
+:class:`.Connection` is created.
+
+The returned result is an instance of :class:`.ResultProxy`, which
+references a DBAPI cursor and provides a largely compatible interface
+with that of the DBAPI cursor. The DBAPI cursor will be closed
+by the :class:`.ResultProxy` when all of its result rows (if any) are
+exhausted. A :class:`.ResultProxy` that returns no rows, such as that of
+an UPDATE statement (without any returned rows),
+releases cursor resources immediately upon construction.
+
+When the :meth:`~.Connection.close` method is called, the referenced DBAPI
+connection is returned to the connection pool. From the perspective
+of the database itself, nothing is actually "closed", assuming pooling is
+in use. The pooling mechanism issues a ``rollback()`` call on the DBAPI
+connection so that any transactional state or locks are removed, and
+the connection is ready for its next usage.
+
+The above procedure can be performed in a shorthand way by using the
+:meth:`~.Engine.execute` method of :class:`.Engine` itself::
+
+ result = engine.execute("select username from users")
+ for row in result:
+ print "username:", row['username']
+
+Where above, the :meth:`~.Engine.execute` method acquires a new
+:class:`.Connection` on its own, executes the statement with that object,
+and returns the :class:`.ResultProxy`. In this case, the :class:`.ResultProxy`
+contains a special flag known as ``close_with_result``, which indicates
+that when its underlying DBAPI cursor is closed, the :class:`.Connection`
+object itself is also closed, which again returns the DBAPI connection
+to the connection pool, releasing transactional resources.
+
+If the :class:`.ResultProxy` potentially has rows remaining, it can be
+instructed to close out its resources explicitly::
+
+ result.close()
+
+If the :class:`.ResultProxy` has pending rows remaining and is dereferenced by
+the application without being closed, Python garbage collection will
+ultimately close out the cursor as well as trigger a return of the pooled
+DBAPI connection resource to the pool (SQLAlchemy achieves this by the usage
+of weakref callbacks - *never* the ``__del__`` method) - however it's never a
+good idea to rely upon Python garbage collection to manage resources.
+
+Our example above illustrated the execution of a textual SQL string.
+The :meth:`~.Connection.execute` method can of course accommodate more than
+that, including the variety of SQL expression constructs described
+in :ref:`sqlexpression_toplevel`.
+
+.. autoclass:: Connection
+ :show-inheritance:
+ :members:
+
+.. autoclass:: Connectable
+ :show-inheritance:
+ :members:
+
+.. autoclass:: Engine
+ :show-inheritance:
+ :members:
+
+.. autoclass:: sqlalchemy.engine.base.ResultProxy
+ :members:
+
+.. autoclass:: sqlalchemy.engine.base.RowProxy
+ :members:
+
+Using Transactions
+==================
+
+.. note:: This section describes how to use transactions when working directly
+ with :class:`.Engine` and :class:`.Connection` objects. When using the
+ SQLAlchemy ORM, the public API for transaction control is via the
+ :class:`.Session` object, which makes usage of the :class:`.Transaction`
+ object internally. See :ref:`unitofwork_transaction` for further
+ information.
+
+The :class:`~sqlalchemy.engine.base.Connection` object provides a ``begin()``
+method which returns a :class:`~sqlalchemy.engine.base.Transaction` object.
+This object is usually used within a try/except clause so that it is
+guaranteed to ``rollback()`` or ``commit()``::
+
+ trans = connection.begin()
+ try:
+ r1 = connection.execute(table1.select())
+ connection.execute(table1.insert(), col1=7, col2='this is some data')
+ trans.commit()
+ except:
+ trans.rollback()
+ raise
+
+The :class:`~sqlalchemy.engine.base.Transaction` object also handles "nested"
+behavior by keeping track of the outermost begin/commit pair. In this example,
+two functions both issue a transaction on a Connection, but only the outermost
+Transaction object actually takes effect when it is committed.
+
+.. sourcecode:: python+sql
+
+ # method_a starts a transaction and calls method_b
+ def method_a(connection):
+ trans = connection.begin() # open a transaction
+ try:
+ method_b(connection)
+ trans.commit() # transaction is committed here
+ except:
+ trans.rollback() # this rolls back the transaction unconditionally
+ raise
+
+ # method_b also starts a transaction
+ def method_b(connection):
+ trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction
+ try:
+ connection.execute("insert into mytable values ('bat', 'lala')")
+ connection.execute(mytable.insert(), col1='bat', col2='lala')
+ trans.commit() # transaction is not committed yet
+ except:
+ trans.rollback() # this rolls back the transaction unconditionally
+ raise
+
+ # open a Connection and call method_a
+ conn = engine.connect()
+ method_a(conn)
+ conn.close()
+
+Above, ``method_a`` is called first, which calls ``connection.begin()``. Then
+it calls ``method_b``. When ``method_b`` calls ``connection.begin()``, it just
+increments a counter that is decremented when it calls ``commit()``. If either
+``method_a`` or ``method_b`` calls ``rollback()``, the whole transaction is
+rolled back. The transaction is not committed until ``method_a`` calls the
+``commit()`` method. This "nesting" behavior allows the creation of functions
+which "guarantee" that a transaction will be used if one was not already
+available, but will automatically participate in an enclosing transaction if
+one exists.
+
+.. index::
+ single: thread safety; transactions
+
+.. autoclass:: Transaction
+ :members:
+
+
+Understanding Autocommit
+========================
+
+The previous transaction example illustrates how to use :class:`.Transaction`
+so that several executions can take part in the same transaction. What happens
+when we issue an INSERT, UPDATE or DELETE call without using
+:class:`.Transaction`? The answer is **autocommit**. While many DBAPI
+implementations provide various special "non-transactional" modes, the current
+SQLAlchemy behavior is such that it implements its own "autocommit" which
+works completely consistently across all backends. This is achieved by
+detecting statements which represent data-changing operations, i.e. INSERT,
+UPDATE, DELETE, as well as data definition language (DDL) statements such as
+CREATE TABLE, ALTER TABLE, and then issuing a COMMIT automatically if no
+transaction is in progress. The detection is based on compiled statement
+attributes, or in the case of a text-only statement via regular expressions::
+
+ conn = engine.connect()
+ conn.execute("INSERT INTO users VALUES (1, 'john')") # autocommits
+
+Full control of the "autocommit" behavior is available using the generative
+:meth:`.Connection.execution_options` method provided on :class:`.Connection`,
+:class:`.Engine`, :class:`.Executable`, using the "autocommit" flag which will
+turn on or off the autocommit for the selected scope. For example, a
+:func:`.text` construct representing a stored procedure that commits might use
+it so that a SELECT statement will issue a COMMIT::
+
+ engine.execute(text("SELECT my_mutating_procedure()").execution_options(autocommit=True))
+
+.. _dbengine_implicit:
+
+Connectionless Execution, Implicit Execution
+=============================================
+
+Recall from the first section we mentioned executing with and without explicit
+usage of :class:`.Connection`. "Connectionless" execution
+refers to the usage of the ``execute()`` method on an object which is not a
+:class:`.Connection`. This was illustrated using the :meth:`~.Engine.execute` method
+of :class:`.Engine`.
+
+In addition to "connectionless" execution, it is also possible
+to use the :meth:`~.Executable.execute` method of
+any :class:`.Executable` construct, which is a marker for SQL expression objects
+that support execution. The SQL expression object itself references an
+:class:`.Engine` or :class:`.Connection` known as the **bind**, which it uses
+in order to provide so-called "implicit" execution services.
+
+Given a table as below::
+
+ meta = MetaData()
+ users_table = Table('users', meta,
+ Column('id', Integer, primary_key=True),
+ Column('name', String(50))
+ )
+
+Explicit execution delivers the SQL text or constructed SQL expression to the
+``execute()`` method of :class:`~sqlalchemy.engine.base.Connection`:
+
+.. sourcecode:: python+sql
+
+ engine = create_engine('sqlite:///file.db')
+ connection = engine.connect()
+ result = connection.execute(users_table.select())
+ for row in result:
+ # ....
+ connection.close()
+
+Explicit, connectionless execution delivers the expression to the
+``execute()`` method of :class:`~sqlalchemy.engine.base.Engine`:
+
+.. sourcecode:: python+sql
+
+ engine = create_engine('sqlite:///file.db')
+ result = engine.execute(users_table.select())
+ for row in result:
+ # ....
+ result.close()
+
+Implicit execution is also connectionless, and calls the ``execute()`` method
+on the expression itself, utilizing the fact that either an
+:class:`~sqlalchemy.engine.base.Engine` or
+:class:`~sqlalchemy.engine.base.Connection` has been *bound* to the expression
+object (binding is discussed further in
+:ref:`metadata_toplevel`):
+
+.. sourcecode:: python+sql
+
+ engine = create_engine('sqlite:///file.db')
+ meta.bind = engine
+ result = users_table.select().execute()
+ for row in result:
+ # ....
+ result.close()
+
+In both "connectionless" examples, the
+:class:`~sqlalchemy.engine.base.Connection` is created behind the scenes; the
+:class:`~sqlalchemy.engine.base.ResultProxy` returned by the ``execute()``
+call references the :class:`~sqlalchemy.engine.base.Connection` used to issue
+the SQL statement. When the :class:`.ResultProxy` is closed, the underlying
+:class:`.Connection` is closed for us, resulting in the
+DBAPI connection being returned to the pool with transactional resources removed.
+
+.. _threadlocal_strategy:
+
+Using the Threadlocal Execution Strategy
+========================================
+
+The "threadlocal" engine strategy is an optional feature which
+can be used by non-ORM applications to associate transactions
+with the current thread, such that all parts of the
+application can participate in that transaction implicitly without the need to
+explicitly reference a :class:`.Connection`.
+"threadlocal" is designed for a very specific pattern of use, and is not
+appropriate unless this very specific pattern, described below, is what's
+desired. It has **no impact** on the "thread safety" of SQLAlchemy components
+or one's application. It also should not be used when using an ORM
+:class:`~sqlalchemy.orm.session.Session` object, as the
+:class:`~sqlalchemy.orm.session.Session` itself represents an ongoing
+transaction and itself handles the job of maintaining connection and
+transactional resources.
+
+Enabling ``threadlocal`` is achieved as follows::
+
+ db = create_engine('mysql://localhost/test', strategy='threadlocal')
+
+The above :class:`.Engine` will now acquire a :class:`.Connection` using
+connection resources derived from a thread-local variable whenever
+:meth:`.Engine.execute` or :meth:`.Engine.contextual_connect` is called. This
+connection resource is maintained as long as it is referenced, which allows
+multiple points of an application to share a transaction while using
+connectionless execution::
+
+ def call_operation1():
+ engine.execute("insert into users values (?, ?)", 1, "john")
+
+ def call_operation2():
+ users.update(users.c.user_id==5).execute(name='ed')
+
+ db.begin()
+ try:
+ call_operation1()
+ call_operation2()
+ db.commit()
+ except:
+ db.rollback()
+
+Explicit execution can be mixed with connectionless execution by
+using the :class:`.Engine.connect` method to acquire a :class:`.Connection`
+that is not part of the threadlocal scope::
+
+ db.begin()
+ conn = db.connect()
+ try:
+ conn.execute(log_table.insert(), message="Operation started")
+ call_operation1()
+ call_operation2()
+ db.commit()
+ conn.execute(log_table.insert(), message="Operation succeeded")
+ except:
+ db.rollback()
+ conn.execute(log_table.insert(), message="Operation failed")
+ finally:
+ conn.close()
+
+To access the :class:`.Connection` that is bound to the threadlocal scope,
+call :meth:`.Engine.contextual_connect`::
+
+ conn = db.contextual_connect()
+ call_operation3(conn)
+ conn.close()
+
+Calling :meth:`~.Connection.close` on the "contextual" connection does not release
+its resources until all other usages of that resource are closed as well, and until
+any ongoing transactions are rolled back or committed.
diff --git a/doc/build/core/engines.rst b/doc/build/core/engines.rst
new file mode 100644
index 000000000..de81a6d73
--- /dev/null
+++ b/doc/build/core/engines.rst
@@ -0,0 +1,311 @@
+.. _engines_toplevel:
+
+====================
+Engine Configuration
+====================
+
+The **Engine** is the starting point for any SQLAlchemy application. It's
+"home base" for the actual database and its DBAPI, delivered to the SQLAlchemy
+application through a connection pool and a **Dialect**, which describes how
+to talk to a specific kind of database/DBAPI combination.
+
+The general structure can be illustrated as follows:
+
+.. image:: sqla_engine_arch.png
+
+Where above, an :class:`~sqlalchemy.engine.base.Engine` references both a
+:class:`~sqlalchemy.engine.base.Dialect` and a :class:`~sqlalchemy.pool.Pool`,
+which together interpret the DBAPI's module functions as well as the behavior
+of the database.
+
+Creating an engine is just a matter of issuing a single call,
+:func:`.create_engine()`::
+
+ engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
+
+The above engine invokes the ``postgresql`` dialect and a connection pool
+which references ``localhost:5432``.
+
+The :class:`.Engine`, once created, can either be used directly to interact with the database,
+or can be passed to a :class:`.Session` object to work with the ORM. This section
+covers the details of configuring an :class:`.Engine`. The next section, :ref:`connections_toplevel`,
+will detail the usage API of the :class:`.Engine` and similar, typically for non-ORM
+applications.
+
+
+.. _supported_dbapis:
+
+Supported Databases
+====================
+
+SQLAlchemy includes many :class:`~sqlalchemy.engine.base.Dialect` implementations for various
+backends; each is described as its own package in the :ref:`sqlalchemy.dialects_toplevel` package. A
+SQLAlchemy dialect always requires that an appropriate DBAPI driver is installed.
+
+The table below summarizes the state of DBAPI support in SQLAlchemy 0.6. The values
+translate as:
+
+* yes / Python platform - The SQLAlchemy dialect is mostly or fully operational on the target platform.
+* yes / OS platform - The DBAPI supports that platform.
+* no / Python platform - The DBAPI does not support that platform, or there is no SQLAlchemy dialect support.
+* no / OS platform - The DBAPI does not support that platform.
+* partial - the DBAPI is partially usable on the target platform but has major unresolved issues.
+* development - a development version of the dialect exists, but is not yet usable.
+* thirdparty - the dialect itself is maintained by a third party, who should be consulted for
+ information on current support.
+* \* - indicates the given DBAPI is the "default" for SQLAlchemy, i.e. when just the database name is specified
+
+========================= =========================== =========== =========== =========== ================= ============
+Driver Connect string Py2K Py3K Jython Unix Windows
+========================= =========================== =========== =========== =========== ================= ============
+**DB2/Informix IDS**
+ibm-db_ thirdparty thirdparty thirdparty thirdparty thirdparty thirdparty
+**Firebird**
+kinterbasdb_ ``firebird+kinterbasdb``\* yes development no yes yes
+**Informix**
+informixdb_ ``informix+informixdb``\* development development no unknown unknown
+**MaxDB**
+sapdb_ ``maxdb+sapdb``\* development development no yes unknown
+**Microsoft Access**
+pyodbc_ ``access+pyodbc``\* development development no unknown yes
+**Microsoft SQL Server**
+adodbapi_ ``mssql+adodbapi`` development development no no yes
+`jTDS JDBC Driver`_ ``mssql+zxjdbc`` no no development yes yes
+mxodbc_ ``mssql+mxodbc`` yes development no yes with FreeTDS_ yes
+pyodbc_ ``mssql+pyodbc``\* yes development no yes with FreeTDS_ yes
+pymssql_ ``mssql+pymssql`` yes development no yes yes
+**MySQL**
+`MySQL Connector/J`_ ``mysql+zxjdbc`` no no yes yes yes
+`MySQL Connector/Python`_ ``mysql+mysqlconnector`` yes partial no yes yes
+mysql-python_ ``mysql+mysqldb``\* yes development no yes yes
+OurSQL_ ``mysql+oursql`` yes partial no yes yes
+**Oracle**
+cx_oracle_ ``oracle+cx_oracle``\* yes development no yes yes
+`Oracle JDBC Driver`_ ``oracle+zxjdbc`` no no yes yes yes
+**Postgresql**
+pg8000_ ``postgresql+pg8000`` yes yes no yes yes
+`PostgreSQL JDBC Driver`_ ``postgresql+zxjdbc`` no no yes yes yes
+psycopg2_ ``postgresql+psycopg2``\* yes development no yes yes
+pypostgresql_ ``postgresql+pypostgresql`` no yes no yes yes
+**SQLite**
+pysqlite_ ``sqlite+pysqlite``\* yes yes no yes yes
+sqlite3_ ``sqlite+pysqlite``\* yes yes no yes yes
+**Sybase ASE**
+mxodbc_ ``sybase+mxodbc`` development development no yes yes
+pyodbc_ ``sybase+pyodbc``\* partial development no unknown unknown
+python-sybase_ ``sybase+pysybase`` partial development no yes yes
+========================= =========================== =========== =========== =========== ================= ============
+
+.. _psycopg2: http://www.initd.org/
+.. _pg8000: http://pybrary.net/pg8000/
+.. _pypostgresql: http://python.projects.postgresql.org/
+.. _mysql-python: http://sourceforge.net/projects/mysql-python
+.. _MySQL Connector/Python: https://launchpad.net/myconnpy
+.. _OurSQL: http://packages.python.org/oursql/
+.. _PostgreSQL JDBC Driver: http://jdbc.postgresql.org/
+.. _sqlite3: http://docs.python.org/library/sqlite3.html
+.. _pysqlite: http://pypi.python.org/pypi/pysqlite/
+.. _MySQL Connector/J: http://dev.mysql.com/downloads/connector/j/
+.. _cx_Oracle: http://cx-oracle.sourceforge.net/
+.. _Oracle JDBC Driver: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html
+.. _kinterbasdb: http://firebirdsql.org/index.php?op=devel&sub=python
+.. _pyodbc: http://code.google.com/p/pyodbc/
+.. _mxodbc: http://www.egenix.com/products/python/mxODBC/
+.. _FreeTDS: http://www.freetds.org/
+.. _adodbapi: http://adodbapi.sourceforge.net/
+.. _pymssql: http://code.google.com/p/pymssql/
+.. _jTDS JDBC Driver: http://jtds.sourceforge.net/
+.. _ibm-db: http://code.google.com/p/ibm-db/
+.. _informixdb: http://informixdb.sourceforge.net/
+.. _sapdb: http://www.sapdb.org/sapdbapi.html
+.. _python-sybase: http://python-sybase.sourceforge.net/
+
+Further detail on dialects is available at :ref:`sqlalchemy.dialects_toplevel`
+as well as additional notes on the wiki at `Database Notes
+<http://www.sqlalchemy.org/trac/wiki/DatabaseNotes>`_
+
+
+.. _create_engine_args:
+
+Database Engine Options
+========================
+
+Keyword options can also be specified to :func:`~sqlalchemy.create_engine`,
+following the string URL as follows:
+
+.. sourcecode:: python+sql
+
+ db = create_engine('postgresql://...', encoding='latin1', echo=True)
+
+.. autofunction:: sqlalchemy.create_engine
+
+.. autofunction:: sqlalchemy.engine_from_config
+
+Database URLs
+=============
+
+SQLAlchemy indicates the source of an Engine strictly via `RFC-1738
+<http://rfc.net/rfc1738.html>`_ style URLs, combined with optional keyword
+arguments to specify options for the Engine. The form of the URL is::
+
+ dialect+driver://username:password@host:port/database
+
+Dialect names include the identifying name of the SQLAlchemy dialect which
+include ``sqlite``, ``mysql``, ``postgresql``, ``oracle``, ``mssql``, and
+``firebird``. The drivername is the name of the DBAPI to be used to connect to
+the database using all lowercase letters. If not specified, a "default" DBAPI
+will be imported if available - this default is typically the most widely
+known driver available for that backend (i.e. cx_oracle, pysqlite/sqlite3,
+psycopg2, mysqldb). For Jython connections, specify the `zxjdbc` driver, which
+is the JDBC-DBAPI bridge included with Jython.
+
+.. sourcecode:: python+sql
+
+ # postgresql - psycopg2 is the default driver.
+ pg_db = create_engine('postgresql://scott:tiger@localhost/mydatabase')
+ pg_db = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase')
+ pg_db = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase')
+ pg_db = create_engine('postgresql+pypostgresql://scott:tiger@localhost/mydatabase')
+
+ # postgresql on Jython
+ pg_db = create_engine('postgresql+zxjdbc://scott:tiger@localhost/mydatabase')
+
+ # mysql - MySQLdb (mysql-python) is the default driver
+ mysql_db = create_engine('mysql://scott:tiger@localhost/foo')
+ mysql_db = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
+
+ # mysql on Jython
+ mysql_db = create_engine('mysql+zxjdbc://localhost/foo')
+
+ # mysql with pyodbc (buggy)
+ mysql_db = create_engine('mysql+pyodbc://scott:tiger@some_dsn')
+
+ # oracle - cx_oracle is the default driver
+ oracle_db = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
+
+ # oracle via TNS name
+ oracle_db = create_engine('oracle+cx_oracle://scott:tiger@tnsname')
+
+ # mssql using ODBC datasource names. PyODBC is the default driver.
+ mssql_db = create_engine('mssql://mydsn')
+ mssql_db = create_engine('mssql+pyodbc://mydsn')
+ mssql_db = create_engine('mssql+adodbapi://mydsn')
+ mssql_db = create_engine('mssql+pyodbc://username:password@mydsn')
+
+SQLite connects to file based databases. The same URL format is used, omitting
+the hostname, and using the "file" portion as the filename of the database.
+This has the effect of four slashes being present for an absolute file path::
+
+ # sqlite://<nohostname>/<path>
+ # where <path> is relative:
+ sqlite_db = create_engine('sqlite:///foo.db')
+
+ # or absolute, starting with a slash:
+ sqlite_db = create_engine('sqlite:////absolute/path/to/foo.db')
+
+To use a SQLite ``:memory:`` database, specify an empty URL::
+
+ sqlite_memory_db = create_engine('sqlite://')
+
+The :class:`~sqlalchemy.engine.base.Engine` will ask the connection pool for a
+connection when the ``connect()`` or ``execute()`` methods are called. The
+default connection pool, :class:`~sqlalchemy.pool.QueuePool`, as well as the
+default connection pool used with SQLite,
+:class:`~sqlalchemy.pool.SingletonThreadPool`, will open connections to the
+database on an as-needed basis. As concurrent statements are executed,
+:class:`~sqlalchemy.pool.QueuePool` will grow its pool of connections to a
+default size of five, and will allow a default "overflow" of ten. Since the
+:class:`~sqlalchemy.engine.base.Engine` is essentially "home base" for the
+connection pool, it follows that you should keep a single
+:class:`~sqlalchemy.engine.base.Engine` per database established within an
+application, rather than creating a new one for each connection.
+
+.. autoclass:: sqlalchemy.engine.url.URL
+ :members:
+
+Custom DBAPI connect() arguments
+=================================
+
+Custom arguments used when issuing the ``connect()`` call to the underlying
+DBAPI may be issued in three distinct ways. String-based arguments can be
+passed directly from the URL string as query arguments:
+
+.. sourcecode:: python+sql
+
+ db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar')
+
+If SQLAlchemy's database connector is aware of a particular query argument, it
+may convert its type from string to its proper type.
+
+:func:`~sqlalchemy.create_engine` also takes an argument ``connect_args`` which is an additional dictionary that will be passed to ``connect()``. This can be used when arguments of a type other than string are required, and SQLAlchemy's database connector has no type conversion logic present for that parameter:
+
+.. sourcecode:: python+sql
+
+ db = create_engine('postgresql://scott:tiger@localhost/test', connect_args = {'argument1':17, 'argument2':'bar'})
+
+The most customizable connection method of all is to pass a ``creator``
+argument, which specifies a callable that returns a DBAPI connection:
+
+.. sourcecode:: python+sql
+
+ def connect():
+ return psycopg.connect(user='scott', host='localhost')
+
+ db = create_engine('postgresql://', creator=connect)
+
+
+
+.. _dbengine_logging:
+
+Configuring Logging
+====================
+
+Python's standard `logging
+<http://www.python.org/doc/lib/module-logging.html>`_ module is used to
+implement informational and debug log output with SQLAlchemy. This allows
+SQLAlchemy's logging to integrate in a standard way with other applications
+and libraries. The ``echo`` and ``echo_pool`` flags that are present on
+:func:`~sqlalchemy.create_engine`, as well as the ``echo_uow`` flag used on
+:class:`~sqlalchemy.orm.session.Session`, all interact with regular loggers.
+
+This section assumes familiarity with the above linked logging module. All
+logging performed by SQLAlchemy exists underneath the ``sqlalchemy``
+namespace, as used by ``logging.getLogger('sqlalchemy')``. When logging has
+been configured (i.e. such as via ``logging.basicConfig()``), the general
+namespace of SA loggers that can be turned on is as follows:
+
+* ``sqlalchemy.engine`` - controls SQL echoing. set to ``logging.INFO`` for SQL query output, ``logging.DEBUG`` for query + result set output.
+* ``sqlalchemy.dialects`` - controls custom logging for SQL dialects. See the documentation of individual dialects for details.
+* ``sqlalchemy.pool`` - controls connection pool logging. set to ``logging.INFO`` or lower to log connection pool checkouts/checkins.
+* ``sqlalchemy.orm`` - controls logging of various ORM functions. set to ``logging.INFO`` for information on mapper configurations.
+
+For example, to log SQL queries using Python logging instead of the ``echo=True`` flag::
+
+ import logging
+
+ logging.basicConfig()
+ logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
+
+By default, the log level is set to ``logging.ERROR`` within the entire
+``sqlalchemy`` namespace so that no log operations occur, even within an
+application that has logging enabled otherwise.
+
+The ``echo`` flags present as keyword arguments to
+:func:`~sqlalchemy.create_engine` and others as well as the ``echo`` property
+on :class:`~sqlalchemy.engine.base.Engine`, when set to ``True``, will first
+attempt to ensure that logging is enabled. Unfortunately, the ``logging``
+module provides no way of determining if output has already been configured
+(note we are referring to if a logging configuration has been set up, not just
+that the logging level is set). For this reason, any ``echo=True`` flags will
+result in a call to ``logging.basicConfig()`` using ``sys.stdout`` as the
+destination. It also sets up a default format using the level name, timestamp,
+and logger name. Note that this configuration has the effect of being
+configured **in addition** to any existing logger configurations. Therefore,
+**when using Python logging, ensure all echo flags are set to False at all
+times**, to avoid getting duplicate log lines.
+
+The logger name of an instance such as an :class:`~sqlalchemy.engine.base.Engine`
+or :class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier
+string. To set this to a specific name, use the "logging_name" and
+"pool_logging_name" keyword arguments with :func:`sqlalchemy.create_engine`.
+
diff --git a/doc/build/core/exceptions.rst b/doc/build/core/exceptions.rst
new file mode 100644
index 000000000..f7d384ad9
--- /dev/null
+++ b/doc/build/core/exceptions.rst
@@ -0,0 +1,6 @@
+Core Exceptions
+===============
+
+.. automodule:: sqlalchemy.exc
+ :show-inheritance:
+ :members: \ No newline at end of file
diff --git a/doc/build/reference/sqlalchemy/expressions.rst b/doc/build/core/expression_api.rst
index 98e41fcb8..c01f84c2c 100644
--- a/doc/build/reference/sqlalchemy/expressions.rst
+++ b/doc/build/core/expression_api.rst
@@ -3,6 +3,9 @@ SQL Statements and Expressions
.. module:: sqlalchemy.sql.expression
+This section presents the API reference for the SQL Expression Language. For a full introduction to its usage,
+see :ref:`sqlexpression_toplevel`.
+
Functions
---------
@@ -49,7 +52,10 @@ The expression package uses functions to construct SQL expressions. The return
>>> print func.count(1)
count(:param_1)
- Any name can be given to `func`. If the function name is unknown to SQLAlchemy, it will be rendered exactly as is. For common SQL functions which SQLAlchemy is aware of, the name may be interpreted as a *generic function* which will be compiled appropriately to the target database::
+ Any name can be given to `func`. If the function name is unknown to
+ SQLAlchemy, it will be rendered exactly as is. For common SQL functions
+ which SQLAlchemy is aware of, the name may be interpreted as a *generic
+ function* which will be compiled appropriately to the target database::
>>> print func.current_timestamp()
CURRENT_TIMESTAMP
@@ -59,13 +65,19 @@ The expression package uses functions to construct SQL expressions. The return
>>> print func.stats.yield_curve(5, 10)
stats.yield_curve(:yield_curve_1, :yield_curve_2)
- SQLAlchemy can be made aware of the return type of functions to enable type-specific lexical and result-based behavior. For example, to ensure that a string-based function returns a Unicode value and is similarly treated as a string in expressions, specify :class:`~sqlalchemy.types.Unicode` as the type:
+ SQLAlchemy can be made aware of the return type of functions to enable
+ type-specific lexical and result-based behavior. For example, to ensure
+ that a string-based function returns a Unicode value and is similarly
+ treated as a string in expressions, specify
+ :class:`~sqlalchemy.types.Unicode` as the type:
>>> print func.my_string(u'hi', type_=Unicode) + ' ' + \
... func.my_string(u'there', type_=Unicode)
my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
- Functions which are interpreted as "generic" functions know how to calculate their return type automatically. For a listing of known generic functions, see :ref:`generic_functions`.
+ Functions which are interpreted as "generic" functions know how to
+ calculate their return type automatically. For a listing of known generic
+ functions, see :ref:`generic_functions`.
.. autofunction:: insert
@@ -201,10 +213,18 @@ Classes
Generic Functions
-----------------
-SQL functions which are known to SQLAlchemy with regards to database-specific rendering, return types and argument behavior. Generic functions are invoked like all SQL functions, using the :attr:`func` attribute::
+SQL functions which are known to SQLAlchemy with regards to database-specific
+rendering, return types and argument behavior. Generic functions are invoked
+like all SQL functions, using the :attr:`func` attribute::
select([func.count()]).select_from(sometable)
+Note that any name not known to :attr:`func` generates the function name as
+is - there is no restriction on what SQL functions can be called, known or
+unknown to SQLAlchemy, built-in or user defined. The section here only
+describes those functions where SQLAlchemy already knows what argument and
+return types are in use.
+
.. automodule:: sqlalchemy.sql.functions
:members:
:undoc-members:
diff --git a/doc/build/core/index.rst b/doc/build/core/index.rst
new file mode 100644
index 000000000..377d475e7
--- /dev/null
+++ b/doc/build/core/index.rst
@@ -0,0 +1,21 @@
+.. _core_toplevel:
+
+SQLAlchemy Core
+===============
+
+.. toctree::
+ :maxdepth: 2
+
+ tutorial
+ expression_api
+ engines
+ connections
+ pooling
+ schema
+ types
+ interfaces
+ exceptions
+ compiler
+ serializer
+
+ \ No newline at end of file
diff --git a/doc/build/core/interfaces.rst b/doc/build/core/interfaces.rst
new file mode 100644
index 000000000..71dd8c122
--- /dev/null
+++ b/doc/build/core/interfaces.rst
@@ -0,0 +1,30 @@
+.. _interfaces_core_toplevel:
+
+Core Event Interfaces
+======================
+
+.. module:: sqlalchemy.interfaces
+
+This section describes the various categories of events which can be intercepted
+in SQLAlchemy core, including execution and connection pool events.
+
+For ORM event documentation, see :ref:`interfaces_orm_toplevel`.
+
+A new version of this API with a significantly more flexible and consistent
+interface will be available in version 0.7.
+
+Execution, Connection and Cursor Events
+---------------------------------------
+
+.. autoclass:: ConnectionProxy
+ :members:
+ :undoc-members:
+
+Connection Pool Events
+----------------------
+
+.. autoclass:: PoolListener
+ :members:
+ :undoc-members:
+
+
diff --git a/doc/build/core/pooling.rst b/doc/build/core/pooling.rst
new file mode 100644
index 000000000..edb6a334e
--- /dev/null
+++ b/doc/build/core/pooling.rst
@@ -0,0 +1,226 @@
+.. _pooling_toplevel:
+
+Connection Pooling
+==================
+
+.. module:: sqlalchemy.pool
+
+The establishment of a
+database connection is typically a somewhat expensive operation, and
+applications need a way to get at database connections repeatedly
+with minimal overhead. Particularly for
+server-side web applications, a connection pool is the standard way to
+maintain a "pool" of active database connections in memory which are
+reused across requests.
+
+SQLAlchemy includes several connection pool implementations
+which integrate with the :class:`.Engine`. They can also be used
+directly for applications that want to add pooling to an otherwise
+plain DBAPI approach.
+
+Connection Pool Configuration
+-----------------------------
+
+The :class:`~sqlalchemy.engine.Engine` returned by the
+:func:`~sqlalchemy.create_engine` function in most cases has a :class:`QueuePool`
+integrated, pre-configured with reasonable pooling defaults. If
+you're reading this section to simply enable pooling - congratulations!
+You're already done.
+
+The most common :class:`QueuePool` tuning parameters can be passed
+directly to :func:`~sqlalchemy.create_engine` as keyword arguments:
+``pool_size``, ``max_overflow``, ``pool_recycle`` and
+``pool_timeout``. For example::
+
+ engine = create_engine('postgresql://me@localhost/mydb',
+ pool_size=20, max_overflow=0)
+
+In the case of SQLite, a :class:`SingletonThreadPool` is provided instead,
+to provide compatibility with SQLite's restricted threading model, as well
+as to provide a reasonable default behavior to SQLite "memory" databases,
+which maintain their entire dataset within the scope of a single connection.
+
+All SQLAlchemy pool implementations have in common
+that none of them "pre create" connections - all implementations wait
+until first use before creating a connection. At that point, if
+no additional concurrent checkout requests for more connections
+are made, no additional connections are created. This is why it's perfectly
+fine for :func:`.create_engine` to default to using a :class:`.QueuePool`
+of size five without regard to whether or not the application really needs five connections
+queued up - the pool would only grow to that size if the application
+actually used five connections concurrently, in which case the usage of a
+small pool is an entirely appropriate default behavior.
+
+Switching Pool Implementations
+------------------------------
+
+The usual way to use a different kind of pool with :func:`.create_engine`
+is to use the ``poolclass`` argument. This argument accepts a class
+imported from the ``sqlalchemy.pool`` module, and handles the details
+of building the pool for you. Common options include specifying
+:class:`.QueuePool` with SQLite::
+
+ from sqlalchemy.pool import QueuePool
+ engine = create_engine('sqlite:///file.db', poolclass=QueuePool)
+
+Disabling pooling using :class:`.NullPool`::
+
+ from sqlalchemy.pool import NullPool
+ engine = create_engine(
+ 'postgresql+psycopg2://scott:tiger@localhost/test',
+ poolclass=NullPool)
+
+Using a Custom Connection Function
+----------------------------------
+
+All :class:`.Pool` classes accept an argument ``creator`` which is
+a callable that creates a new connection. :func:`.create_engine`
+accepts this function to pass onto the pool via an argument of
+the same name::
+
+ import sqlalchemy.pool as pool
+ import psycopg2
+
+ def getconn():
+ c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')
+ # do things with 'c' to set up
+ return c
+
+ engine = create_engine('postgresql+psycopg2://', creator=getconn)
+
+For most "initialize on connection" routines, it's more convenient
+to use a :class:`.PoolListener`, so that the usual URL argument to
+:func:`.create_engine` is still usable. ``creator`` is there as
+a total last resort for when a DBAPI has some form of ``connect``
+that is not at all supported by SQLAlchemy.
+
+Constructing a Pool
+------------------------
+
+To use a :class:`.Pool` by itself, the ``creator`` function is
+the only argument that's required and is passed first, followed
+by any additional options::
+
+ import sqlalchemy.pool as pool
+ import psycopg2
+
+ def getconn():
+ c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')
+ return c
+
+ mypool = pool.QueuePool(getconn, max_overflow=10, pool_size=5)
+
+DBAPI connections can then be procured from the pool using the :meth:`.Pool.connect`
+function. The return value of this method is a DBAPI connection that's contained
+within a transparent proxy::
+
+ # get a connection
+ conn = mypool.connect()
+
+ # use it
+ cursor = conn.cursor()
+ cursor.execute("select foo")
+
+The purpose of the transparent proxy is to intercept the ``close()`` call,
+such that instead of the DBAPI connection being closed, it is returned to the
+pool::
+
+ # "close" the connection. Returns
+ # it to the pool.
+ conn.close()
+
+The proxy also returns its contained DBAPI connection to the pool
+when it is garbage collected,
+although it's not deterministic in Python that this occurs immediately (it
+is typical with CPython, however).
+
+A particular pre-created :class:`.Pool` can be shared with one or more
+engines by passing it to the ``pool`` argument of :func:`.create_engine`::
+
+ e = create_engine('postgresql://', pool=mypool)
+
+Pool Event Listeners
+--------------------
+
+Connection pools support an event interface that allows hooks to execute
+upon first connect, upon each new connection, and upon checkout and
+checkin of connections. See :class:`.PoolListener` for details.
+
+Builtin Pool Implementations
+----------------------------
+
+.. autoclass:: sqlalchemy.pool.Pool
+
+ .. automethod:: __init__
+ .. automethod:: connect
+ .. automethod:: dispose
+ .. automethod:: recreate
+
+.. autoclass:: sqlalchemy.pool.QueuePool
+ :show-inheritance:
+
+ .. automethod:: __init__
+
+.. autoclass:: SingletonThreadPool
+ :show-inheritance:
+
+ .. automethod:: __init__
+
+.. autoclass:: AssertionPool
+ :show-inheritance:
+
+.. autoclass:: NullPool
+ :show-inheritance:
+
+.. autoclass:: StaticPool
+ :show-inheritance:
+
+
+Pooling Plain DB-API Connections
+--------------------------------
+
+Any :pep:`249` DB-API module can be "proxied" through the connection
+pool transparently. Usage of the DB-API is exactly as before, except
+the ``connect()`` method will consult the pool. Below we illustrate
+this with ``psycopg2``::
+
+ import sqlalchemy.pool as pool
+ import psycopg2 as psycopg
+
+ psycopg = pool.manage(psycopg)
+
+ # then connect normally
+ connection = psycopg.connect(database='test', username='scott',
+ password='tiger')
+
+This produces a :class:`_DBProxy` object which supports the same
+``connect()`` function as the original DB-API module. Upon
+connection, a connection proxy object is returned, which delegates its
+calls to a real DB-API connection object. This connection object is
+stored persistently within a connection pool (an instance of
+:class:`Pool`) that corresponds to the exact connection arguments sent
+to the ``connect()`` function.
+
+The connection proxy supports all of the methods on the original
+connection object, most of which are proxied via ``__getattr__()``.
+The ``close()`` method will return the connection to the pool, and the
+``cursor()`` method will return a proxied cursor object. Both the
+connection proxy and the cursor proxy will also return the underlying
+connection to the pool after they have both been garbage collected,
+which is detected via weakref callbacks (``__del__`` is not used).
+
+Additionally, when connections are returned to the pool, a
+``rollback()`` is issued on the connection unconditionally. This is
+to release any locks still held by the connection that may have
+resulted from normal activity.
+
+By default, the ``connect()`` method will return the same connection
+that is already checked out in the current thread. This allows a
+particular connection to be used in a given thread without needing to
+pass it around between functions. To disable this behavior, specify
+``use_threadlocal=False`` to the ``manage()`` function.
+
+.. autofunction:: sqlalchemy.pool.manage
+
+.. autofunction:: sqlalchemy.pool.clear_managers
+
diff --git a/doc/build/core/schema.rst b/doc/build/core/schema.rst
new file mode 100644
index 000000000..13b70af8b
--- /dev/null
+++ b/doc/build/core/schema.rst
@@ -0,0 +1,1358 @@
+.. _metadata_toplevel:
+
+==========================
+Schema Definition Language
+==========================
+
+.. module:: sqlalchemy.schema
+
+Describing Databases with MetaData
+==================================
+
+The core of SQLAlchemy's query and object mapping operations are supported by
+*database metadata*, which is comprised of Python objects that describe tables
+and other schema-level objects. These objects are at the core of three major
+types of operations - issuing CREATE and DROP statements (known as *DDL*),
+constructing SQL queries, and expressing information about structures that
+already exist within the database.
+
+Database metadata can be expressed by explicitly naming the various components
+and their properties, using constructs such as
+:class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`,
+:class:`~sqlalchemy.schema.ForeignKey` and
+:class:`~sqlalchemy.schema.Sequence`, all of which are imported from the
+``sqlalchemy.schema`` package. It can also be generated by SQLAlchemy using a
+process called *reflection*, which means you start with a single object such
+as :class:`~sqlalchemy.schema.Table`, assign it a name, and then instruct
+SQLAlchemy to load all the additional information related to that name from a
+particular engine source.
+
+A key feature of SQLAlchemy's database metadata constructs is that they are
+designed to be used in a *declarative* style which closely resembles that of
+real DDL. They are therefore most intuitive to those who have some background
+in creating real schema generation scripts.
+
+A collection of metadata entities is stored in an object aptly named
+:class:`~sqlalchemy.schema.MetaData`::
+
+ from sqlalchemy import *
+
+ metadata = MetaData()
+
+:class:`~sqlalchemy.schema.MetaData` is a container object that keeps together
+many different features of a database (or multiple databases) being described.
+
+To represent a table, use the :class:`~sqlalchemy.schema.Table` class. Its two
+primary arguments are the table name, then the
+:class:`~sqlalchemy.schema.MetaData` object which it will be associated with.
+The remaining positional arguments are mostly
+:class:`~sqlalchemy.schema.Column` objects describing each column::
+
+ user = Table('user', metadata,
+ Column('user_id', Integer, primary_key = True),
+ Column('user_name', String(16), nullable = False),
+ Column('email_address', String(60)),
+ Column('password', String(20), nullable = False)
+ )
+
+Above, a table called ``user`` is described, which contains four columns. The
+primary key of the table consists of the ``user_id`` column. Multiple columns
+may be assigned the ``primary_key=True`` flag which denotes a multi-column
+primary key, known as a *composite* primary key.
+
+Note also that each column describes its datatype using objects corresponding
+to genericized types, such as :class:`~sqlalchemy.types.Integer` and
+:class:`~sqlalchemy.types.String`. SQLAlchemy features dozens of types of
+varying levels of specificity as well as the ability to create custom types.
+Documentation on the type system can be found at :ref:`types`.
+
+Accessing Tables and Columns
+----------------------------
+
+The :class:`~sqlalchemy.schema.MetaData` object contains all of the schema
+constructs we've associated with it. It supports a few methods of accessing
+these table objects, such as the ``sorted_tables`` accessor which returns a
+list of each :class:`~sqlalchemy.schema.Table` object in order of foreign key
+dependency (that is, each table is preceded by all tables which it
+references)::
+
+ >>> for t in metadata.sorted_tables:
+ ... print t.name
+ user
+ user_preference
+ invoice
+ invoice_item
+
+In most cases, individual :class:`~sqlalchemy.schema.Table` objects have been
+explicitly declared, and these objects are typically accessed directly as
+module-level variables in an application. Once a
+:class:`~sqlalchemy.schema.Table` has been defined, it has a full set of
+accessors which allow inspection of its properties. Given the following
+:class:`~sqlalchemy.schema.Table` definition::
+
+ employees = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('employee_name', String(60), nullable=False),
+ Column('employee_dept', Integer, ForeignKey("departments.department_id"))
+ )
+
+Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table -
+this construct defines a reference to a remote table, and is fully described
+in :ref:`metadata_foreignkeys`. Methods of accessing information about this
+table include::
+
+ # access the column "EMPLOYEE_ID":
+ employees.columns.employee_id
+
+ # or just
+ employees.c.employee_id
+
+ # via string
+ employees.c['employee_id']
+
+ # iterate through all columns
+ for c in employees.c:
+ print c
+
+ # get the table's primary key columns
+ for primary_key in employees.primary_key:
+ print primary_key
+
+ # get the table's foreign key objects:
+ for fkey in employees.foreign_keys:
+ print fkey
+
+ # access the table's MetaData:
+ employees.metadata
+
+ # access the table's bound Engine or Connection, if its MetaData is bound:
+ employees.bind
+
+ # access a column's name, type, nullable, primary key, foreign key
+ employees.c.employee_id.name
+ employees.c.employee_id.type
+ employees.c.employee_id.nullable
+ employees.c.employee_id.primary_key
+ employees.c.employee_dept.foreign_keys
+
+ # get the "key" of a column, which defaults to its name, but can
+ # be any user-defined string:
+ employees.c.employee_name.key
+
+ # access a column's table:
+ employees.c.employee_id.table is employees
+
+ # get the table related by a foreign key
+ list(employees.c.employee_dept.foreign_keys)[0].column.table
+
+.. _metadata_binding:
+
+
+Creating and Dropping Database Tables
+-------------------------------------
+
+Once you've defined some :class:`~sqlalchemy.schema.Table` objects, assuming
+you're working with a brand new database one thing you might want to do is
+issue CREATE statements for those tables and their related constructs (as an
+aside, it's also quite possible that you *don't* want to do this, if you
+already have some preferred methodology such as tools included with your
+database or an existing scripting system - if that's the case, feel free to
+skip this section - SQLAlchemy has no requirement that it be used to create
+your tables).
+
+The usual way to issue CREATE is to use
+:func:`~sqlalchemy.schema.MetaData.create_all` on the
+:class:`~sqlalchemy.schema.MetaData` object. This method will issue queries
+that first check for the existence of each individual table, and if not found
+will issue the CREATE statements:
+
+ .. sourcecode:: python+sql
+
+ engine = create_engine('sqlite:///:memory:')
+
+ metadata = MetaData()
+
+ user = Table('user', metadata,
+ Column('user_id', Integer, primary_key = True),
+ Column('user_name', String(16), nullable = False),
+ Column('email_address', String(60), key='email'),
+ Column('password', String(20), nullable = False)
+ )
+
+ user_prefs = Table('user_prefs', metadata,
+ Column('pref_id', Integer, primary_key=True),
+ Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
+ Column('pref_name', String(40), nullable=False),
+ Column('pref_value', String(100))
+ )
+
+ {sql}metadata.create_all(engine)
+ PRAGMA table_info(user){}
+ CREATE TABLE user(
+ user_id INTEGER NOT NULL PRIMARY KEY,
+ user_name VARCHAR(16) NOT NULL,
+ email_address VARCHAR(60),
+ password VARCHAR(20) NOT NULL
+ )
+ PRAGMA table_info(user_prefs){}
+ CREATE TABLE user_prefs(
+ pref_id INTEGER NOT NULL PRIMARY KEY,
+ user_id INTEGER NOT NULL REFERENCES user(user_id),
+ pref_name VARCHAR(40) NOT NULL,
+ pref_value VARCHAR(100)
+ )
+
+:func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints
+between tables usually inline with the table definition itself, and for this
+reason it also generates the tables in order of their dependency. There are
+options to change this behavior such that ``ALTER TABLE`` is used instead.
+
+Dropping all tables is similarly achieved using the
+:func:`~sqlalchemy.schema.MetaData.drop_all` method. This method does the
+exact opposite of :func:`~sqlalchemy.schema.MetaData.create_all` - the
+presence of each table is checked first, and tables are dropped in reverse
+order of dependency.
+
+Creating and dropping individual tables can be done via the ``create()`` and
+``drop()`` methods of :class:`~sqlalchemy.schema.Table`. These methods by
+default issue the CREATE or DROP regardless of the table being present:
+
+.. sourcecode:: python+sql
+
+ engine = create_engine('sqlite:///:memory:')
+
+ meta = MetaData()
+
+ employees = Table('employees', meta,
+ Column('employee_id', Integer, primary_key=True),
+ Column('employee_name', String(60), nullable=False, key='name'),
+ Column('employee_dept', Integer, ForeignKey("departments.department_id"))
+ )
+ {sql}employees.create(engine)
+ CREATE TABLE employees(
+ employee_id SERIAL NOT NULL PRIMARY KEY,
+ employee_name VARCHAR(60) NOT NULL,
+ employee_dept INTEGER REFERENCES departments(department_id)
+ )
+ {}
+
+``drop()`` method:
+
+.. sourcecode:: python+sql
+
+ {sql}employees.drop(engine)
+ DROP TABLE employees
+ {}
+
+To enable the "check first for the table existing" logic, add the
+``checkfirst=True`` argument to ``create()`` or ``drop()``::
+
+ employees.create(engine, checkfirst=True)
+    employees.drop(engine, checkfirst=True)
+
+
+Binding MetaData to an Engine or Connection
+--------------------------------------------
+
+Notice in the previous section the creator/dropper methods accept an argument
+for the database engine in use. When a schema construct is combined with an
+:class:`~sqlalchemy.engine.base.Engine` object, or an individual
+:class:`~sqlalchemy.engine.base.Connection` object, we call this the *bind*.
+In the above examples the bind is associated with the schema construct only
+for the duration of the operation. However, the option exists to persistently
+associate a bind with a set of schema constructs via the
+:class:`~sqlalchemy.schema.MetaData` object's ``bind`` attribute::
+
+ engine = create_engine('sqlite://')
+
+ # create MetaData
+ meta = MetaData()
+
+ # bind to an engine
+ meta.bind = engine
+
+We can now call methods like :func:`~sqlalchemy.schema.MetaData.create_all`
+without needing to pass the :class:`~sqlalchemy.engine.base.Engine`::
+
+ meta.create_all()
+
+The MetaData's bind is used for anything that requires an active connection,
+such as loading the definition of a table from the database automatically
+(called *reflection*)::
+
+ # describe a table called 'users', query the database for its columns
+ users_table = Table('users', meta, autoload=True)
+
+As well as for executing SQL constructs that are derived from that MetaData's table objects::
+
+ # generate a SELECT statement and execute
+ result = users_table.select().execute()
+
+Binding the MetaData to the Engine is a **completely optional** feature. The
+above operations can be achieved without the persistent bind using
+parameters::
+
+ # describe a table called 'users', query the database for its columns
+ users_table = Table('users', meta, autoload=True, autoload_with=engine)
+
+ # generate a SELECT statement and execute
+ result = engine.execute(users_table.select())
+
+Should you use bind? It's probably best to start without it, and wait for a
+specific need to arise. Bind is useful if:
+
+* You aren't using the ORM, are usually using "connectionless" execution, and
+ find yourself constantly needing to specify the same
+ :class:`~sqlalchemy.engine.base.Engine` object throughout the entire
+ application. Bind can be used here to provide "implicit" execution.
+* Your application has multiple schemas that correspond to different engines.
+ Using one :class:`~sqlalchemy.schema.MetaData` for each schema, bound to
+ each engine, provides a decent place to delineate between the schemas. The
+ ORM will also integrate with this approach, where the :class:`.Session` will
+ naturally use the engine that is bound to each table via its metadata
+  (provided the :class:`.Session` itself has no ``bind`` configured).
+
+Alternatively, the ``bind`` attribute of :class:`~sqlalchemy.schema.MetaData`
+is *confusing* if:
+
+* Your application talks to multiple database engines at different times,
+ which use the *same* set of :class:`Table` objects. It's usually confusing
+ and unnecessary to begin to create "copies" of :class:`Table` objects just
+ so that different engines can be used for different operations. An example
+ is an application that writes data to a "master" database while performing
+ read-only operations from a "read slave". A global
+ :class:`~sqlalchemy.schema.MetaData` object is *not* appropriate for
+ per-request switching like this, although a
+ :class:`~sqlalchemy.schema.ThreadLocalMetaData` object is.
+* You are using the ORM :class:`.Session` to handle which class/table is bound
+ to which engine, or you are using the :class:`.Session` to manage switching
+  between engines. It's a good idea to keep the "binding of tables to engines"
+ in one place - either using :class:`~sqlalchemy.schema.MetaData` only (the
+ :class:`.Session` can of course be present, it just has no ``bind``
+ configured), or using :class:`.Session` only (the ``bind`` attribute of
+ :class:`~sqlalchemy.schema.MetaData` is left empty).
+
+Specifying the Schema Name
+---------------------------
+
+Some databases support the concept of multiple schemas. A :class:`~sqlalchemy.schema.Table` can reference this by specifying the ``schema`` keyword argument::
+
+ financial_info = Table('financial_info', meta,
+ Column('id', Integer, primary_key=True),
+ Column('value', String(100), nullable=False),
+ schema='remote_banks'
+ )
+
+Within the :class:`~sqlalchemy.schema.MetaData` collection, this table will be identified by the combination of ``financial_info`` and ``remote_banks``. If another table called ``financial_info`` is referenced without the ``remote_banks`` schema, it will refer to a different :class:`~sqlalchemy.schema.Table`. :class:`~sqlalchemy.schema.ForeignKey` objects can specify references to columns in this table using the form ``remote_banks.financial_info.id``.
+
+The ``schema`` argument should be used for any name qualifiers required, including Oracle's "owner" attribute and similar. It also can accommodate a dotted name for longer schemes::
+
+ schema="dbo.scott"
+
+Backend-Specific Options
+------------------------
+
+:class:`~sqlalchemy.schema.Table` supports database-specific options. For example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``::
+
+ addresses = Table('engine_email_addresses', meta,
+ Column('address_id', Integer, primary_key = True),
+ Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
+ Column('email_address', String(20)),
+ mysql_engine='InnoDB'
+ )
+
+Other backends may support table-level options as well - these would be described in the individual documentation sections for each dialect.
+
+Schema API Constructs
+---------------------
+
+.. autoclass:: Column
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: MetaData
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: Table
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: ThreadLocalMetaData
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. _metadata_reflection:
+
+Reflecting Database Objects
+===========================
+
+A :class:`~sqlalchemy.schema.Table` object can be instructed to load
+information about itself from the corresponding database schema object already
+existing within the database. This process is called *reflection*. Most simply
+you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData`
+object, and the ``autoload=True`` flag. If the
+:class:`~sqlalchemy.schema.MetaData` is not persistently bound, also add the
+``autoload_with`` argument::
+
+ >>> messages = Table('messages', meta, autoload=True, autoload_with=engine)
+ >>> [c.name for c in messages.columns]
+ ['message_id', 'message_name', 'date']
+
+The above operation will use the given engine to query the database for
+information about the ``messages`` table, and will then generate
+:class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey`,
+and other objects corresponding to this information as though the
+:class:`~sqlalchemy.schema.Table` object were hand-constructed in Python.
+
+When tables are reflected, if a given table references another one via foreign
+key, a second :class:`~sqlalchemy.schema.Table` object is created within the
+:class:`~sqlalchemy.schema.MetaData` object representing the connection.
+Below, assume the table ``shopping_cart_items`` references a table named
+``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the
+effect such that the ``shopping_carts`` table will also be loaded::
+
+ >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine)
+    >>> 'shopping_carts' in meta.tables
+ True
+
+The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like"
+behavior such that if you requested both tables individually,
+:class:`~sqlalchemy.schema.MetaData` will ensure that exactly one
+:class:`~sqlalchemy.schema.Table` object is created for each distinct table
+name. The :class:`~sqlalchemy.schema.Table` constructor actually returns to
+you the already-existing :class:`~sqlalchemy.schema.Table` object if one
+already exists with the given name. Such as below, we can access the already
+generated ``shopping_carts`` table just by naming it::
+
+ shopping_carts = Table('shopping_carts', meta)
+
+Of course, it's a good idea to use ``autoload=True`` with the above table
+regardless. This is so that the table's attributes will be loaded if they have
+not been already. The autoload operation only occurs for the table if it
+hasn't already been loaded; once loaded, new calls to
+:class:`~sqlalchemy.schema.Table` with the same name will not re-issue any
+reflection queries.
+
+Overriding Reflected Columns
+-----------------------------
+
+Individual columns can be overridden with explicit values when reflecting
+tables; this is handy for specifying custom datatypes, constraints such as
+primary keys that may not be configured within the database, etc.::
+
+ >>> mytable = Table('mytable', meta,
+ ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key
+ ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode
+ ... autoload=True)
+
+Reflecting Views
+-----------------
+
+The reflection system can also reflect views. Basic usage is the same as that
+of a table::
+
+ my_view = Table("some_view", metadata, autoload=True)
+
+Above, ``my_view`` is a :class:`~sqlalchemy.schema.Table` object with
+:class:`~sqlalchemy.schema.Column` objects representing the names and types of
+each column within the view "some_view".
+
+Usually, it's desired to have at least a primary key constraint when
+reflecting a view, if not foreign keys as well. View reflection doesn't
+extrapolate these constraints.
+
+Use the "override" technique for this, specifying explicitly those columns
+which are part of the primary key or have foreign key constraints::
+
+ my_view = Table("some_view", metadata,
+ Column("view_id", Integer, primary_key=True),
+ Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
+ autoload=True
+ )
+
+Reflecting All Tables at Once
+-----------------------------
+
+The :class:`~sqlalchemy.schema.MetaData` object can also get a listing of
+tables and reflect the full set. This is achieved by using the
+:func:`~sqlalchemy.schema.MetaData.reflect` method. After calling it, all
+located tables are present within the :class:`~sqlalchemy.schema.MetaData`
+object's dictionary of tables::
+
+ meta = MetaData()
+ meta.reflect(bind=someengine)
+ users_table = meta.tables['users']
+ addresses_table = meta.tables['addresses']
+
+``metadata.reflect()`` also provides a handy way to delete all the rows from every table in a database::
+
+ meta = MetaData()
+ meta.reflect(bind=someengine)
+ for table in reversed(meta.sorted_tables):
+ someengine.execute(table.delete())
+
+Fine Grained Reflection with Inspector
+--------------------------------------
+
+A low level interface which provides a backend-agnostic system of loading
+lists of schema, table, column, and constraint descriptions from a given
+database is also available. This is known as the "Inspector"::
+
+ from sqlalchemy import create_engine
+ from sqlalchemy.engine import reflection
+ engine = create_engine('...')
+ insp = reflection.Inspector.from_engine(engine)
+ print insp.get_table_names()
+
+.. autoclass:: sqlalchemy.engine.reflection.Inspector
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Column Insert/Update Defaults
+==============================
+
+SQLAlchemy provides a very rich featureset regarding column level events which
+take place during INSERT and UPDATE statements. Options include:
+
+* Scalar values used as defaults during INSERT and UPDATE operations
+* Python functions which execute upon INSERT and UPDATE operations
+* SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand)
+* SQL expressions which are embedded in UPDATE statements
+* Server side default values used during INSERT
+* Markers for server-side triggers used during UPDATE
+
+The general rule for all insert/update defaults is that they only take effect
+if no value for a particular column is passed as an ``execute()`` parameter;
+otherwise, the given value is used.
+
+Scalar Defaults
+---------------
+
+The simplest kind of default is a scalar value used as the default value of a column::
+
+ Table("mytable", meta,
+ Column("somecolumn", Integer, default=12)
+ )
+
+Above, the value "12" will be bound as the column value during an INSERT if no
+other value is supplied.
+
+A scalar value may also be associated with an UPDATE statement, though this is
+not very common (as UPDATE statements are usually looking for dynamic
+defaults)::
+
+ Table("mytable", meta,
+ Column("somecolumn", Integer, onupdate=25)
+ )
+
+
+Python-Executed Functions
+-------------------------
+
+The ``default`` and ``onupdate`` keyword arguments also accept Python
+functions. These functions are invoked at the time of insert or update if no
+other value for that column is supplied, and the value returned is used for
+the column's value. Below illustrates a crude "sequence" that assigns an
+incrementing counter to a primary key column::
+
+ # a function which counts upwards
+ i = 0
+ def mydefault():
+ global i
+ i += 1
+ return i
+
+ t = Table("mytable", meta,
+ Column('id', Integer, primary_key=True, default=mydefault),
+ )
+
+It should be noted that for real "incrementing sequence" behavior, the
+built-in capabilities of the database should normally be used, which may
+include sequence objects or other autoincrementing capabilities. For primary
+key columns, SQLAlchemy will in most cases use these capabilities
+automatically. See the API documentation for
+:class:`~sqlalchemy.schema.Column` including the ``autoincrement`` flag, as
+well as the section on :class:`~sqlalchemy.schema.Sequence` later in this
+chapter for background on standard primary key generation techniques.
+
+To illustrate onupdate, we assign the Python ``datetime`` function ``now`` to
+the ``onupdate`` attribute::
+
+ import datetime
+
+ t = Table("mytable", meta,
+ Column('id', Integer, primary_key=True),
+
+ # define 'last_updated' to be populated with datetime.now()
+ Column('last_updated', DateTime, onupdate=datetime.datetime.now),
+ )
+
+When an update statement executes and no value is passed for ``last_updated``,
+the ``datetime.datetime.now()`` Python function is executed and its return
+value used as the value for ``last_updated``. Notice that we provide ``now``
+as the function itself without calling it (i.e. there are no parentheses
+following) - SQLAlchemy will execute the function at the time the statement
+executes.
+
+Context-Sensitive Default Functions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Python functions used by ``default`` and ``onupdate`` may also make use of
+the current statement's context in order to determine a value. The `context`
+of a statement is an internal SQLAlchemy object which contains all information
+about the statement being executed, including its source expression, the
+parameters associated with it and the cursor. The typical use case for this
+context with regards to default generation is to have access to the other
+values being inserted or updated on the row. To access the context, provide a
+function that accepts a single ``context`` argument::
+
+ def mydefault(context):
+ return context.current_parameters['counter'] + 12
+
+ t = Table('mytable', meta,
+ Column('counter', Integer),
+ Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault)
+ )
+
+Above we illustrate a default function which will execute for all INSERT and
+UPDATE statements where a value for ``counter_plus_twelve`` was otherwise not
+provided, and the value will be that of whatever value is present in the
+execution for the ``counter`` column, plus the number 12.
+
+While the context object passed to the default function has many attributes,
+the ``current_parameters`` member is a special member provided only during the
+execution of a default function for the purposes of deriving defaults from its
+existing values. For a single statement that is executing many sets of bind
+parameters, the user-defined function is called for each set of parameters,
+and ``current_parameters`` will be provided with each individual parameter set
+for each execution.
+
+SQL Expressions
+---------------
+
+The "default" and "onupdate" keywords may also be passed SQL expressions,
+including select statements or direct function calls::
+
+ t = Table("mytable", meta,
+ Column('id', Integer, primary_key=True),
+
+ # define 'create_date' to default to now()
+ Column('create_date', DateTime, default=func.now()),
+
+ # define 'key' to pull its default from the 'keyvalues' table
+        Column('key', String(20), default=keyvalues.select(keyvalues.c.type=='type1', limit=1)),
+
+ # define 'last_modified' to use the current_timestamp SQL function on update
+ Column('last_modified', DateTime, onupdate=func.utc_timestamp())
+ )
+
+Above, the ``create_date`` column will be populated with the result of the
+``now()`` SQL function (which, depending on backend, compiles into ``NOW()``
+or ``CURRENT_TIMESTAMP`` in most cases) during an INSERT statement, and the
+``key`` column with the result of a SELECT subquery from another table. The
+``last_modified`` column will be populated with the value of
+``UTC_TIMESTAMP()``, a function specific to MySQL, when an UPDATE statement is
+emitted for this table.
+
+Note that when using ``func`` functions, unlike when using Python `datetime`
+functions we *do* call the function, i.e. with parentheses "()" - this is
+because what we want in this case is the return value of the function, which
+is the SQL expression construct that will be rendered into the INSERT or
+UPDATE statement.
+
+The above SQL functions are usually executed "inline" with the INSERT or
+UPDATE statement being executed, meaning, a single statement is executed which
+embeds the given expressions or subqueries within the VALUES or SET clause of
+the statement. Although in some cases, the function is "pre-executed" in a
+SELECT statement of its own beforehand. This happens when all of the following
+is true:
+
+* the column is a primary key column
+* the database dialect does not support a usable ``cursor.lastrowid`` accessor
+ (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as
+ well as some MySQL dialects.
+* the dialect does not support the "RETURNING" clause or similar, or the
+ ``implicit_returning`` flag is set to ``False`` for the dialect. Dialects
+ which support RETURNING currently include Postgresql, Oracle, Firebird, and
+ MS-SQL.
+* the statement is a single execution, i.e. only supplies one set of
+ parameters and doesn't use "executemany" behavior
+* the ``inline=True`` flag is not set on the
+ :class:`~sqlalchemy.sql.expression.Insert()` or
+ :class:`~sqlalchemy.sql.expression.Update()` construct, and the statement has
+ not defined an explicit `returning()` clause.
+
+Whether or not the default generation clause "pre-executes" is not something
+that normally needs to be considered, unless it is being addressed for
+performance reasons.
+
+When the statement is executed with a single set of parameters (that is, it is
+not an "executemany" style execution), the returned
+:class:`~sqlalchemy.engine.base.ResultProxy` will contain a collection
+accessible via ``result.postfetch_cols()`` which contains a list of all
+:class:`~sqlalchemy.schema.Column` objects which had an inline-executed
+default. Similarly, all parameters which were bound to the statement,
+including all Python and SQL expressions which were pre-executed, are present
+in the ``last_inserted_params()`` or ``last_updated_params()`` collections on
+:class:`~sqlalchemy.engine.base.ResultProxy`. The ``inserted_primary_key``
+collection contains a list of primary key values for the row inserted (a list
+so that single-column and composite-column primary keys are represented in the
+same format).
+
+Server Side Defaults
+--------------------
+
+A variant on the SQL expression default is the ``server_default``, which gets
+placed in the CREATE TABLE statement during a ``create()`` operation:
+
+.. sourcecode:: python+sql
+
+ t = Table('test', meta,
+ Column('abc', String(20), server_default='abc'),
+ Column('created_at', DateTime, server_default=text("sysdate"))
+ )
+
+A create call for the above table will produce::
+
+ CREATE TABLE test (
+ abc varchar(20) default 'abc',
+ created_at datetime default sysdate
+ )
+
+The behavior of ``server_default`` is similar to that of a regular SQL
+default; if it's placed on a primary key column for a database which doesn't
+have a way to "postfetch" the ID, and the statement is not "inlined", the SQL
+expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on
+the database side normally.
+
+Triggered Columns
+------------------
+
+Columns with values set by a database trigger or other external process may be
+called out with a marker::
+
+ t = Table('test', meta,
+ Column('abc', String(20), server_default=FetchedValue()),
+ Column('def', String(20), server_onupdate=FetchedValue())
+ )
+
+These markers do not emit a "default" clause when the table is created,
+however they do set the same internal flags as a static ``server_default``
+clause, providing hints to higher-level tools that a "post-fetch" of these
+rows should be performed after an insert or update.
+
+Defining Sequences
+-------------------
+
+SQLAlchemy represents database sequences using the
+:class:`~sqlalchemy.schema.Sequence` object, which is considered to be a
+special case of "column default". It only has an effect on databases which
+have explicit support for sequences, which currently includes Postgresql,
+Oracle, and Firebird. The :class:`~sqlalchemy.schema.Sequence` object is
+otherwise ignored.
+
+The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a
+"default" generator to be used during INSERT operations, and can also be
+configured to fire off during UPDATE operations if desired. It is most
+commonly used in conjunction with a single integer primary key column::
+
+ table = Table("cartitems", meta,
+ Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
+ Column("description", String(40)),
+ Column("createdate", DateTime())
+ )
+
+Where above, the table "cartitems" is associated with a sequence named
+"cart_id_seq". When INSERT statements take place for "cartitems", and no value
+is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to
+generate a value.
+
+When the :class:`~sqlalchemy.schema.Sequence` is associated with a table,
+CREATE and DROP statements issued for that table will also issue CREATE/DROP
+for the sequence object as well, thus "bundling" the sequence object with its
+parent table.
+
+The :class:`~sqlalchemy.schema.Sequence` object also implements special
+functionality to accommodate Postgresql's SERIAL datatype. The SERIAL type in
+PG automatically generates a sequence that is used implicitly during inserts.
+This means that if a :class:`~sqlalchemy.schema.Table` object defines a
+:class:`~sqlalchemy.schema.Sequence` on its primary key column so that it
+works with Oracle and Firebird, the :class:`~sqlalchemy.schema.Sequence` would
+get in the way of the "implicit" sequence that PG would normally use. For this
+use case, add the flag ``optional=True`` to the
+:class:`~sqlalchemy.schema.Sequence` object - this indicates that the
+:class:`~sqlalchemy.schema.Sequence` should only be used if the database
+provides no other option for generating primary key identifiers.
+
+The :class:`~sqlalchemy.schema.Sequence` object also has the ability to be
+executed standalone like a SQL expression, which has the effect of calling its
+"next value" function::
+
+ seq = Sequence('some_sequence')
+ nextid = connection.execute(seq)
+
+Default Generation API Constructs
+---------------------------------
+
+.. autoclass:: ColumnDefault
+ :show-inheritance:
+
+.. autoclass:: DefaultClause
+ :show-inheritance:
+
+.. autoclass:: DefaultGenerator
+ :show-inheritance:
+
+.. autoclass:: FetchedValue
+ :show-inheritance:
+
+.. autoclass:: PassiveDefault
+ :show-inheritance:
+
+.. autoclass:: Sequence
+ :show-inheritance:
+
+Defining Constraints and Indexes
+=================================
+
+.. _metadata_foreignkeys:
+
+Defining Foreign Keys
+---------------------
+
+A *foreign key* in SQL is a table-level construct that constrains one or more
+columns in that table to only allow values that are present in a different set
+of columns, typically but not always located on a different table. We call the
+columns which are constrained the *foreign key* columns and the columns which
+they are constrained towards the *referenced* columns. The referenced columns
+almost always define the primary key for their owning table, though there are
+exceptions to this. The foreign key is the "joint" that connects together
+pairs of rows which have a relationship with each other, and SQLAlchemy
+assigns very deep importance to this concept in virtually every area of its
+operation.
+
+In SQLAlchemy as well as in DDL, foreign key constraints can be defined as
+additional attributes within the table clause, or for single-column foreign
+keys they may optionally be specified within the definition of a single
+column. The single column foreign key is more common, and at the column level
+is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object
+as an argument to a :class:`~sqlalchemy.schema.Column` object::
+
+ user_preference = Table('user_preference', metadata,
+ Column('pref_id', Integer, primary_key=True),
+ Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
+ Column('pref_name', String(40), nullable=False),
+ Column('pref_value', String(100))
+ )
+
+Above, we define a new table ``user_preference`` for which each row must
+contain a value in the ``user_id`` column that also exists in the ``user``
+table's ``user_id`` column.
+
+The argument to :class:`~sqlalchemy.schema.ForeignKey` is most commonly a
+string of the form *<tablename>.<columnname>*, or for a table in a remote
+schema or "owner" of the form *<schemaname>.<tablename>.<columnname>*. It may
+also be an actual :class:`~sqlalchemy.schema.Column` object, which as we'll
+see later is accessed from an existing :class:`~sqlalchemy.schema.Table`
+object via its ``c`` collection::
+
+ ForeignKey(user.c.user_id)
+
+The advantage to using a string is that the in-python linkage between ``user``
+and ``user_preference`` is resolved only when first needed, so that table
+objects can be easily spread across multiple modules and defined in any order.
+
+Foreign keys may also be defined at the table level, using the
+:class:`~sqlalchemy.schema.ForeignKeyConstraint` object. This object can
+describe a single- or multi-column foreign key. A multi-column foreign key is
+known as a *composite* foreign key, and almost always references a table that
+has a composite primary key. Below we define a table ``invoice`` which has a
+composite primary key::
+
+ invoice = Table('invoice', metadata,
+ Column('invoice_id', Integer, primary_key=True),
+ Column('ref_num', Integer, primary_key=True),
+ Column('description', String(60), nullable=False)
+ )
+
+And then a table ``invoice_item`` with a composite foreign key referencing
+``invoice``::
+
+ invoice_item = Table('invoice_item', metadata,
+ Column('item_id', Integer, primary_key=True),
+ Column('item_name', String(60), nullable=False),
+ Column('invoice_id', Integer, nullable=False),
+ Column('ref_num', Integer, nullable=False),
+ ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num'])
+ )
+
+It's important to note that the
+:class:`~sqlalchemy.schema.ForeignKeyConstraint` is the only way to define a
+composite foreign key. While we could also have placed individual
+:class:`~sqlalchemy.schema.ForeignKey` objects on both the
+``invoice_item.invoice_id`` and ``invoice_item.ref_num`` columns, SQLAlchemy
+would not be aware that these two values should be paired together - it would
+be two individual foreign key constraints instead of a single composite
+foreign key referencing two columns.
+
+Creating/Dropping Foreign Key Constraints via ALTER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In all the above examples, the :class:`~sqlalchemy.schema.ForeignKey` object
+causes the "REFERENCES" keyword to be added inline to a column definition
+within a "CREATE TABLE" statement when
+:func:`~sqlalchemy.schema.MetaData.create_all` is issued, and
+:class:`~sqlalchemy.schema.ForeignKeyConstraint` invokes the "CONSTRAINT"
+keyword inline with "CREATE TABLE". There are some cases where this is
+undesirable, particularly when two tables reference each other mutually, each
+with a foreign key referencing the other. In such a situation at least one of
+the foreign key constraints must be generated after both tables have been
+built. To support such a scheme, :class:`~sqlalchemy.schema.ForeignKey` and
+:class:`~sqlalchemy.schema.ForeignKeyConstraint` offer the flag
+``use_alter=True``. When using this flag, the constraint will be generated
+using a definition similar to "ALTER TABLE <tablename> ADD CONSTRAINT <name>
+...". Since a name is required, the ``name`` attribute must also be specified.
+For example::
+
+ node = Table('node', meta,
+ Column('node_id', Integer, primary_key=True),
+ Column('primary_element', Integer,
+ ForeignKey('element.element_id', use_alter=True, name='fk_node_element_id')
+ )
+ )
+
+ element = Table('element', meta,
+ Column('element_id', Integer, primary_key=True),
+ Column('parent_node_id', Integer),
+ ForeignKeyConstraint(
+ ['parent_node_id'],
+ ['node.node_id'],
+ use_alter=True,
+ name='fk_element_parent_node_id'
+ )
+ )
+
+ON UPDATE and ON DELETE
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Most databases support *cascading* of foreign key values, that is, when a
+parent row is updated the new value is placed in child rows, or when the
+parent row is deleted all corresponding child rows are set to null or deleted.
+In data definition language these are specified using phrases like "ON UPDATE
+CASCADE", "ON DELETE CASCADE", and "ON DELETE SET NULL", corresponding to
+foreign key constraints. The phrase after "ON UPDATE" or "ON DELETE" may also
+allow other phrases that are specific to the database in use. The
+:class:`~sqlalchemy.schema.ForeignKey` and
+:class:`~sqlalchemy.schema.ForeignKeyConstraint` objects support the
+generation of this clause via the ``onupdate`` and ``ondelete`` keyword
+arguments. The value is any string which will be output after the appropriate
+"ON UPDATE" or "ON DELETE" phrase::
+
+ child = Table('child', meta,
+ Column('id', Integer,
+ ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
+ primary_key=True
+ )
+ )
+
+ composite = Table('composite', meta,
+ Column('id', Integer, primary_key=True),
+ Column('rev_id', Integer),
+ Column('note_id', Integer),
+ ForeignKeyConstraint(
+ ['rev_id', 'note_id'],
+ ['revisions.id', 'revisions.note_id'],
+ onupdate="CASCADE", ondelete="SET NULL"
+ )
+ )
+
+Note that these clauses are not supported on SQLite, and require ``InnoDB``
+tables when used with MySQL. They may also not be supported on other
+databases.
+
+Foreign Key API Constructs
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ForeignKey
+ :members:
+ :show-inheritance:
+
+.. autoclass:: ForeignKeyConstraint
+ :members:
+ :show-inheritance:
+
+
+UNIQUE Constraint
+-----------------
+
+Unique constraints can be created anonymously on a single column using the
+``unique`` keyword on :class:`~sqlalchemy.schema.Column`. Explicitly named
+unique constraints and/or those with multiple columns are created via the
+:class:`~sqlalchemy.schema.UniqueConstraint` table-level construct.
+
+.. sourcecode:: python+sql
+
+ meta = MetaData()
+ mytable = Table('mytable', meta,
+
+ # per-column anonymous unique constraint
+ Column('col1', Integer, unique=True),
+
+ Column('col2', Integer),
+ Column('col3', Integer),
+
+ # explicit/composite unique constraint. 'name' is optional.
+ UniqueConstraint('col2', 'col3', name='uix_1')
+ )
+
+.. autoclass:: UniqueConstraint
+ :show-inheritance:
+
+CHECK Constraint
+----------------
+
+Check constraints can be named or unnamed and can be created at the Column or
+Table level, using the :class:`~sqlalchemy.schema.CheckConstraint` construct.
+The text of the check constraint is passed directly through to the database,
+so there is limited "database independent" behavior. Column level check
+constraints generally should only refer to the column to which they are
+placed, while table level constraints can refer to any columns in the table.
+
+Note that some databases, such as MySQL and SQLite, do not actively support
+check constraints.
+
+.. sourcecode:: python+sql
+
+ meta = MetaData()
+ mytable = Table('mytable', meta,
+
+ # per-column CHECK constraint
+ Column('col1', Integer, CheckConstraint('col1>5')),
+
+ Column('col2', Integer),
+ Column('col3', Integer),
+
+ # table level CHECK constraint. 'name' is optional.
+ CheckConstraint('col2 > col3 + 5', name='check1')
+ )
+
+ {sql}mytable.create(engine)
+ CREATE TABLE mytable (
+ col1 INTEGER CHECK (col1>5),
+ col2 INTEGER,
+ col3 INTEGER,
+ CONSTRAINT check1 CHECK (col2 > col3 + 5)
+ ){stop}
+
+.. autoclass:: CheckConstraint
+ :show-inheritance:
+
+Other Constraint Classes
+------------------------
+
+.. autoclass:: Constraint
+ :show-inheritance:
+
+.. autoclass:: ColumnCollectionConstraint
+ :show-inheritance:
+
+.. autoclass:: PrimaryKeyConstraint
+ :show-inheritance:
+
+Indexes
+-------
+
+Indexes can be created anonymously (using an auto-generated name ``ix_<column
+label>``) for a single column using the inline ``index`` keyword on
+:class:`~sqlalchemy.schema.Column`, which also modifies the usage of
+``unique`` to apply the uniqueness to the index itself, instead of adding a
+separate UNIQUE constraint. For indexes with specific names or which encompass
+more than one column, use the :class:`~sqlalchemy.schema.Index` construct,
+which requires a name.
+
+Note that the :class:`~sqlalchemy.schema.Index` construct is created
+**externally** to the table to which it corresponds, using
+:class:`~sqlalchemy.schema.Column` objects and not strings.
+
+Below we illustrate a :class:`~sqlalchemy.schema.Table` with several
+:class:`~sqlalchemy.schema.Index` objects associated. The DDL for "CREATE
+INDEX" is issued right after the create statements for the table:
+
+.. sourcecode:: python+sql
+
+ meta = MetaData()
+ mytable = Table('mytable', meta,
+ # an indexed column, with index "ix_mytable_col1"
+ Column('col1', Integer, index=True),
+
+ # a uniquely indexed column with index "ix_mytable_col2"
+ Column('col2', Integer, index=True, unique=True),
+
+ Column('col3', Integer),
+ Column('col4', Integer),
+
+ Column('col5', Integer),
+ Column('col6', Integer),
+ )
+
+ # place an index on col3, col4
+ Index('idx_col34', mytable.c.col3, mytable.c.col4)
+
+ # place a unique index on col5, col6
+ Index('myindex', mytable.c.col5, mytable.c.col6, unique=True)
+
+ {sql}mytable.create(engine)
+ CREATE TABLE mytable (
+ col1 INTEGER,
+ col2 INTEGER,
+ col3 INTEGER,
+ col4 INTEGER,
+ col5 INTEGER,
+ col6 INTEGER
+ )
+ CREATE INDEX ix_mytable_col1 ON mytable (col1)
+ CREATE UNIQUE INDEX ix_mytable_col2 ON mytable (col2)
+ CREATE UNIQUE INDEX myindex ON mytable (col5, col6)
+ CREATE INDEX idx_col34 ON mytable (col3, col4){stop}
+
+The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method:
+
+.. sourcecode:: python+sql
+
+ i = Index('someindex', mytable.c.col5)
+ {sql}i.create(engine)
+ CREATE INDEX someindex ON mytable (col5){stop}
+
+.. autoclass:: Index
+ :show-inheritance:
+
+Customizing DDL
+===============
+
+In the preceding sections we've discussed a variety of schema constructs
+including :class:`~sqlalchemy.schema.Table`,
+:class:`~sqlalchemy.schema.ForeignKeyConstraint`,
+:class:`~sqlalchemy.schema.CheckConstraint`, and
+:class:`~sqlalchemy.schema.Sequence`. Throughout, we've relied upon the
+``create()`` and :func:`~sqlalchemy.schema.MetaData.create_all` methods of
+:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` in
+order to issue data definition language (DDL) for all constructs. When issued,
+a pre-determined order of operations is invoked, and DDL to create each table
+is created unconditionally including all constraints and other objects
+associated with it. For more complex scenarios where database-specific DDL is
+required, SQLAlchemy offers two techniques which can be used to add any DDL
+based on any condition, either accompanying the standard generation of tables
+or by itself.
+
+Controlling DDL Sequences
+-------------------------
+
+The ``sqlalchemy.schema`` package contains SQL expression constructs that
+provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.schema import CreateTable
+ {sql}engine.execute(CreateTable(mytable))
+ CREATE TABLE mytable (
+ col1 INTEGER,
+ col2 INTEGER,
+ col3 INTEGER,
+ col4 INTEGER,
+ col5 INTEGER,
+ col6 INTEGER
+ ){stop}
+
+Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any
+other expression construct (such as ``select()``, ``table.insert()``, etc.). A
+full reference of available constructs is in :ref:`schema_api_ddl`.
+
+The DDL constructs all extend a common base class which provides the
+capability to be associated with an individual
+:class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData`
+object, to be invoked upon create/drop events. Consider the example of a table
+which contains a CHECK constraint:
+
+.. sourcecode:: python+sql
+
+ users = Table('users', metadata,
+ Column('user_id', Integer, primary_key=True),
+ Column('user_name', String(40), nullable=False),
+ CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
+ )
+
+ {sql}users.create(engine)
+ CREATE TABLE users (
+ user_id SERIAL NOT NULL,
+ user_name VARCHAR(40) NOT NULL,
+ PRIMARY KEY (user_id),
+ CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8)
+ ){stop}
+
+The above table contains a column "user_name" which is subject to a CHECK
+constraint that validates that the length of the string is at least eight
+characters. When a ``create()`` is issued for this table, DDL for the
+:class:`~sqlalchemy.schema.CheckConstraint` will also be issued inline within
+the table definition.
+
+The :class:`~sqlalchemy.schema.CheckConstraint` construct can also be
+constructed externally and associated with the
+:class:`~sqlalchemy.schema.Table` afterwards::
+
+ constraint = CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
+ users.append_constraint(constraint)
+
+So far, the effect is the same. However, if we create DDL elements
+corresponding to the creation and removal of this constraint, and associate
+them with the :class:`~sqlalchemy.schema.Table` as events, these new events
+will take over the job of issuing DDL for the constraint. Additionally, the
+constraint will be added via ALTER:
+
+.. sourcecode:: python+sql
+
+ AddConstraint(constraint).execute_at("after-create", users)
+ DropConstraint(constraint).execute_at("before-drop", users)
+
+ {sql}users.create(engine)
+ CREATE TABLE users (
+ user_id SERIAL NOT NULL,
+ user_name VARCHAR(40) NOT NULL,
+ PRIMARY KEY (user_id)
+ )
+
+ ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop}
+
+ {sql}users.drop(engine)
+ ALTER TABLE users DROP CONSTRAINT cst_user_name_length
+ DROP TABLE users{stop}
+
+The real usefulness of the above becomes clearer once we illustrate the ``on``
+attribute of a DDL event. The ``on`` parameter is part of the constructor, and
+may be a string name of a database dialect name, a tuple containing dialect
+names, or a Python callable. This will limit the execution of the item to just
+those dialects, or when the return value of the callable is ``True``. So if
+our :class:`~sqlalchemy.schema.CheckConstraint` was only supported by
+Postgresql and not other databases, we could limit it to just that dialect::
+
+ AddConstraint(constraint, on='postgresql').execute_at("after-create", users)
+ DropConstraint(constraint, on='postgresql').execute_at("before-drop", users)
+
+Or to any set of dialects::
+
+ AddConstraint(constraint, on=('postgresql', 'mysql')).execute_at("after-create", users)
+ DropConstraint(constraint, on=('postgresql', 'mysql')).execute_at("before-drop", users)
+
+When using a callable, the callable is passed the ddl element, event name, the
+:class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData`
+object whose "create" or "drop" event is in progress, and the
+:class:`~sqlalchemy.engine.base.Connection` object being used for the
+operation, as well as additional information as keyword arguments. The
+callable can perform checks, such as whether or not a given item already
+exists. Below we define ``should_create()`` and ``should_drop()`` callables
+that check for the presence of our named constraint:
+
+.. sourcecode:: python+sql
+
+ def should_create(ddl, event, target, connection, **kw):
+ row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar()
+ return not bool(row)
+
+ def should_drop(ddl, event, target, connection, **kw):
+ return not should_create(ddl, event, target, connection, **kw)
+
+ AddConstraint(constraint, on=should_create).execute_at("after-create", users)
+ DropConstraint(constraint, on=should_drop).execute_at("before-drop", users)
+
+ {sql}users.create(engine)
+ CREATE TABLE users (
+ user_id SERIAL NOT NULL,
+ user_name VARCHAR(40) NOT NULL,
+ PRIMARY KEY (user_id)
+ )
+
+ select conname from pg_constraint where conname='cst_user_name_length'
+ ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop}
+
+ {sql}users.drop(engine)
+ select conname from pg_constraint where conname='cst_user_name_length'
+ ALTER TABLE users DROP CONSTRAINT cst_user_name_length
+ DROP TABLE users{stop}
+
+Custom DDL
+----------
+
+Custom DDL phrases are most easily achieved using the
+:class:`~sqlalchemy.schema.DDL` construct. This construct works like all the
+other DDL elements except it accepts a string which is the text to be emitted:
+
+.. sourcecode:: python+sql
+
+ DDL("ALTER TABLE users ADD CONSTRAINT "
+ "cst_user_name_length "
+ " CHECK (length(user_name) >= 8)").execute_at("after-create", metadata)
+
+A more comprehensive method of creating libraries of DDL constructs is to use
+custom compilation - see :ref:`sqlalchemy.ext.compiler_toplevel` for
+details.
+
+.. _schema_api_ddl:
+
+DDL API
+-------
+
+.. autoclass:: DDLElement
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: DDL
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: CreateTable
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: DropTable
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: CreateSequence
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: DropSequence
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: CreateIndex
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: DropIndex
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: AddConstraint
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+.. autoclass:: DropConstraint
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
diff --git a/doc/build/reference/ext/serializer.rst b/doc/build/core/serializer.rst
index 759ba35ce..5423306a9 100644
--- a/doc/build/reference/ext/serializer.rst
+++ b/doc/build/core/serializer.rst
@@ -1,7 +1,5 @@
-serializer
-==========
-
-:author: Mike Bayer
+Expression Serializer Extension
+===============================
.. automodule:: sqlalchemy.ext.serializer
:members:
diff --git a/doc/build/core/sqla_engine_arch.png b/doc/build/core/sqla_engine_arch.png
new file mode 100644
index 000000000..f54d105bd
--- /dev/null
+++ b/doc/build/core/sqla_engine_arch.png
Binary files differ
diff --git a/doc/build/sqlexpression.rst b/doc/build/core/tutorial.rst
index 23190a143..23190a143 100644
--- a/doc/build/sqlexpression.rst
+++ b/doc/build/core/tutorial.rst
diff --git a/doc/build/reference/sqlalchemy/types.rst b/doc/build/core/types.rst
index 593d9c9a6..89a84abc8 100644
--- a/doc/build/reference/sqlalchemy/types.rst
+++ b/doc/build/core/types.rst
@@ -1,4 +1,4 @@
-.. _types:
+.. _types_toplevel:
Column and Data Types
=====================
@@ -239,15 +239,24 @@ such as `collation` and `charset`::
Custom Types
------------
-User-defined types may be created to match special capabilities of a
-particular database or simply for implementing custom processing logic
-in Python.
+A variety of methods exist to redefine the behavior of existing types
+as well as to provide new ones.
-The simplest method is implementing a :class:`TypeDecorator`, a helper
-class that makes it easy to augment the bind parameter and result
-processing capabilities of one of the built in types.
+Overriding Type Compilation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
-To build a type object from scratch, subclass `:class:UserDefinedType`.
+The string produced by any type object, when rendered in a CREATE TABLE
+statement or other SQL function like CAST, can be changed. See the
+section :ref:`type_compilation_extension`, a subsection of
+:ref:`sqlalchemy.ext.compiler_toplevel`, for a short example.
+
+Augmenting Existing Types
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The :class:`TypeDecorator` allows the creation of custom types which
+add bind-parameter and result-processing behavior to an existing
+type object. It is used when additional in-Python marshalling of data
+to and from the database is required.
.. autoclass:: TypeDecorator
:members:
@@ -255,19 +264,28 @@ To build a type object from scratch, subclass `:class:UserDefinedType`.
:inherited-members:
:show-inheritance:
+Creating New Types
+~~~~~~~~~~~~~~~~~~
+
+The :class:`UserDefinedType` class is provided as a simple base class
+for defining entirely new database types:
+
.. autoclass:: UserDefinedType
:members:
:undoc-members:
:inherited-members:
:show-inheritance:
-.. autoclass:: TypeEngine
+Base Type API
+--------------
+
+.. autoclass:: AbstractType
:members:
:undoc-members:
:inherited-members:
:show-inheritance:
-.. autoclass:: AbstractType
+.. autoclass:: TypeEngine
:members:
:undoc-members:
:inherited-members:
diff --git a/doc/build/dbengine.rst b/doc/build/dbengine.rst
deleted file mode 100644
index 637b85d48..000000000
--- a/doc/build/dbengine.rst
+++ /dev/null
@@ -1,516 +0,0 @@
-.. _engines_toplevel:
-
-================
-Database Engines
-================
-The **Engine** is the starting point for any SQLAlchemy application. It's "home base" for the actual database and its DBAPI, delivered to the SQLAlchemy application through a connection pool and a **Dialect**, which describes how to talk to a specific kind of database/DBAPI combination.
-
-The general structure is this::
-
- +-----------+ __________
- /---| Pool |---\ (__________)
- +-------------+ / +-----------+ \ +--------+ | |
- connect() <--| Engine |---x x----| DBAPI |---| database |
- +-------------+ \ +-----------+ / +--------+ | |
- \---| Dialect |---/ |__________|
- +-----------+ (__________)
-
-Where above, a :class:`~sqlalchemy.engine.base.Engine` references both a :class:`~sqlalchemy.engine.base.Dialect` and :class:`~sqlalchemy.pool.Pool`, which together interpret the DBAPI's module functions as well as the behavior of the database.
-
-Creating an engine is just a matter of issuing a single call, :func:`create_engine()`::
-
- engine = create_engine('postgresql://scott:tiger@localhost:5432/mydatabase')
-
-The above engine invokes the ``postgresql`` dialect and a connection pool which references ``localhost:5432``.
-
-Note that the appropriate usage of :func:`create_engine()` is once per particular configuration, held globally for the lifetime of a single application process (not including child processes via ``fork()`` - these would require a new engine). A single :class:`~sqlalchemy.engine.base.Engine` manages connections on behalf of the process and is intended to be called upon in a concurrent fashion. Creating engines for each particular operation is not the intended usage.
-
-The engine can be used directly to issue SQL to the database. The most generic way is to use connections, which you get via the ``connect()`` method::
-
- connection = engine.connect()
- result = connection.execute("select username from users")
- for row in result:
- print "username:", row['username']
- connection.close()
-
-The connection is an instance of :class:`~sqlalchemy.engine.base.Connection`, which is a **proxy** object for an actual DBAPI connection. The returned result is an instance of :class:`~sqlalchemy.engine.ResultProxy`, which acts very much like a DBAPI cursor.
-
-When you say ``engine.connect()``, a new :class:`~sqlalchemy.engine.base.Connection` object is created, and a DBAPI connection is retrieved from the connection pool. Later, when you call ``connection.close()``, the DBAPI connection is returned to the pool; nothing is actually "closed" from the perspective of the database.
-
-To execute some SQL more quickly, you can skip the :class:`~sqlalchemy.engine.base.Connection` part and just say::
-
- result = engine.execute("select username from users")
- for row in result:
- print "username:", row['username']
- result.close()
-
-Where above, the ``execute()`` method on the :class:`~sqlalchemy.engine.base.Engine` does the ``connect()`` part for you, and returns the :class:`~sqlalchemy.engine.base.ResultProxy` directly. The actual :class:`~sqlalchemy.engine.base.Connection` is *inside* the :class:`~sqlalchemy.engine.base.ResultProxy`, waiting for you to finish reading the result. In this case, when you ``close()`` the :class:`~sqlalchemy.engine.base.ResultProxy`, the underlying :class:`~sqlalchemy.engine.base.Connection` is closed, which returns the DBAPI connection to the pool.
-
-To summarize the above two examples, when you use a :class:`~sqlalchemy.engine.base.Connection` object, it's known as **explicit execution**. When you don't see the :class:`~sqlalchemy.engine.base.Connection` object, but you still use the ``execute()`` method on the :class:`~sqlalchemy.engine.base.Engine`, it's called **explicit, connectionless execution**. A third variant of execution also exists called **implicit execution**; this will be described later.
-
-The :class:`~sqlalchemy.engine.base.Engine` and :class:`~sqlalchemy.engine.base.Connection` can do a lot more than what we illustrated above; SQL strings are only its most rudimentary function. Later chapters will describe how "constructed SQL" expressions can be used with engines; in many cases, you don't have to deal with the :class:`~sqlalchemy.engine.base.Engine` at all after it's created. The Object Relational Mapper (ORM), an optional feature of SQLAlchemy, also uses the :class:`~sqlalchemy.engine.base.Engine` in order to get at connections; that's also a case where you can often create the engine once, and then forget about it.
-
-.. _supported_dbapis:
-
-Supported Databases
-====================
-
-SQLAlchemy includes many :class:`~sqlalchemy.engine.base.Dialect` implementations for various
-backends; each is described as its own package in the :ref:`sqlalchemy.dialects_toplevel` package. A
-SQLAlchemy dialect always requires that an appropriate DBAPI driver is installed.
-
-The table below summarizes the state of DBAPI support in SQLAlchemy 0.6. The values
-translate as:
-
-* yes / Python platform - The SQLAlchemy dialect is mostly or fully operational on the target platform.
-* yes / OS platform - The DBAPI supports that platform.
-* no / Python platform - The DBAPI does not support that platform, or there is no SQLAlchemy dialect support.
-* no / OS platform - The DBAPI does not support that platform.
-* partial - the DBAPI is partially usable on the target platform but has major unresolved issues.
-* development - a development version of the dialect exists, but is not yet usable.
-* thirdparty - the dialect itself is maintained by a third party, who should be consulted for
- information on current support.
-* \* - indicates the given DBAPI is the "default" for SQLAlchemy, i.e. when just the database name is specified
-
-========================= =========================== =========== =========== =========== ================= ============
-Driver Connect string Py2K Py3K Jython Unix Windows
-========================= =========================== =========== =========== =========== ================= ============
-**DB2/Informix IDS**
-ibm-db_ thirdparty thirdparty thirdparty thirdparty thirdparty thirdparty
-**Firebird**
-kinterbasdb_ ``firebird+kinterbasdb``\* yes development no yes yes
-**Informix**
-informixdb_ ``informix+informixdb``\* development development no unknown unknown
-**MaxDB**
-sapdb_ ``maxdb+sapdb``\* development development no yes unknown
-**Microsoft Access**
-pyodbc_ ``access+pyodbc``\* development development no unknown yes
-**Microsoft SQL Server**
-adodbapi_ ``mssql+adodbapi`` development development no no yes
-`jTDS JDBC Driver`_ ``mssql+zxjdbc`` no no development yes yes
-mxodbc_ ``mssql+mxodbc`` yes development no yes with FreeTDS_ yes
-pyodbc_ ``mssql+pyodbc``\* yes development no yes with FreeTDS_ yes
-pymssql_ ``mssql+pymssql`` yes development no yes yes
-**MySQL**
-`MySQL Connector/J`_ ``mysql+zxjdbc`` no no yes yes yes
-`MySQL Connector/Python`_ ``mysql+mysqlconnector`` yes partial no yes yes
-mysql-python_ ``mysql+mysqldb``\* yes development no yes yes
-OurSQL_ ``mysql+oursql`` yes partial no yes yes
-**Oracle**
-cx_oracle_ ``oracle+cx_oracle``\* yes development no yes yes
-`Oracle JDBC Driver`_ ``oracle+zxjdbc`` no no yes yes yes
-**Postgresql**
-pg8000_ ``postgresql+pg8000`` yes yes no yes yes
-`PostgreSQL JDBC Driver`_ ``postgresql+zxjdbc`` no no yes yes yes
-psycopg2_ ``postgresql+psycopg2``\* yes development no yes yes
-pypostgresql_ ``postgresql+pypostgresql`` no yes no yes yes
-**SQLite**
-pysqlite_ ``sqlite+pysqlite``\* yes yes no yes yes
-sqlite3_ ``sqlite+pysqlite``\* yes yes no yes yes
-**Sybase ASE**
-mxodbc_ ``sybase+mxodbc`` development development no yes yes
-pyodbc_ ``sybase+pyodbc``\* partial development no unknown unknown
-python-sybase_ ``sybase+pysybase`` partial development no yes yes
-========================= =========================== =========== =========== =========== ================= ============
-
-.. _psycopg2: http://www.initd.org/
-.. _pg8000: http://pybrary.net/pg8000/
-.. _pypostgresql: http://python.projects.postgresql.org/
-.. _mysql-python: http://sourceforge.net/projects/mysql-python
-.. _MySQL Connector/Python: https://launchpad.net/myconnpy
-.. _OurSQL: http://packages.python.org/oursql/
-.. _PostgreSQL JDBC Driver: http://jdbc.postgresql.org/
-.. _sqlite3: http://docs.python.org/library/sqlite3.html
-.. _pysqlite: http://pypi.python.org/pypi/pysqlite/
-.. _MySQL Connector/J: http://dev.mysql.com/downloads/connector/j/
-.. _cx_Oracle: http://cx-oracle.sourceforge.net/
-.. _Oracle JDBC Driver: http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html
-.. _kinterbasdb: http://firebirdsql.org/index.php?op=devel&sub=python
-.. _pyodbc: http://code.google.com/p/pyodbc/
-.. _mxodbc: http://www.egenix.com/products/python/mxODBC/
-.. _FreeTDS: http://www.freetds.org/
-.. _adodbapi: http://adodbapi.sourceforge.net/
-.. _pymssql: http://code.google.com/p/pymssql/
-.. _jTDS JDBC Driver: http://jtds.sourceforge.net/
-.. _ibm-db: http://code.google.com/p/ibm-db/
-.. _informixdb: http://informixdb.sourceforge.net/
-.. _sapdb: http://www.sapdb.org/sapdbapi.html
-.. _python-sybase: http://python-sybase.sourceforge.net/
-
-Further detail on dialects is available at :ref:`sqlalchemy.dialects_toplevel` as well as additional notes on the wiki at `Database Notes <http://www.sqlalchemy.org/trac/wiki/DatabaseNotes>`_
-
-create_engine() URL Arguments
-==============================
-
-SQLAlchemy indicates the source of an Engine strictly via `RFC-1738 <http://rfc.net/rfc1738.html>`_ style URLs, combined with optional keyword arguments to specify options for the Engine. The form of the URL is:
-
- dialect+driver://username:password@host:port/database
-
-Dialect names include the identifying name of the SQLAlchemy dialect which include ``sqlite``, ``mysql``, ``postgresql``, ``oracle``, ``mssql``, and ``firebird``. The drivername is the name of the DBAPI to be used to connect to the database using all lowercase letters. If not specified, a "default" DBAPI will be imported if available - this default is typically the most widely known driver available for that backend (i.e. cx_oracle, pysqlite/sqlite3, psycopg2, mysqldb). For Jython connections, specify the `zxjdbc` driver, which is the JDBC-DBAPI bridge included with Jython.
-
-.. sourcecode:: python+sql
-
- # postgresql - psycopg2 is the default driver.
- pg_db = create_engine('postgresql://scott:tiger@localhost/mydatabase')
- pg_db = create_engine('postgresql+psycopg2://scott:tiger@localhost/mydatabase')
- pg_db = create_engine('postgresql+pg8000://scott:tiger@localhost/mydatabase')
- pg_db = create_engine('postgresql+pypostgresql://scott:tiger@localhost/mydatabase')
-
- # postgresql on Jython
- pg_db = create_engine('postgresql+zxjdbc://scott:tiger@localhost/mydatabase')
-
- # mysql - MySQLdb (mysql-python) is the default driver
- mysql_db = create_engine('mysql://scott:tiger@localhost/foo')
- mysql_db = create_engine('mysql+mysqldb://scott:tiger@localhost/foo')
-
- # mysql on Jython
- mysql_db = create_engine('mysql+zxjdbc://localhost/foo')
-
- # mysql with pyodbc (buggy)
- mysql_db = create_engine('mysql+pyodbc://scott:tiger@some_dsn')
-
- # oracle - cx_oracle is the default driver
- oracle_db = create_engine('oracle://scott:tiger@127.0.0.1:1521/sidname')
-
- # oracle via TNS name
- oracle_db = create_engine('oracle+cx_oracle://scott:tiger@tnsname')
-
- # mssql using ODBC datasource names. PyODBC is the default driver.
- mssql_db = create_engine('mssql://mydsn')
- mssql_db = create_engine('mssql+pyodbc://mydsn')
- mssql_db = create_engine('mssql+adodbapi://mydsn')
- mssql_db = create_engine('mssql+pyodbc://username:password@mydsn')
-
-SQLite connects to file based databases. The same URL format is used, omitting the hostname, and using the "file" portion as the filename of the database. This has the effect of four slashes being present for an absolute file path::
-
- # sqlite://<nohostname>/<path>
- # where <path> is relative:
- sqlite_db = create_engine('sqlite:///foo.db')
-
- # or absolute, starting with a slash:
- sqlite_db = create_engine('sqlite:////absolute/path/to/foo.db')
-
-To use a SQLite ``:memory:`` database, specify an empty URL::
-
- sqlite_memory_db = create_engine('sqlite://')
-
-The :class:`~sqlalchemy.engine.base.Engine` will ask the connection pool for a connection when the ``connect()`` or ``execute()`` methods are called. The default connection pool, :class:`~sqlalchemy.pool.QueuePool`, as well as the default connection pool used with SQLite, :class:`~sqlalchemy.pool.SingletonThreadPool`, will open connections to the database on an as-needed basis. As concurrent statements are executed, :class:`~sqlalchemy.pool.QueuePool` will grow its pool of connections to a default size of five, and will allow a default "overflow" of ten. Since the :class:`~sqlalchemy.engine.base.Engine` is essentially "home base" for the connection pool, it follows that you should keep a single :class:`~sqlalchemy.engine.base.Engine` per database established within an application, rather than creating a new one for each connection.
-
-Custom DBAPI connect() arguments
---------------------------------
-
-Custom arguments used when issuing the ``connect()`` call to the underlying DBAPI may be issued in three distinct ways. String-based arguments can be passed directly from the URL string as query arguments:
-
-.. sourcecode:: python+sql
-
- db = create_engine('postgresql://scott:tiger@localhost/test?argument1=foo&argument2=bar')
-
-If SQLAlchemy's database connector is aware of a particular query argument, it may convert its type from string to its proper type.
-
-:func:`~sqlalchemy.create_engine` also takes an argument ``connect_args`` which is an additional dictionary that will be passed to ``connect()``. This can be used when arguments of a type other than string are required, and SQLAlchemy's database connector has no type conversion logic present for that parameter:
-
-.. sourcecode:: python+sql
-
- db = create_engine('postgresql://scott:tiger@localhost/test', connect_args = {'argument1':17, 'argument2':'bar'})
-
-The most customizable connection method of all is to pass a ``creator`` argument, which specifies a callable that returns a DBAPI connection:
-
-.. sourcecode:: python+sql
-
- def connect():
- return psycopg.connect(user='scott', host='localhost')
-
- db = create_engine('postgresql://', creator=connect)
-
-.. _create_engine_args:
-
-Database Engine Options
-========================
-
-Keyword options can also be specified to :func:`~sqlalchemy.create_engine`, following the string URL as follows:
-
-.. sourcecode:: python+sql
-
- db = create_engine('postgresql://...', encoding='latin1', echo=True)
-
-Options common to all database dialects are described at :func:`~sqlalchemy.create_engine`.
-
-More On Connections
-====================
-
-Recall from the beginning of this section that the Engine provides a ``connect()`` method which returns a :class:`~sqlalchemy.engine.base.Connection` object. :class:`~sqlalchemy.engine.base.Connection` is a *proxy* object which maintains a reference to a DBAPI connection instance. The ``close()`` method on :class:`~sqlalchemy.engine.base.Connection` does not actually close the DBAPI connection, but instead returns it to the connection pool referenced by the :class:`~sqlalchemy.engine.base.Engine`. :class:`~sqlalchemy.engine.base.Connection` will also automatically return its resources to the connection pool when the object is garbage collected, i.e. its ``__del__()`` method is called. When using the standard C implementation of Python, this method is usually called immediately as soon as the object is dereferenced. With other Python implementations such as Jython, this is not so guaranteed.
-
-The ``execute()`` methods on both :class:`~sqlalchemy.engine.base.Engine` and :class:`~sqlalchemy.engine.base.Connection` can also receive SQL clause constructs as well::
-
- connection = engine.connect()
- result = connection.execute(select([table1], table1.c.col1==5))
- for row in result:
- print row['col1'], row['col2']
- connection.close()
-
-The above SQL construct is known as a ``select()``. The full range of SQL constructs available are described in :ref:`sqlexpression_toplevel`.
-
-Both :class:`~sqlalchemy.engine.base.Connection` and :class:`~sqlalchemy.engine.base.Engine` fulfill an interface known as :class:`~sqlalchemy.engine.base.Connectable` which specifies common functionality between the two objects, namely being able to call ``connect()`` to return a :class:`~sqlalchemy.engine.base.Connection` object (:class:`~sqlalchemy.engine.base.Connection` just returns itself), and being able to call ``execute()`` to get a result set. Following this, most SQLAlchemy functions and objects which accept an :class:`~sqlalchemy.engine.base.Engine` as a parameter or attribute with which to execute SQL will also accept a :class:`~sqlalchemy.engine.base.Connection`. This argument is named ``bind``::
-
- engine = create_engine('sqlite:///:memory:')
-
- # specify some Table metadata
- metadata = MetaData()
- table = Table('sometable', metadata, Column('col1', Integer))
-
- # create the table with the Engine
- table.create(bind=engine)
-
- # drop the table with a Connection off the Engine
- connection = engine.connect()
- table.drop(bind=connection)
-
-.. index::
- single: thread safety; connections
-
-Connection facts:
-
-* the Connection object is **not thread-safe**. While a Connection can be shared among threads using properly synchronized access, this is also not recommended as many DBAPIs have issues with, if not outright disallow, sharing of connection state between threads.
-* The Connection object represents a single dbapi connection checked out from the connection pool. In this state, the connection pool has no effect upon the connection, including its expiration or timeout state. For the connection pool to properly manage connections, **connections should be returned to the connection pool (i.e. ``connection.close()``) whenever the connection is not in use**. If your application has a need for management of multiple connections or is otherwise long running (this includes all web applications, threaded or not), don't hold a single connection open at the module level.
-
-Using Transactions with Connection
-===================================
-
-The :class:`~sqlalchemy.engine.base.Connection` object provides a ``begin()`` method which returns a :class:`~sqlalchemy.engine.base.Transaction` object. This object is usually used within a try/except clause so that it is guaranteed to ``rollback()`` or ``commit()``::
-
- trans = connection.begin()
- try:
- r1 = connection.execute(table1.select())
- connection.execute(table1.insert(), col1=7, col2='this is some data')
- trans.commit()
- except:
- trans.rollback()
- raise
-
-The :class:`~sqlalchemy.engine.base.Transaction` object also handles "nested" behavior by keeping track of the outermost begin/commit pair. In this example, two functions both issue a transaction on a Connection, but only the outermost Transaction object actually takes effect when it is committed.
-
-.. sourcecode:: python+sql
-
- # method_a starts a transaction and calls method_b
- def method_a(connection):
- trans = connection.begin() # open a transaction
- try:
- method_b(connection)
- trans.commit() # transaction is committed here
- except:
- trans.rollback() # this rolls back the transaction unconditionally
- raise
-
- # method_b also starts a transaction
- def method_b(connection):
- trans = connection.begin() # open a transaction - this runs in the context of method_a's transaction
- try:
- connection.execute("insert into mytable values ('bat', 'lala')")
- connection.execute(mytable.insert(), col1='bat', col2='lala')
- trans.commit() # transaction is not committed yet
- except:
- trans.rollback() # this rolls back the transaction unconditionally
- raise
-
- # open a Connection and call method_a
- conn = engine.connect()
- method_a(conn)
- conn.close()
-
-Above, ``method_a`` is called first, which calls ``connection.begin()``. Then it calls ``method_b``. When ``method_b`` calls ``connection.begin()``, it just increments a counter that is decremented when it calls ``commit()``. If either ``method_a`` or ``method_b`` calls ``rollback()``, the whole transaction is rolled back. The transaction is not committed until ``method_a`` calls the ``commit()`` method. This "nesting" behavior allows the creation of functions which "guarantee" that a transaction will be used if one was not already available, but will automatically participate in an enclosing transaction if one exists.
-
-Note that SQLAlchemy's Object Relational Mapper also provides a way to control transaction scope at a higher level; this is described in :ref:`unitofwork_transaction`.
-
-.. index::
- single: thread safety; transactions
-
-Transaction Facts:
-
-* the Transaction object, just like its parent Connection, is **not thread-safe**.
-
-Understanding Autocommit
-------------------------
-
-
-The above transaction example illustrates how to use :class:`~sqlalchemy.engine.base.Transaction` so that several executions can take part in the same transaction. What happens when we issue an INSERT, UPDATE or DELETE call without using :class:`~sqlalchemy.engine.base.Transaction`? The answer is **autocommit**. While many DBAPIs implement a flag called ``autocommit``, the current SQLAlchemy behavior is such that it implements its own autocommit. This is achieved by detecting statements which represent data-changing operations, i.e. INSERT, UPDATE, DELETE, etc., and then issuing a COMMIT automatically if no transaction is in progress. The detection is based on compiled statement attributes, or in the case of a text-only statement via regular expressions.
-
-.. sourcecode:: python+sql
-
- conn = engine.connect()
- conn.execute("INSERT INTO users VALUES (1, 'john')") # autocommits
-
-.. _dbengine_implicit:
-
-Connectionless Execution, Implicit Execution
-=============================================
-
-Recall from the first section we mentioned executing with and without a :class:`~sqlalchemy.engine.base.Connection`. ``Connectionless`` execution refers to calling the ``execute()`` method on an object which is not a :class:`~sqlalchemy.engine.base.Connection`, which could be on the :class:`~sqlalchemy.engine.base.Engine` itself, or could be a constructed SQL object. When we say "implicit", we mean that we are calling the ``execute()`` method on an object which is neither a :class:`~sqlalchemy.engine.base.Connection` nor an :class:`~sqlalchemy.engine.base.Engine` object; this can only be used with constructed SQL objects which have their own ``execute()`` method, and can be "bound" to an :class:`~sqlalchemy.engine.base.Engine`. A description of "constructed SQL objects" may be found in :ref:`sqlexpression_toplevel`.
-
-A summary of all three methods follows below. First, assume the usage of the following :class:`~sqlalchemy.schema.MetaData` and :class:`~sqlalchemy.schema.Table` objects; while we haven't yet introduced these concepts, for now you only need to know that we are representing a database table, and are creating an "executable" SQL construct which issues a statement to the database. These objects are described in :ref:`metadata_toplevel`.
-
-.. sourcecode:: python+sql
-
- meta = MetaData()
- users_table = Table('users', meta,
- Column('id', Integer, primary_key=True),
- Column('name', String(50))
- )
-
-Explicit execution delivers the SQL text or constructed SQL expression to the ``execute()`` method of :class:`~sqlalchemy.engine.base.Connection`:
-
-.. sourcecode:: python+sql
-
- engine = create_engine('sqlite:///file.db')
- connection = engine.connect()
- result = connection.execute(users_table.select())
- for row in result:
- # ....
- connection.close()
-
-Explicit, connectionless execution delivers the expression to the ``execute()`` method of :class:`~sqlalchemy.engine.base.Engine`:
-
-.. sourcecode:: python+sql
-
- engine = create_engine('sqlite:///file.db')
- result = engine.execute(users_table.select())
- for row in result:
- # ....
- result.close()
-
-Implicit execution is also connectionless, and calls the ``execute()`` method on the expression itself, utilizing the fact that either an :class:`~sqlalchemy.engine.base.Engine` or :class:`~sqlalchemy.engine.base.Connection` has been *bound* to the expression object (binding is discussed further in the next section, :ref:`metadata_toplevel`):
-
-.. sourcecode:: python+sql
-
- engine = create_engine('sqlite:///file.db')
- meta.bind = engine
- result = users_table.select().execute()
- for row in result:
- # ....
- result.close()
-
-In both "connectionless" examples, the :class:`~sqlalchemy.engine.base.Connection` is created behind the scenes; the :class:`~sqlalchemy.engine.base.ResultProxy` returned by the ``execute()`` call references the :class:`~sqlalchemy.engine.base.Connection` used to issue the SQL statement. When we issue ``close()`` on the :class:`~sqlalchemy.engine.base.ResultProxy`, or if the result set object falls out of scope and is garbage collected, the underlying :class:`~sqlalchemy.engine.base.Connection` is closed for us, resulting in the DBAPI connection being returned to the pool.
-
-.. _threadlocal_strategy:
-
-Using the Threadlocal Execution Strategy
------------------------------------------
-
-The "threadlocal" engine strategy is used by non-ORM applications which wish to bind a transaction to the current thread, such that all parts of the application can participate in that transaction implicitly without the need to explicitly reference a :class:`~sqlalchemy.engine.base.Connection`. "threadlocal" is designed for a very specific pattern of use, and is not appropriate unless this very specific pattern, described below, is what's desired. It has **no impact** on the "thread safety" of SQLAlchemy components or one's application. It also should not be used when using an ORM :class:`~sqlalchemy.orm.session.Session` object, as the :class:`~sqlalchemy.orm.session.Session` itself represents an ongoing transaction and itself handles the job of maintaining connection and transactional resources.
-
-Enabling ``threadlocal`` is achieved as follows:
-
-.. sourcecode:: python+sql
-
- db = create_engine('mysql://localhost/test', strategy='threadlocal')
-
-When the engine above is used in a "connectionless" style, meaning ``engine.execute()`` is called, a DBAPI connection is retrieved from the connection pool and then associated with the current thread. Subsequent operations on the :class:`~sqlalchemy.engine.base.Engine` while the DBAPI connection remains checked out will make use of the *same* DBAPI connection object. The connection stays allocated until all returned :class:`~sqlalchemy.engine.base.ResultProxy` objects are closed, which occurs for a particular :class:`~sqlalchemy.engine.base.ResultProxy` after all pending results are fetched, or immediately for an operation which returns no rows (such as an INSERT).
-
-.. sourcecode:: python+sql
-
- # execute one statement and receive results. r1 now references a DBAPI connection resource.
- r1 = db.execute("select * from table1")
-
- # execute a second statement and receive results. r2 now references the *same* resource as r1
- r2 = db.execute("select * from table2")
-
- # fetch a row on r1 (assume more results are pending)
- row1 = r1.fetchone()
-
- # fetch a row on r2 (same)
- row2 = r2.fetchone()
-
- # close r1. the connection is still held by r2.
- r1.close()
-
- # close r2. with no more references to the underlying connection resources, they
- # are returned to the pool.
- r2.close()
-
-The above example does not illustrate any pattern that is particularly useful, as it is not a frequent occurrence that two execute/result fetching operations "leapfrog" one another. There is a slight savings of connection pool checkout overhead between the two operations, and an implicit sharing of the same transactional context, but since there is no explicitly declared transaction, this association is short lived.
-
-The real usage of "threadlocal" comes when we want several operations to occur within the scope of a shared transaction. The :class:`~sqlalchemy.engine.base.Engine` now has ``begin()``, ``commit()`` and ``rollback()`` methods which will retrieve a connection resource from the pool and establish a new transaction, maintaining the connection against the current thread until the transaction is committed or rolled back:
-
-.. sourcecode:: python+sql
-
- db.begin()
- try:
- call_operation1()
- call_operation2()
- db.commit()
- except:
- db.rollback()
-
-``call_operation1()`` and ``call_operation2()`` can make use of the :class:`~sqlalchemy.engine.base.Engine` as a global variable, using the "connectionless" execution style, and their operations will participate in the same transaction:
-
-.. sourcecode:: python+sql
-
- def call_operation1():
- engine.execute("insert into users values (?, ?)", 1, "john")
-
- def call_operation2():
- users.update(users.c.user_id==5).execute(name='ed')
-
-When using threadlocal, operations that do call upon the ``engine.connect()`` method will receive a :class:`~sqlalchemy.engine.base.Connection` that is **outside** the scope of the transaction. This can be used for operations such as logging the status of an operation regardless of transaction success:
-
-.. sourcecode:: python+sql
-
- db.begin()
- conn = db.connect()
- try:
- conn.execute(log_table.insert(), message="Operation started")
- call_operation1()
- call_operation2()
- db.commit()
- conn.execute(log_table.insert(), message="Operation succeeded")
- except:
- db.rollback()
- conn.execute(log_table.insert(), message="Operation failed")
- finally:
- conn.close()
-
-Functions which are written to use an explicit :class:`~sqlalchemy.engine.base.Connection` object, but wish to participate in the threadlocal transaction, can receive their :class:`~sqlalchemy.engine.base.Connection` object from the ``contextual_connect()`` method, which returns a :class:`~sqlalchemy.engine.base.Connection` that is **inside** the scope of the transaction:
-
-.. sourcecode:: python+sql
-
- conn = db.contextual_connect()
- call_operation3(conn)
- conn.close()
-
-Calling ``close()`` on the "contextual" connection does not release the connection resources to the pool if other resources are making use of it. A resource-counting mechanism is employed so that the connection is released back to the pool only when all users of that connection, including the transaction established by ``engine.begin()``, have been completed.
-
-So remember - if you're not sure if you need to use ``strategy="threadlocal"`` or not, the answer is **no** ! It's driven by a specific programming pattern that is generally not the norm.
-
-.. _dbengine_logging:
-
-Configuring Logging
-====================
-
-Python's standard `logging <http://www.python.org/doc/lib/module-logging.html>`_ module is used to implement informational and debug log output with SQLAlchemy. This allows SQLAlchemy's logging to integrate in a standard way with other applications and libraries. The ``echo`` and ``echo_pool`` flags that are present on :func:`~sqlalchemy.create_engine`, as well as the ``echo_uow`` flag used on :class:`~sqlalchemy.orm.session.Session`, all interact with regular loggers.
-
-This section assumes familiarity with the above linked logging module. All logging performed by SQLAlchemy exists underneath the ``sqlalchemy`` namespace, as used by ``logging.getLogger('sqlalchemy')``. When logging has been configured (i.e. such as via ``logging.basicConfig()``), the general namespace of SA loggers that can be turned on is as follows:
-
-* ``sqlalchemy.engine`` - controls SQL echoing. set to ``logging.INFO`` for SQL query output, ``logging.DEBUG`` for query + result set output.
-* ``sqlalchemy.dialects`` - controls custom logging for SQL dialects. See the documentation of individual dialects for details.
-* ``sqlalchemy.pool`` - controls connection pool logging. set to ``logging.INFO`` or lower to log connection pool checkouts/checkins.
-* ``sqlalchemy.orm`` - controls logging of various ORM functions. set to ``logging.INFO`` for configurational logging as well as unit of work dumps, ``logging.DEBUG`` for extensive logging during query and flush() operations. Subcategories of ``sqlalchemy.orm`` include:
- * ``sqlalchemy.orm.attributes`` - logs certain instrumented attribute operations, such as triggered callables
- * ``sqlalchemy.orm.mapper`` - logs Mapper configuration and operations
- * ``sqlalchemy.orm.unitofwork`` - logs flush() operations, including dependency sort graphs and other operations
- * ``sqlalchemy.orm.strategies`` - logs relationship loader operations (i.e. lazy and eager loads)
- * ``sqlalchemy.orm.sync`` - logs synchronization of attributes from parent to child instances during a flush()
-
-For example, to log SQL queries as well as unit of work debugging:
-
-.. sourcecode:: python+sql
-
- import logging
-
- logging.basicConfig()
- logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
- logging.getLogger('sqlalchemy.orm.unitofwork').setLevel(logging.DEBUG)
-
-By default, the log level is set to ``logging.ERROR`` within the entire ``sqlalchemy`` namespace so that no log operations occur, even within an application that has logging enabled otherwise.
-
-The ``echo`` flags present as keyword arguments to :func:`~sqlalchemy.create_engine` and others as well as the ``echo`` property on :class:`~sqlalchemy.engine.base.Engine`, when set to ``True``, will first attempt to ensure that logging is enabled. Unfortunately, the ``logging`` module provides no way of determining if output has already been configured (note we are referring to if a logging configuration has been set up, not just that the logging level is set). For this reason, any ``echo=True`` flags will result in a call to ``logging.basicConfig()`` using sys.stdout as the destination. It also sets up a default format using the level name, timestamp, and logger name. Note that this configuration has the effect of being configured **in addition** to any existing logger configurations. Therefore, **when using Python logging, ensure all echo flags are set to False at all times**, to avoid getting duplicate log lines.
-
-The logger name of an instance such as an :class:`~sqlalchemy.engine.base.Engine` or :class:`~sqlalchemy.pool.Pool` defaults to using a truncated hex identifier string. To set this to a specific name, use the "logging_name" and "pool_logging_name" keyword arguments with :func:`sqlalchemy.create_engine`.
diff --git a/doc/build/reference/dialects/access.rst b/doc/build/dialects/access.rst
index 52a2ee371..52a2ee371 100644
--- a/doc/build/reference/dialects/access.rst
+++ b/doc/build/dialects/access.rst
diff --git a/doc/build/reference/dialects/firebird.rst b/doc/build/dialects/firebird.rst
index dd6b6d0ba..dd6b6d0ba 100644
--- a/doc/build/reference/dialects/firebird.rst
+++ b/doc/build/dialects/firebird.rst
diff --git a/doc/build/reference/dialects/index.rst b/doc/build/dialects/index.rst
index a1808dff9..a8cfc3324 100644
--- a/doc/build/reference/dialects/index.rst
+++ b/doc/build/dialects/index.rst
@@ -1,7 +1,12 @@
-.. _sqlalchemy.dialects_toplevel:
+.. _dialect_toplevel:
-sqlalchemy.dialects
-====================
+Dialects
+========
+
+The *dialect* is the system SQLAlchemy uses to communicate with various types of DBAPIs and databases.
+A compatibility chart of supported backends can be found at :ref:`supported_dbapis`.
+
+This section contains all notes and documentation specific to the usage of various backends.
Supported Databases
-------------------
@@ -10,6 +15,7 @@ These backends are fully operational with
current versions of SQLAlchemy.
.. toctree::
+ :maxdepth: 1
:glob:
firebird
@@ -27,6 +33,7 @@ These backends are untested and may not be completely
ported to current versions of SQLAlchemy.
.. toctree::
+ :maxdepth: 1
:glob:
access
diff --git a/doc/build/reference/dialects/informix.rst b/doc/build/dialects/informix.rst
index 7cf271d0b..7cf271d0b 100644
--- a/doc/build/reference/dialects/informix.rst
+++ b/doc/build/dialects/informix.rst
diff --git a/doc/build/reference/dialects/maxdb.rst b/doc/build/dialects/maxdb.rst
index 3edd55a77..3edd55a77 100644
--- a/doc/build/reference/dialects/maxdb.rst
+++ b/doc/build/dialects/maxdb.rst
diff --git a/doc/build/reference/dialects/mssql.rst b/doc/build/dialects/mssql.rst
index 658ca8988..658ca8988 100644
--- a/doc/build/reference/dialects/mssql.rst
+++ b/doc/build/dialects/mssql.rst
diff --git a/doc/build/reference/dialects/mysql.rst b/doc/build/dialects/mysql.rst
index 8796adb3e..8796adb3e 100644
--- a/doc/build/reference/dialects/mysql.rst
+++ b/doc/build/dialects/mysql.rst
diff --git a/doc/build/reference/dialects/oracle.rst b/doc/build/dialects/oracle.rst
index 363cebffd..363cebffd 100644
--- a/doc/build/reference/dialects/oracle.rst
+++ b/doc/build/dialects/oracle.rst
diff --git a/doc/build/reference/dialects/postgresql.rst b/doc/build/dialects/postgresql.rst
index fe1fcb2f7..fe1fcb2f7 100644
--- a/doc/build/reference/dialects/postgresql.rst
+++ b/doc/build/dialects/postgresql.rst
diff --git a/doc/build/reference/dialects/sqlite.rst b/doc/build/dialects/sqlite.rst
index a4e87e1b0..a4e87e1b0 100644
--- a/doc/build/reference/dialects/sqlite.rst
+++ b/doc/build/dialects/sqlite.rst
diff --git a/doc/build/reference/dialects/sybase.rst b/doc/build/dialects/sybase.rst
index 724656a30..724656a30 100644
--- a/doc/build/reference/dialects/sybase.rst
+++ b/doc/build/dialects/sybase.rst
diff --git a/doc/build/index.rst b/doc/build/index.rst
index e2338c098..2c66bd47f 100644
--- a/doc/build/index.rst
+++ b/doc/build/index.rst
@@ -2,16 +2,12 @@ Table of Contents
=================
.. toctree::
-
+ :maxdepth: 2
+
intro
- ormtutorial
- sqlexpression
- mappers
- session
- dbengine
- metadata
- examples
- reference/index
+ orm/index
+ core/index
+ dialects/index
Indices and tables
------------------
diff --git a/doc/build/intro.rst b/doc/build/intro.rst
index dd4a97507..bf57f8d73 100644
--- a/doc/build/intro.rst
+++ b/doc/build/intro.rst
@@ -8,53 +8,79 @@ Overview
========
-The SQLAlchemy SQL Toolkit and Object Relational Mapper is a comprehensive set of tools for working with databases and Python. It has several distinct areas of functionality which can be used individually or combined together. Its major components are illustrated below. The arrows represent the general dependencies of components:
-
-.. image:: sqla_arch_small.jpg
-
-Above, the two most significant front-facing portions of SQLAlchemy are the **Object Relational Mapper** and the **SQL Expression Language**. SQL Expressions can be used independently of the ORM. When using the ORM, the SQL Expression language remains part of the public facing API as it is used within object-relational configurations and queries.
+The SQLAlchemy SQL Toolkit and Object Relational Mapper
+is a comprehensive set of tools for working with
+databases and Python. It has several distinct areas of
+functionality which can be used individually or combined
+together. Its major components are illustrated below. The
+arrows represent the general dependencies of components:
+
+.. image:: sqla_arch_small.png
+
+Above, the two most significant front-facing portions of
+SQLAlchemy are the **Object Relational Mapper** and the
+**SQL Expression Language**. SQL Expressions can be used
+independently of the ORM. When using the ORM, the SQL
+Expression language remains part of the public facing API
+as it is used within object-relational configurations and
+queries.
+
+Documentation Overview
+======================
-Tutorials
-=========
+The documentation is separated into three sections: :ref:`orm_toplevel`, :ref:`core_toplevel`, and :ref:`dialect_toplevel`.
-* :ref:`ormtutorial_toplevel` - This describes the richest feature of SQLAlchemy, its object relational mapper. If you want to work with higher-level SQL which is constructed automatically for you, as well as management of Python objects, proceed to this tutorial.
-* :ref:`sqlexpression_toplevel` - The core of SQLAlchemy is its SQL expression language. The SQL Expression Language is a toolkit all its own, independent of the ORM package, which can be used to construct manipulable SQL expressions which can be programmatically constructed, modified, and executed, returning cursor-like result sets. It's a lot more lightweight than the ORM and is appropriate for higher scaling SQL operations. It's also heavily present within the ORM's public facing API, so advanced ORM users will want to master this language as well.
+In :ref:`orm_toplevel`, the Object Relational Mapper is introduced and fully
+described. New users should begin with the :ref:`ormtutorial_toplevel`. If you
+want to work with higher-level SQL which is constructed automatically for you,
+as well as management of Python objects, proceed to this tutorial.
-Main Documentation
-==================
+In :ref:`core_toplevel`, the breadth of SQLAlchemy's SQL and database
+integration and description services are documented, the core of which is the
+SQL Expression language. The SQL Expression Language is a toolkit all its own,
+independent of the ORM package, which can be used to construct manipulable SQL
+expressions which can be programmatically constructed, modified, and executed,
+returning cursor-like result sets. In contrast to the ORM's domain-centric
+mode of usage, the expression language provides a schema-centric usage
+paradigm. New users should begin here with :ref:`sqlexpression_toplevel`.
+SQLAlchemy engine, connection, and pooling services are also described in
+:ref:`core_toplevel`.
-* :ref:`datamapping_toplevel` - A comprehensive walkthrough of major ORM patterns and techniques.
-* :ref:`session_toplevel` - A detailed description of SQLAlchemy's Session object
-* :ref:`engines_toplevel` - Describes SQLAlchemy's database-connection facilities, including connection documentation and working with connections and transactions.
-* :ref:`metadata_toplevel` - All about schema management using :class:`~sqlalchemy.schema.MetaData` and :class:`~sqlalchemy.schema.Table` objects; reading database schemas into your application, creating and dropping tables, constraints, defaults, sequences, indexes.
-* :ref:`pooling_toplevel` - Further detail about SQLAlchemy's connection pool library.
-* :ref:`types` - Datatypes included with SQLAlchemy, their functions, as well as how to create your own types.
-* :ref:`plugins` - Included addons for SQLAlchemy
+In :ref:`dialect_toplevel`, reference documentation for all provided
+database and DBAPI backends is provided.
Code Examples
=============
-Working code examples are included in the SQLAlchemy distribution, and there are also usage recipes on the SQLAlchemy wiki. A description of all the included example applications is at :ref:`examples_toplevel`.
+Working code examples, mostly regarding the ORM, are included in the
+SQLAlchemy distribution. A description of all the included example
+applications is at :ref:`examples_toplevel`.
-API Reference
-=============
-
-An organized section of all SQLAlchemy APIs is at :ref:`api_reference_toplevel`.
+There is also a wide variety of examples involving both core SQLAlchemy
+constructs as well as the ORM on the wiki. See
+`<http://www.sqlalchemy.org/trac/wiki/UsageRecipes>`_.
Installing SQLAlchemy
======================
-Installing SQLAlchemy from scratch is most easily achieved with `setuptools <http://pypi.python.org/pypi/setuptools/>`_. Assuming it's installed, just run this from the command-line:
+Installing SQLAlchemy from scratch is most easily achieved with `setuptools
+<http://pypi.python.org/pypi/setuptools/>`_, or alternatively
+`pip <http://pypi.python.org/pypi/pip/>`_. Assuming it's installed, just run
+this from the command-line:
.. sourcecode:: none
# easy_install SQLAlchemy
+
+Or with pip:
+
+.. sourcecode:: none
-This command will download the latest version of SQLAlchemy from the `Python Cheese Shop <http://pypi.python.org/pypi/SQLAlchemy>`_ and install it to your system.
+ # pip install SQLAlchemy
-* setuptools_
-* `install setuptools <http://peak.telecommunity.com/DevCenter/EasyInstall#installation-instructions>`_
-* `pypi <http://pypi.python.org/pypi/SQLAlchemy>`_
+This command will download the latest version of SQLAlchemy from the `Python
+Cheese Shop <http://pypi.python.org/pypi/SQLAlchemy>`_ and install it to your
+system.
Otherwise, you can install from the distribution using the ``setup.py`` script:
diff --git a/doc/build/mappers.rst b/doc/build/mappers.rst
deleted file mode 100644
index 71f71a8cf..000000000
--- a/doc/build/mappers.rst
+++ /dev/null
@@ -1,2068 +0,0 @@
-.. _datamapping_toplevel:
-
-====================
-Mapper Configuration
-====================
-This section references most major configurational patterns involving the
-:func:`~.orm.mapper` and :func:`.relationship` functions. It assumes you've
-worked through :ref:`ormtutorial_toplevel` and know how to construct and use
-rudimentary mappers and relationships.
-
-Mapper Configuration
-====================
-
-This section describes a variety of configurational patterns that are usable
-with mappers. Most of these examples apply equally well
-to the usage of distinct :func:`~.orm.mapper` and :class:`.Table` objects
-as well as when using the :mod:`sqlalchemy.ext.declarative` extension.
-
-Any example in this section which takes a form such as::
-
- mapper(User, users_table, primary_key=[users_table.c.id])
-
-Would translate into declarative as::
-
- class User(Base):
- __table__ = users_table
- __mapper_args__ = {
- 'primary_key':users_table.c.id
- }
-
-Or if using ``__tablename__``, :class:`.Column` objects are declared inline
-with the class definition. These are usable as is within ``__mapper_args__``::
-
- class User(Base):
- __tablename__ = 'users'
-
- id = Column(Integer)
-
- __mapper_args__ = {
- 'primary_key':id
- }
-
-For a full reference of all options available on mappers, please see the API
-description of :func:`~.orm.mapper`.
-
-Customizing Column Properties
-------------------------------
-
-The default behavior of :func:`~.orm.mapper` is to assemble all the columns in
-the mapped :class:`.Table` into mapped object attributes. This behavior can be
-modified in several ways, as well as enhanced by SQL expressions.
-
-Mapping a Subset of Table Columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To reference a subset of columns referenced by a table as mapped attributes,
-use the ``include_properties`` or ``exclude_properties`` arguments. For
-example::
-
- mapper(User, users_table, include_properties=['user_id', 'user_name'])
-
-Will map the ``User`` class to the ``users_table`` table, only including
-the "user_id" and "user_name" columns - the rest are not referenced.
-Similarly::
-
- mapper(Address, addresses_table,
- exclude_properties=['street', 'city', 'state', 'zip'])
-
-will map the ``Address`` class to the ``addresses_table`` table, including
-all columns present except "street", "city", "state", and "zip".
-
-When this mapping is used, the columns that are not included will not be
-referenced in any SELECT statements emitted by :class:`.Query`, nor will there
-be any mapped attribute on the mapped class which represents the column;
-setting a value on the mapped class to a name which matches an un-mapped
-column will have no effect.
-
-It should be noted however that "default", "on_update", "server_default" and
-"server_onupdate" attributes configured on the :class:`.Column` *will* continue to function normally. The columns are ignored only at the mapper
-level, but not at the SQL expression level. The ORM uses the SQL expression
-system to emit SQL to the database.
-
-Attribute Names for Mapped Columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To change the name of the attribute mapped to a particular column, place the
-:class:`~sqlalchemy.schema.Column` object in the ``properties`` dictionary
-with the desired key::
-
- mapper(User, users_table, properties={
- 'id': users_table.c.user_id,
- 'name': users_table.c.user_name,
- })
-
-When using :mod:`~sqlalchemy.ext.declarative`, the above configuration is more
-succinct - place the full column name in the :class:`.Column` definition,
-using the desired attribute name in the class definition::
-
- from sqlalchemy.ext.declarative import declarative_base
- Base = declarative_base()
-
- class User(Base):
- __tablename__ = 'user'
- id = Column('user_id', Integer, primary_key=True)
- name = Column('user_name', String(50))
-
-To change the names of all attributes using a prefix, use the
-``column_prefix`` option. This is useful for some schemes that would like
-to declare alternate attributes::
-
- mapper(User, users_table, column_prefix='_')
-
-The above will place attribute names such as ``_user_id``, ``_user_name``,
-``_password`` etc. on the mapped ``User`` class.
-
-
-Mapping Multiple Columns to a Single Attribute
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To place multiple columns which are known to be "synonymous" based on foreign
-key relationship or join condition into the same mapped attribute, put them
-together using a list, as below where we map to a :func:`~.expression.join`::
-
- from sqlalchemy.sql import join
-
- # join users and addresses
- usersaddresses = join(users_table, addresses_table, \
- users_table.c.user_id == addresses_table.c.user_id)
-
- # user_id columns are equated under the 'user_id' attribute
- mapper(User, usersaddresses, properties={
- 'id':[users_table.c.user_id, addresses_table.c.user_id],
- })
-
-For further examples on this particular use case, see :ref:`maptojoin`.
-
-Deferred Column Loading
-------------------------
-
-This feature allows particular columns of a table to not be loaded by default,
-instead being loaded later on when first referenced. It is essentially
-"column-level lazy loading". This feature is useful when one wants to avoid
-loading a large text or binary field into memory when it's not needed.
-Individual columns can be lazy loaded by themselves or placed into groups that
-lazy-load together::
-
- book_excerpts = Table('books', db,
- Column('book_id', Integer, primary_key=True),
- Column('title', String(200), nullable=False),
- Column('summary', String(2000)),
- Column('excerpt', String),
- Column('photo', Binary)
- )
-
- class Book(object):
- pass
-
- # define a mapper that will load each of 'excerpt' and 'photo' in
- # separate, individual-row SELECT statements when each attribute
- # is first referenced on the individual object instance
- mapper(Book, book_excerpts, properties={
- 'excerpt': deferred(book_excerpts.c.excerpt),
- 'photo': deferred(book_excerpts.c.photo)
- })
-
-Deferred columns can be placed into groups so that they load together::
-
- book_excerpts = Table('books', db,
- Column('book_id', Integer, primary_key=True),
- Column('title', String(200), nullable=False),
- Column('summary', String(2000)),
- Column('excerpt', String),
- Column('photo1', Binary),
- Column('photo2', Binary),
- Column('photo3', Binary)
- )
-
- class Book(object):
- pass
-
- # define a mapper with a 'photos' deferred group. when one photo is referenced,
- # all three photos will be loaded in one SELECT statement. The 'excerpt' will
- # be loaded separately when it is first referenced.
- mapper(Book, book_excerpts, properties = {
- 'excerpt': deferred(book_excerpts.c.excerpt),
- 'photo1': deferred(book_excerpts.c.photo1, group='photos'),
- 'photo2': deferred(book_excerpts.c.photo2, group='photos'),
- 'photo3': deferred(book_excerpts.c.photo3, group='photos')
- })
-
-You can defer or undefer columns at the :class:`~sqlalchemy.orm.query.Query` level using the :func:`.defer` and :func:`.undefer` query options::
-
- query = session.query(Book)
- query.options(defer('summary')).all()
- query.options(undefer('excerpt')).all()
-
-And an entire "deferred group", i.e. which uses the ``group`` keyword argument to :func:`~sqlalchemy.orm.deferred()`, can be undeferred using :func:`.undefer_group()`, sending in the group name::
-
- query = session.query(Book)
- query.options(undefer_group('photos')).all()
-
-.. _mapper_sql_expressions:
-
-SQL Expressions as Mapped Attributes
--------------------------------------
-
-To add a SQL clause composed of local or external columns as
-a read-only, mapped column attribute, use the
-:func:`~sqlalchemy.orm.column_property()` function. Any
-scalar-returning
-:class:`~sqlalchemy.sql.expression.ClauseElement` may be
-used. Unlike older versions of SQLAlchemy, there is no :func:`~.sql.expression.label` requirement::
-
- mapper(User, users_table, properties={
- 'fullname': column_property(
- users_table.c.firstname + " " + users_table.c.lastname
- )
- })
-
-Correlated subqueries may be used as well:
-
-.. sourcecode:: python+sql
-
- mapper(User, users_table, properties={
- 'address_count': column_property(
- select(
- [func.count(addresses_table.c.address_id)],
- addresses_table.c.user_id==users_table.c.user_id
- )
- )
- })
-
-The declarative form of the above is described in :ref:`declarative_sql_expressions`.
-
-Changing Attribute Behavior
-----------------------------
-
-Simple Validators
-~~~~~~~~~~~~~~~~~~
-
-A quick way to add a "validation" routine to an attribute is to use the :func:`~sqlalchemy.orm.validates` decorator. An attribute validator can raise an exception, halting the process of mutating the attribute's value, or can change the given value into something different. Validators, like all attribute extensions, are only called by normal userland code; they are not issued when the ORM is populating the object.
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.orm import validates
-
- addresses_table = Table('addresses', metadata,
- Column('id', Integer, primary_key=True),
- Column('email', String)
- )
-
- class EmailAddress(object):
- @validates('email')
- def validate_email(self, key, address):
- assert '@' in address
- return address
-
- mapper(EmailAddress, addresses_table)
-
-Validators also receive collection events, when items are added to a collection:
-
-.. sourcecode:: python+sql
-
- class User(object):
- @validates('addresses')
- def validate_address(self, key, address):
- assert '@' in address.email
- return address
-
-.. _synonyms:
-
-Using Descriptors
-~~~~~~~~~~~~~~~~~~
-
-A more comprehensive way to produce modified behavior for an attribute is to use descriptors. These are commonly used in Python using the ``property()`` function. The standard SQLAlchemy technique for descriptors is to create a plain descriptor, and to have it read/write from a mapped attribute with a different name. Below we illustrate
-this using Python 2.6-style properties::
-
- class EmailAddress(object):
-
- @property
- def email(self):
- return self._email
-
- @email.setter
- def email(self, email):
- self._email = email
-
- mapper(EmailAddress, addresses_table, properties={
- '_email': addresses_table.c.email
- })
-
-The approach above will work, but there's more we can add.
-While our ``EmailAddress`` object will shuttle the value
-through the ``email`` descriptor and into the ``_email``
-mapped attribute, the class level ``EmailAddress.email``
-attribute does not have the usual expression semantics
-usable with :class:`.Query`. To provide
-these, we instead use the :func:`.synonym`
-function as follows::
-
- mapper(EmailAddress, addresses_table, properties={
- 'email': synonym('_email', map_column=True)
- })
-
-The ``email`` attribute is now usable in the same way as any
-other mapped attribute, including filter expressions,
-get/set operations, etc.::
-
- address = session.query(EmailAddress).filter(EmailAddress.email == 'some address').one()
-
- address.email = 'some other address'
- session.flush()
-
- q = session.query(EmailAddress).filter_by(email='some other address')
-
-If the mapped class does not provide a property, the :func:`.synonym` construct will create a default getter/setter object automatically.
-
-To use synonyms with :mod:`~sqlalchemy.ext.declarative`, see the section
-:ref:`declarative_synonyms`.
-
-.. _custom_comparators:
-
-Custom Comparators
-~~~~~~~~~~~~~~~~~~~
-
-The expressions returned by comparison operations, such as
-``User.name=='ed'``, can be customized, by implementing an object that
-explicitly defines each comparison method needed. This is a relatively rare
-use case. For most needs, the approach in :ref:`mapper_sql_expressions` will
-often suffice, or alternatively a scheme like that of the
-:mod:`.derived_attributes` example. Those approaches should be tried first
-before resorting to custom comparison objects.
-
-Each of :func:`.column_property`, :func:`~.composite`, :func:`.relationship`, and :func:`.comparable_property` accept an argument called ``comparator_factory``. A subclass of :class:`.PropComparator` can be
-provided for this argument, which can then reimplement basic Python comparison
-methods such as ``__eq__()``, ``__ne__()``, ``__lt__()``, and so on. See
-each of those functions for subclassing guidelines, as it's usually best to
-subclass the :class:`.PropComparator` subclass used by that type of
-property, so that all methods remain implemented. For example, to
-allow a column-mapped attribute to do case-insensitive
-comparison::
-
- from sqlalchemy.orm.properties import ColumnProperty
- from sqlalchemy.sql import func
-
- class MyComparator(ColumnProperty.Comparator):
- def __eq__(self, other):
- return func.lower(self.__clause_element__()) == func.lower(other)
-
- mapper(EmailAddress, addresses_table, properties={
- 'email':column_property(addresses_table.c.email,
- comparator_factory=MyComparator)
- })
-
-Above, comparisons on the ``email`` column are wrapped in the SQL lower() function to produce case-insensitive matching::
-
- >>> str(EmailAddress.email == 'SomeAddress@foo.com')
- lower(addresses.email) = lower(:lower_1)
-
-In contrast, a similar effect is more easily accomplished, although
-with less control of its behavior, using a column-mapped expression::
-
-    from sqlalchemy.orm import column_property
- from sqlalchemy.sql import func
-
- mapper(EmailAddress, addresses_table, properties={
- 'email':column_property(func.lower(addresses_table.c.email))
- })
-
-In the above case, the "email" attribute will be rendered as ``lower(email)``
-in all queries, including in the columns clause of the SELECT statement.
-This means the value of "email" will be loaded as lower case, not just in
-comparisons. It's up to the user to decide if the finer-grained control
-but more upfront work of a custom :class:`.PropComparator` is necessary.
-
-.. _mapper_composite:
-
-Composite Column Types
------------------------
-
-Sets of columns can be associated with a single user-defined datatype. The ORM provides a single attribute which represents the group of columns
-using the class you provide.
-
-A simple example represents pairs of columns as a "Point" object.
-Starting with a table that represents two points as x1/y1 and x2/y2::
-
- from sqlalchemy import Table, Column
-
- vertices = Table('vertices', metadata,
- Column('id', Integer, primary_key=True),
- Column('x1', Integer),
- Column('y1', Integer),
- Column('x2', Integer),
- Column('y2', Integer),
- )
-
-We create a new class, ``Point``, that will represent each x/y as a
-pair::
-
- class Point(object):
- def __init__(self, x, y):
- self.x = x
- self.y = y
- def __composite_values__(self):
- return self.x, self.y
- def __set_composite_values__(self, x, y):
- self.x = x
- self.y = y
- def __eq__(self, other):
- return other is not None and \
- other.x == self.x and \
- other.y == self.y
- def __ne__(self, other):
- return not self.__eq__(other)
-
-The requirements for the custom datatype class are that it have a
-constructor which accepts positional arguments corresponding to its column
-format, and also provides a method ``__composite_values__()`` which
-returns the state of the object as a list or tuple, in order of its
-column-based attributes. It also should supply adequate ``__eq__()`` and
-``__ne__()`` methods which test the equality of two instances.
-
-The ``__set_composite_values__()`` method is optional. If it's not
-provided, the names of the mapped columns are taken as the names of
-attributes on the object, and ``setattr()`` is used to set data.
-
-The :func:`.composite` function is then used in the mapping::
-
- from sqlalchemy.orm import mapper, composite
-
- class Vertex(object):
- pass
-
- mapper(Vertex, vertices, properties={
- 'start': composite(Point, vertices.c.x1, vertices.c.y1),
- 'end': composite(Point, vertices.c.x2, vertices.c.y2)
- })
-
-We can now use the ``Vertex`` instances as well as querying as though the
-``start`` and ``end`` attributes are regular scalar attributes::
-
- session = Session()
- v = Vertex(Point(3, 4), Point(5, 6))
- session.add(v)
-
- v2 = session.query(Vertex).filter(Vertex.start == Point(3, 4))
-
-The "equals" comparison operation by default produces an AND of all
-corresponding columns equated to one another. This can be changed using
-the ``comparator_factory``, described in :ref:`custom_comparators`.
-Below we illustrate the "greater than" operator, implementing
-the same expression that the base "greater than" does::
-
- from sqlalchemy.orm.properties import CompositeProperty
- from sqlalchemy import sql
-
- class PointComparator(CompositeProperty.Comparator):
- def __gt__(self, other):
- """redefine the 'greater than' operation"""
-
- return sql.and_(*[a>b for a, b in
- zip(self.__clause_element__().clauses,
- other.__composite_values__())])
-
-    mapper(Vertex, vertices, properties={
- 'start': composite(Point, vertices.c.x1, vertices.c.y1,
- comparator_factory=PointComparator),
- 'end': composite(Point, vertices.c.x2, vertices.c.y2,
- comparator_factory=PointComparator)
- })
-
-Controlling Ordering
----------------------
-
-The ORM does not generate ordering for any query unless explicitly configured.
-
-The "default" ordering for a collection, which applies to list-based collections, can be configured using the ``order_by`` keyword argument on :func:`~sqlalchemy.orm.relationship`::
-
- mapper(Address, addresses_table)
-
- # order address objects by address id
- mapper(User, users_table, properties={
- 'addresses': relationship(Address, order_by=addresses_table.c.address_id)
- })
-
-Note that when using joined eager loaders with relationships, the tables used by the eager load's join are anonymously aliased. You can only order by these columns if you specify it at the :func:`~sqlalchemy.orm.relationship` level. To control ordering at the query level based on a related table, you ``join()`` to that relationship, then order by it::
-
- session.query(User).join('addresses').order_by(Address.street)
-
-Ordering for rows loaded through :class:`~sqlalchemy.orm.query.Query` is usually specified using the ``order_by()`` generative method. There is also an option to set a default ordering for Queries which are against a single mapped entity and where there was no explicit ``order_by()`` stated, which is the ``order_by`` keyword argument to ``mapper()``::
-
- # order by a column
- mapper(User, users_table, order_by=users_table.c.user_id)
-
- # order by multiple items
- mapper(User, users_table, order_by=[users_table.c.user_id, users_table.c.user_name.desc()])
-
-Above, a :class:`~sqlalchemy.orm.query.Query` issued for the ``User`` class will use the value of the mapper's ``order_by`` setting if the :class:`~sqlalchemy.orm.query.Query` itself has no ordering specified.
-
-.. _datamapping_inheritance:
-
-Mapping Class Inheritance Hierarchies
---------------------------------------
-
-SQLAlchemy supports three forms of inheritance: *single table inheritance*, where several types of classes are stored in one table, *concrete table inheritance*, where each type of class is stored in its own table, and *joined table inheritance*, where the parent/child classes are stored in their own tables that are joined together in a select. Whereas support for single and joined table inheritance is strong, concrete table inheritance is a less common scenario with some particular problems so is not quite as flexible.
-
-When mappers are configured in an inheritance relationship, SQLAlchemy has the ability to load elements "polymorphically", meaning that a single query can return objects of multiple types.
-
-For the following sections, assume this class relationship:
-
-.. sourcecode:: python+sql
-
- class Employee(object):
- def __init__(self, name):
- self.name = name
- def __repr__(self):
- return self.__class__.__name__ + " " + self.name
-
- class Manager(Employee):
- def __init__(self, name, manager_data):
- self.name = name
- self.manager_data = manager_data
- def __repr__(self):
- return self.__class__.__name__ + " " + self.name + " " + self.manager_data
-
- class Engineer(Employee):
- def __init__(self, name, engineer_info):
- self.name = name
- self.engineer_info = engineer_info
- def __repr__(self):
- return self.__class__.__name__ + " " + self.name + " " + self.engineer_info
-
-Joined Table Inheritance
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In joined table inheritance, each class along a particular class's list of parents is represented by a unique table. The total set of attributes for a particular instance is represented as a join along all tables in its inheritance path. Here, we first define a table to represent the ``Employee`` class. This table will contain a primary key column (or columns), and a column for each attribute that's represented by ``Employee``. In this case it's just ``name``::
-
- employees = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('type', String(30), nullable=False)
- )
-
-The table also has a column called ``type``. It is strongly advised in both single- and joined- table inheritance scenarios that the root table contains a column whose sole purpose is that of the **discriminator**; it stores a value which indicates the type of object represented within the row. The column may be of any desired datatype. While there are some "tricks" to work around the requirement that there be a discriminator column, they are more complicated to configure when one wishes to load polymorphically.
-
-Next we define individual tables for each of ``Engineer`` and ``Manager``, which contain columns that represent the attributes unique to the subclass they represent. Each table also must contain a primary key column (or columns), and in most cases a foreign key reference to the parent table. It is standard practice that the same column is used for both of these roles, and that the column is also named the same as that of the parent table. However this is optional in SQLAlchemy; separate columns may be used for primary key and parent-relationship, the column may be named differently than that of the parent, and even a custom join condition can be specified between parent and child tables instead of using a foreign key::
-
- engineers = Table('engineers', metadata,
- Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
- Column('engineer_info', String(50)),
- )
-
- managers = Table('managers', metadata,
- Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
- Column('manager_data', String(50)),
- )
-
-One natural effect of the joined table inheritance configuration is that the identity of any mapped object can be determined entirely from the base table. This has obvious advantages, so SQLAlchemy always considers the primary key columns of a joined inheritance class to be those of the base table only, unless otherwise manually configured. In other words, the ``employee_id`` column of both the ``engineers`` and ``managers`` table is not used to locate the ``Engineer`` or ``Manager`` object itself - only the value in ``employees.employee_id`` is considered, and the primary key in this case is non-composite. ``engineers.employee_id`` and ``managers.employee_id`` are still of course critical to the proper operation of the pattern overall as they are used to locate the joined row, once the parent row has been determined, either through a distinct SELECT statement or all at once within a JOIN.
-
-We then configure mappers as usual, except we use some additional arguments to indicate the inheritance relationship, the polymorphic discriminator column, and the **polymorphic identity** of each class; this is the value that will be stored in the polymorphic discriminator column.
-
-.. sourcecode:: python+sql
-
- mapper(Employee, employees, polymorphic_on=employees.c.type, polymorphic_identity='employee')
- mapper(Engineer, engineers, inherits=Employee, polymorphic_identity='engineer')
- mapper(Manager, managers, inherits=Employee, polymorphic_identity='manager')
-
-And that's it. Querying against ``Employee`` will return a combination of ``Employee``, ``Engineer`` and ``Manager`` objects. Newly saved ``Engineer``, ``Manager``, and ``Employee`` objects will automatically populate the ``employees.type`` column with ``engineer``, ``manager``, or ``employee``, as appropriate.
-
-Controlling Which Tables are Queried
-+++++++++++++++++++++++++++++++++++++
-
-The :func:`~sqlalchemy.orm.query.Query.with_polymorphic` method of :class:`~sqlalchemy.orm.query.Query` affects the specific subclass tables which the Query selects from. Normally, a query such as this:
-
-.. sourcecode:: python+sql
-
- session.query(Employee).all()
-
-...selects only from the ``employees`` table. When loading fresh from the database, our joined-table setup will query from the parent table only, using SQL such as this:
-
-.. sourcecode:: python+sql
-
- {opensql}
- SELECT employees.employee_id AS employees_employee_id, employees.name AS employees_name, employees.type AS employees_type
- FROM employees
- []
-
-As attributes are requested from those ``Employee`` objects which are represented in either the ``engineers`` or ``managers`` child tables, a second load is issued for the columns in that related row, if the data was not already loaded. So above, after accessing the objects you'd see further SQL issued along the lines of:
-
-.. sourcecode:: python+sql
-
- {opensql}
- SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data
- FROM managers
- WHERE ? = managers.employee_id
- [5]
- SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info
- FROM engineers
- WHERE ? = engineers.employee_id
- [2]
-
-This behavior works well when issuing searches for small numbers of items, such as when using ``get()``, since the full range of joined tables is not pulled in to the SQL statement unnecessarily. But when querying a larger span of rows which are known to be of many types, you may want to actively join to some or all of the joined tables. The ``with_polymorphic`` feature of :class:`~sqlalchemy.orm.query.Query` and ``mapper`` provides this.
-
-Telling our query to polymorphically load ``Engineer`` and ``Manager`` objects:
-
-.. sourcecode:: python+sql
-
- query = session.query(Employee).with_polymorphic([Engineer, Manager])
-
-produces a query which joins the ``employees`` table to both the ``engineers`` and ``managers`` tables like the following:
-
-.. sourcecode:: python+sql
-
- query.all()
- {opensql}
- SELECT employees.employee_id AS employees_employee_id, engineers.employee_id AS engineers_employee_id, managers.employee_id AS managers_employee_id, employees.name AS employees_name, employees.type AS employees_type, engineers.engineer_info AS engineers_engineer_info, managers.manager_data AS managers_manager_data
- FROM employees LEFT OUTER JOIN engineers ON employees.employee_id = engineers.employee_id LEFT OUTER JOIN managers ON employees.employee_id = managers.employee_id
- []
-
-:func:`~sqlalchemy.orm.query.Query.with_polymorphic` accepts a single class or mapper, a list of classes/mappers, or the string ``'*'`` to indicate all subclasses:
-
-.. sourcecode:: python+sql
-
- # join to the engineers table
- query.with_polymorphic(Engineer)
-
- # join to the engineers and managers tables
- query.with_polymorphic([Engineer, Manager])
-
- # join to all subclass tables
- query.with_polymorphic('*')
-
-It also accepts a second argument ``selectable`` which replaces the automatic join creation and instead selects directly from the selectable given. This feature is normally used with "concrete" inheritance, described later, but can be used with any kind of inheritance setup in the case that specialized SQL should be used to load polymorphically:
-
-.. sourcecode:: python+sql
-
- # custom selectable
- query.with_polymorphic([Engineer, Manager], employees.outerjoin(managers).outerjoin(engineers))
-
-:func:`~sqlalchemy.orm.query.Query.with_polymorphic` is also needed
-when you wish to add filter criteria that are specific to one or more
-subclasses; it makes the subclasses' columns available to the WHERE clause:
-
-.. sourcecode:: python+sql
-
- session.query(Employee).with_polymorphic([Engineer, Manager]).\
- filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))
-
-Note that if you only need to load a single subtype, such as just the ``Engineer`` objects, :func:`~sqlalchemy.orm.query.Query.with_polymorphic` is not needed since you would query against the ``Engineer`` class directly.
-
-The mapper also accepts ``with_polymorphic`` as a configurational argument so that the joined-style load will be issued automatically. This argument may be the string ``'*'``, a list of classes, or a tuple consisting of either, followed by a selectable.
-
-.. sourcecode:: python+sql
-
- mapper(Employee, employees, polymorphic_on=employees.c.type, \
- polymorphic_identity='employee', with_polymorphic='*')
- mapper(Engineer, engineers, inherits=Employee, polymorphic_identity='engineer')
- mapper(Manager, managers, inherits=Employee, polymorphic_identity='manager')
-
-The above mapping will produce a query similar to that of ``with_polymorphic('*')`` for every query of ``Employee`` objects.
-
-Using :func:`~sqlalchemy.orm.query.Query.with_polymorphic` with :class:`~sqlalchemy.orm.query.Query` will override the mapper-level ``with_polymorphic`` setting.
-
-Creating Joins to Specific Subtypes
-++++++++++++++++++++++++++++++++++++
-
-The :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` method is a helper which allows the construction of joins along :func:`~sqlalchemy.orm.relationship` paths while narrowing the criterion to specific subclasses. Suppose the ``employees`` table represents a collection of employees which are associated with a ``Company`` object. We'll add a ``company_id`` column to the ``employees`` table and a new table ``companies``:
-
-.. sourcecode:: python+sql
-
- companies = Table('companies', metadata,
- Column('company_id', Integer, primary_key=True),
- Column('name', String(50))
- )
-
- employees = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('type', String(30), nullable=False),
- Column('company_id', Integer, ForeignKey('companies.company_id'))
- )
-
- class Company(object):
- pass
-
- mapper(Company, companies, properties={
- 'employees': relationship(Employee)
- })
-
-When querying from ``Company`` onto the ``Employee`` relationship, the ``join()`` method as well as the ``any()`` and ``has()`` operators will create a join from ``companies`` to ``employees``, without including ``engineers`` or ``managers`` in the mix. If we wish to have criterion which is specifically against the ``Engineer`` class, we can tell those methods to join or subquery against the joined table representing the subclass using the :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` operator:
-
-.. sourcecode:: python+sql
-
- session.query(Company).join(Company.employees.of_type(Engineer)).filter(Engineer.engineer_info=='someinfo')
-
-A longhand version of this would involve spelling out the full target selectable within a 2-tuple:
-
-.. sourcecode:: python+sql
-
- session.query(Company).join((employees.join(engineers), Company.employees)).filter(Engineer.engineer_info=='someinfo')
-
-Currently, :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts a single class argument. It may be expanded later on to accept multiple classes. For now, to join to any group of subclasses, the longhand notation allows this flexibility:
-
-.. sourcecode:: python+sql
-
- session.query(Company).join((employees.outerjoin(engineers).outerjoin(managers), Company.employees)).\
- filter(or_(Engineer.engineer_info=='someinfo', Manager.manager_data=='somedata'))
-
-The ``any()`` and ``has()`` operators also can be used with :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` when the embedded criterion is in terms of a subclass:
-
-.. sourcecode:: python+sql
-
- session.query(Company).filter(Company.employees.of_type(Engineer).any(Engineer.engineer_info=='someinfo')).all()
-
-Note that ``any()`` and ``has()`` are both shorthand for a correlated EXISTS query. To build one by hand looks like:
-
-.. sourcecode:: python+sql
-
- session.query(Company).filter(
- exists([1],
- and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id),
- from_obj=employees.join(engineers)
- )
- ).all()
-
-The EXISTS subquery above selects from the join of ``employees`` to ``engineers``, and also specifies criterion which correlates the EXISTS subselect back to the parent ``companies`` table.
-
-Single Table Inheritance
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-Single table inheritance is where the attributes of the base class as well as all subclasses are represented within a single table. A column is present in the table for every attribute mapped to the base class and all subclasses; the columns which correspond to a single subclass are nullable. This configuration looks much like joined-table inheritance except there's only one table. In this case, a ``type`` column is required, as there would be no other way to discriminate between classes. The table is specified in the base mapper only; for the inheriting classes, leave their ``table`` parameter blank:
-
-.. sourcecode:: python+sql
-
- employees_table = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('manager_data', String(50)),
- Column('engineer_info', String(50)),
- Column('type', String(20), nullable=False)
- )
-
- employee_mapper = mapper(Employee, employees_table, \
- polymorphic_on=employees_table.c.type, polymorphic_identity='employee')
- manager_mapper = mapper(Manager, inherits=employee_mapper, polymorphic_identity='manager')
- engineer_mapper = mapper(Engineer, inherits=employee_mapper, polymorphic_identity='engineer')
-
-Note that the mappers for the derived classes Manager and Engineer omit the specification of their associated table, as it is inherited from the employee_mapper. Omitting the table specification for derived mappers in single-table inheritance is required.
-
-.. _concrete_inheritance:
-
-Concrete Table Inheritance
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-This form of inheritance maps each class to a distinct table, as below:
-
-.. sourcecode:: python+sql
-
- employees_table = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- )
-
- managers_table = Table('managers', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('manager_data', String(50)),
- )
-
- engineers_table = Table('engineers', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('engineer_info', String(50)),
- )
-
-Notice in this case there is no ``type`` column. If polymorphic loading is not required, there's no advantage to using ``inherits`` here; you just define a separate mapper for each class.
-
-.. sourcecode:: python+sql
-
- mapper(Employee, employees_table)
- mapper(Manager, managers_table)
- mapper(Engineer, engineers_table)
-
-To load polymorphically, the ``with_polymorphic`` argument is required, along with a selectable indicating how rows should be loaded. In this case we must construct a UNION of all three tables. SQLAlchemy includes a helper function to create these called :func:`~sqlalchemy.orm.util.polymorphic_union`, which will map all the different columns into a structure of selects with the same numbers and names of columns, and also generate a virtual ``type`` column for each subselect:
-
-.. sourcecode:: python+sql
-
- pjoin = polymorphic_union({
- 'employee': employees_table,
- 'manager': managers_table,
- 'engineer': engineers_table
- }, 'type', 'pjoin')
-
- employee_mapper = mapper(Employee, employees_table, with_polymorphic=('*', pjoin), \
- polymorphic_on=pjoin.c.type, polymorphic_identity='employee')
- manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, \
- concrete=True, polymorphic_identity='manager')
- engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, \
- concrete=True, polymorphic_identity='engineer')
-
-Upon select, the polymorphic union produces a query like this:
-
-.. sourcecode:: python+sql
-
- session.query(Employee).all()
- {opensql}
- SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id,
- pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info
- FROM (
- SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name,
- CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type
- FROM employees
- UNION ALL
- SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name,
- CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type
- FROM managers
- UNION ALL
- SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name,
- engineers.engineer_info AS engineer_info, 'engineer' AS type
- FROM engineers
- ) AS pjoin
- []
-
-Using Relationships with Inheritance
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Both joined-table and single table inheritance scenarios produce mappings which are usable in :func:`~sqlalchemy.orm.relationship` functions; that is, it's possible to map a parent object to a child object which is polymorphic. Similarly, inheriting mappers can have :func:`~sqlalchemy.orm.relationship` objects of their own at any level, which are inherited to each child class. The only requirement for relationships is that there is a table relationship between parent and child. An example is the following modification to the joined table inheritance example, which sets a bi-directional relationship between ``Employee`` and ``Company``:
-
-.. sourcecode:: python+sql
-
- employees_table = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('company_id', Integer, ForeignKey('companies.company_id'))
- )
-
- companies = Table('companies', metadata,
- Column('company_id', Integer, primary_key=True),
- Column('name', String(50)))
-
- class Company(object):
- pass
-
- mapper(Company, companies, properties={
- 'employees': relationship(Employee, backref='company')
- })
-
-SQLAlchemy has a lot of experience in this area; the optimized "outer join" approach can be used freely for parent and child relationships, eager loads are fully usable, :func:`~sqlalchemy.orm.aliased` objects and other techniques are fully supported as well.
-
-In a concrete inheritance scenario, mapping relationships is more difficult since the distinct classes do not share a table. In this case, you *can* establish a relationship from parent to child if a join condition can be constructed from parent to child, if each child table contains a foreign key to the parent:
-
-.. sourcecode:: python+sql
-
- companies = Table('companies', metadata,
- Column('id', Integer, primary_key=True),
- Column('name', String(50)))
-
- employees_table = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('company_id', Integer, ForeignKey('companies.id'))
- )
-
- managers_table = Table('managers', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('manager_data', String(50)),
- Column('company_id', Integer, ForeignKey('companies.id'))
- )
-
- engineers_table = Table('engineers', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('name', String(50)),
- Column('engineer_info', String(50)),
- Column('company_id', Integer, ForeignKey('companies.id'))
- )
-
- mapper(Employee, employees_table, with_polymorphic=('*', pjoin), polymorphic_on=pjoin.c.type, polymorphic_identity='employee')
- mapper(Manager, managers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='manager')
- mapper(Engineer, engineers_table, inherits=employee_mapper, concrete=True, polymorphic_identity='engineer')
- mapper(Company, companies, properties={
- 'employees': relationship(Employee)
- })
-
-The big limitation with concrete table inheritance is that :func:`~sqlalchemy.orm.relationship` objects placed on each concrete mapper do **not** propagate to child mappers. If you want to have the same :func:`~sqlalchemy.orm.relationship` objects set up on all concrete mappers, they must be configured manually on each. To configure back references in such a configuration the ``back_populates`` keyword may be used instead of ``backref``, such as below where both ``A(object)`` and ``B(A)`` bidirectionally reference ``C``::
-
- ajoin = polymorphic_union({
- 'a':a_table,
- 'b':b_table
- }, 'type', 'ajoin')
-
- mapper(A, a_table, with_polymorphic=('*', ajoin),
- polymorphic_on=ajoin.c.type, polymorphic_identity='a',
- properties={
- 'some_c':relationship(C, back_populates='many_a')
- })
- mapper(B, b_table,inherits=A, concrete=True,
- polymorphic_identity='b',
- properties={
- 'some_c':relationship(C, back_populates='many_a')
- })
- mapper(C, c_table, properties={
- 'many_a':relationship(A, collection_class=set, back_populates='some_c'),
- })
-
-
-.. _maptojoin:
-
-Mapping a Class against Multiple Tables
-----------------------------------------
-
-Mappers can be constructed against arbitrary relational units (called ``Selectables``) as well as plain ``Tables``. For example, the ``join`` keyword from the SQL package creates a neat selectable unit comprised of multiple tables, complete with its own composite primary key, which can be passed in to a mapper as the table.
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.sql import join
-
- class AddressUser(object):
- pass
-
- # define a Join
- j = join(users_table, addresses_table)
-
- # map to it - the identity of an AddressUser object will be
- # based on (user_id, address_id) since those are the primary keys involved
- mapper(AddressUser, j, properties={
- 'user_id': [users_table.c.user_id, addresses_table.c.user_id]
- })
-
-A second example:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.sql import join
-
- # many-to-many join on an association table
- j = join(users_table, userkeywords,
- users_table.c.user_id==userkeywords.c.user_id).join(keywords,
- userkeywords.c.keyword_id==keywords.c.keyword_id)
-
- # a class
- class KeywordUser(object):
- pass
-
- # map to it - the identity of a KeywordUser object will be
- # (user_id, keyword_id) since those are the primary keys involved
- mapper(KeywordUser, j, properties={
- 'user_id': [users_table.c.user_id, userkeywords.c.user_id],
- 'keyword_id': [userkeywords.c.keyword_id, keywords.c.keyword_id]
- })
-
-In both examples above, "composite" columns were added as properties to the mappers; these are aggregations of multiple columns into one mapper property, which instructs the mapper to keep both of those columns set at the same value.
-
-Mapping a Class against Arbitrary Selects
-------------------------------------------
-
-Similar to mapping against a join, a plain select() object can be used with a mapper as well. Below, an example select which contains two aggregate functions and a group_by is mapped to a class:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.sql import select
-
- s = select([customers,
- func.count(orders).label('order_count'),
- func.max(orders.price).label('highest_order')],
- customers.c.customer_id==orders.c.customer_id,
- group_by=[c for c in customers.c]
- ).alias('somealias')
- class Customer(object):
- pass
-
- mapper(Customer, s)
-
-Above, the "customers" table is joined against the "orders" table to produce a full row for each customer row, the total count of related rows in the "orders" table, and the highest price in the "orders" table, grouped against the full set of columns in the "customers" table. That query is then mapped against the Customer class. New instances of Customer will contain attributes for each column in the "customers" table as well as an "order_count" and "highest_order" attribute. Updates to the Customer object will only be reflected in the "customers" table and not the "orders" table. This is because the primary key columns of the "orders" table are not represented in this mapper and therefore the table is not affected by save or delete operations.
-
-Multiple Mappers for One Class
--------------------------------
-
-The first mapper created for a certain class is known as that class's "primary mapper." Other mappers can be created as well on the "load side" - these are called **secondary mappers**. This is a mapper that must be constructed with the keyword argument ``non_primary=True``, and represents a load-only mapper. Objects that are loaded with a secondary mapper will have their save operation processed by the primary mapper. It is also invalid to add new :func:`~sqlalchemy.orm.relationship` objects to a non-primary mapper. To use this mapper with the Session, specify it to the :meth:`~sqlalchemy.orm.session.Session.query` method:
-
-example:
-
-.. sourcecode:: python+sql
-
- # primary mapper
- mapper(User, users_table)
-
- # make a secondary mapper to load User against a join
- othermapper = mapper(User, users_table.join(someothertable), non_primary=True)
-
- # select
- result = session.query(othermapper).select()
-
-The "non primary mapper" is a rarely needed feature of SQLAlchemy; in most cases, the :class:`~sqlalchemy.orm.query.Query` object can produce any kind of query that's desired. It's recommended that a straight :class:`~sqlalchemy.orm.query.Query` be used in place of a non-primary mapper unless the mapper approach is absolutely needed. Current use cases for the "non primary mapper" are when you want to map the class to a particular select statement or view to which additional query criterion can be added, and for when the particular mapped select statement or view is to be placed in a :func:`~sqlalchemy.orm.relationship` of a parent mapper.
-
-Multiple "Persistence" Mappers for One Class
----------------------------------------------
-
-The non_primary mapper defines alternate mappers for the purposes of loading objects. What if we want the same class to be *persisted* differently, such as to different tables? SQLAlchemy
-refers to this as the "entity name" pattern, and in Python one can use a recipe which creates
-anonymous subclasses which are distinctly mapped. See the recipe at `Entity Name <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName>`_.
-
-Constructors and Object Initialization
----------------------------------------
-
-Mapping imposes no restrictions or requirements on the constructor (``__init__``) method for the class. You are free to require any arguments for the function
-that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor
-for a Python class.
-
-The SQLAlchemy ORM does not call ``__init__`` when recreating objects from database rows. The ORM's process is somewhat akin to the Python standard library's
-``pickle`` module, invoking the low level ``__new__`` method and then quietly restoring attributes directly on the instance rather than calling ``__init__``.
-
-If you need to do some setup on database-loaded instances before they're ready to use, you can use the ``@reconstructor`` decorator to tag a method as the ORM
-counterpart to ``__init__``. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. This is useful for
-recreating transient properties that are normally assigned in your ``__init__``::
-
- from sqlalchemy import orm
-
- class MyMappedClass(object):
- def __init__(self, data):
- self.data = data
- # we need stuff on all instances, but not in the database.
- self.stuff = []
-
- @orm.reconstructor
- def init_on_load(self):
- self.stuff = []
-
-When ``obj = MyMappedClass()`` is executed, Python calls the ``__init__`` method as normal and the ``data`` argument is required. When instances are loaded
-during a :class:`~sqlalchemy.orm.query.Query` operation as in ``query(MyMappedClass).one()``, ``init_on_load`` is called instead.
-
-Any method may be tagged as the :func:`~sqlalchemy.orm.reconstructor`, even the ``__init__`` method. SQLAlchemy will call the reconstructor method with no arguments. Scalar
-(non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet
-available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush()
-operation, so the activity within a reconstructor should be conservative.
-
-While the ORM does not call your ``__init__`` method, it will modify the class's ``__init__`` slightly. The method is lightly wrapped to act as a trigger for
-the ORM, allowing mappers to be compiled automatically and will fire a :func:`~sqlalchemy.orm.interfaces.MapperExtension.init_instance` event that :class:`~sqlalchemy.orm.interfaces.MapperExtension` objects may listen for.
-:class:`~sqlalchemy.orm.interfaces.MapperExtension` objects can also listen for a ``reconstruct_instance`` event, analogous to the :func:`~sqlalchemy.orm.reconstructor` decorator above.
-
-.. _extending_mapper:
-
-Extending Mapper
------------------
-
-Mappers can have functionality augmented or replaced at many points in their execution via the usage of the MapperExtension class. This class is just a series of "hooks" where various functionality takes place. An application can make its own MapperExtension objects, overriding only the methods it needs. Methods that are not overridden return the special value ``sqlalchemy.orm.EXT_CONTINUE`` to allow processing to continue to the next MapperExtension or simply proceed normally if there are no more extensions.
-
-API documentation for MapperExtension: :class:`sqlalchemy.orm.interfaces.MapperExtension`
-
-To use MapperExtension, make your own subclass of it and just send it off to a mapper::
-
- m = mapper(User, users_table, extension=MyExtension())
-
-Multiple extensions will be chained together and processed in order; they are specified as a list::
-
- m = mapper(User, users_table, extension=[ext1, ext2, ext3])
-
-.. _advdatamapping_relationship:
-
-Relationship Configuration
-==========================
-
-Basic Relational Patterns
---------------------------
-
-A quick walkthrough of the basic relational patterns. Note that the :func:`~sqlalchemy.orm.relationship()` function is known as :func:`~sqlalchemy.orm.relation()`
-in all SQLAlchemy versions prior to 0.6beta2, including the 0.5 and 0.4 series.
-
-One To Many
-~~~~~~~~~~~~
-
-A one to many relationship places a foreign key in the child table referencing the parent. SQLAlchemy creates the relationship as a collection on the parent object containing instances of the child object.
-
-.. sourcecode:: python+sql
-
- parent_table = Table('parent', metadata,
- Column('id', Integer, primary_key=True))
-
- child_table = Table('child', metadata,
- Column('id', Integer, primary_key=True),
- Column('parent_id', Integer, ForeignKey('parent.id')))
-
- class Parent(object):
- pass
-
- class Child(object):
- pass
-
- mapper(Parent, parent_table, properties={
- 'children': relationship(Child)
- })
-
- mapper(Child, child_table)
-
-To establish a bi-directional relationship in one-to-many, where the "reverse" side is a many to one, specify the ``backref`` option:
-
-.. sourcecode:: python+sql
-
- mapper(Parent, parent_table, properties={
- 'children': relationship(Child, backref='parent')
- })
-
- mapper(Child, child_table)
-
-``Child`` will get a ``parent`` attribute with many-to-one semantics.
-
-Many To One
-~~~~~~~~~~~~
-
-
-Many to one places a foreign key in the parent table referencing the child. The mapping setup is identical to one-to-many, however SQLAlchemy creates the relationship as a scalar attribute on the parent object referencing a single instance of the child object.
-
-.. sourcecode:: python+sql
-
- parent_table = Table('parent', metadata,
- Column('id', Integer, primary_key=True),
- Column('child_id', Integer, ForeignKey('child.id')))
-
- child_table = Table('child', metadata,
- Column('id', Integer, primary_key=True),
- )
-
- class Parent(object):
- pass
-
- class Child(object):
- pass
-
- mapper(Parent, parent_table, properties={
- 'child': relationship(Child)
- })
-
- mapper(Child, child_table)
-
-Backref behavior is available here as well, where ``backref="parents"`` will place a one-to-many collection on the ``Child`` class.
-
-One To One
-~~~~~~~~~~~
-
-
-One To One is essentially a bi-directional relationship with a scalar attribute on both sides. To achieve this, the ``uselist=False`` flag indicates the placement of a scalar attribute instead of a collection on the "many" side of the relationship. To convert one-to-many into one-to-one:
-
-.. sourcecode:: python+sql
-
- mapper(Parent, parent_table, properties={
- 'child': relationship(Child, uselist=False, backref='parent')
- })
-
-Or to turn many-to-one into one-to-one:
-
-.. sourcecode:: python+sql
-
- mapper(Parent, parent_table, properties={
- 'child': relationship(Child, backref=backref('parent', uselist=False))
- })
-
-Many To Many
-~~~~~~~~~~~~~
-
-
-Many to Many adds an association table between two classes. The association table is indicated by the ``secondary`` argument to :func:`~sqlalchemy.orm.relationship`.
-
-.. sourcecode:: python+sql
-
- left_table = Table('left', metadata,
- Column('id', Integer, primary_key=True))
-
- right_table = Table('right', metadata,
- Column('id', Integer, primary_key=True))
-
- association_table = Table('association', metadata,
- Column('left_id', Integer, ForeignKey('left.id')),
- Column('right_id', Integer, ForeignKey('right.id')),
- )
-
- mapper(Parent, left_table, properties={
- 'children': relationship(Child, secondary=association_table)
- })
-
- mapper(Child, right_table)
-
-For a bi-directional relationship, both sides of the relationship contain a collection by default, which can be modified on either side via the ``uselist`` flag to be scalar. The ``backref`` keyword will automatically use the same ``secondary`` argument for the reverse relationship:
-
-.. sourcecode:: python+sql
-
- mapper(Parent, left_table, properties={
- 'children': relationship(Child, secondary=association_table, backref='parents')
- })
-
-.. _association_pattern:
-
-Association Object
-~~~~~~~~~~~~~~~~~~
-
-The association object pattern is a variant on many-to-many: it specifically is used when your association table contains additional columns beyond those which are foreign keys to the left and right tables. Instead of using the ``secondary`` argument, you map a new class directly to the association table. The left side of the relationship references the association object via one-to-many, and the association class references the right side via many-to-one.
-
-.. sourcecode:: python+sql
-
- left_table = Table('left', metadata,
- Column('id', Integer, primary_key=True))
-
- right_table = Table('right', metadata,
- Column('id', Integer, primary_key=True))
-
- association_table = Table('association', metadata,
- Column('left_id', Integer, ForeignKey('left.id'), primary_key=True),
- Column('right_id', Integer, ForeignKey('right.id'), primary_key=True),
- Column('data', String(50))
- )
-
- mapper(Parent, left_table, properties={
- 'children':relationship(Association)
- })
-
- mapper(Association, association_table, properties={
- 'child':relationship(Child)
- })
-
- mapper(Child, right_table)
-
-The bi-directional version adds backrefs to both relationships:
-
-.. sourcecode:: python+sql
-
- mapper(Parent, left_table, properties={
- 'children':relationship(Association, backref="parent")
- })
-
- mapper(Association, association_table, properties={
- 'child':relationship(Child, backref="parent_assocs")
- })
-
- mapper(Child, right_table)
-
-Working with the association pattern in its direct form requires that child objects are associated with an association instance before being appended to the parent; similarly, access from parent to child goes through the association object:
-
-.. sourcecode:: python+sql
-
- # create parent, append a child via association
- p = Parent()
- a = Association()
- a.child = Child()
- p.children.append(a)
-
- # iterate through child objects via association, including association
- # attributes
- for assoc in p.children:
- print assoc.data
- print assoc.child
-
-To enhance the association object pattern such that direct
-access to the ``Association`` object is optional, SQLAlchemy
-provides the :ref:`associationproxy` extension. This
-extension allows the configuration of attributes which will
-access two "hops" with a single access, one "hop" to the
-associated object, and a second to a target attribute.
-
-.. note:: When using the association object pattern, it is
- advisable that the association-mapped table not be used
- as the ``secondary`` argument on a :func:`.relationship`
- elsewhere, unless that :func:`.relationship` contains
- the option ``viewonly=True``. SQLAlchemy otherwise
- may attempt to emit redundant INSERT and DELETE
- statements on the same table, if similar state is detected
- on the related attribute as well as the associated
- object.
-
-Adjacency List Relationships
------------------------------
-
-The **adjacency list** pattern is a common relational pattern whereby a table contains a foreign key reference to itself. This is the most common and simple way to represent hierarchical data in flat tables. The other way is the "nested sets" model, sometimes called "modified preorder". Despite what many online articles say about modified preorder, the adjacency list model is probably the most appropriate pattern for the large majority of hierarchical storage needs, for reasons of concurrency, reduced complexity, and that modified preorder has little advantage over an application which can fully load subtrees into the application space.
-
-SQLAlchemy commonly refers to an adjacency list relationship as a **self-referential mapper**. In this example, we'll work with a single table called ``treenodes`` to represent a tree structure::
-
- nodes = Table('treenodes', metadata,
- Column('id', Integer, primary_key=True),
- Column('parent_id', Integer, ForeignKey('treenodes.id')),
- Column('data', String(50)),
- )
-
-A graph such as the following::
-
- root --+---> child1
- +---> child2 --+--> subchild1
- | +--> subchild2
- +---> child3
-
-Would be represented with data such as::
-
- id parent_id data
- --- ------- ----
- 1 NULL root
- 2 1 child1
- 3 1 child2
- 4 3 subchild1
- 5 3 subchild2
- 6 1 child3
-
-SQLAlchemy's ``mapper()`` configuration for a self-referential one-to-many relationship is exactly like a "normal" one-to-many relationship. When SQLAlchemy encounters the foreign key relationship from ``treenodes`` to ``treenodes``, it assumes one-to-many unless told otherwise:
-
-.. sourcecode:: python+sql
-
- # entity class
- class Node(object):
- pass
-
- mapper(Node, nodes, properties={
- 'children': relationship(Node)
- })
-
-To create a many-to-one relationship from child to parent, an extra indicator of the "remote side" is added, which contains the :class:`~sqlalchemy.schema.Column` object or objects indicating the remote side of the relationship:
-
-.. sourcecode:: python+sql
-
- mapper(Node, nodes, properties={
- 'parent': relationship(Node, remote_side=[nodes.c.id])
- })
-
-And the bi-directional version combines both:
-
-.. sourcecode:: python+sql
-
- mapper(Node, nodes, properties={
- 'children': relationship(Node, backref=backref('parent', remote_side=[nodes.c.id]))
- })
-
-There are several examples included with SQLAlchemy illustrating self-referential strategies; these include :ref:`examples_adjacencylist` and :ref:`examples_xmlpersistence`.
-
-Self-Referential Query Strategies
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-Querying self-referential structures is done in the same way as any other query in SQLAlchemy, such as below, we query for any node whose ``data`` attribute stores the value ``child2``:
-
-.. sourcecode:: python+sql
-
- # get all nodes named 'child2'
- session.query(Node).filter(Node.data=='child2')
-
-On the subject of joins, i.e. those described in `datamapping_joins`, self-referential structures require the usage of aliases so that the same table can be referenced multiple times within the FROM clause of the query. Aliasing can be done either manually using the ``nodes`` :class:`~sqlalchemy.schema.Table` object as a source of aliases:
-
-.. sourcecode:: python+sql
-
- # get all nodes named 'subchild1' with a parent named 'child2'
- nodealias = nodes.alias()
- {sql}session.query(Node).filter(Node.data=='subchild1').\
- filter(and_(Node.parent_id==nodealias.c.id, nodealias.c.data=='child2')).all()
- SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
- FROM treenodes, treenodes AS treenodes_1
- WHERE treenodes.data = ? AND treenodes.parent_id = treenodes_1.id AND treenodes_1.data = ?
- ['subchild1', 'child2']
-
-or automatically, using ``join()`` with ``aliased=True``:
-
-.. sourcecode:: python+sql
-
- # get all nodes named 'subchild1' with a parent named 'child2'
- {sql}session.query(Node).filter(Node.data=='subchild1').\
- join('parent', aliased=True).filter(Node.data=='child2').all()
- SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
- FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id
- WHERE treenodes.data = ? AND treenodes_1.data = ?
- ['subchild1', 'child2']
-
-To add criterion to multiple points along a longer join, use ``from_joinpoint=True``:
-
-.. sourcecode:: python+sql
-
- # get all nodes named 'subchild1' with a parent named 'child2' and a grandparent 'root'
- {sql}session.query(Node).filter(Node.data=='subchild1').\
- join('parent', aliased=True).filter(Node.data=='child2').\
- join('parent', aliased=True, from_joinpoint=True).filter(Node.data=='root').all()
- SELECT treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
- FROM treenodes JOIN treenodes AS treenodes_1 ON treenodes_1.id = treenodes.parent_id JOIN treenodes AS treenodes_2 ON treenodes_2.id = treenodes_1.parent_id
- WHERE treenodes.data = ? AND treenodes_1.data = ? AND treenodes_2.data = ?
- ['subchild1', 'child2', 'root']
-
-Configuring Eager Loading
-~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Eager loading of relationships occurs using joins or outerjoins from parent to child table during a normal query operation, such that the parent and its child collection can be populated from a single SQL statement, or a second statement for all collections at once. SQLAlchemy's joined and subquery eager loading uses aliased tables in all cases when joining to related items, so it is compatible with self-referential joining. However, to use eager loading with a self-referential relationship, SQLAlchemy needs to be told how many levels deep it should join; otherwise the eager load will not take place. This depth setting is configured via ``join_depth``:
-
-.. sourcecode:: python+sql
-
- mapper(Node, nodes, properties={
- 'children': relationship(Node, lazy='joined', join_depth=2)
- })
-
- {sql}session.query(Node).all()
- SELECT treenodes_1.id AS treenodes_1_id, treenodes_1.parent_id AS treenodes_1_parent_id, treenodes_1.data AS treenodes_1_data, treenodes_2.id AS treenodes_2_id, treenodes_2.parent_id AS treenodes_2_parent_id, treenodes_2.data AS treenodes_2_data, treenodes.id AS treenodes_id, treenodes.parent_id AS treenodes_parent_id, treenodes.data AS treenodes_data
- FROM treenodes LEFT OUTER JOIN treenodes AS treenodes_2 ON treenodes.id = treenodes_2.parent_id LEFT OUTER JOIN treenodes AS treenodes_1 ON treenodes_2.id = treenodes_1.parent_id
- []
-
-Specifying Alternate Join Conditions to relationship()
-------------------------------------------------------
-
-The :func:`~sqlalchemy.orm.relationship` function uses the foreign key relationship between the parent and child tables to formulate the **primary join condition** between parent and child; in the case of a many-to-many relationship it also formulates the **secondary join condition**::
-
- one to many/many to one:
- ------------------------
-
- parent_table --> parent_table.c.id == child_table.c.parent_id --> child_table
- primaryjoin
-
- many to many:
- -------------
-
- parent_table --> parent_table.c.id == secondary_table.c.parent_id -->
- primaryjoin
-
- secondary_table.c.child_id == child_table.c.id --> child_table
- secondaryjoin
-
-If you are working with a :class:`~sqlalchemy.schema.Table` which has no :class:`~sqlalchemy.schema.ForeignKey` objects on it (which can be the case when using reflected tables with MySQL), or if the join condition cannot be expressed by a simple foreign key relationship, use the ``primaryjoin`` and possibly ``secondaryjoin`` conditions to create the appropriate relationship.
-
-In this example we create a relationship ``boston_addresses`` which will only load the user addresses with a city of "Boston":
-
-.. sourcecode:: python+sql
-
- class User(object):
- pass
- class Address(object):
- pass
-
- mapper(Address, addresses_table)
- mapper(User, users_table, properties={
- 'boston_addresses': relationship(Address, primaryjoin=
- and_(users_table.c.user_id==addresses_table.c.user_id,
- addresses_table.c.city=='Boston'))
- })
-
-Many to many relationships can be customized by one or both of ``primaryjoin`` and ``secondaryjoin``, shown below with just the default many-to-many relationship explicitly set:
-
-.. sourcecode:: python+sql
-
- class User(object):
- pass
- class Keyword(object):
- pass
- mapper(Keyword, keywords_table)
- mapper(User, users_table, properties={
- 'keywords': relationship(Keyword, secondary=userkeywords_table,
- primaryjoin=users_table.c.user_id==userkeywords_table.c.user_id,
- secondaryjoin=userkeywords_table.c.keyword_id==keywords_table.c.keyword_id
- )
- })
-
-Specifying Foreign Keys
-~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-When using ``primaryjoin`` and ``secondaryjoin``, SQLAlchemy also needs to be aware of which columns in the relationship reference the other. In most cases, a :class:`~sqlalchemy.schema.Table` construct will have :class:`~sqlalchemy.schema.ForeignKey` constructs which take care of this; however, in the case of reflected tables on a database that does not report FKs (like MySQL ISAM) or when using join conditions on columns that don't have foreign keys, the :func:`~sqlalchemy.orm.relationship` needs to be told specifically which columns are "foreign" using the ``foreign_keys`` collection:
-
-.. sourcecode:: python+sql
-
- mapper(Address, addresses_table)
- mapper(User, users_table, properties={
- 'addresses': relationship(Address, primaryjoin=
- users_table.c.user_id==addresses_table.c.user_id,
- foreign_keys=[addresses_table.c.user_id])
- })
-
-Building Query-Enabled Properties
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-Very ambitious custom join conditions may fail to be directly persistable, and in some cases may not even load correctly. To remove the persistence part of the equation, use the flag ``viewonly=True`` on the :func:`~sqlalchemy.orm.relationship`, which establishes it as a read-only attribute (data written to the collection will be ignored on flush()). However, in extreme cases, consider using a regular Python property in conjunction with :class:`~sqlalchemy.orm.query.Query` as follows:
-
-.. sourcecode:: python+sql
-
- class User(object):
- def _get_addresses(self):
- return object_session(self).query(Address).with_parent(self).filter(...).all()
- addresses = property(_get_addresses)
-
-Multiple Relationships against the Same Parent/Child
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-There's no restriction on how many times you can relate from parent to child. SQLAlchemy can usually figure out what you want, particularly if the join conditions are straightforward. Below we add a ``newyork_addresses`` attribute to complement the ``boston_addresses`` attribute:
-
-.. sourcecode:: python+sql
-
- mapper(User, users_table, properties={
- 'boston_addresses': relationship(Address, primaryjoin=
- and_(users_table.c.user_id==addresses_table.c.user_id,
- addresses_table.c.city=='Boston')),
- 'newyork_addresses': relationship(Address, primaryjoin=
- and_(users_table.c.user_id==addresses_table.c.user_id,
- addresses_table.c.city=='New York')),
- })
-
-
-Rows that point to themselves / Mutually Dependent Rows
--------------------------------------------------------
-
-This is a very specific case where relationship() must perform an INSERT and a second UPDATE in order to properly populate a row (and vice versa an UPDATE and DELETE in order to delete without violating foreign key constraints). The two use cases are:
-
- * A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key.
- * Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other.
-
-For example::
-
- user
- ---------------------------------
- user_id name related_user_id
- 1 'ed' 1
-
-Or::
-
- widget entry
- ------------------------------------------- ---------------------------------
- widget_id name favorite_entry_id entry_id name widget_id
- 1 'somewidget' 5 5 'someentry' 1
-
-In the first case, a row points to itself. Technically, a database that uses sequences such as PostgreSQL or Oracle can INSERT the row at once using a previously generated value, but databases which rely upon autoincrement-style primary key identifiers cannot. The :func:`~sqlalchemy.orm.relationship` always assumes a "parent/child" model of row population during flush, so unless you are populating the primary key/foreign key columns directly, :func:`~sqlalchemy.orm.relationship` needs to use two statements.
-
-In the second case, the "widget" row must be inserted before any referring "entry" rows, but then the "favorite_entry_id" column of that "widget" row cannot be set until the "entry" rows have been generated. In this case, it's typically impossible to insert the "widget" and "entry" rows using just two INSERT statements; an UPDATE must be performed in order to keep foreign key constraints fulfilled. The exception is if the foreign keys are configured as "deferred until commit" (a feature some databases support) and if the identifiers were populated manually (again essentially bypassing :func:`~sqlalchemy.orm.relationship`).
-
-To enable the UPDATE after INSERT / UPDATE before DELETE behavior on :func:`~sqlalchemy.orm.relationship`, use the ``post_update`` flag on *one* of the relationships, preferably the many-to-one side::
-
- mapper(Widget, widget, properties={
- 'entries':relationship(Entry, primaryjoin=widget.c.widget_id==entry.c.widget_id),
- 'favorite_entry':relationship(Entry, primaryjoin=widget.c.favorite_entry_id==entry.c.entry_id, post_update=True)
- })
-
-When a structure using the above mapping is flushed, the "widget" row will be INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will be INSERTed referencing the parent "widget" row, and then an UPDATE statement will populate the "favorite_entry_id" column of the "widget" table (it's one row at a time for the time being).
-
-
-.. _alternate_collection_implementations:
-
-Alternate Collection Implementations
--------------------------------------
-
-Mapping a one-to-many or many-to-many relationship results in a collection of values accessible through an attribute on the parent instance. By default, this collection is a ``list``:
-
-.. sourcecode:: python+sql
-
-    mapper(Parent, properties={
-        'children': relationship(Child)
-    })
-
- parent = Parent()
- parent.children.append(Child())
- print parent.children[0]
-
-Collections are not limited to lists. Sets, mutable sequences and almost any other Python object that can act as a container can be used in place of the default list, by specifying the ``collection_class`` option on :func:`~sqlalchemy.orm.relationship`.
-
-.. sourcecode:: python+sql
-
- # use a set
-    mapper(Parent, properties={
-        'children': relationship(Child, collection_class=set)
-    })
-
- parent = Parent()
- child = Child()
- parent.children.add(child)
- assert child in parent.children
-
-
-Custom Collection Implementations
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-You can use your own types for collections as well. For most cases, simply inherit from ``list`` or ``set`` and add the custom behavior.
-
-Collections in SQLAlchemy are transparently *instrumented*. Instrumentation means that normal operations on the collection are tracked and result in changes being written to the database at flush time. Additionally, collection operations can fire *events* which indicate some secondary operation must take place. Examples of a secondary operation include saving the child item in the parent's :class:`~sqlalchemy.orm.session.Session` (i.e. the ``save-update`` cascade), as well as synchronizing the state of a bi-directional relationship (i.e. a ``backref``).
-
-The collections package understands the basic interface of lists, sets and dicts and will automatically apply instrumentation to those built-in types and their subclasses. Object-derived types that implement a basic collection interface are detected and instrumented via duck-typing:
-
-.. sourcecode:: python+sql
-
- class ListLike(object):
- def __init__(self):
- self.data = []
- def append(self, item):
- self.data.append(item)
- def remove(self, item):
- self.data.remove(item)
- def extend(self, items):
- self.data.extend(items)
- def __iter__(self):
- return iter(self.data)
- def foo(self):
- return 'foo'
-
-``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't be instrumented, and ``foo`` won't be either.
-
-Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be explicit about the interface you are implementing by providing an ``__emulates__`` class attribute:
-
-.. sourcecode:: python+sql
-
- class SetLike(object):
- __emulates__ = set
-
- def __init__(self):
- self.data = set()
- def append(self, item):
- self.data.add(item)
- def remove(self, item):
- self.data.remove(item)
- def __iter__(self):
- return iter(self.data)
-
-This class looks list-like because of ``append``, but ``__emulates__`` forces it to set-like. ``remove`` is known to be part of the set interface and will be instrumented.
-
-But this class won't work quite yet: a little glue is needed to adapt it for use by SQLAlchemy. The ORM needs to know which methods to use to append, remove and iterate over members of the collection. When using a type like ``list`` or ``set``, the appropriate methods are well-known and used automatically when present. This set-like class does not provide the expected ``add`` method, so we must supply an explicit mapping for the ORM via a decorator.
-
-Annotating Custom Collections via Decorators
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-Decorators can be used to tag the individual methods the ORM needs to manage collections. Use them when your class doesn't quite meet the regular interface for its container type, or you simply would like to use a different method to get the job done.
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.orm.collections import collection
-
- class SetLike(object):
- __emulates__ = set
-
- def __init__(self):
- self.data = set()
-
- @collection.appender
- def append(self, item):
- self.data.add(item)
-
- def remove(self, item):
- self.data.remove(item)
-
- def __iter__(self):
- return iter(self.data)
-
-And that's all that's needed to complete the example. SQLAlchemy will add instances via the ``append`` method. ``remove`` and ``__iter__`` are the default methods for sets and will be used for removing and iteration. Default methods can be changed as well:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.orm.collections import collection
-
- class MyList(list):
- @collection.remover
- def zark(self, item):
- # do something special...
-
- @collection.iterator
- def hey_use_this_instead_for_iteration(self):
- # ...
-
-There is no requirement to be list-, or set-like at all. Collection classes can be any shape, so long as they have the append, remove and iterate interface marked for SQLAlchemy's use. Append and remove methods will be called with a mapped entity as the single argument, and iterator methods are called with no arguments and must return an iterator.
-
-Dictionary-Based Collections
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-A ``dict`` can be used as a collection, but a keying strategy is needed to map entities loaded by the ORM to key, value pairs. The :mod:`sqlalchemy.orm.collections` package provides several built-in types for dictionary-based collections:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.orm.collections import column_mapped_collection, attribute_mapped_collection, mapped_collection
-
- mapper(Item, items_table, properties={
- # key by column
- 'notes': relationship(Note, collection_class=column_mapped_collection(notes_table.c.keyword)),
- # or named attribute
- 'notes2': relationship(Note, collection_class=attribute_mapped_collection('keyword')),
- # or any callable
- 'notes3': relationship(Note, collection_class=mapped_collection(lambda entity: entity.a + entity.b))
- })
-
- # ...
- item = Item()
- item.notes['color'] = Note('color', 'blue')
- print item.notes['color']
-
-These functions each provide a ``dict`` subclass with decorated ``set`` and ``remove`` methods and the keying strategy of your choice.
-
-The :class:`sqlalchemy.orm.collections.MappedCollection` class can be used as a base class for your custom types or as a mix-in to quickly add ``dict`` collection support to other classes. It uses a keying function to delegate to ``__setitem__`` and ``__delitem__``:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.util import OrderedDict
- from sqlalchemy.orm.collections import MappedCollection
-
- class NodeMap(OrderedDict, MappedCollection):
- """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained."""
-
- def __init__(self, *args, **kw):
- MappedCollection.__init__(self, keyfunc=lambda node: node.name)
- OrderedDict.__init__(self, *args, **kw)
-
-The ORM understands the ``dict`` interface just like lists and sets, and will automatically instrument all dict-like methods if you choose to subclass ``dict`` or provide dict-like collection behavior in a duck-typed class. You must decorate appender and remover methods, however- there are no compatible methods in the basic dictionary interface for SQLAlchemy to use by default. Iteration will go through ``itervalues()`` unless otherwise decorated.
-
-Instrumentation and Custom Types
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-Many custom types and existing library classes can be used as an entity collection type as-is without further ado. However, it is important to note that the instrumentation process _will_ modify the type, adding decorators around methods automatically.
-
-The decorations are lightweight and no-op outside of relationships, but they do add unneeded overhead when triggered elsewhere. When using a library class as a collection, it can be good practice to use the "trivial subclass" trick to restrict the decorations to just your usage in relationships. For example:
-
-.. sourcecode:: python+sql
-
- class MyAwesomeList(some.great.library.AwesomeList):
- pass
-
- # ... relationship(..., collection_class=MyAwesomeList)
-
-The ORM uses this approach for built-ins, quietly substituting a trivial subclass when a ``list``, ``set`` or ``dict`` is used directly.
-
-The collections package provides additional decorators and support for authoring custom types. See the :mod:`sqlalchemy.orm.collections` package for more information and discussion of advanced usage and Python 2.3-compatible decoration options.
-
-.. _mapper_loader_strategies:
-
-Configuring Loader Strategies: Lazy Loading, Eager Loading
------------------------------------------------------------
-
-.. note:: SQLAlchemy version 0.6beta3 introduces the :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.joinedload_all`, :func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.subqueryload_all` functions described in this section. In previous versions, including 0.5 and 0.4, use :func:`~sqlalchemy.orm.eagerload` and :func:`~sqlalchemy.orm.eagerload_all`. Additionally, the ``lazy`` keyword argument on :func:`~sqlalchemy.orm.relationship` accepts the values ``True``, ``False`` and ``None`` in previous versions, whereas in the latest 0.6 it also accepts the arguments ``select``, ``joined``, ``noload``, and ``subquery``.
-
-In the :ref:`ormtutorial_toplevel`, we introduced the concept of **Eager Loading**. We used an ``option`` in conjunction with the :class:`~sqlalchemy.orm.query.Query` object in order to indicate that a relationship should be loaded at the same time as the parent, within a single SQL query:
-
-.. sourcecode:: python+sql
-
- {sql}>>> jack = session.query(User).options(joinedload('addresses')).filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE
- SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address,
- addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name,
- users.fullname AS users_fullname, users.password AS users_password
- FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
- WHERE users.name = ?
- ['jack']
-
-By default, all inter-object relationships are **lazy loading**. The scalar or collection attribute associated with a :func:`~sqlalchemy.orm.relationship` contains a trigger which fires the first time the attribute is accessed, which issues a SQL call at that point:
-
-.. sourcecode:: python+sql
-
- {sql}>>> jack.addresses
- SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address,
- addresses.user_id AS addresses_user_id
- FROM addresses
- WHERE ? = addresses.user_id
- [5]
- {stop}[<Address(u'jack@google.com')>, <Address(u'j25@yahoo.com')>]
-
-A second option for eager loading exists, called "subquery" loading. This kind of eager loading emits an additional SQL statement for each collection requested, aggregated across all parent objects:
-
-.. sourcecode:: python+sql
-
-    {sql}>>> jack = session.query(User).options(subqueryload('addresses')).filter_by(name='jack').all()
- SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname,
- users.password AS users_password
- FROM users
- WHERE users.name = ?
- ('jack',)
- SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address,
- addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id
- FROM (SELECT users.id AS users_id
- FROM users
- WHERE users.name = ?) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id
- ORDER BY anon_1.users_id, addresses.id
- ('jack',)
-
-The default **loader strategy** for any :func:`~sqlalchemy.orm.relationship` is configured by the ``lazy`` keyword argument, which defaults to ``select``. Below we set it as ``joined`` so that the ``children`` relationship is eager loading, using a join:
-
-.. sourcecode:: python+sql
-
- # load the 'children' collection using LEFT OUTER JOIN
- mapper(Parent, parent_table, properties={
- 'children': relationship(Child, lazy='joined')
- })
-
-We can also set it to eagerly load using a second query for all collections, using ``subquery``:
-
-.. sourcecode:: python+sql
-
- # load the 'children' attribute using a join to a subquery
- mapper(Parent, parent_table, properties={
- 'children': relationship(Child, lazy='subquery')
- })
-
-When querying, all three choices of loader strategy are available on a per-query basis, using the :func:`~sqlalchemy.orm.joinedload`, :func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.lazyload` query options:
-
-.. sourcecode:: python+sql
-
- # set children to load lazily
- session.query(Parent).options(lazyload('children')).all()
-
- # set children to load eagerly with a join
- session.query(Parent).options(joinedload('children')).all()
-
- # set children to load eagerly with a second statement
- session.query(Parent).options(subqueryload('children')).all()
-
-To reference a relationship that is deeper than one level, separate the names by periods:
-
-.. sourcecode:: python+sql
-
- session.query(Parent).options(joinedload('foo.bar.bat')).all()
-
-When using dot-separated names with :func:`~sqlalchemy.orm.joinedload` or :func:`~sqlalchemy.orm.subqueryload`, the option applies **only** to the actual attribute named, and **not** its ancestors. For example, suppose a mapping from ``A`` to ``B`` to ``C``, where the relationships, named ``atob`` and ``btoc``, are both lazy-loading. A statement like the following:
-
-.. sourcecode:: python+sql
-
- session.query(A).options(joinedload('atob.btoc')).all()
-
-will load only ``A`` objects to start. When the ``atob`` attribute on each ``A`` is accessed, the returned ``B`` objects will *eagerly* load their ``C`` objects.
-
-Therefore, to modify the eager load to load both ``atob`` as well as ``btoc``, place joinedloads for both:
-
-.. sourcecode:: python+sql
-
- session.query(A).options(joinedload('atob'), joinedload('atob.btoc')).all()
-
-or more simply just use :func:`~sqlalchemy.orm.joinedload_all` or :func:`~sqlalchemy.orm.subqueryload_all`:
-
-.. sourcecode:: python+sql
-
- session.query(A).options(joinedload_all('atob.btoc')).all()
-
-There are two other loader strategies available, **dynamic loading** and **no loading**; these are described in :ref:`largecollections`.
-
-What Kind of Loading to Use ?
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Which type of loading to use typically comes down to optimizing the tradeoff between number of SQL executions, complexity of SQL emitted, and amount of data fetched. Lets take two examples, a :func:`~sqlalchemy.orm.relationship` which references a collection, and a :func:`~sqlalchemy.orm.relationship` that references a scalar many-to-one reference.
-
-* One to Many Collection
-
- * When using the default lazy loading, if you load 100 objects, and then access a collection on each of
- them, a total of 101 SQL statements will be emitted, although each statement will typically be a
- simple SELECT without any joins.
-
- * When using joined loading, the load of 100 objects and their collections will emit only one SQL
- statement. However, the
- total number of rows fetched will be equal to the sum of the size of all the collections, plus one
- extra row for each parent object that has an empty collection. Each row will also contain the full
- set of columns represented by the parents, repeated for each collection item - SQLAlchemy does not
- re-fetch these columns other than those of the primary key, however most DBAPIs (with some
- exceptions) will transmit the full data of each parent over the wire to the client connection in
- any case. Therefore joined eager loading only makes sense when the size of the collections are
- relatively small. The LEFT OUTER JOIN can also be performance intensive compared to an INNER join.
-
- * When using subquery loading, the load of 100 objects will emit two SQL statements. The second
- statement will fetch a total number of rows equal to the sum of the size of all collections. An
- INNER JOIN is used, and a minimum of parent columns are requested, only the primary keys. So a
- subquery load makes sense when the collections are larger.
-
- * When multiple levels of depth are used with joined or subquery loading, loading collections-within-
- collections will multiply the total number of rows fetched in a cartesian fashion. Both forms
- of eager loading always join from the original parent class.
-
-* Many to One Reference
-
-  * When using the default lazy loading, a load of 100 objects will, as in the case of the collection,
-    emit as many as 101 SQL statements.  However - there is a significant exception to this, in that
- if the many-to-one reference is a simple foreign key reference to the target's primary key, each
- reference will be checked first in the current identity map using ``query.get()``. So here,
- if the collection of objects references a relatively small set of target objects, or the full set
- of possible target objects have already been loaded into the session and are strongly referenced,
-    using the default of ``lazy='select'`` is by far the most efficient way to go.
-
- * When using joined loading, the load of 100 objects will emit only one SQL statement. The join
- will be a LEFT OUTER JOIN, and the total number of rows will be equal to 100 in all cases.
- If you know that each parent definitely has a child (i.e. the foreign
- key reference is NOT NULL), the joined load can be configured with ``innerjoin=True``, which is
- usually specified within the :func:`~sqlalchemy.orm.relationship`. For a load of objects where
- there are many possible target references which may have not been loaded already, joined loading
- with an INNER JOIN is extremely efficient.
-
- * Subquery loading will issue a second load for all the child objects, so for a load of 100 objects
- there would be two SQL statements emitted. There's probably not much advantage here over
- joined loading, however, except perhaps that subquery loading can use an INNER JOIN in all cases
- whereas joined loading requires that the foreign key is NOT NULL.
-
-Routing Explicit Joins/Statements into Eagerly Loaded Collections
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The behavior of :func:`~sqlalchemy.orm.joinedload()` is such that joins are created automatically, the results of which are routed into collections and scalar references on loaded objects. It is often the case that a query already includes the necessary joins which represent a particular collection or scalar reference, and the joins added by the joinedload feature are redundant - yet you'd still like the collections/references to be populated.
-
-For this SQLAlchemy supplies the :func:`~sqlalchemy.orm.contains_eager()` option. This option is used in the same manner as the :func:`~sqlalchemy.orm.joinedload()` option except it is assumed that the :class:`~sqlalchemy.orm.query.Query` will specify the appropriate joins explicitly. Below it's used with a ``from_statement`` load::
-
- # mapping is the users->addresses mapping
- mapper(User, users_table, properties={
- 'addresses': relationship(Address, addresses_table)
- })
-
- # define a query on USERS with an outer join to ADDRESSES
- statement = users_table.outerjoin(addresses_table).select().apply_labels()
-
- # construct a Query object which expects the "addresses" results
- query = session.query(User).options(contains_eager('addresses'))
-
- # get results normally
- r = query.from_statement(statement)
-
-It works just as well with an inline ``Query.join()`` or ``Query.outerjoin()``::
-
- session.query(User).outerjoin(User.addresses).options(contains_eager(User.addresses)).all()
-
-If the "eager" portion of the statement is "aliased", the ``alias`` keyword argument to :func:`~sqlalchemy.orm.contains_eager` may be used to indicate it. This is a string alias name or reference to an actual :class:`~sqlalchemy.sql.expression.Alias` (or other selectable) object:
-
-.. sourcecode:: python+sql
-
- # use an alias of the Address entity
- adalias = aliased(Address)
-
- # construct a Query object which expects the "addresses" results
- query = session.query(User).\
- outerjoin((adalias, User.addresses)).\
- options(contains_eager(User.addresses, alias=adalias))
-
- # get results normally
- {sql}r = query.all()
- SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id,
- adalias.user_id AS adalias_user_id, adalias.email_address AS adalias_email_address, (...other columns...)
- FROM users LEFT OUTER JOIN email_addresses AS email_addresses_1 ON users.user_id = email_addresses_1.user_id
-
-The ``alias`` argument is used only as a source of columns to match up to the result set. You can use it even to match up the result to arbitrary label names in a string SQL statement, by passing a selectable() which links those labels to the mapped :class:`~sqlalchemy.schema.Table`::
-
- # label the columns of the addresses table
- eager_columns = select([
- addresses.c.address_id.label('a1'),
- addresses.c.email_address.label('a2'),
- addresses.c.user_id.label('a3')])
-
- # select from a raw SQL statement which uses those label names for the
- # addresses table. contains_eager() matches them up.
- query = session.query(User).\
- from_statement("select users.*, addresses.address_id as a1, "
- "addresses.email_address as a2, addresses.user_id as a3 "
- "from users left outer join addresses on users.user_id=addresses.user_id").\
- options(contains_eager(User.addresses, alias=eager_columns))
-
-The path given as the argument to :func:`~sqlalchemy.orm.contains_eager` needs to be a full path from the starting entity. For example if we were loading ``Users->orders->Order->items->Item``, the string version would look like::
-
- query(User).options(contains_eager('orders', 'items'))
-
-Or using the class-bound descriptor::
-
- query(User).options(contains_eager(User.orders, Order.items))
-
-A variant on :func:`~sqlalchemy.orm.contains_eager` is the ``contains_alias()`` option, which is used in the rare case that the parent object is loaded from an alias within a user-defined SELECT statement::
-
- # define an aliased UNION called 'ulist'
- statement = users.select(users.c.user_id==7).union(users.select(users.c.user_id>7)).alias('ulist')
-
- # add on an eager load of "addresses"
- statement = statement.outerjoin(addresses).select().apply_labels()
-
- # create query, indicating "ulist" is an alias for the main table, "addresses" property should
- # be eager loaded
- query = session.query(User).options(contains_alias('ulist'), contains_eager('addresses'))
-
- # results
- r = query.from_statement(statement)
-
-.. _largecollections:
-
-Working with Large Collections
--------------------------------
-
-The default behavior of :func:`~sqlalchemy.orm.relationship` is to fully load the collection of items in, as according to the loading strategy of the relationship. Additionally, the Session by default only knows how to delete objects which are actually present within the session. When a parent instance is marked for deletion and flushed, the Session loads its full list of child items in so that they may either be deleted as well, or have their foreign key value set to null; this is to avoid constraint violations. For large collections of child items, there are several strategies to bypass full loading of child items both at load time as well as deletion time.
-
-Dynamic Relationship Loaders
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-
-The most useful by far is the :func:`~sqlalchemy.orm.dynamic_loader` relationship. This is a variant of :func:`~sqlalchemy.orm.relationship` which returns a :class:`~sqlalchemy.orm.query.Query` object in place of a collection when accessed. :func:`~sqlalchemy.orm.query.Query.filter` criterion may be applied as well as limits and offsets, either explicitly or via array slices:
-
-.. sourcecode:: python+sql
-
- mapper(User, users_table, properties={
- 'posts': dynamic_loader(Post)
- })
-
- jack = session.query(User).get(id)
-
- # filter Jack's blog posts
- posts = jack.posts.filter(Post.headline=='this is a post')
-
- # apply array slices
- posts = jack.posts[5:20]
-
-The dynamic relationship supports limited write operations, via the ``append()`` and ``remove()`` methods. Since the read side of the dynamic relationship always queries the database, changes to the underlying collection will not be visible until the data has been flushed:
-
-.. sourcecode:: python+sql
-
- oldpost = jack.posts.filter(Post.headline=='old post').one()
- jack.posts.remove(oldpost)
-
- jack.posts.append(Post('new post'))
-
-To place a dynamic relationship on a backref, use ``lazy='dynamic'``:
-
-.. sourcecode:: python+sql
-
- mapper(Post, posts_table, properties={
- 'user': relationship(User, backref=backref('posts', lazy='dynamic'))
- })
-
-Note that eager/lazy loading options cannot be used in conjunction with dynamic relationships at this time.
-
-Setting Noload
-~~~~~~~~~~~~~~~
-
-The opposite of the dynamic relationship is simply "noload", specified using ``lazy='noload'``:
-
-.. sourcecode:: python+sql
-
- mapper(MyClass, table, properties={
- 'children': relationship(MyOtherClass, lazy='noload')
- })
-
-Above, the ``children`` collection is fully writeable, and changes to it will be persisted to the database as well as locally available for reading at the time they are added. However when instances of ``MyClass`` are freshly loaded from the database, the ``children`` collection stays empty.
-
-Using Passive Deletes
-~~~~~~~~~~~~~~~~~~~~~~
-
-Use ``passive_deletes=True`` to disable child object loading on a DELETE operation, in conjunction with "ON DELETE (CASCADE|SET NULL)" on your database to automatically cascade deletes to child objects. Note that "ON DELETE" is not supported on SQLite, and requires ``InnoDB`` tables when using MySQL:
-
-.. sourcecode:: python+sql
-
- mytable = Table('mytable', meta,
- Column('id', Integer, primary_key=True),
- )
-
- myothertable = Table('myothertable', meta,
- Column('id', Integer, primary_key=True),
- Column('parent_id', Integer),
- ForeignKeyConstraint(['parent_id'], ['mytable.id'], ondelete="CASCADE"),
- )
-
- mapper(MyOtherClass, myothertable)
-
- mapper(MyClass, mytable, properties={
- 'children': relationship(MyOtherClass, cascade="all, delete-orphan", passive_deletes=True)
- })
-
-When ``passive_deletes`` is applied, the ``children`` relationship will not be loaded into memory when an instance of ``MyClass`` is marked for deletion. The ``cascade="all, delete-orphan"`` *will* take effect for instances of ``MyOtherClass`` which are currently present in the session; however for instances of ``MyOtherClass`` which are not loaded, SQLAlchemy assumes that "ON DELETE CASCADE" rules will ensure that those rows are deleted by the database and that no foreign key violation will occur.
-
-Mutable Primary Keys / Update Cascades
----------------------------------------
-
-When the primary key of an entity changes, related items which reference the primary key must also be updated as well. For databases which enforce referential integrity, it's required to use the database's ON UPDATE CASCADE functionality in order to propagate primary key changes. For those which don't, the ``passive_updates`` flag can be set to ``False`` which instructs SQLAlchemy to issue UPDATE statements individually. The ``passive_updates`` flag can also be ``False`` in conjunction with ON UPDATE CASCADE functionality, although in that case it issues UPDATE statements unnecessarily.
-
-A typical mutable primary key setup might look like:
-
-.. sourcecode:: python+sql
-
- users = Table('users', metadata,
- Column('username', String(50), primary_key=True),
- Column('fullname', String(100)))
-
- addresses = Table('addresses', metadata,
- Column('email', String(50), primary_key=True),
- Column('username', String(50), ForeignKey('users.username', onupdate="cascade")))
-
- class User(object):
- pass
- class Address(object):
- pass
-
- mapper(User, users, properties={
- 'addresses': relationship(Address, passive_updates=False)
- })
- mapper(Address, addresses)
-
-passive_updates is set to ``True`` by default. Foreign key references to non-primary key columns are supported as well.
-
diff --git a/doc/build/metadata.rst b/doc/build/metadata.rst
deleted file mode 100644
index 19832809e..000000000
--- a/doc/build/metadata.rst
+++ /dev/null
@@ -1,859 +0,0 @@
-.. _metadata_toplevel:
-
-==================
-Database Meta Data
-==================
-
-Describing Databases with MetaData
-==================================
-
-The core of SQLAlchemy's query and object mapping operations are supported by *database metadata*, which is comprised of Python objects that describe tables and other schema-level objects. These objects are at the core of three major types of operations - issuing CREATE and DROP statements (known as *DDL*), constructing SQL queries, and expressing information about structures that already exist within the database.
-
-Database metadata can be expressed by explicitly naming the various components and their properties, using constructs such as :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.Sequence`, all of which are imported from the ``sqlalchemy.schema`` package. It can also be generated by SQLAlchemy using a process called *reflection*, which means you start with a single object such as :class:`~sqlalchemy.schema.Table`, assign it a name, and then instruct SQLAlchemy to load all the additional information related to that name from a particular engine source.
-
-A key feature of SQLAlchemy's database metadata constructs is that they are designed to be used in a *declarative* style which closely resembles that of real DDL. They are therefore most intuitive to those who have some background in creating real schema generation scripts.
-
-A collection of metadata entities is stored in an object aptly named :class:`~sqlalchemy.schema.MetaData`::
-
- from sqlalchemy import *
-
- metadata = MetaData()
-
-:class:`~sqlalchemy.schema.MetaData` is a container object that keeps together many different features of a database (or multiple databases) being described.
-
-To represent a table, use the :class:`~sqlalchemy.schema.Table` class. Its two primary arguments are the table name, then the :class:`~sqlalchemy.schema.MetaData` object which it will be associated with. The remaining positional arguments are mostly :class:`~sqlalchemy.schema.Column` objects describing each column::
-
- user = Table('user', metadata,
- Column('user_id', Integer, primary_key = True),
- Column('user_name', String(16), nullable = False),
- Column('email_address', String(60)),
- Column('password', String(20), nullable = False)
- )
-
-Above, a table called ``user`` is described, which contains four columns. The primary key of the table consists of the ``user_id`` column. Multiple columns may be assigned the ``primary_key=True`` flag which denotes a multi-column primary key, known as a *composite* primary key.
-
-Note also that each column describes its datatype using objects corresponding to genericized types, such as :class:`~sqlalchemy.types.Integer` and :class:`~sqlalchemy.types.String`. SQLAlchemy features dozens of types of varying levels of specificity as well as the ability to create custom types. Documentation on the type system can be found at :ref:`types`.
-
-Accessing Tables and Columns
-----------------------------
-
-The :class:`~sqlalchemy.schema.MetaData` object contains all of the schema constructs we've associated with it. It supports a few methods of accessing these table objects, such as the ``sorted_tables`` accessor which returns a list of each :class:`~sqlalchemy.schema.Table` object in order of foreign key dependency (that is, each table is preceded by all tables which it references)::
-
- >>> for t in metadata.sorted_tables:
- ... print t.name
- user
- user_preference
- invoice
- invoice_item
-
-In most cases, individual :class:`~sqlalchemy.schema.Table` objects have been explicitly declared, and these objects are typically accessed directly as module-level variables in an application.
-Once a :class:`~sqlalchemy.schema.Table` has been defined, it has a full set of accessors which allow inspection of its properties. Given the following :class:`~sqlalchemy.schema.Table` definition::
-
- employees = Table('employees', metadata,
- Column('employee_id', Integer, primary_key=True),
- Column('employee_name', String(60), nullable=False),
- Column('employee_dept', Integer, ForeignKey("departments.department_id"))
- )
-
-Note the :class:`~sqlalchemy.schema.ForeignKey` object used in this table - this construct defines a reference to a remote table, and is fully described in :ref:`metadata_foreignkeys`. Methods of accessing information about this table include::
-
- # access the column "EMPLOYEE_ID":
- employees.columns.employee_id
-
- # or just
- employees.c.employee_id
-
- # via string
- employees.c['employee_id']
-
- # iterate through all columns
- for c in employees.c:
- print c
-
- # get the table's primary key columns
- for primary_key in employees.primary_key:
- print primary_key
-
- # get the table's foreign key objects:
- for fkey in employees.foreign_keys:
- print fkey
-
- # access the table's MetaData:
- employees.metadata
-
- # access the table's bound Engine or Connection, if its MetaData is bound:
- employees.bind
-
- # access a column's name, type, nullable, primary key, foreign key
- employees.c.employee_id.name
- employees.c.employee_id.type
- employees.c.employee_id.nullable
- employees.c.employee_id.primary_key
- employees.c.employee_dept.foreign_keys
-
- # get the "key" of a column, which defaults to its name, but can
- # be any user-defined string:
- employees.c.employee_name.key
-
- # access a column's table:
- employees.c.employee_id.table is employees
-
- # get the table related by a foreign key
- list(employees.c.employee_dept.foreign_keys)[0].column.table
-
-.. _metadata_binding:
-
-
-Creating and Dropping Database Tables
--------------------------------------
-
-Once you've defined some :class:`~sqlalchemy.schema.Table` objects, assuming you're working with a brand new database one thing you might want to do is issue CREATE statements for those tables and their related constructs (as an aside, it's also quite possible that you *don't* want to do this, if you already have some preferred methodology such as tools included with your database or an existing scripting system - if that's the case, feel free to skip this section - SQLAlchemy has no requirement that it be used to create your tables).
-
-The usual way to issue CREATE is to use :func:`~sqlalchemy.schema.MetaData.create_all` on the :class:`~sqlalchemy.schema.MetaData` object. This method will issue queries that first check for the existence of each individual table, and if not found will issue the CREATE statements:
-
- .. sourcecode:: python+sql
-
- engine = create_engine('sqlite:///:memory:')
-
- metadata = MetaData()
-
- user = Table('user', metadata,
- Column('user_id', Integer, primary_key = True),
- Column('user_name', String(16), nullable = False),
- Column('email_address', String(60), key='email'),
- Column('password', String(20), nullable = False)
- )
-
- user_prefs = Table('user_prefs', metadata,
- Column('pref_id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
- Column('pref_name', String(40), nullable=False),
- Column('pref_value', String(100))
- )
-
- {sql}metadata.create_all(engine)
- PRAGMA table_info(user){}
- CREATE TABLE user(
- user_id INTEGER NOT NULL PRIMARY KEY,
- user_name VARCHAR(16) NOT NULL,
- email_address VARCHAR(60),
- password VARCHAR(20) NOT NULL
- )
- PRAGMA table_info(user_prefs){}
- CREATE TABLE user_prefs(
- pref_id INTEGER NOT NULL PRIMARY KEY,
- user_id INTEGER NOT NULL REFERENCES user(user_id),
- pref_name VARCHAR(40) NOT NULL,
- pref_value VARCHAR(100)
- )
-
-:func:`~sqlalchemy.schema.MetaData.create_all` creates foreign key constraints between tables usually inline with the table definition itself, and for this reason it also generates the tables in order of their dependency. There are options to change this behavior such that ``ALTER TABLE`` is used instead.
-
-Dropping all tables is similarly achieved using the :func:`~sqlalchemy.schema.MetaData.drop_all` method. This method does the exact opposite of :func:`~sqlalchemy.schema.MetaData.create_all` - the presence of each table is checked first, and tables are dropped in reverse order of dependency.
-
-Creating and dropping individual tables can be done via the ``create()`` and ``drop()`` methods of :class:`~sqlalchemy.schema.Table`. These methods by default issue the CREATE or DROP regardless of the table being present:
-
-.. sourcecode:: python+sql
-
- engine = create_engine('sqlite:///:memory:')
-
- meta = MetaData()
-
- employees = Table('employees', meta,
- Column('employee_id', Integer, primary_key=True),
- Column('employee_name', String(60), nullable=False, key='name'),
- Column('employee_dept', Integer, ForeignKey("departments.department_id"))
- )
- {sql}employees.create(engine)
- CREATE TABLE employees(
- employee_id SERIAL NOT NULL PRIMARY KEY,
- employee_name VARCHAR(60) NOT NULL,
- employee_dept INTEGER REFERENCES departments(department_id)
- )
- {}
-
-``drop()`` method:
-
-.. sourcecode:: python+sql
-
- {sql}employees.drop(engine)
- DROP TABLE employees
- {}
-
-To enable the "check first for the table existing" logic, add the ``checkfirst=True`` argument to ``create()`` or ``drop()``::
-
- employees.create(engine, checkfirst=True)
-    employees.drop(engine, checkfirst=True)
-
-
-Binding MetaData to an Engine or Connection
---------------------------------------------
-
-Notice in the previous section the creator/dropper methods accept an argument for the database engine in use. When a schema construct is combined with an :class:`~sqlalchemy.engine.base.Engine` object, or an individual :class:`~sqlalchemy.engine.base.Connection` object, we call this the *bind*. In the above examples the bind is associated with the schema construct only for the duration of the operation. However, the option exists to persistently associate a bind with a set of schema constructs via the :class:`~sqlalchemy.schema.MetaData` object's ``bind`` attribute::
-
- engine = create_engine('sqlite://')
-
- # create MetaData
- meta = MetaData()
-
- # bind to an engine
- meta.bind = engine
-
-We can now call methods like :func:`~sqlalchemy.schema.MetaData.create_all` without needing to pass the :class:`~sqlalchemy.engine.base.Engine`::
-
- meta.create_all()
-
-The MetaData's bind is used for anything that requires an active connection, such as loading the definition of a table from the database automatically (called *reflection*)::
-
- # describe a table called 'users', query the database for its columns
- users_table = Table('users', meta, autoload=True)
-
-As well as for executing SQL constructs that are derived from that MetaData's table objects::
-
- # generate a SELECT statement and execute
- result = users_table.select().execute()
-
-Binding the MetaData to the Engine is a **completely optional** feature. The above operations can be achieved without the persistent bind using parameters::
-
- # describe a table called 'users', query the database for its columns
- users_table = Table('users', meta, autoload=True, autoload_with=engine)
-
- # generate a SELECT statement and execute
- result = engine.execute(users_table.select())
-
-Should you use bind? It's probably best to start without it, and wait for a specific need to arise. Bind is useful if:
-
-* You aren't using the ORM, are usually using "connectionless" execution, and find yourself constantly needing to specify the same :class:`~sqlalchemy.engine.base.Engine` object throughout the entire application. Bind can be used here to provide "implicit" execution.
-* Your application has multiple schemas that correspond to different engines. Using one :class:`~sqlalchemy.schema.MetaData` for each schema, bound to each engine, provides a decent place to delineate between the schemas. The ORM will also integrate with this approach, where the :class:`Session` will naturally use the engine that is bound to each table via its metadata (provided the :class:`Session` itself has no ``bind`` configured.).
-
-Alternatively, the ``bind`` attribute of :class:`~sqlalchemy.schema.MetaData` is *confusing* if:
-
-* Your application talks to multiple database engines at different times, which use the *same* set of :class:`Table` objects. It's usually confusing and unnecessary to begin to create "copies" of :class:`Table` objects just so that different engines can be used for different operations. An example is an application that writes data to a "master" database while performing read-only operations from a "read slave". A global :class:`~sqlalchemy.schema.MetaData` object is *not* appropriate for per-request switching like this, although a :class:`~sqlalchemy.schema.ThreadLocalMetaData` object is.
-* You are using the ORM :class:`Session` to handle which class/table is bound to which engine, or you are using the :class:`Session` to manage switching between engines. It's a good idea to keep the "binding of tables to engines" in one place - either using :class:`~sqlalchemy.schema.MetaData` only (the :class:`Session` can of course be present, it just has no ``bind`` configured), or using :class:`Session` only (the ``bind`` attribute of :class:`~sqlalchemy.schema.MetaData` is left empty).
-
-.. _metadata_reflection:
-
-Reflecting Tables
------------------
-
-A :class:`~sqlalchemy.schema.Table` object can be instructed to load information about itself from the corresponding database schema object already existing within the database. This process is called *reflection*. Most simply you need only specify the table name, a :class:`~sqlalchemy.schema.MetaData` object, and the ``autoload=True`` flag. If the :class:`~sqlalchemy.schema.MetaData` is not persistently bound, also add the ``autoload_with`` argument::
-
- >>> messages = Table('messages', meta, autoload=True, autoload_with=engine)
- >>> [c.name for c in messages.columns]
- ['message_id', 'message_name', 'date']
-
-The above operation will use the given engine to query the database for information about the ``messages`` table, and will then generate :class:`~sqlalchemy.schema.Column`, :class:`~sqlalchemy.schema.ForeignKey`, and other objects corresponding to this information as though the :class:`~sqlalchemy.schema.Table` object were hand-constructed in Python.
-
-When tables are reflected, if a given table references another one via foreign key, a second :class:`~sqlalchemy.schema.Table` object is created within the :class:`~sqlalchemy.schema.MetaData` object representing the connection. Below, assume the table ``shopping_cart_items`` references a table named ``shopping_carts``. Reflecting the ``shopping_cart_items`` table has the effect such that the ``shopping_carts`` table will also be loaded::
-
- >>> shopping_cart_items = Table('shopping_cart_items', meta, autoload=True, autoload_with=engine)
-    >>> 'shopping_carts' in meta.tables
- True
-
-The :class:`~sqlalchemy.schema.MetaData` has an interesting "singleton-like" behavior such that if you requested both tables individually, :class:`~sqlalchemy.schema.MetaData` will ensure that exactly one :class:`~sqlalchemy.schema.Table` object is created for each distinct table name. The :class:`~sqlalchemy.schema.Table` constructor actually returns to you the already-existing :class:`~sqlalchemy.schema.Table` object if one already exists with the given name. Such as below, we can access the already generated ``shopping_carts`` table just by naming it::
-
- shopping_carts = Table('shopping_carts', meta)
-
-Of course, it's a good idea to use ``autoload=True`` with the above table regardless. This is so that the table's attributes will be loaded if they have not been already. The autoload operation only occurs for the table if it hasn't already been loaded; once loaded, new calls to :class:`~sqlalchemy.schema.Table` with the same name will not re-issue any reflection queries.
-
-Overriding Reflected Columns
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Individual columns can be overridden with explicit values when reflecting tables; this is handy for specifying custom datatypes, constraints such as primary keys that may not be configured within the database, etc.::
-
- >>> mytable = Table('mytable', meta,
- ... Column('id', Integer, primary_key=True), # override reflected 'id' to have primary key
- ... Column('mydata', Unicode(50)), # override reflected 'mydata' to be Unicode
- ... autoload=True)
-
-Reflecting Views
-~~~~~~~~~~~~~~~~
-
-The reflection system can also reflect views. Basic usage is the same as that of a table::
-
- my_view = Table("some_view", metadata, autoload=True)
-
-Above, ``my_view`` is a :class:`~sqlalchemy.schema.Table` object with :class:`~sqlalchemy.schema.Column` objects representing the names and types
-of each column within the view "some_view".
-
-Usually, it's desired to have at least a primary key constraint when reflecting a view, if not
-foreign keys as well. View reflection doesn't extrapolate these constraints.
-
-Use the "override" technique for this, specifying explicitly those columns
-which are part of the primary key or have foreign key constraints::
-
- my_view = Table("some_view", metadata,
- Column("view_id", Integer, primary_key=True),
- Column("related_thing", Integer, ForeignKey("othertable.thing_id")),
- autoload=True
- )
-
-Reflecting All Tables at Once
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The :class:`~sqlalchemy.schema.MetaData` object can also get a listing of tables and reflect the full set. This is achieved by using the :func:`~sqlalchemy.schema.MetaData.reflect` method. After calling it, all located tables are present within the :class:`~sqlalchemy.schema.MetaData` object's dictionary of tables::
-
- meta = MetaData()
- meta.reflect(bind=someengine)
- users_table = meta.tables['users']
- addresses_table = meta.tables['addresses']
-
-``metadata.reflect()`` also provides a handy way to clear or delete all the rows in a database::
-
- meta = MetaData()
- meta.reflect(bind=someengine)
- for table in reversed(meta.sorted_tables):
- someengine.execute(table.delete())
-
-Fine Grained Reflection with Inspector
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-A low level interface which provides a backend-agnostic system of loading lists of schema, table, column, and constraint descriptions from a given database is also available. This is known as the "Inspector" and is described in the API documentation at :ref:`inspector_api_toplevel`.
-
-Specifying the Schema Name
----------------------------
-
-Some databases support the concept of multiple schemas. A :class:`~sqlalchemy.schema.Table` can reference this by specifying the ``schema`` keyword argument::
-
- financial_info = Table('financial_info', meta,
- Column('id', Integer, primary_key=True),
- Column('value', String(100), nullable=False),
- schema='remote_banks'
- )
-
-Within the :class:`~sqlalchemy.schema.MetaData` collection, this table will be identified by the combination of ``financial_info`` and ``remote_banks``. If another table called ``financial_info`` is referenced without the ``remote_banks`` schema, it will refer to a different :class:`~sqlalchemy.schema.Table`. :class:`~sqlalchemy.schema.ForeignKey` objects can specify references to columns in this table using the form ``remote_banks.financial_info.id``.
-
-The ``schema`` argument should be used for any name qualifiers required, including Oracle's "owner" attribute and similar. It also can accommodate a dotted name for longer schemes::
-
- schema="dbo.scott"
-
-Backend-Specific Options
-------------------------
-
-:class:`~sqlalchemy.schema.Table` supports database-specific options. For example, MySQL has different table backend types, including "MyISAM" and "InnoDB". This can be expressed with :class:`~sqlalchemy.schema.Table` using ``mysql_engine``::
-
- addresses = Table('engine_email_addresses', meta,
- Column('address_id', Integer, primary_key = True),
- Column('remote_user_id', Integer, ForeignKey(users.c.user_id)),
- Column('email_address', String(20)),
- mysql_engine='InnoDB'
- )
-
-Other backends may support table-level options as well. See the API documentation for each backend for further details.
-
-Column Insert/Update Defaults
-==============================
-
-SQLAlchemy provides a very rich featureset regarding column level events which take place during INSERT and UPDATE statements. Options include:
-
-* Scalar values used as defaults during INSERT and UPDATE operations
-* Python functions which execute upon INSERT and UPDATE operations
-* SQL expressions which are embedded in INSERT statements (or in some cases execute beforehand)
-* SQL expressions which are embedded in UPDATE statements
-* Server side default values used during INSERT
-* Markers for server-side triggers used during UPDATE
-
-The general rule for all insert/update defaults is that they only take effect if no value for a particular column is passed as an ``execute()`` parameter; otherwise, the given value is used.
-
-Scalar Defaults
----------------
-
-The simplest kind of default is a scalar value used as the default value of a column::
-
- Table("mytable", meta,
- Column("somecolumn", Integer, default=12)
- )
-
-Above, the value "12" will be bound as the column value during an INSERT if no other value is supplied.
-
-A scalar value may also be associated with an UPDATE statement, though this is not very common (as UPDATE statements are usually looking for dynamic defaults)::
-
- Table("mytable", meta,
- Column("somecolumn", Integer, onupdate=25)
- )
-
-
-Python-Executed Functions
--------------------------
-
-The ``default`` and ``onupdate`` keyword arguments also accept Python functions. These functions are invoked at the time of insert or update if no other value for that column is supplied, and the value returned is used for the column's value. Below illustrates a crude "sequence" that assigns an incrementing counter to a primary key column::
-
- # a function which counts upwards
- i = 0
- def mydefault():
- global i
- i += 1
- return i
-
- t = Table("mytable", meta,
- Column('id', Integer, primary_key=True, default=mydefault),
- )
-
-It should be noted that for real "incrementing sequence" behavior, the built-in capabilities of the database should normally be used, which may include sequence objects or other autoincrementing capabilities. For primary key columns, SQLAlchemy will in most cases use these capabilities automatically. See the API documentation for :class:`~sqlalchemy.schema.Column` including the ``autoincrement`` flag, as well as the section on :class:`~sqlalchemy.schema.Sequence` later in this chapter for background on standard primary key generation techniques.
-
-To illustrate onupdate, we assign the Python ``datetime`` function ``now`` to the ``onupdate`` attribute::
-
- import datetime
-
- t = Table("mytable", meta,
- Column('id', Integer, primary_key=True),
-
- # define 'last_updated' to be populated with datetime.now()
- Column('last_updated', DateTime, onupdate=datetime.datetime.now),
- )
-
-When an update statement executes and no value is passed for ``last_updated``, the ``datetime.datetime.now()`` Python function is executed and its return value is used as the value for ``last_updated``. Notice that we provide ``now`` as the function itself without calling it (i.e. there are no parentheses following) - SQLAlchemy will execute the function at the time the statement executes.
-
-Context-Sensitive Default Functions
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Python functions used by ``default`` and ``onupdate`` may also make use of the current statement's context in order to determine a value. The `context` of a statement is an internal SQLAlchemy object which contains all information about the statement being executed, including its source expression, the parameters associated with it and the cursor. The typical use case for this context with regards to default generation is to have access to the other values being inserted or updated on the row. To access the context, provide a function that accepts a single ``context`` argument::
-
- def mydefault(context):
- return context.current_parameters['counter'] + 12
-
- t = Table('mytable', meta,
- Column('counter', Integer),
- Column('counter_plus_twelve', Integer, default=mydefault, onupdate=mydefault)
- )
-
-Above we illustrate a default function which will execute for all INSERT and UPDATE statements where a value for ``counter_plus_twelve`` was otherwise not provided, and the value will be that of whatever value is present in the execution for the ``counter`` column, plus the number 12.
-
-While the context object passed to the default function has many attributes, the ``current_parameters`` member is a special member provided only during the execution of a default function for the purposes of deriving defaults from its existing values. For a single statement that is executing many sets of bind parameters, the user-defined function is called for each set of parameters, and ``current_parameters`` will be provided with each individual parameter set for each execution.
-
-SQL Expressions
----------------
-
-The "default" and "onupdate" keywords may also be passed SQL expressions, including select statements or direct function calls::
-
- t = Table("mytable", meta,
- Column('id', Integer, primary_key=True),
-
- # define 'create_date' to default to now()
- Column('create_date', DateTime, default=func.now()),
-
- # define 'key' to pull its default from the 'keyvalues' table
- Column('key', String(20), default=keyvalues.select(keyvalues.c.type=='type1', limit=1)),
-
- # define 'last_modified' to use the current_timestamp SQL function on update
- Column('last_modified', DateTime, onupdate=func.utc_timestamp())
- )
-
-Above, the ``create_date`` column will be populated with the result of the ``now()`` SQL function (which, depending on backend, compiles into ``NOW()`` or ``CURRENT_TIMESTAMP`` in most cases) during an INSERT statement, and the ``key`` column with the result of a SELECT subquery from another table. The ``last_modified`` column will be populated with the value of ``UTC_TIMESTAMP()``, a function specific to MySQL, when an UPDATE statement is emitted for this table.
-
-Note that when using ``func`` functions, unlike when using Python `datetime` functions we *do* call the function, i.e. with parentheses "()" - this is because what we want in this case is the return value of the function, which is the SQL expression construct that will be rendered into the INSERT or UPDATE statement.
-
-The above SQL functions are usually executed "inline" with the INSERT or UPDATE statement being executed, meaning, a single statement is executed which embeds the given expressions or subqueries within the VALUES or SET clause of the statement. Although in some cases, the function is "pre-executed" in a SELECT statement of its own beforehand. This happens when all of the following is true:
-
-* the column is a primary key column
-
-* the database dialect does not support a usable ``cursor.lastrowid`` accessor (or equivalent); this currently includes PostgreSQL, Oracle, and Firebird, as well as some MySQL dialects.
-
-* the dialect does not support the "RETURNING" clause or similar, or the ``implicit_returning`` flag is set to ``False`` for the dialect. Dialects which support RETURNING currently include Postgresql, Oracle, Firebird, and MS-SQL.
-
-* the statement is a single execution, i.e. only supplies one set of parameters and doesn't use "executemany" behavior
-
-* the ``inline=True`` flag is not set on the :class:`~sqlalchemy.sql.expression.Insert()` or :class:`~sqlalchemy.sql.expression.Update()` construct, and the statement has not defined an explicit `returning()` clause.
-
-Whether or not the default generation clause "pre-executes" is not something that normally needs to be considered, unless it is being addressed for performance reasons.
-
-When the statement is executed with a single set of parameters (that is, it is not an "executemany" style execution), the returned :class:`~sqlalchemy.engine.base.ResultProxy` will contain a collection accessible via ``result.postfetch_cols()`` which contains a list of all :class:`~sqlalchemy.schema.Column` objects which had an inline-executed default. Similarly, all parameters which were bound to the statement, including all Python and SQL expressions which were pre-executed, are present in the ``last_inserted_params()`` or ``last_updated_params()`` collections on :class:`~sqlalchemy.engine.base.ResultProxy`. The ``inserted_primary_key`` collection contains a list of primary key values for the row inserted (a list so that single-column and composite-column primary keys are represented in the same format).
-
-Server Side Defaults
---------------------
-
-A variant on the SQL expression default is the ``server_default``, which gets placed in the CREATE TABLE statement during a ``create()`` operation:
-
-.. sourcecode:: python+sql
-
- t = Table('test', meta,
- Column('abc', String(20), server_default='abc'),
- Column('created_at', DateTime, server_default=text("sysdate"))
- )
-
-A create call for the above table will produce::
-
- CREATE TABLE test (
- abc varchar(20) default 'abc',
- created_at datetime default sysdate
- )
-
-The behavior of ``server_default`` is similar to that of a regular SQL default; if it's placed on a primary key column for a database which doesn't have a way to "postfetch" the ID, and the statement is not "inlined", the SQL expression is pre-executed; otherwise, SQLAlchemy lets the default fire off on the database side normally.
-
-Triggered Columns
-------------------
-
-Columns with values set by a database trigger or other external process may be called out with a marker::
-
- t = Table('test', meta,
- Column('abc', String(20), server_default=FetchedValue()),
- Column('def', String(20), server_onupdate=FetchedValue())
- )
-
-These markers do not emit a "default" clause when the table is created, however they do set the same internal flags as a static ``server_default`` clause, providing hints to higher-level tools that a "post-fetch" of these rows should be performed after an insert or update.
-
-Defining Sequences
--------------------
-
-SQLAlchemy represents database sequences using the :class:`~sqlalchemy.schema.Sequence` object, which is considered to be a special case of "column default". It only has an effect on databases which have explicit support for sequences, which currently includes Postgresql, Oracle, and Firebird. The :class:`~sqlalchemy.schema.Sequence` object is otherwise ignored.
-
-The :class:`~sqlalchemy.schema.Sequence` may be placed on any column as a "default" generator to be used during INSERT operations, and can also be configured to fire off during UPDATE operations if desired. It is most commonly used in conjunction with a single integer primary key column::
-
- table = Table("cartitems", meta,
- Column("cart_id", Integer, Sequence('cart_id_seq'), primary_key=True),
- Column("description", String(40)),
- Column("createdate", DateTime())
- )
-
-Where above, the table "cartitems" is associated with a sequence named "cart_id_seq". When INSERT statements take place for "cartitems", and no value is passed for the "cart_id" column, the "cart_id_seq" sequence will be used to generate a value.
-
-When the :class:`~sqlalchemy.schema.Sequence` is associated with a table, CREATE and DROP statements issued for that table will also issue CREATE/DROP for the sequence object as well, thus "bundling" the sequence object with its parent table.
-
-The :class:`~sqlalchemy.schema.Sequence` object also implements special functionality to accommodate Postgresql's SERIAL datatype. The SERIAL type in PG automatically generates a sequence that is used implicitly during inserts. This means that if a :class:`~sqlalchemy.schema.Table` object defines a :class:`~sqlalchemy.schema.Sequence` on its primary key column so that it works with Oracle and Firebird, the :class:`~sqlalchemy.schema.Sequence` would get in the way of the "implicit" sequence that PG would normally use. For this use case, add the flag ``optional=True`` to the :class:`~sqlalchemy.schema.Sequence` object - this indicates that the :class:`~sqlalchemy.schema.Sequence` should only be used if the database provides no other option for generating primary key identifiers.
-
-The :class:`~sqlalchemy.schema.Sequence` object also has the ability to be executed standalone like a SQL expression, which has the effect of calling its "next value" function::
-
- seq = Sequence('some_sequence')
- nextid = connection.execute(seq)
-
-Defining Constraints and Indexes
-=================================
-
-.. _metadata_foreignkeys:
-
-Defining Foreign Keys
----------------------
-
-A *foreign key* in SQL is a table-level construct that constrains one or more columns in that table to only allow values that are present in a different set of columns, typically but not always located on a different table. We call the columns which are constrained the *foreign key* columns and the columns which they are constrained towards the *referenced* columns. The referenced columns almost always define the primary key for their owning table, though there are exceptions to this. The foreign key is the "joint" that connects together pairs of rows which have a relationship with each other, and SQLAlchemy assigns very deep importance to this concept in virtually every area of its operation.
-
-In SQLAlchemy as well as in DDL, foreign key constraints can be defined as additional attributes within the table clause, or for single-column foreign keys they may optionally be specified within the definition of a single column. The single column foreign key is more common, and at the column level is specified by constructing a :class:`~sqlalchemy.schema.ForeignKey` object as an argument to a :class:`~sqlalchemy.schema.Column` object::
-
- user_preference = Table('user_preference', metadata,
- Column('pref_id', Integer, primary_key=True),
- Column('user_id', Integer, ForeignKey("user.user_id"), nullable=False),
- Column('pref_name', String(40), nullable=False),
- Column('pref_value', String(100))
- )
-
-Above, we define a new table ``user_preference`` for which each row must contain a value in the ``user_id`` column that also exists in the ``user`` table's ``user_id`` column.
-
-The argument to :class:`~sqlalchemy.schema.ForeignKey` is most commonly a string of the form *<tablename>.<columnname>*, or for a table in a remote schema or "owner" of the form *<schemaname>.<tablename>.<columnname>*. It may also be an actual :class:`~sqlalchemy.schema.Column` object, which as we'll see later is accessed from an existing :class:`~sqlalchemy.schema.Table` object via its ``c`` collection::
-
- ForeignKey(user.c.user_id)
-
-The advantage to using a string is that the in-python linkage between ``user`` and ``user_preference`` is resolved only when first needed, so that table objects can be easily spread across multiple modules and defined in any order.
-
-Foreign keys may also be defined at the table level, using the :class:`~sqlalchemy.schema.ForeignKeyConstraint` object. This object can describe a single- or multi-column foreign key. A multi-column foreign key is known as a *composite* foreign key, and almost always references a table that has a composite primary key. Below we define a table ``invoice`` which has a composite primary key::
-
- invoice = Table('invoice', metadata,
- Column('invoice_id', Integer, primary_key=True),
- Column('ref_num', Integer, primary_key=True),
- Column('description', String(60), nullable=False)
- )
-
-And then a table ``invoice_item`` with a composite foreign key referencing ``invoice``::
-
- invoice_item = Table('invoice_item', metadata,
- Column('item_id', Integer, primary_key=True),
- Column('item_name', String(60), nullable=False),
- Column('invoice_id', Integer, nullable=False),
- Column('ref_num', Integer, nullable=False),
- ForeignKeyConstraint(['invoice_id', 'ref_num'], ['invoice.invoice_id', 'invoice.ref_num'])
- )
-
-It's important to note that the :class:`~sqlalchemy.schema.ForeignKeyConstraint` is the only way to define a composite foreign key. While we could also have placed individual :class:`~sqlalchemy.schema.ForeignKey` objects on both the ``invoice_item.invoice_id`` and ``invoice_item.ref_num`` columns, SQLAlchemy would not be aware that these two values should be paired together - it would be two individual foreign key constraints instead of a single composite foreign key referencing two columns.
-
-Creating/Dropping Foreign Key Constraints via ALTER
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In all the above examples, the :class:`~sqlalchemy.schema.ForeignKey` object causes the "REFERENCES" keyword to be added inline to a column definition within a "CREATE TABLE" statement when :func:`~sqlalchemy.schema.MetaData.create_all` is issued, and :class:`~sqlalchemy.schema.ForeignKeyConstraint` invokes the "CONSTRAINT" keyword inline with "CREATE TABLE". There are some cases where this is undesirable, particularly when two tables reference each other mutually, each with a foreign key referencing the other. In such a situation at least one of the foreign key constraints must be generated after both tables have been built. To support such a scheme, :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.ForeignKeyConstraint` offer the flag ``use_alter=True``. When using this flag, the constraint will be generated using a definition similar to "ALTER TABLE <tablename> ADD CONSTRAINT <name> ...". Since a name is required, the ``name`` attribute must also be specified. For example::
-
- node = Table('node', meta,
- Column('node_id', Integer, primary_key=True),
- Column('primary_element', Integer,
- ForeignKey('element.element_id', use_alter=True, name='fk_node_element_id')
- )
- )
-
- element = Table('element', meta,
- Column('element_id', Integer, primary_key=True),
- Column('parent_node_id', Integer),
- ForeignKeyConstraint(
- ['parent_node_id'],
- ['node.node_id'],
- use_alter=True,
- name='fk_element_parent_node_id'
- )
- )
-
-ON UPDATE and ON DELETE
-~~~~~~~~~~~~~~~~~~~~~~~
-
-Most databases support *cascading* of foreign key values, that is, when a parent row is updated the new value is placed in child rows, or when the parent row is deleted all corresponding child rows are set to null or deleted. In data definition language these are specified using phrases like "ON UPDATE CASCADE", "ON DELETE CASCADE", and "ON DELETE SET NULL", corresponding to foreign key constraints. The phrase after "ON UPDATE" or "ON DELETE" may also allow other phrases that are specific to the database in use. The :class:`~sqlalchemy.schema.ForeignKey` and :class:`~sqlalchemy.schema.ForeignKeyConstraint` objects support the generation of this clause via the ``onupdate`` and ``ondelete`` keyword arguments. The value is any string which will be output after the appropriate "ON UPDATE" or "ON DELETE" phrase::
-
- child = Table('child', meta,
- Column('id', Integer,
- ForeignKey('parent.id', onupdate="CASCADE", ondelete="CASCADE"),
- primary_key=True
- )
- )
-
- composite = Table('composite', meta,
- Column('id', Integer, primary_key=True),
- Column('rev_id', Integer),
- Column('note_id', Integer),
- ForeignKeyConstraint(
- ['rev_id', 'note_id'],
- ['revisions.id', 'revisions.note_id'],
- onupdate="CASCADE", ondelete="SET NULL"
- )
- )
-
-Note that these clauses are not supported on SQLite, and require ``InnoDB`` tables when used with MySQL. They may also not be supported on other databases.
-
-UNIQUE Constraint
------------------
-
-Unique constraints can be created anonymously on a single column using the ``unique`` keyword on :class:`~sqlalchemy.schema.Column`. Explicitly named unique constraints and/or those with multiple columns are created via the :class:`~sqlalchemy.schema.UniqueConstraint` table-level construct.
-
-.. sourcecode:: python+sql
-
- meta = MetaData()
- mytable = Table('mytable', meta,
-
- # per-column anonymous unique constraint
- Column('col1', Integer, unique=True),
-
- Column('col2', Integer),
- Column('col3', Integer),
-
- # explicit/composite unique constraint. 'name' is optional.
- UniqueConstraint('col2', 'col3', name='uix_1')
- )
-
-CHECK Constraint
-----------------
-
-Check constraints can be named or unnamed and can be created at the Column or Table level, using the :class:`~sqlalchemy.schema.CheckConstraint` construct. The text of the check constraint is passed directly through to the database, so there is limited "database independent" behavior. Column level check constraints generally should only refer to the column to which they are placed, while table level constraints can refer to any columns in the table.
-
-Note that some databases do not actively support check constraints such as MySQL and SQLite.
-
-.. sourcecode:: python+sql
-
- meta = MetaData()
- mytable = Table('mytable', meta,
-
- # per-column CHECK constraint
- Column('col1', Integer, CheckConstraint('col1>5')),
-
- Column('col2', Integer),
- Column('col3', Integer),
-
- # table level CHECK constraint. 'name' is optional.
- CheckConstraint('col2 > col3 + 5', name='check1')
- )
-
- {sql}mytable.create(engine)
- CREATE TABLE mytable (
- col1 INTEGER CHECK (col1>5),
- col2 INTEGER,
- col3 INTEGER,
- CONSTRAINT check1 CHECK (col2 > col3 + 5)
- ){stop}
-
-Indexes
--------
-
-Indexes can be created anonymously (using an auto-generated name ``ix_<column label>``) for a single column using the inline ``index`` keyword on :class:`~sqlalchemy.schema.Column`, which also modifies the usage of ``unique`` to apply the uniqueness to the index itself, instead of adding a separate UNIQUE constraint. For indexes with specific names or which encompass more than one column, use the :class:`~sqlalchemy.schema.Index` construct, which requires a name.
-
-Note that the :class:`~sqlalchemy.schema.Index` construct is created **externally** to the table to which it corresponds, using :class:`~sqlalchemy.schema.Column` objects and not strings.
-
-Below we illustrate a :class:`~sqlalchemy.schema.Table` with several :class:`~sqlalchemy.schema.Index` objects associated. The DDL for "CREATE INDEX" is issued right after the create statements for the table:
-
-.. sourcecode:: python+sql
-
- meta = MetaData()
- mytable = Table('mytable', meta,
- # an indexed column, with index "ix_mytable_col1"
- Column('col1', Integer, index=True),
-
- # a uniquely indexed column with index "ix_mytable_col2"
- Column('col2', Integer, index=True, unique=True),
-
- Column('col3', Integer),
- Column('col4', Integer),
-
- Column('col5', Integer),
- Column('col6', Integer),
- )
-
- # place an index on col3, col4
- Index('idx_col34', mytable.c.col3, mytable.c.col4)
-
- # place a unique index on col5, col6
- Index('myindex', mytable.c.col5, mytable.c.col6, unique=True)
-
- {sql}mytable.create(engine)
- CREATE TABLE mytable (
- col1 INTEGER,
- col2 INTEGER,
- col3 INTEGER,
- col4 INTEGER,
- col5 INTEGER,
- col6 INTEGER
- )
- CREATE INDEX ix_mytable_col1 ON mytable (col1)
- CREATE UNIQUE INDEX ix_mytable_col2 ON mytable (col2)
- CREATE UNIQUE INDEX myindex ON mytable (col5, col6)
- CREATE INDEX idx_col34 ON mytable (col3, col4){stop}
-
-The :class:`~sqlalchemy.schema.Index` object also supports its own ``create()`` method:
-
-.. sourcecode:: python+sql
-
- i = Index('someindex', mytable.c.col5)
- {sql}i.create(engine)
- CREATE INDEX someindex ON mytable (col5){stop}
-
-Customizing DDL
-===============
-
-In the preceding sections we've discussed a variety of schema constructs including :class:`~sqlalchemy.schema.Table`, :class:`~sqlalchemy.schema.ForeignKeyConstraint`, :class:`~sqlalchemy.schema.CheckConstraint`, and :class:`~sqlalchemy.schema.Sequence`. Throughout, we've relied upon the ``create()`` and :func:`~sqlalchemy.schema.MetaData.create_all` methods of :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` in order to issue data definition language (DDL) for all constructs. When issued, a pre-determined order of operations is invoked, and DDL to create each table is created unconditionally including all constraints and other objects associated with it. For more complex scenarios where database-specific DDL is required, SQLAlchemy offers two techniques which can be used to add any DDL based on any condition, either accompanying the standard generation of tables or by itself.
-
-Controlling DDL Sequences
--------------------------
-
-The ``sqlalchemy.schema`` package contains SQL expression constructs that provide DDL expressions. For example, to produce a ``CREATE TABLE`` statement:
-
-.. sourcecode:: python+sql
-
- from sqlalchemy.schema import CreateTable
- {sql}engine.execute(CreateTable(mytable))
- CREATE TABLE mytable (
- col1 INTEGER,
- col2 INTEGER,
- col3 INTEGER,
- col4 INTEGER,
- col5 INTEGER,
- col6 INTEGER
- ){stop}
-
-Above, the :class:`~sqlalchemy.schema.CreateTable` construct works like any other expression construct (such as ``select()``, ``table.insert()``, etc.). A full reference of available constructs is in :ref:`schema_api_ddl`.
-
-The DDL constructs all extend a common base class which provides the capability to be associated with an individual :class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData` object, to be invoked upon create/drop events. Consider the example of a table which contains a CHECK constraint:
-
-.. sourcecode:: python+sql
-
- users = Table('users', metadata,
- Column('user_id', Integer, primary_key=True),
- Column('user_name', String(40), nullable=False),
- CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
- )
-
- {sql}users.create(engine)
- CREATE TABLE users (
- user_id SERIAL NOT NULL,
- user_name VARCHAR(40) NOT NULL,
- PRIMARY KEY (user_id),
- CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8)
- ){stop}
-
-The above table contains a column "user_name" which is subject to a CHECK constraint that validates that the length of the string is at least eight characters. When a ``create()`` is issued for this table, DDL for the :class:`~sqlalchemy.schema.CheckConstraint` will also be issued inline within the table definition.
-
-The :class:`~sqlalchemy.schema.CheckConstraint` construct can also be constructed externally and associated with the :class:`~sqlalchemy.schema.Table` afterwards::
-
- constraint = CheckConstraint('length(user_name) >= 8',name="cst_user_name_length")
- users.append_constraint(constraint)
-
-So far, the effect is the same. However, if we create DDL elements corresponding to the creation and removal of this constraint, and associate them with the :class:`~sqlalchemy.schema.Table` as events, these new events will take over the job of issuing DDL for the constraint. Additionally, the constraint will be added via ALTER:
-
-.. sourcecode:: python+sql
-
- AddConstraint(constraint).execute_at("after-create", users)
- DropConstraint(constraint).execute_at("before-drop", users)
-
- {sql}users.create(engine)
- CREATE TABLE users (
- user_id SERIAL NOT NULL,
- user_name VARCHAR(40) NOT NULL,
- PRIMARY KEY (user_id)
- )
-
- ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop}
-
- {sql}users.drop(engine)
- ALTER TABLE users DROP CONSTRAINT cst_user_name_length
- DROP TABLE users{stop}
-
-The real usefulness of the above becomes clearer once we illustrate the ``on`` attribute of a DDL event. The ``on`` parameter is part of the constructor, and may be a string name of a database dialect name, a tuple containing dialect names, or a Python callable. This will limit the execution of the item to just those dialects, or when the return value of the callable is ``True``. So if our :class:`~sqlalchemy.schema.CheckConstraint` was only supported by Postgresql and not other databases, we could limit it to just that dialect::
-
- AddConstraint(constraint, on='postgresql').execute_at("after-create", users)
- DropConstraint(constraint, on='postgresql').execute_at("before-drop", users)
-
-Or to any set of dialects::
-
- AddConstraint(constraint, on=('postgresql', 'mysql')).execute_at("after-create", users)
- DropConstraint(constraint, on=('postgresql', 'mysql')).execute_at("before-drop", users)
-
-When using a callable, the callable is passed the ddl element, event name, the :class:`~sqlalchemy.schema.Table` or :class:`~sqlalchemy.schema.MetaData` object whose "create" or "drop" event is in progress, and the :class:`~sqlalchemy.engine.base.Connection` object being used for the operation, as well as additional information as keyword arguments. The callable can perform checks, such as whether or not a given item already exists. Below we define ``should_create()`` and ``should_drop()`` callables that check for the presence of our named constraint:
-
-.. sourcecode:: python+sql
-
- def should_create(ddl, event, target, connection, **kw):
- row = connection.execute("select conname from pg_constraint where conname='%s'" % ddl.element.name).scalar()
- return not bool(row)
-
- def should_drop(ddl, event, target, connection, **kw):
- return not should_create(ddl, event, target, connection, **kw)
-
- AddConstraint(constraint, on=should_create).execute_at("after-create", users)
- DropConstraint(constraint, on=should_drop).execute_at("before-drop", users)
-
- {sql}users.create(engine)
- CREATE TABLE users (
- user_id SERIAL NOT NULL,
- user_name VARCHAR(40) NOT NULL,
- PRIMARY KEY (user_id)
- )
-
- select conname from pg_constraint where conname='cst_user_name_length'
- ALTER TABLE users ADD CONSTRAINT cst_user_name_length CHECK (length(user_name) >= 8){stop}
-
- {sql}users.drop(engine)
- select conname from pg_constraint where conname='cst_user_name_length'
- ALTER TABLE users DROP CONSTRAINT cst_user_name_length
- DROP TABLE users{stop}
-
-Custom DDL
-----------
-
-Custom DDL phrases are most easily achieved using the :class:`~sqlalchemy.schema.DDL` construct. This construct works like all the other DDL elements except it accepts a string which is the
-text to be emitted:
-
-.. sourcecode:: python+sql
-
- DDL("ALTER TABLE users ADD CONSTRAINT "
- "cst_user_name_length "
- " CHECK (length(user_name) >= 8)").execute_at("after-create", metadata)
-
-A more comprehensive method of creating libraries of DDL constructs is to use the :ref:`sqlalchemy.ext.compiler_toplevel` extension. See that chapter for full details.
-
-Adapting Tables to Alternate Metadata
-======================================
-
-
-A :class:`~sqlalchemy.schema.Table` object created against a specific :class:`~sqlalchemy.schema.MetaData` object can be re-created against a new MetaData using the :func:`~sqlalchemy.schema.Table.tometadata` method:
-
-.. sourcecode:: python+sql
-
- # create two metadata
- meta1 = MetaData('sqlite:///querytest.db')
- meta2 = MetaData()
-
- # load 'users' from the sqlite engine
- users_table = Table('users', meta1, autoload=True)
-
- # create the same Table object for the plain metadata
- users_table_2 = users_table.tometadata(meta2)
-
-
diff --git a/doc/build/orm/collections.rst b/doc/build/orm/collections.rst
new file mode 100644
index 000000000..7f585fc50
--- /dev/null
+++ b/doc/build/orm/collections.rst
@@ -0,0 +1,410 @@
+.. _collections_toplevel:
+
+.. currentmodule:: sqlalchemy.orm
+
+Collection Configuration and Techniques
+=======================================
+
+The :func:`.relationship` function defines a linkage between two classes.
+When the linkage defines a one-to-many or many-to-many relationship, it's
+represented as a Python collection when objects are loaded and manipulated.
+This section presents additional information about collection configuration
+and techniques.
+
+.. _largecollections:
+.. currentmodule:: sqlalchemy.orm
+
+Working with Large Collections
+-------------------------------
+
+The default behavior of :func:`.relationship` is to fully load
+the collection of items in, as according to the loading strategy of the
+relationship. Additionally, the Session by default only knows how to delete
+objects which are actually present within the session. When a parent instance
+is marked for deletion and flushed, the Session loads its full list of child
+items in so that they may either be deleted as well, or have their foreign key
+value set to null; this is to avoid constraint violations. For large
+collections of child items, there are several strategies to bypass full
+loading of child items both at load time as well as deletion time.
+
+Dynamic Relationship Loaders
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The most useful by far is the :func:`~sqlalchemy.orm.dynamic_loader`
+relationship. This is a variant of :func:`~sqlalchemy.orm.relationship` which
+returns a :class:`~sqlalchemy.orm.query.Query` object in place of a collection
+when accessed. :func:`~sqlalchemy.orm.query.Query.filter` criterion may be
+applied as well as limits and offsets, either explicitly or via array slices:
+
+.. sourcecode:: python+sql
+
+ mapper(User, users_table, properties={
+ 'posts': dynamic_loader(Post)
+ })
+
+ jack = session.query(User).get(id)
+
+ # filter Jack's blog posts
+ posts = jack.posts.filter(Post.headline=='this is a post')
+
+ # apply array slices
+ posts = jack.posts[5:20]
+
+The dynamic relationship supports limited write operations, via the
+``append()`` and ``remove()`` methods::
+
+ oldpost = jack.posts.filter(Post.headline=='old post').one()
+ jack.posts.remove(oldpost)
+
+ jack.posts.append(Post('new post'))
+
+Since the read side of the dynamic relationship always queries the
+database, changes to the underlying collection will not be visible
+until the data has been flushed. However, as long as "autoflush" is
+enabled on the :class:`.Session` in use, this will occur
+automatically each time the collection is about to emit a
+query.
+
+To place a dynamic relationship on a backref, use ``lazy='dynamic'``:
+
+.. sourcecode:: python+sql
+
+ mapper(Post, posts_table, properties={
+ 'user': relationship(User, backref=backref('posts', lazy='dynamic'))
+ })
+
+Note that eager/lazy loading options cannot be used in conjunction with dynamic relationships at this time.
+
+.. autofunction:: dynamic_loader
+
+Setting Noload
+~~~~~~~~~~~~~~~
+
+The opposite of the dynamic relationship is simply "noload", specified using ``lazy='noload'``:
+
+.. sourcecode:: python+sql
+
+ mapper(MyClass, table, properties={
+ 'children': relationship(MyOtherClass, lazy='noload')
+ })
+
+Above, the ``children`` collection is fully writeable, and changes to it will
+be persisted to the database as well as locally available for reading at the
+time they are added. However when instances of ``MyClass`` are freshly loaded
+from the database, the ``children`` collection stays empty.
+
+Using Passive Deletes
+~~~~~~~~~~~~~~~~~~~~~~
+
+Use ``passive_deletes=True`` to disable child object loading on a DELETE
+operation, in conjunction with "ON DELETE (CASCADE|SET NULL)" on your database
+to automatically cascade deletes to child objects. Note that "ON DELETE" is
+not supported on SQLite, and requires ``InnoDB`` tables when using MySQL:
+
+.. sourcecode:: python+sql
+
+ mytable = Table('mytable', meta,
+ Column('id', Integer, primary_key=True),
+ )
+
+ myothertable = Table('myothertable', meta,
+ Column('id', Integer, primary_key=True),
+ Column('parent_id', Integer),
+ ForeignKeyConstraint(['parent_id'], ['mytable.id'], ondelete="CASCADE"),
+ )
+
+ mapper(MyOtherClass, myothertable)
+
+ mapper(MyClass, mytable, properties={
+ 'children': relationship(MyOtherClass, cascade="all, delete-orphan", passive_deletes=True)
+ })
+
+When ``passive_deletes`` is applied, the ``children`` relationship will not be
+loaded into memory when an instance of ``MyClass`` is marked for deletion. The
+``cascade="all, delete-orphan"`` *will* take effect for instances of
+``MyOtherClass`` which are currently present in the session; however for
+instances of ``MyOtherClass`` which are not loaded, SQLAlchemy assumes that
+"ON DELETE CASCADE" rules will ensure that those rows are deleted by the
+database and that no foreign key violation will occur.
+
+.. currentmodule:: sqlalchemy.orm.collections
+.. _custom_collections:
+
+Customizing Collection Access
+-----------------------------
+
+Mapping a one-to-many or many-to-many relationship results in a collection of
+values accessible through an attribute on the parent instance. By default,
+this collection is a ``list``::
+
+ mapper(Parent, properties={
+ 'children' : relationship(Child)
+ })
+
+ parent = Parent()
+ parent.children.append(Child())
+ print parent.children[0]
+
+Collections are not limited to lists. Sets, mutable sequences and almost any
+other Python object that can act as a container can be used in place of the
+default list, by specifying the ``collection_class`` option on
+:func:`~sqlalchemy.orm.relationship`.
+
+.. sourcecode:: python+sql
+
+ # use a set
+ mapper(Parent, properties={
+ 'children' : relationship(Child, collection_class=set)
+ })
+
+ parent = Parent()
+ child = Child()
+ parent.children.add(child)
+ assert child in parent.children
+
+
+Custom Collection Implementations
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can use your own types for collections as well. For most cases, simply
+inherit from ``list`` or ``set`` and add the custom behavior.
+
+Collections in SQLAlchemy are transparently *instrumented*. Instrumentation
+means that normal operations on the collection are tracked and result in
+changes being written to the database at flush time. Additionally, collection
+operations can fire *events* which indicate some secondary operation must take
+place. Examples of a secondary operation include saving the child item in the
+parent's :class:`~sqlalchemy.orm.session.Session` (i.e. the ``save-update``
+cascade), as well as synchronizing the state of a bi-directional relationship
+(i.e. a ``backref``).
+
+The collections package understands the basic interface of lists, sets and
+dicts and will automatically apply instrumentation to those built-in types and
+their subclasses. Object-derived types that implement a basic collection
+interface are detected and instrumented via duck-typing:
+
+.. sourcecode:: python+sql
+
+ class ListLike(object):
+ def __init__(self):
+ self.data = []
+ def append(self, item):
+ self.data.append(item)
+ def remove(self, item):
+ self.data.remove(item)
+ def extend(self, items):
+ self.data.extend(items)
+ def __iter__(self):
+ return iter(self.data)
+ def foo(self):
+ return 'foo'
+
+``append``, ``remove``, and ``extend`` are known list-like methods, and will be instrumented automatically. ``__iter__`` is not a mutator method and won't be instrumented, and ``foo`` won't be either.
+
+Duck-typing (i.e. guesswork) isn't rock-solid, of course, so you can be
+explicit about the interface you are implementing by providing an
+``__emulates__`` class attribute::
+
+ class SetLike(object):
+ __emulates__ = set
+
+ def __init__(self):
+ self.data = set()
+ def append(self, item):
+ self.data.add(item)
+ def remove(self, item):
+ self.data.remove(item)
+ def __iter__(self):
+ return iter(self.data)
+
+This class looks list-like because of ``append``, but ``__emulates__`` forces
+it to set-like. ``remove`` is known to be part of the set interface and will
+be instrumented.
+
+But this class won't work quite yet: a little glue is needed to adapt it for
+use by SQLAlchemy. The ORM needs to know which methods to use to append,
+remove and iterate over members of the collection. When using a type like
+``list`` or ``set``, the appropriate methods are well-known and used
+automatically when present. This set-like class does not provide the expected
+``add`` method, so we must supply an explicit mapping for the ORM via a
+decorator.
+
+Annotating Custom Collections via Decorators
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Decorators can be used to tag the individual methods the ORM needs to manage
+collections. Use them when your class doesn't quite meet the regular interface
+for its container type, or you simply would like to use a different method to
+get the job done.
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.orm.collections import collection
+
+ class SetLike(object):
+ __emulates__ = set
+
+ def __init__(self):
+ self.data = set()
+
+ @collection.appender
+ def append(self, item):
+ self.data.add(item)
+
+ def remove(self, item):
+ self.data.remove(item)
+
+ def __iter__(self):
+ return iter(self.data)
+
+And that's all that's needed to complete the example. SQLAlchemy will add
+instances via the ``append`` method. ``remove`` and ``__iter__`` are the
+default methods for sets and will be used for removing and iteration. Default
+methods can be changed as well:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.orm.collections import collection
+
+ class MyList(list):
+ @collection.remover
+ def zark(self, item):
+ # do something special...
+
+ @collection.iterator
+ def hey_use_this_instead_for_iteration(self):
+ # ...
+
+There is no requirement to be list-, or set-like at all. Collection classes
+can be any shape, so long as they have the append, remove and iterate
+interface marked for SQLAlchemy's use. Append and remove methods will be
+called with a mapped entity as the single argument, and iterator methods are
+called with no arguments and must return an iterator.
+
+Dictionary-Based Collections
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A ``dict`` can be used as a collection, but a keying strategy is needed to map
+entities loaded by the ORM to key, value pairs. The
+:mod:`sqlalchemy.orm.collections` package provides several built-in types for
+dictionary-based collections:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.orm.collections import column_mapped_collection, attribute_mapped_collection, mapped_collection
+
+ mapper(Item, items_table, properties={
+ # key by column
+ 'notes': relationship(Note, collection_class=column_mapped_collection(notes_table.c.keyword)),
+ # or named attribute
+ 'notes2': relationship(Note, collection_class=attribute_mapped_collection('keyword')),
+ # or any callable
+ 'notes3': relationship(Note, collection_class=mapped_collection(lambda entity: entity.a + entity.b))
+ })
+
+ # ...
+ item = Item()
+ item.notes['color'] = Note('color', 'blue')
+ print item.notes['color']
+
+These functions each provide a ``dict`` subclass with decorated ``set`` and
+``remove`` methods and the keying strategy of your choice.
+
+The :class:`sqlalchemy.orm.collections.MappedCollection` class can be used as
+a base class for your custom types or as a mix-in to quickly add ``dict``
+collection support to other classes. It uses a keying function to delegate to
+``__setitem__`` and ``__delitem__``:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.util import OrderedDict
+ from sqlalchemy.orm.collections import MappedCollection
+
+ class NodeMap(OrderedDict, MappedCollection):
+ """Holds 'Node' objects, keyed by the 'name' attribute with insert order maintained."""
+
+ def __init__(self, *args, **kw):
+ MappedCollection.__init__(self, keyfunc=lambda node: node.name)
+ OrderedDict.__init__(self, *args, **kw)
+
+When subclassing :class:`.MappedCollection`, user-defined versions
+of ``__setitem__()`` or ``__delitem__()`` should be decorated
+with :meth:`.collection.internally_instrumented`, **if** they call down
+to those same methods on :class:`.MappedCollection`. This because the methods
+on :class:`.MappedCollection` are already instrumented - calling them
+from within an already instrumented call can cause events to be fired off
+repeatedly, or inappropriately, leading to internal state corruption in
+rare cases::
+
+ from sqlalchemy.orm.collections import MappedCollection,\
+ collection
+
+ class MyMappedCollection(MappedCollection):
+ """Use @internally_instrumented when your methods
+ call down to already-instrumented methods.
+
+ """
+
+ @collection.internally_instrumented
+ def __setitem__(self, key, value, _sa_initiator=None):
+ # do something with key, value
+ super(MyMappedCollection, self).__setitem__(key, value, _sa_initiator)
+
+ @collection.internally_instrumented
+ def __delitem__(self, key, _sa_initiator=None):
+ # do something with key
+ super(MyMappedCollection, self).__delitem__(key, _sa_initiator)
+
+The ORM understands the ``dict`` interface just like lists and sets, and will
+automatically instrument all dict-like methods if you choose to subclass
+``dict`` or provide dict-like collection behavior in a duck-typed class. You
+must decorate appender and remover methods, however; there are no compatible
+methods in the basic dictionary interface for SQLAlchemy to use by default.
+Iteration will go through ``itervalues()`` unless otherwise decorated.
+
+Instrumentation and Custom Types
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Many custom types and existing library classes can be used as an entity
+collection type as-is without further ado. However, it is important to note
+that the instrumentation process *will* modify the type, adding decorators
+around methods automatically.
+
+The decorations are lightweight and no-op outside of relationships, but they
+do add unneeded overhead when triggered elsewhere. When using a library class
+as a collection, it can be good practice to use the "trivial subclass" trick
+to restrict the decorations to just your usage in relationships. For example:
+
+.. sourcecode:: python+sql
+
+ class MyAwesomeList(some.great.library.AwesomeList):
+ pass
+
+ # ... relationship(..., collection_class=MyAwesomeList)
+
+The ORM uses this approach for built-ins, quietly substituting a trivial
+subclass when a ``list``, ``set`` or ``dict`` is used directly.
+
+The collections package provides additional decorators and support for
+authoring custom types. See the :mod:`sqlalchemy.orm.collections` package for
+more information and discussion of advanced usage and Python 2.3-compatible
+decoration options.
+
+Collections API
+~~~~~~~~~~~~~~~
+
+.. autofunction:: attribute_mapped_collection
+
+.. autoclass:: collection
+ :members:
+
+.. autofunction:: collection_adapter
+
+.. autofunction:: column_mapped_collection
+
+.. autofunction:: mapped_collection
+
+.. autoclass:: sqlalchemy.orm.collections.MappedCollection
+ :members:
+
+
diff --git a/doc/build/orm/deprecated.rst b/doc/build/orm/deprecated.rst
new file mode 100644
index 000000000..8388de8d6
--- /dev/null
+++ b/doc/build/orm/deprecated.rst
@@ -0,0 +1 @@
+## In 0.7, move "interfaces" to here. \ No newline at end of file
diff --git a/doc/build/examples.rst b/doc/build/orm/examples.rst
index 821c96952..00b18bc7b 100644
--- a/doc/build/examples.rst
+++ b/doc/build/orm/examples.rst
@@ -23,6 +23,9 @@ Location: /examples/association/
.. automodule:: association
+
+.. _examples_instrumentation:
+
Attribute Instrumentation
-------------------------
diff --git a/doc/build/orm/exceptions.rst b/doc/build/orm/exceptions.rst
new file mode 100644
index 000000000..1dde4248f
--- /dev/null
+++ b/doc/build/orm/exceptions.rst
@@ -0,0 +1,6 @@
+ORM Exceptions
+==============
+
+.. automodule:: sqlalchemy.orm.exc
+ :show-inheritance:
+ :members: \ No newline at end of file
diff --git a/doc/build/reference/ext/associationproxy.rst b/doc/build/orm/extensions/associationproxy.rst
index 68de019a3..4a7ce5dbf 100644
--- a/doc/build/reference/ext/associationproxy.rst
+++ b/doc/build/orm/extensions/associationproxy.rst
@@ -1,7 +1,7 @@
.. _associationproxy:
-associationproxy
-================
+Association Proxy
+=================
.. module:: sqlalchemy.ext.associationproxy
diff --git a/doc/build/reference/ext/declarative.rst b/doc/build/orm/extensions/declarative.rst
index 7372f6bbe..010371314 100644
--- a/doc/build/reference/ext/declarative.rst
+++ b/doc/build/orm/extensions/declarative.rst
@@ -1,4 +1,4 @@
-declarative
+Declarative
===========
.. automodule:: sqlalchemy.ext.declarative
diff --git a/doc/build/reference/ext/horizontal_shard.rst b/doc/build/orm/extensions/horizontal_shard.rst
index 149cf2020..67cd707d8 100644
--- a/doc/build/reference/ext/horizontal_shard.rst
+++ b/doc/build/orm/extensions/horizontal_shard.rst
@@ -1,5 +1,5 @@
-Horizontal Shard
-=================
+Horizontal Sharding
+===================
.. automodule:: sqlalchemy.ext.horizontal_shard
diff --git a/doc/build/orm/extensions/index.rst b/doc/build/orm/extensions/index.rst
new file mode 100644
index 000000000..5033ad5e9
--- /dev/null
+++ b/doc/build/orm/extensions/index.rst
@@ -0,0 +1,18 @@
+.. _plugins:
+.. _sqlalchemy.ext:
+
+ORM Extensions
+==============
+
+SQLAlchemy has a variety of ORM extensions available, which add additional
+functionality to the core behavior.
+
+.. toctree::
+ :maxdepth: 1
+
+ associationproxy
+ declarative
+ orderinglist
+ horizontal_shard
+ sqlsoup
+
diff --git a/doc/build/reference/ext/orderinglist.rst b/doc/build/orm/extensions/orderinglist.rst
index a3581df59..940989ea1 100644
--- a/doc/build/reference/ext/orderinglist.rst
+++ b/doc/build/orm/extensions/orderinglist.rst
@@ -1,5 +1,5 @@
-orderinglist
-============
+Ordering List
+=============
.. automodule:: sqlalchemy.ext.orderinglist
diff --git a/doc/build/reference/ext/sqlsoup.rst b/doc/build/orm/extensions/sqlsoup.rst
index fcc937166..fcc937166 100644
--- a/doc/build/reference/ext/sqlsoup.rst
+++ b/doc/build/orm/extensions/sqlsoup.rst
diff --git a/doc/build/orm/index.rst b/doc/build/orm/index.rst
new file mode 100644
index 000000000..dafff384d
--- /dev/null
+++ b/doc/build/orm/index.rst
@@ -0,0 +1,22 @@
+.. _orm_toplevel:
+
+SQLAlchemy ORM
+===============
+
+.. toctree::
+ :maxdepth: 2
+
+ tutorial
+ mapper_config
+ relationships
+ collections
+ inheritance
+ session
+ query
+ loading
+ interfaces
+ exceptions
+ extensions/index
+ examples
+
+ \ No newline at end of file
diff --git a/doc/build/orm/inheritance.rst b/doc/build/orm/inheritance.rst
new file mode 100644
index 000000000..65bcd06f9
--- /dev/null
+++ b/doc/build/orm/inheritance.rst
@@ -0,0 +1,579 @@
+Mapping Class Inheritance Hierarchies
+======================================
+
+SQLAlchemy supports three forms of inheritance: *single table inheritance*,
+where several types of classes are stored in one table, *concrete table
+inheritance*, where each type of class is stored in its own table, and *joined
+table inheritance*, where the parent/child classes are stored in their own
+tables that are joined together in a select. Whereas support for single and
+joined table inheritance is strong, concrete table inheritance is a less
+common scenario with some particular problems so is not quite as flexible.
+
+When mappers are configured in an inheritance relationship, SQLAlchemy has the
+ability to load elements "polymorphically", meaning that a single query can
+return objects of multiple types.
+
+For the following sections, assume this class relationship:
+
+.. sourcecode:: python+sql
+
+ class Employee(object):
+ def __init__(self, name):
+ self.name = name
+ def __repr__(self):
+ return self.__class__.__name__ + " " + self.name
+
+ class Manager(Employee):
+ def __init__(self, name, manager_data):
+ self.name = name
+ self.manager_data = manager_data
+ def __repr__(self):
+ return self.__class__.__name__ + " " + self.name + " " + self.manager_data
+
+ class Engineer(Employee):
+ def __init__(self, name, engineer_info):
+ self.name = name
+ self.engineer_info = engineer_info
+ def __repr__(self):
+ return self.__class__.__name__ + " " + self.name + " " + self.engineer_info
+
+Joined Table Inheritance
+-------------------------
+
+In joined table inheritance, each class along a particular class's list of
+parents is represented by a unique table. The total set of attributes for a
+particular instance is represented as a join along all tables in its
+inheritance path. Here, we first define a table to represent the ``Employee``
+class. This table will contain a primary key column (or columns), and a column
+for each attribute that's represented by ``Employee``. In this case it's just
+``name``::
+
+ employees = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('type', String(30), nullable=False)
+ )
+
+The table also has a column called ``type``. It is strongly advised in both
+single- and joined- table inheritance scenarios that the root table contains a
+column whose sole purpose is that of the **discriminator**; it stores a value
+which indicates the type of object represented within the row. The column may
+be of any desired datatype. While there are some "tricks" to work around the
+requirement that there be a discriminator column, they are more complicated to
+configure when one wishes to load polymorphically.
+
+Next we define individual tables for each of ``Engineer`` and ``Manager``,
+which contain columns that represent the attributes unique to the subclass
+they represent. Each table also must contain a primary key column (or
+columns), and in most cases a foreign key reference to the parent table. It is
+standard practice that the same column is used for both of these roles, and
+that the column is also named the same as that of the parent table. However
+this is optional in SQLAlchemy; separate columns may be used for primary key
+and parent-relationship, the column may be named differently than that of the
+parent, and even a custom join condition can be specified between parent and
+child tables instead of using a foreign key::
+
+ engineers = Table('engineers', metadata,
+ Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
+ Column('engineer_info', String(50)),
+ )
+
+ managers = Table('managers', metadata,
+ Column('employee_id', Integer, ForeignKey('employees.employee_id'), primary_key=True),
+ Column('manager_data', String(50)),
+ )
+
+One natural effect of the joined table inheritance configuration is that the
+identity of any mapped object can be determined entirely from the base table.
+This has obvious advantages, so SQLAlchemy always considers the primary key
+columns of a joined inheritance class to be those of the base table only,
+unless otherwise manually configured. In other words, the ``employee_id``
+column of both the ``engineers`` and ``managers`` table is not used to locate
+the ``Engineer`` or ``Manager`` object itself - only the value in
+``employees.employee_id`` is considered, and the primary key in this case is
+non-composite. ``engineers.employee_id`` and ``managers.employee_id`` are
+still of course critical to the proper operation of the pattern overall as
+they are used to locate the joined row, once the parent row has been
+determined, either through a distinct SELECT statement or all at once within a
+JOIN.
+
+We then configure mappers as usual, except we use some additional arguments to
+indicate the inheritance relationship, the polymorphic discriminator column,
+and the **polymorphic identity** of each class; this is the value that will be
+stored in the polymorphic discriminator column.
+
+.. sourcecode:: python+sql
+
+ mapper(Employee, employees, polymorphic_on=employees.c.type, polymorphic_identity='employee')
+ mapper(Engineer, engineers, inherits=Employee, polymorphic_identity='engineer')
+ mapper(Manager, managers, inherits=Employee, polymorphic_identity='manager')
+
+And that's it. Querying against ``Employee`` will return a combination of
+``Employee``, ``Engineer`` and ``Manager`` objects. Newly saved ``Engineer``,
+``Manager``, and ``Employee`` objects will automatically populate the
+``employees.type`` column with ``engineer``, ``manager``, or ``employee``, as
+appropriate.
+
+Basic Control of Which Tables are Queried
+++++++++++++++++++++++++++++++++++++++++++
+
+The :func:`~sqlalchemy.orm.query.Query.with_polymorphic` method of
+:class:`~sqlalchemy.orm.query.Query` affects the specific subclass tables
+which the Query selects from. Normally, a query such as this:
+
+.. sourcecode:: python+sql
+
+ session.query(Employee).all()
+
+...selects only from the ``employees`` table. When loading fresh from the
+database, our joined-table setup will query from the parent table only, using
+SQL such as this:
+
+.. sourcecode:: python+sql
+
+ {opensql}
+ SELECT employees.employee_id AS employees_employee_id, employees.name AS employees_name, employees.type AS employees_type
+ FROM employees
+ []
+
+As attributes are requested from those ``Employee`` objects which are
+represented in either the ``engineers`` or ``managers`` child tables, a second
+load is issued for the columns in that related row, if the data was not
+already loaded. So above, after accessing the objects you'd see further SQL
+issued along the lines of:
+
+.. sourcecode:: python+sql
+
+ {opensql}
+ SELECT managers.employee_id AS managers_employee_id, managers.manager_data AS managers_manager_data
+ FROM managers
+ WHERE ? = managers.employee_id
+ [5]
+ SELECT engineers.employee_id AS engineers_employee_id, engineers.engineer_info AS engineers_engineer_info
+ FROM engineers
+ WHERE ? = engineers.employee_id
+ [2]
+
+This behavior works well when issuing searches for small numbers of items,
+such as when using :meth:`.Query.get`, since the full range of joined tables are not
+pulled in to the SQL statement unnecessarily. But when querying a larger span
+of rows which are known to be of many types, you may want to actively join to
+some or all of the joined tables. The ``with_polymorphic`` feature of
+:class:`~sqlalchemy.orm.query.Query` and ``mapper`` provides this.
+
+Telling our query to polymorphically load ``Engineer`` and ``Manager``
+objects:
+
+.. sourcecode:: python+sql
+
+ query = session.query(Employee).with_polymorphic([Engineer, Manager])
+
+produces a query which joins the ``employees`` table to both the ``engineers`` and ``managers`` tables like the following:
+
+.. sourcecode:: python+sql
+
+ query.all()
+ {opensql}
+ SELECT employees.employee_id AS employees_employee_id, engineers.employee_id AS engineers_employee_id, managers.employee_id AS managers_employee_id, employees.name AS employees_name, employees.type AS employees_type, engineers.engineer_info AS engineers_engineer_info, managers.manager_data AS managers_manager_data
+ FROM employees LEFT OUTER JOIN engineers ON employees.employee_id = engineers.employee_id LEFT OUTER JOIN managers ON employees.employee_id = managers.employee_id
+ []
+
+:func:`~sqlalchemy.orm.query.Query.with_polymorphic` accepts a single class or
+mapper, a list of classes/mappers, or the string ``'*'`` to indicate all
+subclasses:
+
+.. sourcecode:: python+sql
+
+ # join to the engineers table
+ query.with_polymorphic(Engineer)
+
+ # join to the engineers and managers tables
+ query.with_polymorphic([Engineer, Manager])
+
+ # join to all subclass tables
+ query.with_polymorphic('*')
+
+It also accepts a second argument ``selectable`` which replaces the automatic
+join creation and instead selects directly from the selectable given. This
+feature is normally used with "concrete" inheritance, described later, but can
+be used with any kind of inheritance setup in the case that specialized SQL
+should be used to load polymorphically:
+
+.. sourcecode:: python+sql
+
+ # custom selectable
+ query.with_polymorphic([Engineer, Manager], employees.outerjoin(managers).outerjoin(engineers))
+
+:func:`~sqlalchemy.orm.query.Query.with_polymorphic` is also needed
+when you wish to add filter criteria that are specific to one or more
+subclasses; it makes the subclasses' columns available to the WHERE clause:
+
+.. sourcecode:: python+sql
+
+ session.query(Employee).with_polymorphic([Engineer, Manager]).\
+ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))
+
+Note that if you only need to load a single subtype, such as just the
+``Engineer`` objects, :func:`~sqlalchemy.orm.query.Query.with_polymorphic` is
+not needed since you would query against the ``Engineer`` class directly.
+
+The mapper also accepts ``with_polymorphic`` as a configurational argument so
+that the joined-style load will be issued automatically. This argument may be
+the string ``'*'``, a list of classes, or a tuple consisting of either,
+followed by a selectable.
+
+.. sourcecode:: python+sql
+
+ mapper(Employee, employees, polymorphic_on=employees.c.type, \
+ polymorphic_identity='employee', with_polymorphic='*')
+ mapper(Engineer, engineers, inherits=Employee, polymorphic_identity='engineer')
+ mapper(Manager, managers, inherits=Employee, polymorphic_identity='manager')
+
+The above mapping will produce a query similar to that of
+``with_polymorphic('*')`` for every query of ``Employee`` objects.
+
+Using :func:`~sqlalchemy.orm.query.Query.with_polymorphic` with
+:class:`~sqlalchemy.orm.query.Query` will override the mapper-level
+``with_polymorphic`` setting.
+
+Advanced Control of Which Tables are Queried
++++++++++++++++++++++++++++++++++++++++++++++
+
+The :meth:`.Query.with_polymorphic` method and configuration works fine for
+simplistic scenarios. However, it currently does not work with any
+:class:`.Query` that selects against individual columns or against multiple
+classes - it also has to be called at the outset of a query.
+
+For total control of how :class:`.Query` joins along inheritance relationships,
+use the :class:`.Table` objects directly and construct joins manually. For example, to
+query the name of employees with particular criterion::
+
+ session.query(Employee.name).\
+ outerjoin((engineer, engineer.c.employee_id==Employee.employee_id)).\
+ outerjoin((manager, manager.c.employee_id==Employee.employee_id)).\
+ filter(or_(Engineer.engineer_info=='w', Manager.manager_data=='q'))
+
+The base table, in this case the "employees" table, isn't always necessary. A
+SQL query is always more efficient with fewer joins. Here, if we wanted to
+just load information specific to managers or engineers, we can instruct
+:class:`.Query` to use only those tables. The ``FROM`` clause is determined by
+what's specified in the :meth:`.Session.query`, :meth:`.Query.filter`, or
+:meth:`.Query.select_from` methods::
+
+ session.query(Manager.manager_data).select_from(manager)
+
+ session.query(engineer.c.id).filter(engineer.c.engineer_info==manager.c.manager_data)
+
+Creating Joins to Specific Subtypes
++++++++++++++++++++++++++++++++++++
+
+The :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` method is a
+helper which allows the construction of joins along
+:func:`~sqlalchemy.orm.relationship` paths while narrowing the criterion to
+specific subclasses. Suppose the ``employees`` table represents a collection
+of employees which are associated with a ``Company`` object. We'll add a
+``company_id`` column to the ``employees`` table and a new table
+``companies``:
+
+.. sourcecode:: python+sql
+
+ companies = Table('companies', metadata,
+ Column('company_id', Integer, primary_key=True),
+ Column('name', String(50))
+ )
+
+ employees = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('type', String(30), nullable=False),
+ Column('company_id', Integer, ForeignKey('companies.company_id'))
+ )
+
+ class Company(object):
+ pass
+
+ mapper(Company, companies, properties={
+ 'employees': relationship(Employee)
+ })
+
+When querying from ``Company`` onto the ``Employee`` relationship, the
+``join()`` method as well as the ``any()`` and ``has()`` operators will create
+a join from ``companies`` to ``employees``, without including ``engineers`` or
+``managers`` in the mix. If we wish to have criterion which is specifically
+against the ``Engineer`` class, we can tell those methods to join or subquery
+against the joined table representing the subclass using the
+:func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` operator::
+
+ session.query(Company).join(Company.employees.of_type(Engineer)).filter(Engineer.engineer_info=='someinfo')
+
+A longhand version of this would involve spelling out the full target
+selectable within a 2-tuple::
+
+ session.query(Company).join((employees.join(engineers), Company.employees)).filter(Engineer.engineer_info=='someinfo')
+
+Currently, :func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` accepts a
+single class argument. It may be expanded later on to accept multiple classes.
+For now, to join to any group of subclasses, the longhand notation allows this
+flexibility:
+
+.. sourcecode:: python+sql
+
+ session.query(Company).join((employees.outerjoin(engineers).outerjoin(managers), Company.employees)).\
+ filter(or_(Engineer.engineer_info=='someinfo', Manager.manager_data=='somedata'))
+
+The ``any()`` and ``has()`` operators also can be used with
+:func:`~sqlalchemy.orm.interfaces.PropComparator.of_type` when the embedded
+criterion is in terms of a subclass:
+
+.. sourcecode:: python+sql
+
+ session.query(Company).filter(Company.employees.of_type(Engineer).any(Engineer.engineer_info=='someinfo')).all()
+
+Note that the ``any()`` and ``has()`` are both shorthand for a correlated
+EXISTS query. To build one by hand looks like:
+
+.. sourcecode:: python+sql
+
+ session.query(Company).filter(
+ exists([1],
+ and_(Engineer.engineer_info=='someinfo', employees.c.company_id==companies.c.company_id),
+ from_obj=employees.join(engineers)
+ )
+ ).all()
+
+The EXISTS subquery above selects from the join of ``employees`` to
+``engineers``, and also specifies criterion which correlates the EXISTS
+subselect back to the parent ``companies`` table.
+
+Single Table Inheritance
+------------------------
+
+Single table inheritance is where the attributes of the base class as well as
+all subclasses are represented within a single table. A column is present in
+the table for every attribute mapped to the base class and all subclasses; the
+columns which correspond to a single subclass are nullable. This configuration
+looks much like joined-table inheritance except there's only one table. In
+this case, a ``type`` column is required, as there would be no other way to
+discriminate between classes. The table is specified in the base mapper only;
+for the inheriting classes, leave their ``table`` parameter blank:
+
+.. sourcecode:: python+sql
+
+ employees_table = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('manager_data', String(50)),
+ Column('engineer_info', String(50)),
+ Column('type', String(20), nullable=False)
+ )
+
+ employee_mapper = mapper(Employee, employees_table, \
+ polymorphic_on=employees_table.c.type, polymorphic_identity='employee')
+ manager_mapper = mapper(Manager, inherits=employee_mapper, polymorphic_identity='manager')
+ engineer_mapper = mapper(Engineer, inherits=employee_mapper, polymorphic_identity='engineer')
+
+Note that the mappers for the derived classes Manager and Engineer omit the
+specification of their associated table, as it is inherited from the
+employee_mapper. Omitting the table specification for derived mappers in
+single-table inheritance is required.
+
+.. _concrete_inheritance:
+
+Concrete Table Inheritance
+--------------------------
+
+This form of inheritance maps each class to a distinct table, as below:
+
+.. sourcecode:: python+sql
+
+ employees_table = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ )
+
+ managers_table = Table('managers', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('manager_data', String(50)),
+ )
+
+ engineers_table = Table('engineers', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('engineer_info', String(50)),
+ )
+
+Notice in this case there is no ``type`` column. If polymorphic loading is not
+required, there's no advantage to using ``inherits`` here; you just define a
+separate mapper for each class.
+
+.. sourcecode:: python+sql
+
+ mapper(Employee, employees_table)
+ mapper(Manager, managers_table)
+ mapper(Engineer, engineers_table)
+
+To load polymorphically, the ``with_polymorphic`` argument is required, along
+with a selectable indicating how rows should be loaded. In this case we must
+construct a UNION of all three tables. SQLAlchemy includes a helper function
+to create these called :func:`~sqlalchemy.orm.util.polymorphic_union`, which
+will map all the different columns into a structure of selects with the same
+numbers and names of columns, and also generate a virtual ``type`` column for
+each subselect:
+
+.. sourcecode:: python+sql
+
+ pjoin = polymorphic_union({
+ 'employee': employees_table,
+ 'manager': managers_table,
+ 'engineer': engineers_table
+ }, 'type', 'pjoin')
+
+ employee_mapper = mapper(Employee, employees_table, with_polymorphic=('*', pjoin), \
+ polymorphic_on=pjoin.c.type, polymorphic_identity='employee')
+ manager_mapper = mapper(Manager, managers_table, inherits=employee_mapper, \
+ concrete=True, polymorphic_identity='manager')
+ engineer_mapper = mapper(Engineer, engineers_table, inherits=employee_mapper, \
+ concrete=True, polymorphic_identity='engineer')
+
+Upon select, the polymorphic union produces a query like this:
+
+.. sourcecode:: python+sql
+
+ session.query(Employee).all()
+ {opensql}
+ SELECT pjoin.type AS pjoin_type, pjoin.manager_data AS pjoin_manager_data, pjoin.employee_id AS pjoin_employee_id,
+ pjoin.name AS pjoin_name, pjoin.engineer_info AS pjoin_engineer_info
+ FROM (
+ SELECT employees.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, employees.name AS name,
+ CAST(NULL AS VARCHAR(50)) AS engineer_info, 'employee' AS type
+ FROM employees
+ UNION ALL
+ SELECT managers.employee_id AS employee_id, managers.manager_data AS manager_data, managers.name AS name,
+ CAST(NULL AS VARCHAR(50)) AS engineer_info, 'manager' AS type
+ FROM managers
+ UNION ALL
+ SELECT engineers.employee_id AS employee_id, CAST(NULL AS VARCHAR(50)) AS manager_data, engineers.name AS name,
+ engineers.engineer_info AS engineer_info, 'engineer' AS type
+ FROM engineers
+ ) AS pjoin
+ []
+
+Using Relationships with Inheritance
+------------------------------------
+
+Both joined-table and single table inheritance scenarios produce mappings
+which are usable in :func:`~sqlalchemy.orm.relationship` functions; that is,
+it's possible to map a parent object to a child object which is polymorphic.
+Similarly, inheriting mappers can have :func:`~sqlalchemy.orm.relationship`
+objects of their own at any level, which are inherited to each child class.
+The only requirement for relationships is that there is a table relationship
+between parent and child. An example is the following modification to the
+joined table inheritance example, which sets a bi-directional relationship
+between ``Employee`` and ``Company``:
+
+.. sourcecode:: python+sql
+
+ employees_table = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('company_id', Integer, ForeignKey('companies.company_id'))
+ )
+
+ companies = Table('companies', metadata,
+ Column('company_id', Integer, primary_key=True),
+ Column('name', String(50)))
+
+ class Company(object):
+ pass
+
+ mapper(Company, companies, properties={
+ 'employees': relationship(Employee, backref='company')
+ })
+
+Relationships with Concrete Inheritance
++++++++++++++++++++++++++++++++++++++++
+
+In a concrete inheritance scenario, mapping relationships is more challenging
+since the distinct classes do not share a table. In this case, you *can*
+establish a relationship from parent to child if a join condition can be
+constructed from parent to child, if each child table contains a foreign key
+to the parent:
+
+.. sourcecode:: python+sql
+
+ companies = Table('companies', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('name', String(50)))
+
+ employees_table = Table('employees', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('company_id', Integer, ForeignKey('companies.id'))
+ )
+
+ managers_table = Table('managers', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('manager_data', String(50)),
+ Column('company_id', Integer, ForeignKey('companies.id'))
+ )
+
+ engineers_table = Table('engineers', metadata,
+ Column('employee_id', Integer, primary_key=True),
+ Column('name', String(50)),
+ Column('engineer_info', String(50)),
+ Column('company_id', Integer, ForeignKey('companies.id'))
+ )
+
+ mapper(Employee, employees_table,
+ with_polymorphic=('*', pjoin),
+ polymorphic_on=pjoin.c.type,
+ polymorphic_identity='employee')
+
+ mapper(Manager, managers_table,
+ inherits=employee_mapper,
+ concrete=True,
+ polymorphic_identity='manager')
+
+ mapper(Engineer, engineers_table,
+ inherits=employee_mapper,
+ concrete=True,
+ polymorphic_identity='engineer')
+
+ mapper(Company, companies, properties={
+ 'employees': relationship(Employee)
+ })
+
+The big limitation with concrete table inheritance is that
+:func:`~sqlalchemy.orm.relationship` objects placed on each concrete mapper do
+**not** propagate to child mappers. If you want to have the same
+:func:`~sqlalchemy.orm.relationship` objects set up on all concrete mappers,
+they must be configured manually on each. To configure back references in such
+a configuration the ``back_populates`` keyword may be used instead of
+``backref``, such as below where both ``A(object)`` and ``B(A)``
+bidirectionally reference ``C``::
+
+ ajoin = polymorphic_union({
+ 'a':a_table,
+ 'b':b_table
+ }, 'type', 'ajoin')
+
+ mapper(A, a_table, with_polymorphic=('*', ajoin),
+ polymorphic_on=ajoin.c.type, polymorphic_identity='a',
+ properties={
+ 'some_c':relationship(C, back_populates='many_a')
+ })
+ mapper(B, b_table,inherits=A, concrete=True,
+ polymorphic_identity='b',
+ properties={
+ 'some_c':relationship(C, back_populates='many_a')
+ })
+ mapper(C, c_table, properties={
+ 'many_a':relationship(A, collection_class=set, back_populates='some_c'),
+ })
+
+Using Inheritance with Declarative
+-----------------------------------
+
+Declarative makes inheritance configuration more intuitive. See the docs at :ref:`declarative_inheritance`.
diff --git a/doc/build/orm/interfaces.rst b/doc/build/orm/interfaces.rst
new file mode 100644
index 000000000..321660ac9
--- /dev/null
+++ b/doc/build/orm/interfaces.rst
@@ -0,0 +1,107 @@
+.. _interfaces_orm_toplevel:
+.. _events_orm_toplevel:
+
+ORM Event Interfaces
+====================
+
+.. module:: sqlalchemy.orm.interfaces
+
+This section describes the various categories of events which can be intercepted
+within the SQLAlchemy ORM.
+
+For non-ORM event documentation, see :ref:`interfaces_core_toplevel`.
+
+A new version of this API with a significantly more flexible and consistent
+interface will be available in version 0.7.
+
+Mapper Events
+-----------------
+
+To use :class:`.MapperExtension`, make your own subclass of it and just send it off to a mapper::
+
+ from sqlalchemy.orm.interfaces import MapperExtension
+
+ class MyExtension(MapperExtension):
+ def before_insert(self, mapper, connection, instance):
+ print "instance %s before insert !" % instance
+
+ m = mapper(User, users_table, extension=MyExtension())
+
+Multiple extensions will be chained together and processed in order; they are specified as a list::
+
+ m = mapper(User, users_table, extension=[ext1, ext2, ext3])
+
+.. autoclass:: MapperExtension
+ :members:
+
+Session Events
+-----------------
+
+The :class:`.SessionExtension` applies plugin points for :class:`.Session` objects::
+
+ from sqlalchemy.orm.interfaces import SessionExtension
+
+ class MySessionExtension(SessionExtension):
+ def before_commit(self, session):
+ print "before commit!"
+
+ Session = sessionmaker(extension=MySessionExtension())
+
+The same :class:`~sqlalchemy.orm.interfaces.SessionExtension` instance can be
+used with any number of sessions.
+
+.. autoclass:: SessionExtension
+ :members:
+
+Attribute Events
+--------------------
+
+:class:`.AttributeExtension` is used to listen for set, remove, and append
+events on individual mapped attributes. It is established on an individual
+mapped attribute using the ``extension`` argument, available on
+:func:`.column_property`, :func:`.relationship`, and others::
+
+ from sqlalchemy.orm.interfaces import AttributeExtension
+ from sqlalchemy.orm import mapper, relationship, column_property
+
+ class MyAttrExt(AttributeExtension):
+ def append(self, state, value, initiator):
+ print "append event !"
+ return value
+
+ def set(self, state, value, oldvalue, initiator):
+ print "set event !"
+ return value
+
+ mapper(SomeClass, sometable, properties={
+ 'foo':column_property(sometable.c.foo, extension=MyAttrExt()),
+ 'bar':relationship(Bar, extension=MyAttrExt())
+ })
+
+Note that the :class:`AttributeExtension` methods
+:meth:`~.AttributeExtension.append` and :meth:`~.AttributeExtension.set` need
+to return the ``value`` parameter. The returned value is used as the effective
+value, and allows the extension to change what is ultimately persisted.
+
+.. autoclass:: AttributeExtension
+ :members:
+
+Instrumentation Events and Re-implementation
+---------------------------------------------
+
+:class:`.InstrumentationManager` can be subclassed in order to receive class
+instrumentation events as well as to change how class instrumentation
+proceeds. This class exists for the purposes of integration with other object
+management frameworks which would like to entirely modify the instrumentation
+methodology of the ORM, and is not intended for regular usage. One possible
+exception is the :meth:`.InstrumentationManager.post_configure_attribute`
+method, which can be useful for adding extensions to all mapped attributes,
+though a much better way to do this will be available in a future release of
+SQLAlchemy.
+
+For an example of :class:`.InstrumentationManager`, see the example
+:ref:`examples_instrumentation`.
+
+.. autoclass:: InstrumentationManager
+ :members:
+ :undoc-members:
diff --git a/doc/build/orm/loading.rst b/doc/build/orm/loading.rst
new file mode 100644
index 000000000..afe4ff6b6
--- /dev/null
+++ b/doc/build/orm/loading.rst
@@ -0,0 +1,356 @@
+.. _loading_toplevel:
+
+.. currentmodule:: sqlalchemy.orm
+
+Relationship Loading Techniques
+===============================
+
+A big part of SQLAlchemy is providing a wide range of control over how related objects get loaded when querying. This behavior
+can be configured at mapper construction time using the ``lazy`` parameter to the :func:`.relationship` function,
+as well as by using options with the :class:`.Query` object.
+
+Using Loader Strategies: Lazy Loading, Eager Loading
+----------------------------------------------------
+
+By default, all inter-object relationships are **lazy loading**. The scalar or
+collection attribute associated with a :func:`~sqlalchemy.orm.relationship`
+contains a trigger which fires the first time the attribute is accessed. This
+trigger, in all but one case, issues a SQL call at the point of access
+in order to load the related object or objects:
+
+.. sourcecode:: python+sql
+
+ {sql}>>> jack.addresses
+ SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address,
+ addresses.user_id AS addresses_user_id
+ FROM addresses
+ WHERE ? = addresses.user_id
+ [5]
+ {stop}[<Address(u'jack@google.com')>, <Address(u'j25@yahoo.com')>]
+
+The one case where SQL is not emitted is for a simple many-to-one relationship, when
+the related object can be identified by its primary key alone and that object is already
+present in the current :class:`.Session`.
+
+This default behavior of "load upon attribute access" is known as "lazy" or
+"select" loading - the name "select" because a "SELECT" statement is typically emitted
+when the attribute is first accessed.
+
+In the :ref:`ormtutorial_toplevel`, we introduced the concept of **Eager
+Loading**. We used an ``option`` in conjunction with the
+:class:`~sqlalchemy.orm.query.Query` object in order to indicate that a
+relationship should be loaded at the same time as the parent, within a single
+SQL query. This option is known as :func:`.joinedload`:
+
+.. sourcecode:: python+sql
+
+ {sql}>>> jack = session.query(User).options(joinedload('addresses')).filter_by(name='jack').all() #doctest: +NORMALIZE_WHITESPACE
+ SELECT addresses_1.id AS addresses_1_id, addresses_1.email_address AS addresses_1_email_address,
+ addresses_1.user_id AS addresses_1_user_id, users.id AS users_id, users.name AS users_name,
+ users.fullname AS users_fullname, users.password AS users_password
+ FROM users LEFT OUTER JOIN addresses AS addresses_1 ON users.id = addresses_1.user_id
+ WHERE users.name = ?
+ ['jack']
+
+
+In addition to "joined eager loading", a second option for eager loading
+exists, called "subquery eager loading". This kind of eager loading emits an
+additional SQL statement for each collection requested, aggregated across all
+parent objects:
+
+.. sourcecode:: python+sql
+
+ {sql}>>> jack = session.query(User).options(subqueryload('addresses')).filter_by(name='jack').all()
+ SELECT users.id AS users_id, users.name AS users_name, users.fullname AS users_fullname,
+ users.password AS users_password
+ FROM users
+ WHERE users.name = ?
+ ('jack',)
+ SELECT addresses.id AS addresses_id, addresses.email_address AS addresses_email_address,
+ addresses.user_id AS addresses_user_id, anon_1.users_id AS anon_1_users_id
+ FROM (SELECT users.id AS users_id
+ FROM users
+ WHERE users.name = ?) AS anon_1 JOIN addresses ON anon_1.users_id = addresses.user_id
+ ORDER BY anon_1.users_id, addresses.id
+ ('jack',)
+
+The default **loader strategy** for any :func:`~sqlalchemy.orm.relationship`
+is configured by the ``lazy`` keyword argument, which defaults to ``select`` - this indicates
+that a "select" statement is emitted when the attribute is first accessed.
+Below we set it as ``joined`` so that the ``children`` relationship is eager
+loading, using a join:
+
+.. sourcecode:: python+sql
+
+ # load the 'children' collection using LEFT OUTER JOIN
+ mapper(Parent, parent_table, properties={
+ 'children': relationship(Child, lazy='joined')
+ })
+
+We can also set it to eagerly load using a second query for all collections,
+using ``subquery``:
+
+.. sourcecode:: python+sql
+
+ # load the 'children' attribute using a join to a subquery
+ mapper(Parent, parent_table, properties={
+ 'children': relationship(Child, lazy='subquery')
+ })
+
+When querying, all three choices of loader strategy are available on a
+per-query basis, using the :func:`~sqlalchemy.orm.joinedload`,
+:func:`~sqlalchemy.orm.subqueryload` and :func:`~sqlalchemy.orm.lazyload`
+query options:
+
+.. sourcecode:: python+sql
+
+ # set children to load lazily
+ session.query(Parent).options(lazyload('children')).all()
+
+ # set children to load eagerly with a join
+ session.query(Parent).options(joinedload('children')).all()
+
+ # set children to load eagerly with a second statement
+ session.query(Parent).options(subqueryload('children')).all()
+
+To reference a relationship that is deeper than one level, separate the names by periods:
+
+.. sourcecode:: python+sql
+
+ session.query(Parent).options(joinedload('foo.bar.bat')).all()
+
+When using dot-separated names with :func:`~sqlalchemy.orm.joinedload` or
+:func:`~sqlalchemy.orm.subqueryload`, the option applies **only** to the actual
+attribute named, and **not** its ancestors. For example, suppose a mapping
+from ``A`` to ``B`` to ``C``, where the relationships, named ``atob`` and
+``btoc``, are both lazy-loading. A statement like the following:
+
+.. sourcecode:: python+sql
+
+ session.query(A).options(joinedload('atob.btoc')).all()
+
+will load only ``A`` objects to start. When the ``atob`` attribute on each ``A`` is accessed, the returned ``B`` objects will *eagerly* load their ``C`` objects.
+
+Therefore, to modify the eager load to load both ``atob`` as well as ``btoc``, place joinedloads for both:
+
+.. sourcecode:: python+sql
+
+ session.query(A).options(joinedload('atob'), joinedload('atob.btoc')).all()
+
+or more simply just use :func:`~sqlalchemy.orm.joinedload_all` or :func:`~sqlalchemy.orm.subqueryload_all`:
+
+.. sourcecode:: python+sql
+
+ session.query(A).options(joinedload_all('atob.btoc')).all()
+
+There are two other loader strategies available, **dynamic loading** and **no loading**; these are described in :ref:`largecollections`.
+
+The Zen of Eager Loading
+-------------------------
+
+The philosophy behind loader strategies is that any set of loading schemes can be
+applied to a particular query, and *the results don't change* - only the number
+of SQL statements required to fully load related objects and collections changes. A particular
+query might start out using all lazy loads. After using it in context, it might be revealed
+that particular attributes or collections are always accessed, and that it would be more
+efficient to change the loader strategy for these. The strategy can be changed with no other
+modifications to the query, the results will remain identical, but fewer SQL statements would be emitted.
+In theory (and pretty much in practice), nothing you can do to the :class:`.Query` would make it load
+a different set of primary or related objects based on a change in loader strategy.
+
+The way eager loading does this, and in particular how :func:`joinedload`
+works, is that it creates an anonymous alias of all the joins it adds to your
+query, so that they can't be referenced by other parts of the query. If the
+query contains a DISTINCT, or a limit or offset, the statement is first
+wrapped inside a subquery, and joins are applied to that. As the user, you
+don't have access to these aliases or subqueries, and you cannot affect what
+data they will load at query time - a typical beginner misunderstanding is
+that adding a :meth:`.Query.order_by`, naming the joined relationship, would
+change the order of the collection, or that the entries in the collection as
+it is loaded could be affected by :meth:`.Query.filter`. Not the case! If
+you'd like to join from one table to another, filtering or ordering on the
+joined result, you'd use :meth:`.Query.join`. If you then wanted that joined
+result to populate itself into a related collection, this is also available,
+via :func:`.contains_eager` option - see :ref:`contains_eager`.
+
+What Kind of Loading to Use?
+----------------------------
+
+Which type of loading to use typically comes down to optimizing the tradeoff
+between number of SQL executions, complexity of SQL emitted, and amount of
+data fetched. Let's take two examples, a :func:`~sqlalchemy.orm.relationship`
+which references a collection, and a :func:`~sqlalchemy.orm.relationship` that
+references a scalar many-to-one reference.
+
+* One to Many Collection
+
+ * When using the default lazy loading, if you load 100 objects, and then access a collection on each of
+ them, a total of 101 SQL statements will be emitted, although each statement will typically be a
+ simple SELECT without any joins.
+
+ * When using joined loading, the load of 100 objects and their collections will emit only one SQL
+ statement. However, the
+ total number of rows fetched will be equal to the sum of the size of all the collections, plus one
+ extra row for each parent object that has an empty collection. Each row will also contain the full
+ set of columns represented by the parents, repeated for each collection item - SQLAlchemy does not
+ re-fetch these columns other than those of the primary key, however most DBAPIs (with some
+ exceptions) will transmit the full data of each parent over the wire to the client connection in
+ any case. Therefore joined eager loading only makes sense when the size of the collections is
+ relatively small. The LEFT OUTER JOIN can also be performance intensive compared to an INNER JOIN.
+
+ * When using subquery loading, the load of 100 objects will emit two SQL statements. The second
+ statement will fetch a total number of rows equal to the sum of the size of all collections. An
+ INNER JOIN is used, and a minimum of parent columns are requested, only the primary keys. So a
+ subquery load makes sense when the collections are larger.
+
+ * When multiple levels of depth are used with joined or subquery loading, loading collections-within-
+ collections will multiply the total number of rows fetched in a cartesian fashion. Both forms
+ of eager loading always join from the original parent class.
+
+* Many to One Reference
+
+ * When using the default lazy loading, a load of 100 objects will, as in the collection case,
+ emit as many as 101 SQL statements. However, there is a significant exception to this, in that
+ if the many-to-one reference is a simple foreign key reference to the target's primary key, each
+ reference will be checked first in the current identity map using ``query.get()``. So here,
+ if the collection of objects references a relatively small set of target objects, or the full set
+ of possible target objects have already been loaded into the session and are strongly referenced,
+ using the default of ``lazy='select'`` is by far the most efficient way to go.
+
+ * When using joined loading, the load of 100 objects will emit only one SQL statement. The join
+ will be a LEFT OUTER JOIN, and the total number of rows will be equal to 100 in all cases.
+ If you know that each parent definitely has a child (i.e. the foreign
+ key reference is NOT NULL), the joined load can be configured with ``innerjoin=True``, which is
+ usually specified within the :func:`~sqlalchemy.orm.relationship`. For a load of objects where
+ there are many possible target references which may have not been loaded already, joined loading
+ with an INNER JOIN is extremely efficient.
+
+ * Subquery loading will issue a second load for all the child objects, so for a load of 100 objects
+ there would be two SQL statements emitted. There's probably not much advantage here over
+ joined loading, however, except perhaps that subquery loading can use an INNER JOIN in all cases
+ whereas joined loading requires that the foreign key is NOT NULL.
+
+.. _contains_eager:
+
+Routing Explicit Joins/Statements into Eagerly Loaded Collections
+------------------------------------------------------------------
+
+The behavior of :func:`~sqlalchemy.orm.joinedload()` is such that joins are
+created automatically, the results of which are routed into collections and
+scalar references on loaded objects. It is often the case that a query already
+includes the necessary joins which represent a particular collection or scalar
+reference, and the joins added by the joinedload feature are redundant - yet
+you'd still like the collections/references to be populated.
+
+For this SQLAlchemy supplies the :func:`~sqlalchemy.orm.contains_eager()`
+option. This option is used in the same manner as the
+:func:`~sqlalchemy.orm.joinedload()` option except it is assumed that the
+:class:`~sqlalchemy.orm.query.Query` will specify the appropriate joins
+explicitly. Below it's used with a ``from_statement`` load::
+
+ # mapping is the users->addresses mapping
+ mapper(User, users_table, properties={
+ 'addresses': relationship(Address, addresses_table)
+ })
+
+ # define a query on USERS with an outer join to ADDRESSES
+ statement = users_table.outerjoin(addresses_table).select().apply_labels()
+
+ # construct a Query object which expects the "addresses" results
+ query = session.query(User).options(contains_eager('addresses'))
+
+ # get results normally
+ r = query.from_statement(statement)
+
+It works just as well with an inline ``Query.join()`` or
+``Query.outerjoin()``::
+
+ session.query(User).outerjoin(User.addresses).options(contains_eager(User.addresses)).all()
+
+If the "eager" portion of the statement is "aliased", the ``alias`` keyword
+argument to :func:`~sqlalchemy.orm.contains_eager` may be used to indicate it.
+This is a string alias name or reference to an actual
+:class:`~sqlalchemy.sql.expression.Alias` (or other selectable) object:
+
+.. sourcecode:: python+sql
+
+ # use an alias of the Address entity
+ adalias = aliased(Address)
+
+ # construct a Query object which expects the "addresses" results
+ query = session.query(User).\
+ outerjoin((adalias, User.addresses)).\
+ options(contains_eager(User.addresses, alias=adalias))
+
+ # get results normally
+ {sql}r = query.all()
+ SELECT users.user_id AS users_user_id, users.user_name AS users_user_name, adalias.address_id AS adalias_address_id,
+ adalias.user_id AS adalias_user_id, adalias.email_address AS adalias_email_address, (...other columns...)
+ FROM users LEFT OUTER JOIN email_addresses AS email_addresses_1 ON users.user_id = email_addresses_1.user_id
+
+The ``alias`` argument is used only as a source of columns to match up to the
+result set. You can use it even to match up the result to arbitrary label
+names in a string SQL statement, by passing a selectable() which links those
+labels to the mapped :class:`~sqlalchemy.schema.Table`::
+
+ # label the columns of the addresses table
+ eager_columns = select([
+ addresses.c.address_id.label('a1'),
+ addresses.c.email_address.label('a2'),
+ addresses.c.user_id.label('a3')])
+
+ # select from a raw SQL statement which uses those label names for the
+ # addresses table. contains_eager() matches them up.
+ query = session.query(User).\
+ from_statement("select users.*, addresses.address_id as a1, "
+ "addresses.email_address as a2, addresses.user_id as a3 "
+ "from users left outer join addresses on users.user_id=addresses.user_id").\
+ options(contains_eager(User.addresses, alias=eager_columns))
+
+The path given as the argument to :func:`~sqlalchemy.orm.contains_eager` needs
+to be a full path from the starting entity. For example if we were loading
+``Users->orders->Order->items->Item``, the string version would look like::
+
+ query(User).options(contains_eager('orders', 'items'))
+
+Or using the class-bound descriptor::
+
+ query(User).options(contains_eager(User.orders, Order.items))
+
+A variant on :func:`~sqlalchemy.orm.contains_eager` is the
+``contains_alias()`` option, which is used in the rare case that the parent
+object is loaded from an alias within a user-defined SELECT statement::
+
+ # define an aliased UNION called 'ulist'
+ statement = users.select(users.c.user_id==7).union(users.select(users.c.user_id>7)).alias('ulist')
+
+ # add on an eager load of "addresses"
+ statement = statement.outerjoin(addresses).select().apply_labels()
+
+ # create query, indicating "ulist" is an alias for the main table, "addresses" property should
+ # be eager loaded
+ query = session.query(User).options(contains_alias('ulist'), contains_eager('addresses'))
+
+ # results
+ r = query.from_statement(statement)
+
+Relation Loader API
+--------------------
+
+.. autofunction:: contains_alias
+
+.. autofunction:: contains_eager
+
+.. autofunction:: eagerload
+
+.. autofunction:: eagerload_all
+
+.. autofunction:: joinedload
+
+.. autofunction:: joinedload_all
+
+.. autofunction:: lazyload
+
+.. autofunction:: subqueryload
+
+.. autofunction:: subqueryload_all
diff --git a/doc/build/orm/mapper_config.rst b/doc/build/orm/mapper_config.rst
new file mode 100644
index 000000000..574646ca7
--- /dev/null
+++ b/doc/build/orm/mapper_config.rst
@@ -0,0 +1,722 @@
+.. _mapper_config_toplevel:
+
+.. module:: sqlalchemy.orm
+
+Mapper Configuration
+====================
+
+This section describes a variety of configurational patterns that are usable
+with mappers. It assumes you've worked through :ref:`ormtutorial_toplevel` and
+know how to construct and use rudimentary mappers and relationships.
+
+Note that all patterns here apply both to the usage of explicit
+:func:`~.orm.mapper` and :class:`.Table` objects as well as when using the
+:mod:`sqlalchemy.ext.declarative` extension. Any example in this section which
+takes a form such as::
+
+ mapper(User, users_table, primary_key=[users_table.c.id])
+
+Would translate into declarative as::
+
+ class User(Base):
+ __table__ = users_table
+ __mapper_args__ = {
+ 'primary_key':users_table.c.id
+ }
+
+Or if using ``__tablename__``, :class:`.Column` objects are declared inline
+with the class definition. These are usable as is within ``__mapper_args__``::
+
+ class User(Base):
+ __tablename__ = 'users'
+
+ id = Column(Integer)
+
+ __mapper_args__ = {
+ 'primary_key':id
+ }
+
+
+Customizing Column Properties
+------------------------------
+
+The default behavior of :func:`~.orm.mapper` is to assemble all the columns in
+the mapped :class:`.Table` into mapped object attributes. This behavior can be
+modified in several ways, as well as enhanced by SQL expressions.
+
+Mapping a Subset of Table Columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To reference a subset of columns referenced by a table as mapped attributes,
+use the ``include_properties`` or ``exclude_properties`` arguments. For
+example::
+
+ mapper(User, users_table, include_properties=['user_id', 'user_name'])
+
+...will map the ``User`` class to the ``users_table`` table, only including
+the "user_id" and "user_name" columns - the rest are not referenced.
+Similarly::
+
+ mapper(Address, addresses_table,
+ exclude_properties=['street', 'city', 'state', 'zip'])
+
+...will map the ``Address`` class to the ``addresses_table`` table, including
+all columns present except "street", "city", "state", and "zip".
+
+When this mapping is used, the columns that are not included will not be
+referenced in any SELECT statements emitted by :class:`.Query`, nor will there
+be any mapped attribute on the mapped class which represents the column;
+assigning an attribute of that name will have no effect beyond that of
+a normal Python attribute assignment.
+
+In some cases, multiple columns may have the same name, such as when
+mapping to a join of two or more tables that share some column name. To
+exclude or include individual columns, :class:`.Column` objects
+may also be placed within the "include_properties" and "exclude_properties"
+collections (new feature as of 0.6.4)::
+
+ mapper(UserAddress, users_table.join(addresses_table),
+ exclude_properties=[addresses_table.c.id],
+ primary_key=users_table.c.id
+ )
+
+It should be noted that insert and update defaults configured on individual
+:class:`.Column` objects, such as those configured by the "default",
+"onupdate", "server_default" and "server_onupdate" arguments, will continue
+to function normally even if those :class:`.Column` objects are not mapped.
+This functionality is part of the SQL expression and execution system and
+occurs below the level of the ORM.
+
+
+Attribute Names for Mapped Columns
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To change the name of the attribute mapped to a particular column, place the
+:class:`~sqlalchemy.schema.Column` object in the ``properties`` dictionary
+with the desired key::
+
+ mapper(User, users_table, properties={
+ 'id': users_table.c.user_id,
+ 'name': users_table.c.user_name,
+ })
+
+When using :mod:`~sqlalchemy.ext.declarative`, the above configuration is more
+succinct - place the full column name in the :class:`.Column` definition,
+using the desired attribute name in the class definition::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class User(Base):
+ __tablename__ = 'user'
+ id = Column('user_id', Integer, primary_key=True)
+ name = Column('user_name', String(50))
+
+To change the names of all attributes using a prefix, use the
+``column_prefix`` option. This is useful for some schemes that would like
+to declare alternate attributes::
+
+ mapper(User, users_table, column_prefix='_')
+
+The above will place attribute names such as ``_user_id``, ``_user_name``,
+``_password`` etc. on the mapped ``User`` class.
+
+
+Mapping Multiple Columns to a Single Attribute
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+To place multiple columns which are known to be "synonymous" based on foreign
+key relationship or join condition into the same mapped attribute, put them
+together using a list, as below where we map to a :func:`~.expression.join`::
+
+ from sqlalchemy.sql import join
+
+ # join users and addresses
+ usersaddresses = join(users_table, addresses_table, \
+ users_table.c.user_id == addresses_table.c.user_id)
+
+ # user_id columns are equated under the 'user_id' attribute
+ mapper(User, usersaddresses, properties={
+ 'id':[users_table.c.user_id, addresses_table.c.user_id],
+ })
+
+For further examples on this particular use case, see :ref:`maptojoin`.
+
+.. _deferred:
+
+Deferred Column Loading
+------------------------
+
+This feature allows particular columns of a table to not be loaded by default,
+instead being loaded later on when first referenced. It is essentially
+"column-level lazy loading". This feature is useful when one wants to avoid
+loading a large text or binary field into memory when it's not needed.
+Individual columns can be lazy loaded by themselves or placed into groups that
+lazy-load together::
+
+ book_excerpts = Table('books', metadata,
+ Column('book_id', Integer, primary_key=True),
+ Column('title', String(200), nullable=False),
+ Column('summary', String(2000)),
+ Column('excerpt', Text),
+ Column('photo', Binary)
+ )
+
+ class Book(object):
+ pass
+
+ # define a mapper that will load each of 'excerpt' and 'photo' in
+ # separate, individual-row SELECT statements when each attribute
+ # is first referenced on the individual object instance
+ mapper(Book, book_excerpts, properties={
+ 'excerpt': deferred(book_excerpts.c.excerpt),
+ 'photo': deferred(book_excerpts.c.photo)
+ })
+
+With declarative, :class:`.Column` objects can be declared directly inside of :func:`deferred`::
+
+ class Book(Base):
+ __tablename__ = 'books'
+
+ book_id = Column(Integer, primary_key=True)
+ title = Column(String(200), nullable=False)
+ summary = Column(String(2000))
+ excerpt = deferred(Column(Text))
+ photo = deferred(Column(Binary))
+
+Deferred columns can be associated with a "group" name, so that they load
+together when any of them are first accessed::
+
+ book_excerpts = Table('books', metadata,
+ Column('book_id', Integer, primary_key=True),
+ Column('title', String(200), nullable=False),
+ Column('summary', String(2000)),
+ Column('excerpt', Text),
+ Column('photo1', Binary),
+ Column('photo2', Binary),
+ Column('photo3', Binary)
+ )
+
+ class Book(object):
+ pass
+
+ # define a mapper with a 'photos' deferred group. when one photo is referenced,
+ # all three photos will be loaded in one SELECT statement. The 'excerpt' will
+ # be loaded separately when it is first referenced.
+ mapper(Book, book_excerpts, properties = {
+ 'excerpt': deferred(book_excerpts.c.excerpt),
+ 'photo1': deferred(book_excerpts.c.photo1, group='photos'),
+ 'photo2': deferred(book_excerpts.c.photo2, group='photos'),
+ 'photo3': deferred(book_excerpts.c.photo3, group='photos')
+ })
+
+You can defer or undefer columns at the :class:`~sqlalchemy.orm.query.Query`
+level using the :func:`.defer` and :func:`.undefer` query options::
+
+ query = session.query(Book)
+ query.options(defer('summary')).all()
+ query.options(undefer('excerpt')).all()
+
+And an entire "deferred group", i.e. which uses the ``group`` keyword argument
+to :func:`~sqlalchemy.orm.deferred()`, can be undeferred using
+:func:`.undefer_group()`, sending in the group name::
+
+ query = session.query(Book)
+ query.options(undefer_group('photos')).all()
+
+.. autofunction:: deferred
+
+.. autofunction:: defer
+
+.. autofunction:: undefer
+
+.. autofunction:: undefer_group
+
+.. _mapper_sql_expressions:
+
+SQL Expressions as Mapped Attributes
+-------------------------------------
+
+Any SQL expression that relates to the primary mapped selectable can be mapped as a
+read-only attribute which will be bundled into the SELECT emitted
+for the target mapper when rows are loaded. This effect is achieved
+using the :func:`.column_property` function. Any
+scalar-returning
+:class:`.ClauseElement` may be
+used. Unlike older versions of SQLAlchemy, there is no :func:`~.sql.expression.label` requirement::
+
+ from sqlalchemy.orm import column_property
+
+ mapper(User, users_table, properties={
+ 'fullname': column_property(
+ users_table.c.firstname + " " + users_table.c.lastname
+ )
+ })
+
+Correlated subqueries may be used as well::
+
+ from sqlalchemy.orm import column_property
+ from sqlalchemy import select, func
+
+ mapper(User, users_table, properties={
+ 'address_count': column_property(
+ select([func.count(addresses_table.c.address_id)]).\
+ where(addresses_table.c.user_id==users_table.c.user_id)
+ )
+ })
+
+The declarative form of the above is described in :ref:`declarative_sql_expressions`.
+
+.. autofunction:: column_property
+
+Note that :func:`.column_property` is used to provide the effect of a SQL
+expression that is actively rendered into the SELECT generated for a
+particular mapped class. Alternatively, for the typical attribute that
+represents a composed value, it's usually simpler to define it as a Python
+property which is evaluated as it is invoked on instances after they've been
+loaded::
+
+ class User(object):
+ @property
+ def fullname(self):
+ return self.firstname + " " + self.lastname
+
+To invoke a SQL statement from an instance that's already been loaded, the
+session associated with the instance can be acquired using
+:func:`~.session.object_session` which will provide the appropriate
+transactional context from which to emit a statement::
+
+ from sqlalchemy.orm import object_session
+ from sqlalchemy import select, func
+
+ class User(object):
+ @property
+ def address_count(self):
+ return object_session(self).\
+ scalar(
+ select([func.count(addresses_table.c.address_id)]).\
+ where(addresses_table.c.user_id==self.user_id)
+ )
+
+On the subject of object-level methods, be sure to see the :mod:`.derived_attributes` example,
+which provides a simple method of reusing instance-level expressions simultaneously
+as SQL expressions. The :mod:`.derived_attributes` example is slated to become a
+built-in feature of SQLAlchemy in a future release.
+
+
+Changing Attribute Behavior
+----------------------------
+
+Simple Validators
+~~~~~~~~~~~~~~~~~~
+
+A quick way to add a "validation" routine to an attribute is to use the
+:func:`~sqlalchemy.orm.validates` decorator. An attribute validator can raise
+an exception, halting the process of mutating the attribute's value, or can
+change the given value into something different. Validators, like all
+attribute extensions, are only called by normal userland code; they are not
+issued when the ORM is populating the object.
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.orm import validates
+
+ addresses_table = Table('addresses', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('email', String)
+ )
+
+ class EmailAddress(object):
+ @validates('email')
+ def validate_email(self, key, address):
+ assert '@' in address
+ return address
+
+ mapper(EmailAddress, addresses_table)
+
+Validators also receive collection events, when items are added to a collection:
+
+.. sourcecode:: python+sql
+
+ class User(object):
+ @validates('addresses')
+ def validate_address(self, key, address):
+ assert '@' in address.email
+ return address
+
+.. autofunction:: validates
+
+.. _synonyms:
+
+Using Descriptors
+~~~~~~~~~~~~~~~~~~
+
+A more comprehensive way to produce modified behavior for an attribute is to
+use descriptors. These are commonly used in Python using the ``property()``
+function. The standard SQLAlchemy technique for descriptors is to create a
+plain descriptor, and to have it read/write from a mapped attribute with a
+different name. Below we illustrate this using Python 2.6-style properties::
+
+ class EmailAddress(object):
+
+ @property
+ def email(self):
+ return self._email
+
+ @email.setter
+ def email(self, email):
+ self._email = email
+
+ mapper(EmailAddress, addresses_table, properties={
+ '_email': addresses_table.c.email
+ })
+
+The approach above will work, but there's more we can add. While our
+``EmailAddress`` object will shuttle the value through the ``email``
+descriptor and into the ``_email`` mapped attribute, the class level
+``EmailAddress.email`` attribute does not have the usual expression semantics
+usable with :class:`.Query`. To provide these, we instead use the
+:func:`.synonym` function as follows::
+
+ mapper(EmailAddress, addresses_table, properties={
+ 'email': synonym('_email', map_column=True)
+ })
+
+The ``email`` attribute is now usable in the same way as any
+other mapped attribute, including filter expressions,
+get/set operations, etc.::
+
+ address = session.query(EmailAddress).filter(EmailAddress.email == 'some address').one()
+
+ address.email = 'some other address'
+ session.flush()
+
+ q = session.query(EmailAddress).filter_by(email='some other address')
+
+If the mapped class does not provide a property, the :func:`.synonym` construct will create a default getter/setter object automatically.
+
+To use synonyms with :mod:`~sqlalchemy.ext.declarative`, see the section
+:ref:`declarative_synonyms`.
+
+Note that the "synonym" feature is eventually to be replaced by the superior
+"hybrid attributes" approach, slated to become a built in feature of SQLAlchemy
+in a future release. "hybrid" attributes are simply Python properties that evaluate
+at both the class level and at the instance level. For an example of their usage,
+see the :mod:`derived_attributes` example.
+
+.. autofunction:: synonym
+
+.. _custom_comparators:
+
+Custom Comparators
+~~~~~~~~~~~~~~~~~~~
+
+The expressions returned by comparison operations, such as
+``User.name=='ed'``, can be customized, by implementing an object that
+explicitly defines each comparison method needed. This is a relatively rare
+use case. For most needs, the approach in :ref:`mapper_sql_expressions` will
+often suffice, or alternatively a scheme like that of the
+:mod:`.derived_attributes` example. Those approaches should be tried first
+before resorting to custom comparison objects.
+
+Each of :func:`.column_property`, :func:`~.composite`, :func:`.relationship`,
+and :func:`.comparable_property` accept an argument called
+``comparator_factory``. A subclass of :class:`.PropComparator` can be provided
+for this argument, which can then reimplement basic Python comparison methods
+such as ``__eq__()``, ``__ne__()``, ``__lt__()``, and so on.
+
+It's best to subclass the :class:`.PropComparator` subclass provided by
+each type of property. For example, to allow a column-mapped attribute to
+do case-insensitive comparison::
+
+ from sqlalchemy.orm.properties import ColumnProperty
+ from sqlalchemy.sql import func
+
+ class MyComparator(ColumnProperty.Comparator):
+ def __eq__(self, other):
+ return func.lower(self.__clause_element__()) == func.lower(other)
+
+ mapper(EmailAddress, addresses_table, properties={
+ 'email':column_property(addresses_table.c.email,
+ comparator_factory=MyComparator)
+ })
+
+Above, comparisons on the ``email`` column are wrapped in the SQL lower()
+function to produce case-insensitive matching::
+
+ >>> str(EmailAddress.email == 'SomeAddress@foo.com')
+ lower(addresses.email) = lower(:lower_1)
+
+When building a :class:`.PropComparator`, the ``__clause_element__()`` method
+should be used in order to acquire the underlying mapped column. This will
+return a column that is appropriately wrapped in any kind of subquery
+or aliasing that has been applied in the context of the generated SQL statement.
+
+.. autoclass:: sqlalchemy.orm.interfaces.PropComparator
+ :show-inheritance:
+
+.. autofunction:: comparable_property
+
+
+.. _mapper_composite:
+
+Composite Column Types
+-----------------------
+
+Sets of columns can be associated with a single user-defined datatype. The ORM provides a single attribute which represents the group of columns
+using the class you provide.
+
+A simple example represents pairs of columns as a "Point" object.
+Starting with a table that represents two points as x1/y1 and x2/y2::
+
+ from sqlalchemy import Table, Column
+
+ vertices = Table('vertices', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('x1', Integer),
+ Column('y1', Integer),
+ Column('x2', Integer),
+ Column('y2', Integer),
+ )
+
+We create a new class, ``Point``, that will represent each x/y as a
+pair::
+
+ class Point(object):
+ def __init__(self, x, y):
+ self.x = x
+ self.y = y
+ def __composite_values__(self):
+ return self.x, self.y
+ def __set_composite_values__(self, x, y):
+ self.x = x
+ self.y = y
+ def __eq__(self, other):
+ return other is not None and \
+ other.x == self.x and \
+ other.y == self.y
+ def __ne__(self, other):
+ return not self.__eq__(other)
+
+The requirements for the custom datatype class are that it have a
+constructor which accepts positional arguments corresponding to its column
+format, and also provides a method ``__composite_values__()`` which
+returns the state of the object as a list or tuple, in order of its
+column-based attributes. It also should supply adequate ``__eq__()`` and
+``__ne__()`` methods which test the equality of two instances.
+
+The ``__set_composite_values__()`` method is optional. If it's not
+provided, the names of the mapped columns are taken as the names of
+attributes on the object, and ``setattr()`` is used to set data.
+
+The :func:`.composite` function is then used in the mapping::
+
+ from sqlalchemy.orm import composite
+
+ class Vertex(object):
+ pass
+
+ mapper(Vertex, vertices, properties={
+ 'start': composite(Point, vertices.c.x1, vertices.c.y1),
+ 'end': composite(Point, vertices.c.x2, vertices.c.y2)
+ })
+
+We can now use the ``Vertex`` instances as well as querying as though the
+``start`` and ``end`` attributes are regular scalar attributes::
+
+ session = Session()
+ v = Vertex(Point(3, 4), Point(5, 6))
+ session.add(v)
+
+ v2 = session.query(Vertex).filter(Vertex.start == Point(3, 4))
+
+The "equals" comparison operation by default produces an AND of all
+corresponding columns equated to one another. This can be changed using
+the ``comparator_factory``, described in :ref:`custom_comparators`.
+Below we illustrate the "greater than" operator, implementing
+the same expression that the base "greater than" does::
+
+ from sqlalchemy.orm.properties import CompositeProperty
+ from sqlalchemy import sql
+
+ class PointComparator(CompositeProperty.Comparator):
+ def __gt__(self, other):
+ """redefine the 'greater than' operation"""
+
+ return sql.and_(*[a>b for a, b in
+ zip(self.__clause_element__().clauses,
+ other.__composite_values__())])
+
+    mapper(Vertex, vertices, properties={
+ 'start': composite(Point, vertices.c.x1, vertices.c.y1,
+ comparator_factory=PointComparator),
+ 'end': composite(Point, vertices.c.x2, vertices.c.y2,
+ comparator_factory=PointComparator)
+ })
+
+.. autofunction:: composite
+
+
+.. _maptojoin:
+
+Mapping a Class against Multiple Tables
+----------------------------------------
+
+Mappers can be constructed against arbitrary relational units (called
+``Selectables``) as well as plain ``Tables``. For example, The ``join``
+keyword from the SQL package creates a neat selectable unit comprised of
+multiple tables, complete with its own composite primary key, which can be
+passed in to a mapper as the table.
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.sql import join
+
+ class AddressUser(object):
+ pass
+
+ # define a Join
+ j = join(users_table, addresses_table)
+
+ # map to it - the identity of an AddressUser object will be
+ # based on (user_id, address_id) since those are the primary keys involved
+ mapper(AddressUser, j, properties={
+ 'user_id': [users_table.c.user_id, addresses_table.c.user_id]
+ })
+
+A second example:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.sql import join
+
+ # many-to-many join on an association table
+ j = join(users_table, userkeywords,
+ users_table.c.user_id==userkeywords.c.user_id).join(keywords,
+ userkeywords.c.keyword_id==keywords.c.keyword_id)
+
+ # a class
+ class KeywordUser(object):
+ pass
+
+ # map to it - the identity of a KeywordUser object will be
+ # (user_id, keyword_id) since those are the primary keys involved
+ mapper(KeywordUser, j, properties={
+ 'user_id': [users_table.c.user_id, userkeywords.c.user_id],
+ 'keyword_id': [userkeywords.c.keyword_id, keywords.c.keyword_id]
+ })
+
+In both examples above, "composite" columns were added as properties to the
+mappers; these are aggregations of multiple columns into one mapper property,
+which instructs the mapper to keep both of those columns set at the same
+value.
+
+Mapping a Class against Arbitrary Selects
+------------------------------------------
+
+Similar to mapping against a join, a plain select() object can be used with a mapper as well. Below, an example select which contains two aggregate functions and a group_by is mapped to a class:
+
+.. sourcecode:: python+sql
+
+ from sqlalchemy.sql import select
+
+ s = select([customers,
+ func.count(orders).label('order_count'),
+ func.max(orders.price).label('highest_order')],
+ customers.c.customer_id==orders.c.customer_id,
+ group_by=[c for c in customers.c]
+ ).alias('somealias')
+ class Customer(object):
+ pass
+
+ mapper(Customer, s)
+
+Above, the "customers" table is joined against the "orders" table to produce a full row for each customer row, the total count of related rows in the "orders" table, and the highest price in the "orders" table, grouped against the full set of columns in the "customers" table. That query is then mapped against the Customer class. New instances of Customer will contain attributes for each column in the "customers" table as well as an "order_count" and "highest_order" attribute. Updates to the Customer object will only be reflected in the "customers" table and not the "orders" table. This is because the primary key columns of the "orders" table are not represented in this mapper and therefore the table is not affected by save or delete operations.
+
+Multiple Mappers for One Class
+-------------------------------
+
+The first mapper created for a certain class is known as that class's "primary mapper." Other mappers can be created as well on the "load side" - these are called **secondary mappers**. This is a mapper that must be constructed with the keyword argument ``non_primary=True``, and represents a load-only mapper. Objects that are loaded with a secondary mapper will have their save operation processed by the primary mapper. It is also invalid to add new :func:`~sqlalchemy.orm.relationship` objects to a non-primary mapper. To use this mapper with the Session, specify it to the :class:`~sqlalchemy.orm.session.Session.query` method:
+
+example:
+
+.. sourcecode:: python+sql
+
+ # primary mapper
+ mapper(User, users_table)
+
+ # make a secondary mapper to load User against a join
+ othermapper = mapper(User, users_table.join(someothertable), non_primary=True)
+
+ # select
+ result = session.query(othermapper).select()
+
+The "non primary mapper" is a rarely needed feature of SQLAlchemy; in most cases, the :class:`~sqlalchemy.orm.query.Query` object can produce any kind of query that's desired. It's recommended that a straight :class:`~sqlalchemy.orm.query.Query` be used in place of a non-primary mapper unless the mapper approach is absolutely needed. Current use cases for the "non primary mapper" are when you want to map the class to a particular select statement or view to which additional query criterion can be added, and for when the particular mapped select statement or view is to be placed in a :func:`~sqlalchemy.orm.relationship` of a parent mapper.
+
+Multiple "Persistence" Mappers for One Class
+---------------------------------------------
+
+The non_primary mapper defines alternate mappers for the purposes of loading objects. What if we want the same class to be *persisted* differently, such as to different tables? SQLAlchemy
+refers to this as the "entity name" pattern, and in Python one can use a recipe which creates
+anonymous subclasses which are distinctly mapped. See the recipe at `Entity Name <http://www.sqlalchemy.org/trac/wiki/UsageRecipes/EntityName>`_.
+
+Constructors and Object Initialization
+---------------------------------------
+
+Mapping imposes no restrictions or requirements on the constructor (``__init__``) method for the class. You are free to require any arguments for the function
+that you wish, assign attributes to the instance that are unknown to the ORM, and generally do anything else you would normally do when writing a constructor
+for a Python class.
+
+The SQLAlchemy ORM does not call ``__init__`` when recreating objects from database rows. The ORM's process is somewhat akin to the Python standard library's
+``pickle`` module, invoking the low level ``__new__`` method and then quietly restoring attributes directly on the instance rather than calling ``__init__``.
+
+If you need to do some setup on database-loaded instances before they're ready to use, you can use the ``@reconstructor`` decorator to tag a method as the ORM
+counterpart to ``__init__``. SQLAlchemy will call this method with no arguments every time it loads or reconstructs one of your instances. This is useful for
+recreating transient properties that are normally assigned in your ``__init__``::
+
+ from sqlalchemy import orm
+
+ class MyMappedClass(object):
+ def __init__(self, data):
+ self.data = data
+ # we need stuff on all instances, but not in the database.
+ self.stuff = []
+
+ @orm.reconstructor
+ def init_on_load(self):
+ self.stuff = []
+
+When ``obj = MyMappedClass()`` is executed, Python calls the ``__init__`` method as normal and the ``data`` argument is required. When instances are loaded
+during a :class:`~sqlalchemy.orm.query.Query` operation as in ``query(MyMappedClass).one()``, ``init_on_load`` is called instead.
+
+Any method may be tagged as the :func:`~sqlalchemy.orm.reconstructor`, even the ``__init__`` method. SQLAlchemy will call the reconstructor method with no arguments. Scalar
+(non-collection) database-mapped attributes of the instance will be available for use within the function. Eagerly-loaded collections are generally not yet
+available and will usually only contain the first element. ORM state changes made to objects at this stage will not be recorded for the next flush()
+operation, so the activity within a reconstructor should be conservative.
+
+While the ORM does not call your ``__init__`` method, it will modify the class's ``__init__`` slightly. The method is lightly wrapped to act as a trigger for
+the ORM, allowing mappers to be compiled automatically and will fire a :func:`~sqlalchemy.orm.interfaces.MapperExtension.init_instance` event that :class:`~sqlalchemy.orm.interfaces.MapperExtension` objects may listen for.
+:class:`~sqlalchemy.orm.interfaces.MapperExtension` objects can also listen for a ``reconstruct_instance`` event, analogous to the :func:`~sqlalchemy.orm.reconstructor` decorator above.
+
+.. autofunction:: reconstructor
+
+The :func:`mapper` API
+----------------------
+
+.. autofunction:: mapper
+
+.. autofunction:: object_mapper
+
+.. autofunction:: class_mapper
+
+.. autofunction:: compile_mappers
+
+.. autofunction:: clear_mappers
+
+.. autofunction:: sqlalchemy.orm.util.identity_key
+
+.. autofunction:: sqlalchemy.orm.util.polymorphic_union
+
+.. autoclass:: sqlalchemy.orm.mapper.Mapper
+ :members:
+
diff --git a/doc/build/reference/orm/query.rst b/doc/build/orm/query.rst
index 931bbf064..29b4196d1 100644
--- a/doc/build/reference/orm/query.rst
+++ b/doc/build/orm/query.rst
@@ -3,6 +3,11 @@
Querying
========
+This section provides API documentation for the :class:`.Query` object and related constructs.
+
+For an in-depth introduction to querying with the SQLAlchemy ORM, please see the :ref:`ormtutorial_toplevel`.
+
+
.. module:: sqlalchemy.orm
The Query Object
@@ -31,34 +36,5 @@ The public name of the :class:`.AliasedClass` class.
.. autofunction:: outerjoin
-Query Options
--------------
-
-Options which are passed to ``query.options()``, to affect the behavior of loading.
-
-.. autofunction:: contains_alias
-
-.. autofunction:: contains_eager
-
-.. autofunction:: defer
-
-.. autofunction:: eagerload
-
-.. autofunction:: eagerload_all
-
-.. autofunction:: extension
-
-.. autofunction:: joinedload
-
-.. autofunction:: joinedload_all
-
-.. autofunction:: lazyload
-
-.. autofunction:: subqueryload
-
-.. autofunction:: subqueryload_all
-
-.. autofunction:: undefer
-
-.. autofunction:: undefer_group
+.. autofunction:: with_parent
diff --git a/doc/build/orm/relationships.rst b/doc/build/orm/relationships.rst
new file mode 100644
index 000000000..342847328
--- /dev/null
+++ b/doc/build/orm/relationships.rst
@@ -0,0 +1,792 @@
+.. module:: sqlalchemy.orm
+
+Relationship Configuration
+==========================
+
+This section describes the :func:`relationship` function and in depth discussion
+of its usage. The reference material here continues into the next section,
+:ref:`collections_toplevel`, which has additional detail on configuration
+of collections via :func:`relationship`.
+
+Basic Relational Patterns
+--------------------------
+
+A quick walkthrough of the basic relational patterns. In this section we
+illustrate the classical mapping using :func:`mapper` in conjunction with
+:func:`relationship`. Then (by popular demand), we illustrate the declarative
+form using the :mod:`~sqlalchemy.ext.declarative` module.
+
+Note that :func:`.relationship` is historically known as
+:func:`.relation` in older versions of SQLAlchemy.
+
+One To Many
+~~~~~~~~~~~~
+
+A one to many relationship places a foreign key in the child table referencing
+the parent. SQLAlchemy creates the relationship as a collection on the parent
+object containing instances of the child object.
+
+.. sourcecode:: python+sql
+
+ parent_table = Table('parent', metadata,
+ Column('id', Integer, primary_key=True))
+
+ child_table = Table('child', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('parent_id', Integer, ForeignKey('parent.id'))
+ )
+
+ class Parent(object):
+ pass
+
+ class Child(object):
+ pass
+
+ mapper(Parent, parent_table, properties={
+ 'children': relationship(Child)
+ })
+
+ mapper(Child, child_table)
+
+To establish a bi-directional relationship in one-to-many, where the "reverse" side is a many to one, specify the ``backref`` option:
+
+.. sourcecode:: python+sql
+
+ mapper(Parent, parent_table, properties={
+ 'children': relationship(Child, backref='parent')
+ })
+
+ mapper(Child, child_table)
+
+``Child`` will get a ``parent`` attribute with many-to-one semantics.
+
+Declarative::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child", backref="parent")
+
+ class Child(Base):
+ __tablename__ = 'child'
+ id = Column(Integer, primary_key=True)
+ parent_id = Column(Integer, ForeignKey('parent.id'))
+
+
+Many To One
+~~~~~~~~~~~~
+
+Many to one places a foreign key in the parent table referencing the child.
+The mapping setup is identical to one-to-many, however SQLAlchemy creates the
+relationship as a scalar attribute on the parent object referencing a single
+instance of the child object.
+
+.. sourcecode:: python+sql
+
+ parent_table = Table('parent', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('child_id', Integer, ForeignKey('child.id')))
+
+ child_table = Table('child', metadata,
+ Column('id', Integer, primary_key=True),
+ )
+
+ class Parent(object):
+ pass
+
+ class Child(object):
+ pass
+
+ mapper(Parent, parent_table, properties={
+ 'child': relationship(Child)
+ })
+
+ mapper(Child, child_table)
+
+Backref behavior is available here as well, where ``backref="parents"`` will
+place a one-to-many collection on the ``Child`` class::
+
+ mapper(Parent, parent_table, properties={
+ 'child': relationship(Child, backref="parents")
+ })
+
+Declarative::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ child_id = Column(Integer, ForeignKey('child.id'))
+ child = relationship("Child", backref="parents")
+
+ class Child(Base):
+ __tablename__ = 'child'
+ id = Column(Integer, primary_key=True)
+
+One To One
+~~~~~~~~~~~
+
+One To One is essentially a bi-directional relationship with a scalar
+attribute on both sides. To achieve this, the ``uselist=False`` flag indicates
+the placement of a scalar attribute instead of a collection on the "many" side
+of the relationship. To convert one-to-many into one-to-one::
+
+ parent_table = Table('parent', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ child_table = Table('child', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('parent_id', Integer, ForeignKey('parent.id'))
+ )
+
+ mapper(Parent, parent_table, properties={
+ 'child': relationship(Child, uselist=False, backref='parent')
+ })
+
+ mapper(Child, child_table)
+
+Or to turn a one-to-many backref into one-to-one, use the :func:`.backref` function
+to provide arguments for the reverse side::
+
+ from sqlalchemy.orm import backref
+
+ parent_table = Table('parent', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('child_id', Integer, ForeignKey('child.id'))
+ )
+
+ child_table = Table('child', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ mapper(Parent, parent_table, properties={
+ 'child': relationship(Child, backref=backref('parent', uselist=False))
+ })
+
+ mapper(Child, child_table)
+
+The second example above as declarative::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ id = Column(Integer, primary_key=True)
+ child_id = Column(Integer, ForeignKey('child.id'))
+ child = relationship("Child", backref=backref("parent", uselist=False))
+
+ class Child(Base):
+ __tablename__ = 'child'
+ id = Column(Integer, primary_key=True)
+
+Many To Many
+~~~~~~~~~~~~~
+
+Many to Many adds an association table between two classes. The association
+table is indicated by the ``secondary`` argument to
+:func:`.relationship`.
+
+.. sourcecode:: python+sql
+
+ left_table = Table('left', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ right_table = Table('right', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ association_table = Table('association', metadata,
+ Column('left_id', Integer, ForeignKey('left.id')),
+ Column('right_id', Integer, ForeignKey('right.id'))
+ )
+
+ mapper(Parent, left_table, properties={
+ 'children': relationship(Child, secondary=association_table)
+ })
+
+ mapper(Child, right_table)
+
+For a bi-directional relationship, both sides of the relationship contain a
+collection. The ``backref`` keyword will automatically use
+the same ``secondary`` argument for the reverse relationship:
+
+.. sourcecode:: python+sql
+
+ mapper(Parent, left_table, properties={
+ 'children': relationship(Child, secondary=association_table,
+ backref='parents')
+ })
+
+With declarative, we still use the :class:`.Table` for the ``secondary``
+argument. A class is not mapped to this table, so it remains in its
+plain schematic form::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ association_table = Table('association', Base.metadata,
+ Column('left_id', Integer, ForeignKey('left.id')),
+ Column('right_id', Integer, ForeignKey('right.id'))
+ )
+
+ class Parent(Base):
+ __tablename__ = 'left'
+ id = Column(Integer, primary_key=True)
+ children = relationship("Child",
+ secondary=association_table,
+ backref="parents")
+
+ class Child(Base):
+ __tablename__ = 'right'
+ id = Column(Integer, primary_key=True)
+
+.. _association_pattern:
+
+Association Object
+~~~~~~~~~~~~~~~~~~
+
+The association object pattern is a variant on many-to-many: it specifically
+is used when your association table contains additional columns beyond those
+which are foreign keys to the left and right tables. Instead of using the
+``secondary`` argument, you map a new class directly to the association table.
+The left side of the relationship references the association object via
+one-to-many, and the association class references the right side via
+many-to-one.
+
+.. sourcecode:: python+sql
+
+ left_table = Table('left', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ right_table = Table('right', metadata,
+ Column('id', Integer, primary_key=True)
+ )
+
+ association_table = Table('association', metadata,
+ Column('left_id', Integer, ForeignKey('left.id'), primary_key=True),
+ Column('right_id', Integer, ForeignKey('right.id'), primary_key=True),
+ Column('data', String(50))
+ )
+
+ mapper(Parent, left_table, properties={
+ 'children':relationship(Association)
+ })
+
+ mapper(Association, association_table, properties={
+ 'child':relationship(Child)
+ })
+
+ mapper(Child, right_table)
+
+The bi-directional version adds backrefs to both relationships:
+
+.. sourcecode:: python+sql
+
+ mapper(Parent, left_table, properties={
+ 'children':relationship(Association, backref="parent")
+ })
+
+ mapper(Association, association_table, properties={
+ 'child':relationship(Child, backref="parent_assocs")
+ })
+
+ mapper(Child, right_table)
+
+Declarative::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class Association(Base):
+ __tablename__ = 'association'
+ left_id = Column(Integer, ForeignKey('left.id'), primary_key=True)
+ right_id = Column(Integer, ForeignKey('right.id'), primary_key=True)
+ child = relationship("Child", backref="parent_assocs")
+
+ class Parent(Base):
+ __tablename__ = 'left'
+ id = Column(Integer, primary_key=True)
+ children = relationship(Association, backref="parent")
+
+ class Child(Base):
+ __tablename__ = 'right'
+ id = Column(Integer, primary_key=True)
+
+Working with the association pattern in its direct form requires that child
+objects are associated with an association instance before being appended to
+the parent; similarly, access from parent to child goes through the
+association object:
+
+.. sourcecode:: python+sql
+
+ # create parent, append a child via association
+ p = Parent()
+ a = Association()
+ a.child = Child()
+ p.children.append(a)
+
+ # iterate through child objects via association, including association
+ # attributes
+ for assoc in p.children:
+ print assoc.data
+ print assoc.child
+
+To enhance the association object pattern such that direct
+access to the ``Association`` object is optional, SQLAlchemy
+provides the :ref:`associationproxy` extension. This
+extension allows the configuration of attributes which will
+access two "hops" with a single access, one "hop" to the
+associated object, and a second to a target attribute.
+
+.. note:: When using the association object pattern, it is
+ advisable that the association-mapped table not be used
+ as the ``secondary`` argument on a :func:`.relationship`
+ elsewhere, unless that :func:`.relationship` contains
+ the option ``viewonly=True``. SQLAlchemy otherwise
+ may attempt to emit redundant INSERT and DELETE
+ statements on the same table, if similar state is detected
+ on the related attribute as well as the associated
+ object.
+
+Adjacency List Relationships
+-----------------------------
+
+The **adjacency list** pattern is a common relational pattern whereby a table
+contains a foreign key reference to itself. This is the most common and simple
+way to represent hierarchical data in flat tables. The other way is the
+"nested sets" model, sometimes called "modified preorder". Despite what many
+online articles say about modified preorder, the adjacency list model is
+probably the most appropriate pattern for the large majority of hierarchical
+storage needs, for reasons of concurrency, reduced complexity, and that
+modified preorder has little advantage over an application which can fully
+load subtrees into the application space.
+
+SQLAlchemy commonly refers to an adjacency list relationship as a
+**self-referential mapper**. In this example, we'll work with a single table
+called ``nodes`` to represent a tree structure::
+
+ nodes = Table('nodes', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('parent_id', Integer, ForeignKey('nodes.id')),
+ Column('data', String(50)),
+ )
+
+A graph such as the following::
+
+ root --+---> child1
+ +---> child2 --+--> subchild1
+ | +--> subchild2
+ +---> child3
+
+Would be represented with data such as::
+
+ id parent_id data
+ --- ------- ----
+ 1 NULL root
+ 2 1 child1
+ 3 1 child2
+ 4 3 subchild1
+ 5 3 subchild2
+ 6 1 child3
+
+SQLAlchemy's :func:`.mapper` configuration for a self-referential one-to-many
+relationship is exactly like a "normal" one-to-many relationship. When
+SQLAlchemy encounters the foreign key relationship from ``nodes`` to
+``nodes``, it assumes one-to-many unless told otherwise:
+
+.. sourcecode:: python+sql
+
+ # entity class
+ class Node(object):
+ pass
+
+ mapper(Node, nodes, properties={
+ 'children': relationship(Node)
+ })
+
+To create a many-to-one relationship from child to parent, an extra indicator
+of the "remote side" is added, which contains the
+:class:`~sqlalchemy.schema.Column` object or objects indicating the remote
+side of the relationship:
+
+.. sourcecode:: python+sql
+
+ mapper(Node, nodes, properties={
+ 'parent': relationship(Node, remote_side=[nodes.c.id])
+ })
+
+And the bi-directional version combines both:
+
+.. sourcecode:: python+sql
+
+ mapper(Node, nodes, properties={
+ 'children': relationship(Node,
+ backref=backref('parent', remote_side=[nodes.c.id])
+ )
+ })
+
+For comparison, the declarative version typically uses the inline ``id``
+:class:`.Column` attribute to declare remote_side (note the list form is optional
+when the collection is only one column)::
+
+ from sqlalchemy.ext.declarative import declarative_base
+ Base = declarative_base()
+
+ class Node(Base):
+ __tablename__ = 'nodes'
+ id = Column(Integer, primary_key=True)
+ parent_id = Column(Integer, ForeignKey('nodes.id'))
+ data = Column(String(50))
+ children = relationship("Node",
+ backref=backref('parent', remote_side=id)
+ )
+
+There are several examples included with SQLAlchemy illustrating
+self-referential strategies; these include :ref:`examples_adjacencylist` and
+:ref:`examples_xmlpersistence`.
+
+Self-Referential Query Strategies
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+
+Querying self-referential structures is done in the same way as any other
+query in SQLAlchemy. For example, below we query for any node whose ``data``
+attribute stores the value ``child2``:
+
+.. sourcecode:: python+sql
+
+ # get all nodes named 'child2'
+ session.query(Node).filter(Node.data=='child2')
+
+On the subject of joins, i.e. those described in :ref:`datamapping_joins`,
+self-referential structures require the usage of aliases so that the same
+table can be referenced multiple times within the FROM clause of the query.
+Aliasing can be done either manually using the ``nodes``
+:class:`~sqlalchemy.schema.Table` object as a source of aliases:
+
+.. sourcecode:: python+sql
+
+ # get all nodes named 'subchild1' with a parent named 'child2'
+ nodealias = nodes.alias()
+ {sql}session.query(Node).filter(Node.data=='subchild1').\
+ filter(and_(Node.parent_id==nodealias.c.id, nodealias.c.data=='child2')).all()
+ SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data
+ FROM nodes, nodes AS nodes_1
+ WHERE nodes.data = ? AND nodes.parent_id = nodes_1.id AND nodes_1.data = ?
+ ['subchild1', 'child2']
+
+or automatically, using ``join()`` with ``aliased=True``:
+
+.. sourcecode:: python+sql
+
+ # get all nodes named 'subchild1' with a parent named 'child2'
+ {sql}session.query(Node).filter(Node.data=='subchild1').\
+ join('parent', aliased=True).filter(Node.data=='child2').all()
+ SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data
+ FROM nodes JOIN nodes AS nodes_1 ON nodes_1.id = nodes.parent_id
+ WHERE nodes.data = ? AND nodes_1.data = ?
+ ['subchild1', 'child2']
+
+To add criterion to multiple points along a longer join, use ``from_joinpoint=True``:
+
+.. sourcecode:: python+sql
+
+ # get all nodes named 'subchild1' with a parent named 'child2' and a grandparent 'root'
+ {sql}session.query(Node).filter(Node.data=='subchild1').\
+ join('parent', aliased=True).filter(Node.data=='child2').\
+ join('parent', aliased=True, from_joinpoint=True).filter(Node.data=='root').all()
+ SELECT nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data
+ FROM nodes JOIN nodes AS nodes_1 ON nodes_1.id = nodes.parent_id JOIN nodes AS nodes_2 ON nodes_2.id = nodes_1.parent_id
+ WHERE nodes.data = ? AND nodes_1.data = ? AND nodes_2.data = ?
+ ['subchild1', 'child2', 'root']
+
+Configuring Eager Loading
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Eager loading of relationships occurs using joins or outerjoins from parent to
+child table during a normal query operation, such that the parent and its
+child collection can be populated from a single SQL statement, or a second
+statement for all collections at once. SQLAlchemy's joined and subquery eager
+loading uses aliased tables in all cases when joining to related items, so it
+is compatible with self-referential joining. However, to use eager loading
+with a self-referential relationship, SQLAlchemy needs to be told how many
+levels deep it should join; otherwise the eager load will not take place. This
+depth setting is configured via ``join_depth``:
+
+.. sourcecode:: python+sql
+
+ mapper(Node, nodes, properties={
+ 'children': relationship(Node, lazy='joined', join_depth=2)
+ })
+
+ {sql}session.query(Node).all()
+ SELECT nodes_1.id AS nodes_1_id, nodes_1.parent_id AS nodes_1_parent_id, nodes_1.data AS nodes_1_data, nodes_2.id AS nodes_2_id, nodes_2.parent_id AS nodes_2_parent_id, nodes_2.data AS nodes_2_data, nodes.id AS nodes_id, nodes.parent_id AS nodes_parent_id, nodes.data AS nodes_data
+ FROM nodes LEFT OUTER JOIN nodes AS nodes_2 ON nodes.id = nodes_2.parent_id LEFT OUTER JOIN nodes AS nodes_1 ON nodes_2.id = nodes_1.parent_id
+ []
+
+Specifying Alternate Join Conditions to relationship()
+------------------------------------------------------
+
+The :func:`~sqlalchemy.orm.relationship` function uses the foreign key
+relationship between the parent and child tables to formulate the **primary
+join condition** between parent and child; in the case of a many-to-many
+relationship it also formulates the **secondary join condition**::
+
+ one to many/many to one:
+ ------------------------
+
+ parent_table --> parent_table.c.id == child_table.c.parent_id --> child_table
+ primaryjoin
+
+ many to many:
+ -------------
+
+ parent_table --> parent_table.c.id == secondary_table.c.parent_id -->
+ primaryjoin
+
+ secondary_table.c.child_id == child_table.c.id --> child_table
+ secondaryjoin
+
+If you are working with a :class:`~sqlalchemy.schema.Table` which has no
+:class:`~sqlalchemy.schema.ForeignKey` objects on it (which can be the case
+when using reflected tables with MySQL), or if the join condition cannot be
+expressed by a simple foreign key relationship, use the ``primaryjoin`` and
+possibly ``secondaryjoin`` conditions to create the appropriate relationship.
+
+In this example we create a relationship ``boston_addresses`` which will only
+load the user addresses with a city of "Boston":
+
+.. sourcecode:: python+sql
+
+ class User(object):
+ pass
+ class Address(object):
+ pass
+
+ mapper(Address, addresses_table)
+ mapper(User, users_table, properties={
+ 'boston_addresses': relationship(Address, primaryjoin=
+ and_(users_table.c.user_id==addresses_table.c.user_id,
+ addresses_table.c.city=='Boston'))
+ })
+
+Many to many relationships can be customized by one or both of ``primaryjoin``
+and ``secondaryjoin``, shown below with just the default many-to-many
+relationship explicitly set:
+
+.. sourcecode:: python+sql
+
+ class User(object):
+ pass
+ class Keyword(object):
+ pass
+ mapper(Keyword, keywords_table)
+ mapper(User, users_table, properties={
+ 'keywords': relationship(Keyword, secondary=userkeywords_table,
+ primaryjoin=users_table.c.user_id==userkeywords_table.c.user_id,
+ secondaryjoin=userkeywords_table.c.keyword_id==keywords_table.c.keyword_id
+ )
+ })
+
+Specifying Foreign Keys
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+When using ``primaryjoin`` and ``secondaryjoin``, SQLAlchemy also needs to be
+aware of which columns in the relationship reference the other. In most cases,
+a :class:`~sqlalchemy.schema.Table` construct will have
+:class:`~sqlalchemy.schema.ForeignKey` constructs which take care of this;
+however, in the case of reflected tables on a database that does not report
+FKs (like MySQL ISAM) or when using join conditions on columns that don't have
+foreign keys, the :func:`~sqlalchemy.orm.relationship` needs to be told
+specifically which columns are "foreign" using the ``foreign_keys``
+collection:
+
+.. sourcecode:: python+sql
+
+ mapper(Address, addresses_table)
+ mapper(User, users_table, properties={
+ 'addresses': relationship(Address, primaryjoin=
+ users_table.c.user_id==addresses_table.c.user_id,
+ foreign_keys=[addresses_table.c.user_id])
+ })
+
+Building Query-Enabled Properties
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Very ambitious custom join conditions may fail to be directly persistable, and
+in some cases may not even load correctly. To remove the persistence part of
+the equation, use the flag ``viewonly=True`` on the
+:func:`~sqlalchemy.orm.relationship`, which establishes it as a read-only
+attribute (data written to the collection will be ignored on flush()).
+However, in extreme cases, consider using a regular Python property in
+conjunction with :class:`~sqlalchemy.orm.query.Query` as follows:
+
+.. sourcecode:: python+sql
+
+ class User(object):
+ def _get_addresses(self):
+ return object_session(self).query(Address).with_parent(self).filter(...).all()
+ addresses = property(_get_addresses)
+
+Multiple Relationships against the Same Parent/Child
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There's no restriction on how many times you can relate from parent to child.
+SQLAlchemy can usually figure out what you want, particularly if the join
+conditions are straightforward. Below we add a ``newyork_addresses`` attribute
+to complement the ``boston_addresses`` attribute:
+
+.. sourcecode:: python+sql
+
+ mapper(User, users_table, properties={
+ 'boston_addresses': relationship(Address, primaryjoin=
+ and_(users_table.c.user_id==addresses_table.c.user_id,
+ addresses_table.c.city=='Boston')),
+ 'newyork_addresses': relationship(Address, primaryjoin=
+ and_(users_table.c.user_id==addresses_table.c.user_id,
+ addresses_table.c.city=='New York')),
+ })
+
+
+Rows that point to themselves / Mutually Dependent Rows
+-------------------------------------------------------
+
+This is a very specific case where relationship() must perform an INSERT and a
+second UPDATE in order to properly populate a row (and vice versa an UPDATE
+and DELETE in order to delete without violating foreign key constraints). The
+two use cases are:
+
+ * A table contains a foreign key to itself, and a single row will have a foreign key value pointing to its own primary key.
+ * Two tables each contain a foreign key referencing the other table, with a row in each table referencing the other.
+
+For example::
+
+ user
+ ---------------------------------
+ user_id name related_user_id
+ 1 'ed' 1
+
+Or::
+
+ widget entry
+ ------------------------------------------- ---------------------------------
+ widget_id name favorite_entry_id entry_id name widget_id
+ 1 'somewidget' 5 5 'someentry' 1
+
+In the first case, a row points to itself. Technically, a database that uses
+sequences such as PostgreSQL or Oracle can INSERT the row at once using a
+previously generated value, but databases which rely upon autoincrement-style
+primary key identifiers cannot. The :func:`~sqlalchemy.orm.relationship`
+always assumes a "parent/child" model of row population during flush, so
+unless you are populating the primary key/foreign key columns directly,
+:func:`~sqlalchemy.orm.relationship` needs to use two statements.
+
+In the second case, the "widget" row must be inserted before any referring
+"entry" rows, but then the "favorite_entry_id" column of that "widget" row
+cannot be set until the "entry" rows have been generated. In this case, it's
+typically impossible to insert the "widget" and "entry" rows using just two
+INSERT statements; an UPDATE must be performed in order to keep foreign key
+constraints fulfilled. The exception is if the foreign keys are configured as
+"deferred until commit" (a feature some databases support) and if the
+identifiers were populated manually (again essentially bypassing
+:func:`~sqlalchemy.orm.relationship`).
+
+To enable the UPDATE after INSERT / UPDATE before DELETE behavior on
+:func:`~sqlalchemy.orm.relationship`, use the ``post_update`` flag on *one* of
+the relationships, preferably the many-to-one side::
+
+ mapper(Widget, widget, properties={
+ 'entries':relationship(Entry, primaryjoin=widget.c.widget_id==entry.c.widget_id),
+ 'favorite_entry':relationship(Entry, primaryjoin=widget.c.favorite_entry_id==entry.c.entry_id, post_update=True)
+ })
+
+When a structure using the above mapping is flushed, the "widget" row will be
+INSERTed minus the "favorite_entry_id" value, then all the "entry" rows will
+be INSERTed referencing the parent "widget" row, and then an UPDATE statement
+will populate the "favorite_entry_id" column of the "widget" table (it's one
+row at a time for the time being).
+
+
+Mutable Primary Keys / Update Cascades
+---------------------------------------
+
+When the primary key of an entity changes, related items
+which reference the primary key must also be updated as
+well. For databases which enforce referential integrity,
+it's required to use the database's ON UPDATE CASCADE
+functionality in order to propagate primary key changes
+to referenced foreign keys - the values cannot be out
+of sync for any moment.
+
+For databases that don't support this, such as SQLite and
+MySQL without their referential integrity options turned
+on, the ``passive_updates`` flag can
+be set to ``False``, most preferably on a one-to-many or
+many-to-many :func:`.relationship`, which instructs
+SQLAlchemy to issue UPDATE statements individually for
+objects referenced in the collection, loading them into
+memory if not already locally present. The
+``passive_updates`` flag can also be ``False`` in
+conjunction with ON UPDATE CASCADE functionality,
+although in that case the unit of work will be issuing
+extra SELECT and UPDATE statements unnecessarily.
+
+A typical mutable primary key setup might look like:
+
+.. sourcecode:: python+sql
+
+ users = Table('users', metadata,
+ Column('username', String(50), primary_key=True),
+ Column('fullname', String(100)))
+
+ addresses = Table('addresses', metadata,
+ Column('email', String(50), primary_key=True),
+ Column('username', String(50), ForeignKey('users.username', onupdate="cascade")))
+
+ class User(object):
+ pass
+ class Address(object):
+ pass
+
+ # passive_updates=False *only* needed if the database
+ # does not implement ON UPDATE CASCADE
+
+ mapper(User, users, properties={
+ 'addresses': relationship(Address, passive_updates=False)
+ })
+ mapper(Address, addresses)
+
+``passive_updates`` is set to ``True`` by default,
+indicating that ON UPDATE CASCADE is expected to be in
+place in the usual case for foreign keys that expect
+to have a mutating parent key.
+
+``passive_updates=False`` may be configured on any
+direction of relationship, i.e. one-to-many, many-to-one,
+and many-to-many, although it is much more effective when
+placed just on the one-to-many or many-to-many side.
+Configuring the ``passive_updates=False`` only on the
+many-to-one side will have only a partial effect, as the
+unit of work searches only through the current identity
+map for objects that may be referencing the one with a
+mutating primary key, not throughout the database.
+
+The :func:`relationship` API
+----------------------------
+
+.. autofunction:: relationship
+
+.. autofunction:: backref
+
+.. autofunction:: relation
+
+
diff --git a/doc/build/session.rst b/doc/build/orm/session.rst
index 65f73e662..7448392fe 100644
--- a/doc/build/session.rst
+++ b/doc/build/orm/session.rst
@@ -4,6 +4,8 @@
Using the Session
=================
+.. module:: sqlalchemy.orm.session
+
The :func:`.orm.mapper` function and :mod:`~sqlalchemy.ext.declarative` extensions
are the primary configurational interface for the ORM. Once mappings are
configured, the primary usage interface for persistence operations is the
@@ -110,49 +112,14 @@ on each invocation::
session = Session(bind=engine)
-...or directly with a :class:`.Connection`. This is useful in some situations,
-such as within a test fixture that maintains an external transaction::
-
- from sqlalchemy.orm import sessionmaker
- from sqlalchemy import create_engine
- from unittest import TestCase
-
- # global application scope. create Session class, engine
- Session = sessionmaker()
-
- engine = create_engine('postgresql://...')
-
- class SomeTest(TestCase):
- def setUp(self):
- # connect to the database
- self.connection = engine.connect()
-
- # begin a non-ORM transaction
- self.trans = connection.begin()
-
- # bind an individual Session to the connection
- self.session = Session(bind=self.connection)
-
- def test_something(self):
- # use the session in tests.
-
- self.session.add(Foo())
- self.session.commit()
-
- def tearDown(self):
- # rollback - everything that happened with the
- # Session above (including calls to commit())
- # is rolled back.
- self.trans.rollback()
- self.session.close()
-
+...or directly with a :class:`.Connection`::
-Configurational Arguments
--------------------------
+ conn = engine.connect()
+ session = Session(bind=conn)
-Configurational arguments accepted by :func:`.sessionmaker` are the same as that of the
-:class:`.Session` class itself, and are described at
-:func:`.sessionmaker`.
+While the rationale for the above example may not be apparent, the typical
+usage is in a test fixture that maintains an external transaction - see
+:ref:`session_external_transaction` below for a full example.
Using the Session
==================
@@ -293,6 +260,7 @@ Frequently Asked Questions
particularly when you do a flush operation, it definitely is not open to
concurrent threads accessing it, because it holds onto a single database
connection at that point. If you use a session which is non-transactional
+ (meaning, ``autocommit`` is set to ``True``, not the default setting)
for read operations only, it's still not thread-"safe", but you also wont
get any catastrophic failures either, since it checks out and returns
connections to the connection pool on an as-needed basis; it's just that
@@ -347,6 +315,10 @@ expires all instances along transaction boundaries, so that with a normally
isolated transaction, there shouldn't be any issue of instances representing
data which is stale with regards to the current transaction.
+The :class:`.Query` object is introduced in great detail in
+:ref:`ormtutorial_toplevel`, and further documented in
+:ref:`query_api_toplevel`.
+
Adding New or Existing Items
----------------------------
@@ -811,10 +783,12 @@ transaction::
item1.foo = 'bar'
item2.bar = 'foo'
- # commit- will immediately go into a new transaction afterwards
+ # commit- will immediately go into
+ # a new transaction on next use.
session.commit()
except:
- # rollback - will immediately go into a new transaction afterwards.
+ # rollback - will immediately go into
+ # a new transaction on next use.
session.rollback()
A session which is configured with ``autocommit=True`` may be placed into a
@@ -982,59 +956,66 @@ proper context for the desired engine::
connection = session.connection(MyMappedClass)
+.. _session_external_transaction:
+
Joining a Session into an External Transaction
===============================================
-If a :class:`~sqlalchemy.engine.base.Connection` is being used which is
-already in a transactional state (i.e. has a
-:class:`~sqlalchemy.engine.base.Transaction`), a
-:class:`~sqlalchemy.orm.session.Session` can be made to participate within
-that transaction by just binding the :class:`~sqlalchemy.orm.session.Session`
-to that :class:`~sqlalchemy.engine.base.Connection`::
+If a :class:`.Connection` is being used which is already in a transactional
+state (i.e. has a :class:`.Transaction` established), a :class:`.Session` can
+be made to participate within that transaction by just binding the
+:class:`.Session` to that :class:`.Connection`. The usual rationale for this
+is a test suite that allows ORM code to work freely with a :class:`.Session`,
+including the ability to call :meth:`.Session.commit`, where afterwards the
+entire database interaction is rolled back::
+ from sqlalchemy.orm import sessionmaker
+ from sqlalchemy import create_engine
+ from unittest import TestCase
+
+ # global application scope. create Session class, engine
Session = sessionmaker()
- # non-ORM connection + transaction
- conn = engine.connect()
- trans = conn.begin()
-
- # create a Session, bind to the connection
- session = Session(bind=conn)
-
- # ... work with session
-
- session.commit() # commit the session
- session.close() # close it out, prohibit further actions
-
- trans.commit() # commit the actual transaction
-
-Note that above, we issue a ``commit()`` both on the
-:class:`~sqlalchemy.orm.session.Session` as well as the
-:class:`~sqlalchemy.engine.base.Transaction`. This is an example of where we
-take advantage of :class:`~sqlalchemy.engine.base.Connection`'s ability to
-maintain *subtransactions*, or nested begin/commit pairs. The
-:class:`~sqlalchemy.orm.session.Session` is used exactly as though it were
-managing the transaction on its own; its
-:func:`~sqlalchemy.orm.session.Session.commit` method issues its
-:func:`~sqlalchemy.orm.session.Session.flush`, and commits the subtransaction.
-The subsequent transaction the :class:`~sqlalchemy.orm.session.Session` starts
-after commit will not begin until it's next used. Above we issue a
-:func:`~sqlalchemy.orm.session.Session.close` to prevent this from occurring.
-Finally, the actual transaction is committed using ``Transaction.commit()``.
+ engine = create_engine('postgresql://...')
-When using the ``threadlocal`` engine context, the process above is
-simplified; the :class:`~sqlalchemy.orm.session.Session` uses the same
-connection/transaction as everyone else in the current thread, whether or not
-you explicitly bind it::
+ class SomeTest(TestCase):
+ def setUp(self):
+ # connect to the database
+ self.connection = engine.connect()
- engine = create_engine('postgresql://mydb', strategy="threadlocal")
- engine.begin()
+ # begin a non-ORM transaction
+ self.trans = self.connection.begin()
+
+ # bind an individual Session to the connection
+ self.session = Session(bind=self.connection)
+
+ def test_something(self):
+ # use the session in tests.
+
+ self.session.add(Foo())
+ self.session.commit()
+
+ def tearDown(self):
+ # rollback - everything that happened with the
+ # Session above (including calls to commit())
+ # is rolled back.
+ self.trans.rollback()
+ self.session.close()
+
+Above, we issue :meth:`.Session.commit` as well as
+:meth:`.Transaction.rollback`. This is an example of where we take advantage
+of the :class:`.Connection` object's ability to maintain *subtransactions*, or
+nested begin/commit-or-rollback pairs where only the outermost begin/commit
+pair actually commits the transaction, or if the outermost block rolls back,
+everything is rolled back.
- session = Session() # session takes place in the transaction like everyone else
+The :class:`.Session` object and :func:`.sessionmaker` function
+================================================================
- # ... go nuts
+.. autofunction:: sessionmaker
- engine.commit() # commit the transaction
+.. autoclass:: sqlalchemy.orm.session.Session
+ :members:
.. _unitofwork_contextual:
@@ -1143,41 +1124,29 @@ in a web application::
Session.remove() <-
web response <-
-The above example illustrates an explicit call to ``Session.remove()``. This
+The above example illustrates an explicit call to :meth:`.ScopedSession.remove`. This
has the effect such that each web request starts fresh with a brand new
-session. When integrating with a web framework, there's actually many options
-on how to proceed for this step:
-
-* Session.remove() - this is the most cut and dry approach; the
- :class:`~sqlalchemy.orm.session.Session` is thrown away, all of its
- transactional resources rolled back and connections checked back to the
- connection pool. A new :class:`~sqlalchemy.orm.session.Session` will be used
- on the next request.
-* Session.close() - Similar to calling ``remove()``, in that all objects are
- explicitly expunged, transactional resources are rolled back, connection
- resources checked back into the connection pool, except the actual
- :class:`~sqlalchemy.orm.session.Session` object hangs around. It doesn't
- make too much difference here unless the start of the web request would like
- to pass specific options to the initial construction of
- :class:`~sqlalchemy.orm.session.Session()`, such as a specific
- :class:`~sqlalchemy.engine.base.Engine` to bind to.
-* Session.commit() - In this case, the behavior is that any remaining changes
- pending are flushed, and the transaction is committed; connection resources
- are returned to the connection pool. The full state of the session is
- expired, so that when the next web request is started, all data will be
- reloaded. In reality, the contents of the
- :class:`~sqlalchemy.orm.session.Session` are weakly referenced anyway so its
- likely that it will be empty on the next request in any case.
-* Session.rollback() - Similar to calling commit, except we assume that the
- user would have called commit explicitly if that was desired; the
- :func:`~sqlalchemy.orm.session.Session.rollback` ensures that no
- transactional state remains, returns connections to the connection pool, and
- expires all data, in the case that the request was aborted and did not roll
- back itself.
-* do nothing - this is a valid option as well. The controller code is
- responsible for doing one of the above steps at the end of the request.
-
-Scoped Session API docs: :func:`sqlalchemy.orm.scoped_session`
+session, and is the most definitive approach to closing out a request.
+
+It's not strictly necessary to remove the session at the end of the request -
+other options include calling :meth:`.Session.close`, :meth:`.Session.rollback`,
+:meth:`.Session.commit` at the end so that the existing session returns
+its connections to the pool and removes any existing transactional context.
+Doing nothing is an option too, if individual controller methods take responsibility
+for ensuring that no transactions remain open after a request ends.
+
+Contextual Session API
+-----------------------
+
+.. autofunction:: sqlalchemy.orm.scoped_session
+
+.. autoclass:: sqlalchemy.orm.scoping.ScopedSession
+ :members:
+
+.. autoclass:: sqlalchemy.util.ScopedRegistry
+ :members:
+
+.. autoclass:: sqlalchemy.util.ThreadLocalRegistry
.. _session_partitioning:
@@ -1206,29 +1175,63 @@ Horizontal Partitioning
Horizontal partitioning partitions the rows of a single table (or a set of
tables) across multiple databases.
-See the "sharding" example in `attribute_shard.py
-<http://www.sqlalchemy.org/trac/browser/sqlalchemy/trunk/examples/sharding/attribute_shard.py>`_
+See the "sharding" example: :ref:`examples_sharding`.
-Extending Session
+Session Utilities
=================
-Extending the session can be achieved through subclassing as well as through a
-simple extension class, which resembles the style of :ref:`extending_mapper`
-called :class:`~sqlalchemy.orm.interfaces.SessionExtension`. See the
-docstrings for more information on this class' methods.
+.. autofunction:: make_transient
+
+.. autofunction:: object_session
+
+Attribute and State Management Utilities
+========================================
+
+These functions are provided by the SQLAlchemy attribute
+instrumentation API to provide a detailed interface for dealing
+with instances, attribute values, and history. Some of them
+are useful when constructing event listener functions, such as
+those described in :ref:`events_orm_toplevel`.
+
+.. currentmodule:: sqlalchemy.orm.attributes
+
+.. autofunction:: del_attribute
+
+.. autofunction:: get_attribute
+
+.. autofunction:: get_history
+
+.. autofunction:: init_collection
+
+.. function:: instance_state
+
+ Return the :class:`InstanceState` for a given object.
+
+.. autofunction:: is_instrumented
+
+.. function:: manager_of_class
+
+ Return the :class:`ClassManager` for a given class.
+
+.. autofunction:: set_attribute
+
+.. autofunction:: set_committed_value
+
+.. autoclass:: History
+ :members:
+
+.. attribute:: sqlalchemy.orm.attributes.PASSIVE_NO_INITIALIZE
-Basic usage is similar to
-:class:`~sqlalchemy.orm.interfaces.MapperExtension`::
+ Symbol indicating that loader callables should
+ not be fired off, and a non-initialized attribute
+ should remain that way.
- class MySessionExtension(SessionExtension):
- def before_commit(self, session):
- print "before commit!"
+.. attribute:: sqlalchemy.orm.attributes.PASSIVE_NO_FETCH
- Session = sessionmaker(extension=MySessionExtension())
+ Symbol indicating that loader callables should not be fired off.
+ Non-initialized attributes should be initialized to an empty value.
-or with :func:`~sqlalchemy.orm.create_session`::
+.. attribute:: sqlalchemy.orm.attributes.PASSIVE_OFF
- session = create_session(extension=MySessionExtension())
+ Symbol indicating that loader callables should be executed.
-The same :class:`~sqlalchemy.orm.interfaces.SessionExtension` instance can be
-used with any number of sessions.
diff --git a/doc/build/ormtutorial.rst b/doc/build/orm/tutorial.rst
index 6f38a35c9..6f38a35c9 100644
--- a/doc/build/ormtutorial.rst
+++ b/doc/build/orm/tutorial.rst
diff --git a/doc/build/reference/ext/compiler.rst b/doc/build/reference/ext/compiler.rst
deleted file mode 100644
index 0e39a6d0e..000000000
--- a/doc/build/reference/ext/compiler.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-.. _sqlalchemy.ext.compiler_toplevel:
-
-compiler
-========
-
-.. automodule:: sqlalchemy.ext.compiler
- :members: \ No newline at end of file
diff --git a/doc/build/reference/ext/index.rst b/doc/build/reference/ext/index.rst
deleted file mode 100644
index 19ea3dedc..000000000
--- a/doc/build/reference/ext/index.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-.. _plugins:
-.. _sqlalchemy.ext:
-
-sqlalchemy.ext
-==============
-
-SQLAlchemy has a variety of extensions available which provide extra
-functionality to SA, either via explicit usage or by augmenting the
-core behavior.
-
-.. toctree::
- :glob:
-
- declarative
- associationproxy
- orderinglist
- serializer
- sqlsoup
- compiler
- horizontal_shard
-
diff --git a/doc/build/reference/index.rst b/doc/build/reference/index.rst
deleted file mode 100644
index 96b078651..000000000
--- a/doc/build/reference/index.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-.. _api_reference_toplevel:
-
-API Reference
-=============
-
-.. toctree::
- :maxdepth: 3
-
- sqlalchemy/index
- orm/index
- dialects/index
- ext/index
-
diff --git a/doc/build/reference/orm/collections.rst b/doc/build/reference/orm/collections.rst
deleted file mode 100644
index 818f11498..000000000
--- a/doc/build/reference/orm/collections.rst
+++ /dev/null
@@ -1,20 +0,0 @@
-Advanced Collection Mapping
-===========================
-
-This is an in-depth discussion of collection mechanics. For simple examples, see :ref:`alternate_collection_implementations`.
-
-.. automodule:: sqlalchemy.orm.collections
-
-.. autofunction:: attribute_mapped_collection
-
-.. autoclass:: collection
-
-.. autoclass:: sqlalchemy.orm.collections.MappedCollection
- :members:
-
-.. autofunction:: collection_adapter
-
-.. autofunction:: column_mapped_collection
-
-.. autofunction:: mapped_collection
-
diff --git a/doc/build/reference/orm/index.rst b/doc/build/reference/orm/index.rst
deleted file mode 100644
index 001d7b4ee..000000000
--- a/doc/build/reference/orm/index.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. _sqlalchemy_orm_toplevel:
-
-sqlalchemy.orm
-==============
-
-.. toctree::
- :glob:
-
- mapping
- collections
- query
- sessions
- interfaces
- utilities
-
-
diff --git a/doc/build/reference/orm/interfaces.rst b/doc/build/reference/orm/interfaces.rst
deleted file mode 100644
index eb17dfb52..000000000
--- a/doc/build/reference/orm/interfaces.rst
+++ /dev/null
@@ -1,7 +0,0 @@
-Interfaces
-==========
-
-.. automodule:: sqlalchemy.orm.interfaces
- :members: AttributeExtension, InstrumentationManager, MapperExtension, PropComparator, SessionExtension
- :undoc-members:
- \ No newline at end of file
diff --git a/doc/build/reference/orm/mapping.rst b/doc/build/reference/orm/mapping.rst
deleted file mode 100644
index 8bab92e59..000000000
--- a/doc/build/reference/orm/mapping.rst
+++ /dev/null
@@ -1,96 +0,0 @@
-Class Mapping
-=============
-
-.. module:: sqlalchemy.orm
-
-Defining Mappings
------------------
-
-Python classes are mapped to the database using the :func:`mapper` function.
-
-.. autofunction:: mapper
-
-Mapper Properties
------------------
-
-A basic mapping of a class will simply make the columns of the
-database table or selectable available as attributes on the class.
-**Mapper properties** allow you to customize and add additional
-properties to your classes, for example making the results one-to-many
-join available as a Python list of :func:`related <relationship>` objects.
-
-Mapper properties are most commonly included in the :func:`mapper`
-call::
-
- mapper(Parent, properties={
- 'children': relationship(Children)
- }
-
-.. autofunction:: backref
-
-.. autofunction:: column_property
-
-.. autofunction:: comparable_property
-
-.. autofunction:: composite
-
-.. autofunction:: deferred
-
-.. autofunction:: dynamic_loader
-
-.. autofunction:: relation
-
-.. autofunction:: relationship
-
-.. autofunction:: synonym
-
-Decorators
-----------
-
-.. autofunction:: reconstructor
-
-.. autofunction:: validates
-
-Utilities
----------
-
-.. autofunction:: object_mapper
-
-.. autofunction:: class_mapper
-
-.. autofunction:: compile_mappers
-
-.. autofunction:: clear_mappers
-
-Attribute Utilities
--------------------
-.. autofunction:: sqlalchemy.orm.attributes.del_attribute
-
-.. autofunction:: sqlalchemy.orm.attributes.get_attribute
-
-.. autofunction:: sqlalchemy.orm.attributes.get_history
-
-.. autofunction:: sqlalchemy.orm.attributes.init_collection
-
-.. function:: sqlalchemy.orm.attributes.instance_state
-
- Return the :class:`InstanceState` for a given object.
-
-.. autofunction:: sqlalchemy.orm.attributes.is_instrumented
-
-.. function:: sqlalchemy.orm.attributes.manager_of_class
-
- Return the :class:`ClassManager` for a given class.
-
-.. autofunction:: sqlalchemy.orm.attributes.set_attribute
-
-.. autofunction:: sqlalchemy.orm.attributes.set_committed_value
-
-Internals
----------
-
-.. autoclass:: sqlalchemy.orm.mapper.Mapper
- :members:
-
-.. autoclass:: sqlalchemy.orm.interfaces.MapperProperty
- :members:
diff --git a/doc/build/reference/orm/sessions.rst b/doc/build/reference/orm/sessions.rst
deleted file mode 100644
index 1272e051c..000000000
--- a/doc/build/reference/orm/sessions.rst
+++ /dev/null
@@ -1,18 +0,0 @@
-Sessions
-========
-
-.. module:: sqlalchemy.orm.session
-
-.. autofunction:: sqlalchemy.orm.create_session
-
-.. autofunction:: make_transient
-
-.. autofunction:: sqlalchemy.orm.scoped_session
-
-.. autofunction:: sessionmaker
-
-.. autoclass:: sqlalchemy.orm.session.Session
- :members:
-
-.. autoclass:: sqlalchemy.orm.scoping.ScopedSession
- :members:
diff --git a/doc/build/reference/orm/utilities.rst b/doc/build/reference/orm/utilities.rst
deleted file mode 100644
index c1dff6f09..000000000
--- a/doc/build/reference/orm/utilities.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Utilities
-=========
-
-.. automodule:: sqlalchemy.orm.util
- :members: identity_key, Validator, with_parent, polymorphic_union
- :undoc-members:
diff --git a/doc/build/reference/sqlalchemy/connections.rst b/doc/build/reference/sqlalchemy/connections.rst
deleted file mode 100644
index c5ffcb48c..000000000
--- a/doc/build/reference/sqlalchemy/connections.rst
+++ /dev/null
@@ -1,65 +0,0 @@
-Connections
-===========
-
-Creating Engines
-----------------
-
-.. autofunction:: sqlalchemy.create_engine
-
-.. autofunction:: sqlalchemy.engine_from_config
-
-.. autoclass:: sqlalchemy.engine.url.URL
- :members:
-
-Connectables
-------------
-
-.. currentmodule:: sqlalchemy.engine.base
-
-.. autoclass:: Engine
- :members:
- :undoc-members:
-
-.. autoclass:: Connection
- :members:
- :undoc-members:
-
-.. autoclass:: Connectable
- :members:
- :undoc-members:
-
-Result Objects
---------------
-
-.. autoclass:: sqlalchemy.engine.base.ResultProxy
- :members:
-
-.. autoclass:: sqlalchemy.engine.base.RowProxy
- :members:
-
-Transactions
-------------
-
-.. autoclass:: Transaction
- :members:
- :undoc-members:
-
-Internals
----------
-
-.. autofunction:: connection_memoize
-
-.. autoclass:: Dialect
- :members:
-
-.. autoclass:: sqlalchemy.engine.default.DefaultDialect
- :members:
- :show-inheritance:
-
-.. autoclass:: sqlalchemy.engine.default.DefaultExecutionContext
- :members:
- :show-inheritance:
-
-.. autoclass:: ExecutionContext
- :members:
-
diff --git a/doc/build/reference/sqlalchemy/index.rst b/doc/build/reference/sqlalchemy/index.rst
deleted file mode 100644
index 9970a669c..000000000
--- a/doc/build/reference/sqlalchemy/index.rst
+++ /dev/null
@@ -1,15 +0,0 @@
-sqlalchemy
-==========
-
-.. toctree::
- :glob:
-
- connections
- pooling
- expressions
- schema
- inspector
- types
- interfaces
- util
-
diff --git a/doc/build/reference/sqlalchemy/inspector.rst b/doc/build/reference/sqlalchemy/inspector.rst
deleted file mode 100644
index e0ef91460..000000000
--- a/doc/build/reference/sqlalchemy/inspector.rst
+++ /dev/null
@@ -1,36 +0,0 @@
-.. _inspector_api_toplevel:
-
-Schema Introspection
-====================
-
-.. module:: sqlalchemy.engine.reflection
-
-SQLAlchemy provides rich schema introspection capabilities. The most common methods for this include the "autoload" argument of :class:`~sqlalchemy.schema.Table`::
-
- from sqlalchemy import create_engine, MetaData, Table
- engine = create_engine('...')
- meta = MetaData()
- user_table = Table('user', meta, autoload=True, autoload_with=engine)
-
-As well as the :meth:`~sqlalchemy.schema.MetaData.reflect` method of :class:`~sqlalchemy.schema.MetaData`::
-
- from sqlalchemy import create_engine, MetaData, Table
- engine = create_engine('...')
- meta = MetaData()
- meta.reflect(engine)
- user_table = meta.tables['user']
-
-Further examples of reflection using :class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.MetaData` can be found at :ref:`metadata_reflection`.
-
-There is also a low-level inspection interface available for more specific operations, known as the :class:`Inspector`::
-
- from sqlalchemy import create_engine
- from sqlalchemy.engine import reflection
- engine = create_engine('...')
- insp = reflection.Inspector.from_engine(engine)
- print insp.get_table_names()
-
-.. autoclass:: Inspector
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/build/reference/sqlalchemy/interfaces.rst b/doc/build/reference/sqlalchemy/interfaces.rst
deleted file mode 100644
index f9f60882c..000000000
--- a/doc/build/reference/sqlalchemy/interfaces.rst
+++ /dev/null
@@ -1,8 +0,0 @@
-Interfaces
-----------
-
-.. automodule:: sqlalchemy.interfaces
- :members:
- :undoc-members:
-
-
diff --git a/doc/build/reference/sqlalchemy/pooling.rst b/doc/build/reference/sqlalchemy/pooling.rst
deleted file mode 100644
index d37425e3a..000000000
--- a/doc/build/reference/sqlalchemy/pooling.rst
+++ /dev/null
@@ -1,153 +0,0 @@
-.. _pooling_toplevel:
-
-Connection Pooling
-==================
-
-.. module:: sqlalchemy.pool
-
-SQLAlchemy ships with a connection pooling framework that integrates
-with the Engine system and can also be used on its own to manage plain
-DB-API connections.
-
-At the base of any database helper library is a system for efficiently
-acquiring connections to the database. Since the establishment of a
-database connection is typically a somewhat expensive operation, an
-application needs a way to get at database connections repeatedly
-without incurring the full overhead each time. Particularly for
-server-side web applications, a connection pool is the standard way to
-maintain a group or "pool" of active database connections which are
-reused from request to request in a single server process.
-
-Connection Pool Configuration
------------------------------
-
-The :class:`~sqlalchemy.engine.Engine` returned by the
-:func:`~sqlalchemy.create_engine` function in most cases has a :class:`QueuePool`
-integrated, pre-configured with reasonable pooling defaults. If
-you're reading this section to simply enable pooling- congratulations!
-You're already done.
-
-The most common :class:`QueuePool` tuning parameters can be passed
-directly to :func:`~sqlalchemy.create_engine` as keyword arguments:
-``pool_size``, ``max_overflow``, ``pool_recycle`` and
-``pool_timeout``. For example::
-
- engine = create_engine('postgresql://me@localhost/mydb',
- pool_size=20, max_overflow=0)
-
-In the case of SQLite, a :class:`SingletonThreadPool` is provided instead,
-to provide compatibility with SQLite's restricted threading model.
-
-
-Custom Pool Construction
-------------------------
-
-:class:`Pool` instances may be created directly for your own use or to
-supply to :func:`sqlalchemy.create_engine` via the ``pool=``
-keyword argument.
-
-Constructing your own pool requires supplying a callable function the
-Pool can use to create new connections. The function will be called
-with no arguments.
-
-Through this method, custom connection schemes can be made, such as a
-using connections from another library's pool, or making a new
-connection that automatically executes some initialization commands::
-
- import sqlalchemy.pool as pool
- import psycopg2
-
- def getconn():
- c = psycopg2.connect(username='ed', host='127.0.0.1', dbname='test')
- # execute an initialization function on the connection before returning
- c.cursor.execute("setup_encodings()")
- return c
-
- p = pool.QueuePool(getconn, max_overflow=10, pool_size=5)
-
-Or with SingletonThreadPool::
-
- import sqlalchemy.pool as pool
- import sqlite
-
- p = pool.SingletonThreadPool(lambda: sqlite.connect(filename='myfile.db'))
-
-
-Builtin Pool Implementations
-----------------------------
-
-.. autoclass:: AssertionPool
- :members:
- :show-inheritance:
-
-.. autoclass:: NullPool
- :members:
- :show-inheritance:
-
-.. autoclass:: sqlalchemy.pool.Pool
- :members:
- :show-inheritance:
- :undoc-members:
- :inherited-members:
-
-.. autoclass:: sqlalchemy.pool.QueuePool
- :members:
- :show-inheritance:
-
-.. autoclass:: SingletonThreadPool
- :members:
- :show-inheritance:
-
-.. autoclass:: StaticPool
- :members:
- :show-inheritance:
-
-
-Pooling Plain DB-API Connections
---------------------------------
-
-Any :pep:`249` DB-API module can be "proxied" through the connection
-pool transparently. Usage of the DB-API is exactly as before, except
-the ``connect()`` method will consult the pool. Below we illustrate
-this with ``psycopg2``::
-
- import sqlalchemy.pool as pool
- import psycopg2 as psycopg
-
- psycopg = pool.manage(psycopg)
-
- # then connect normally
- connection = psycopg.connect(database='test', username='scott',
- password='tiger')
-
-This produces a :class:`_DBProxy` object which supports the same
-``connect()`` function as the original DB-API module. Upon
-connection, a connection proxy object is returned, which delegates its
-calls to a real DB-API connection object. This connection object is
-stored persistently within a connection pool (an instance of
-:class:`Pool`) that corresponds to the exact connection arguments sent
-to the ``connect()`` function.
-
-The connection proxy supports all of the methods on the original
-connection object, most of which are proxied via ``__getattr__()``.
-The ``close()`` method will return the connection to the pool, and the
-``cursor()`` method will return a proxied cursor object. Both the
-connection proxy and the cursor proxy will also return the underlying
-connection to the pool after they have both been garbage collected,
-which is detected via weakref callbacks (``__del__`` is not used).
-
-Additionally, when connections are returned to the pool, a
-``rollback()`` is issued on the connection unconditionally. This is
-to release any locks still held by the connection that may have
-resulted from normal activity.
-
-By default, the ``connect()`` method will return the same connection
-that is already checked out in the current thread. This allows a
-particular connection to be used in a given thread without needing to
-pass it around between functions. To disable this behavior, specify
-``use_threadlocal=False`` to the ``manage()`` function.
-
-.. autofunction:: sqlalchemy.pool.manage
-
-.. autofunction:: sqlalchemy.pool.clear_managers
-
diff --git a/doc/build/reference/sqlalchemy/schema.rst b/doc/build/reference/sqlalchemy/schema.rst
deleted file mode 100644
index 32ebaa616..000000000
--- a/doc/build/reference/sqlalchemy/schema.rst
+++ /dev/null
@@ -1,169 +0,0 @@
-.. _schema_api_toplevel:
-
-Database Schema
-===============
-
-.. module:: sqlalchemy.schema
-
-SQLAlchemy schema definition language. For more usage examples, see :ref:`metadata_toplevel`.
-
-Tables and Columns
-------------------
-
-.. autoclass:: Column
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: MetaData
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: Table
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: ThreadLocalMetaData
- :members:
- :undoc-members:
- :show-inheritance:
-
-Constraints
------------
-
-.. autoclass:: CheckConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: Constraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: ForeignKey
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: ForeignKeyConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: Index
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: PrimaryKeyConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: UniqueConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-Default Generators and Markers
-------------------------------
-
-.. autoclass:: ColumnDefault
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DefaultClause
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DefaultGenerator
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: FetchedValue
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: PassiveDefault
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: Sequence
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. _schema_api_ddl:
-
-DDL Generation
---------------
-
-.. autoclass:: DDLElement
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DDL
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: CreateTable
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DropTable
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: CreateSequence
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DropSequence
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: CreateIndex
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DropIndex
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: AddConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: DropConstraint
- :members:
- :undoc-members:
- :show-inheritance:
-
-Internals
----------
-
-.. autoclass:: SchemaItem
- :members:
- :undoc-members:
- :show-inheritance:
-
-.. autoclass:: SchemaVisitor
- :members:
- :undoc-members:
- :show-inheritance:
diff --git a/doc/build/reference/sqlalchemy/util.rst b/doc/build/reference/sqlalchemy/util.rst
deleted file mode 100755
index 92fe14b34..000000000
--- a/doc/build/reference/sqlalchemy/util.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Utilities
-=========
-
-.. automodule:: sqlalchemy.util
- :members:
diff --git a/doc/build/sqla_arch_small.jpg b/doc/build/sqla_arch_small.jpg
deleted file mode 100644
index 2976030d7..000000000
--- a/doc/build/sqla_arch_small.jpg
+++ /dev/null
Binary files differ
diff --git a/doc/build/sqla_arch_small.png b/doc/build/sqla_arch_small.png
new file mode 100644
index 000000000..521a0813b
--- /dev/null
+++ b/doc/build/sqla_arch_small.png
Binary files differ
diff --git a/doc/build/static/docs.css b/doc/build/static/docs.css
index 145e49b8c..23513dbba 100644
--- a/doc/build/static/docs.css
+++ b/doc/build/static/docs.css
@@ -260,7 +260,8 @@ dl.function > dt,
dl.attribute > dt,
dl.classmethod > dt,
dl.method > dt,
-dl.class > dt
+dl.class > dt,
+dl.exception > dt
{
background-color:#F0F0F0;
margin:0px -10px;
diff --git a/doc/build/templates/layout.mako b/doc/build/templates/layout.mako
index 62bbc383a..d842fdd63 100644
--- a/doc/build/templates/layout.mako
+++ b/doc/build/templates/layout.mako
@@ -57,8 +57,6 @@
<div class="topnav">
<div id="pagecontrol">
- <a href="${pathto('reference/index')}">API Reference</a>
- |
<a href="${pathto('genindex')}">Index</a>
% if sourcename:
diff --git a/doc/build/templates/site_base.mako b/doc/build/templates/site_base.mako
index 36bd0d973..301c6a6dc 100644
--- a/doc/build/templates/site_base.mako
+++ b/doc/build/templates/site_base.mako
@@ -7,7 +7,6 @@
</%text>
<div style="text-align:right">
-<b>Quick Select:</b> <a href="/docs/06/">0.6</a> | <a href="/docs/05/">0.5</a> | <a href="/docs/04/">0.4</a><br/>
<b>PDF Download:</b> <a href="${pathto('sqlalchemy_' + release.replace('.', '_') + '.pdf', 1)}">download</a>
</div>
diff --git a/doc/build/testdocs.py b/doc/build/testdocs.py
index 1f57e3272..05c7ac52f 100644
--- a/doc/build/testdocs.py
+++ b/doc/build/testdocs.py
@@ -62,7 +62,7 @@ def replace_file(s, newfile):
raise ValueError("Couldn't find suitable create_engine call to replace '%s' in it" % oldfile)
return s
-for filename in ('ormtutorial', 'sqlexpression'):
+for filename in ('orm/tutorial', 'core/tutorial'):
filename = '%s.rst' % filename
s = open(filename).read()
#s = replace_file(s, ':memory:')
diff --git a/doc/build/texinputs/sphinx.sty b/doc/build/texinputs/sphinx.sty
index e44bf714a..3782b69fa 100644
--- a/doc/build/texinputs/sphinx.sty
+++ b/doc/build/texinputs/sphinx.sty
@@ -6,7 +6,7 @@
%
\NeedsTeXFormat{LaTeX2e}[1995/12/01]
-\ProvidesPackage{sphinx}[2008/05/01 LaTeX package (Sphinx markup)]
+\ProvidesPackage{sphinx}[2010/01/15 LaTeX package (Sphinx markup)]
\RequirePackage{textcomp}
\RequirePackage{fancyhdr}
@@ -17,8 +17,17 @@
\RequirePackage{makeidx}
\RequirePackage{framed}
\RequirePackage{color}
+% For highlighted code.
\RequirePackage{fancyvrb}
+% For table captions.
\RequirePackage{threeparttable}
+% Handle footnotes in tables.
+\RequirePackage{footnote}
+\makesavenoteenv{tabulary}
+% For floating figures in the text.
+\RequirePackage{wrapfig}
+% Separate paragraphs by space by default.
+\RequirePackage{parskip}
% Redefine these colors to your liking in the preamble.
\definecolor{TitleColor}{rgb}{0.126,0.263,0.361}
@@ -29,7 +38,7 @@
\definecolor{VerbatimColor}{rgb}{1,1,1}
\definecolor{VerbatimBorderColor}{rgb}{1,1,1}
-% Uncomment these two lines to ignore the paper size and make the page
+% Uncomment these two lines to ignore the paper size and make the page
% size more like a typical published manual.
%\renewcommand{\paperheight}{9in}
%\renewcommand{\paperwidth}{8.5in} % typical squarish manual
@@ -82,8 +91,6 @@
% Style parameters and macros used by most documents here
\raggedbottom
\sloppy
-\parindent = 0mm
-\parskip = 2mm
\hbadness = 5000 % don't print trivial gripes
\pagestyle{empty} % start this way; change for
@@ -123,8 +130,6 @@
\newcommand{\samp}[1]{`\code{#1}'}
\newcommand{\email}[1]{\textsf{#1}}
-\newcommand{\py@modulebadkey}{{--just-some-junk--}}
-
% Redefine the Verbatim environment to allow border and background colors.
% The original environment is still used for verbatims within tables.
\let\OriginalVerbatim=\Verbatim
@@ -135,12 +140,11 @@
\newlength\leftsidespace
\def\mycolorbox#1{%
\setlength\leftsidespace{\@totalleftmargin}%
- \setlength\distancetoright{\textwidth}%
+ \setlength\distancetoright{\linewidth}%
\advance\distancetoright -\@totalleftmargin %
\noindent\hspace*{\@totalleftmargin}%
\fcolorbox{VerbatimBorderColor}{VerbatimColor}{%
\begin{minipage}{\distancetoright}%
- \smallskip%
\noindent\hspace*{-\leftsidespace}%
#1
\end{minipage}%
@@ -173,89 +177,12 @@
\index{#4!#1 #2 #3}
}
-% support for the module index
-\newif\ifpy@UseModuleIndex
-\py@UseModuleIndexfalse
-
-\newcommand{\makemodindex}{
- \newwrite\modindexfile
- \openout\modindexfile=mod\jobname.idx
- \py@UseModuleIndextrue
-}
-
-\newcommand{\printmodindex}{
- \@input@{mod\jobname.ind}
-}
-
-% Add the defining entry for a module
-\newcommand{\py@modindex}[2]{%
- \renewcommand{\py@thismodule}{#1}
- \ifpy@UseModuleIndex%
- \@ifundefined{py@modplat@\py@thismodulekey}{
- \write\modindexfile{\protect\indexentry{#1@{\texttt{#1}}|hyperpage}{\thepage}}%
- }{\write\modindexfile{\protect\indexentry{#1@{\texttt{#1 }%
- \emph{(\platformof{\py@thismodulekey})}}|hyperpage}{\thepage}}%
- }
- \fi%
-}
-
-% "Current" keys
-\newcommand{\py@thisclass}{}
-\newcommand{\py@thismodule}{}
-\newcommand{\py@thismodulekey}{}
-\newcommand{\py@thismoduletype}{}
-\newcommand{\py@emptymodule}{}
-
-% \declaremodule[key]{type}{name}
-\newcommand{\declaremodule}[3][\py@modulebadkey]{
- \renewcommand{\py@thismoduletype}{#2}
- \ifx\py@modulebadkey#1
- \renewcommand{\py@thismodulekey}{#3}
- \else
- \renewcommand{\py@thismodulekey}{#1}
- \fi
- \py@modindex{#3}{}
- %\label{module-\py@thismodulekey}
-}
-
-% Record module platforms for the Module Index
-\newif\ifpy@ModPlatformFileIsOpen \py@ModPlatformFileIsOpenfalse
-\long\def\py@writeModPlatformFile#1{%
- \protected@write\py@ModPlatformFile%
- {\let\label\@gobble \let\index\@gobble \let\glossary\@gobble}%
- {\string#1}%
-}
-\newcommand{\py@ModPlatformFilename}{\jobname.pla}
-\newcommand{\platform}[1]{
- \ifpy@ModPlatformFileIsOpen\else
- \newwrite\py@ModPlatformFile
- \openout\py@ModPlatformFile=\py@ModPlatformFilename
- \py@ModPlatformFileIsOpentrue
- \fi
- \py@writeModPlatformFile{\py@defplatform{\py@thismodulekey}{#1}}
-}
-\newcommand{\py@defplatform}[2]{\expandafter\def\csname py@modplat@#1\endcsname{#2}}
-\newcommand{\platformof}[1]{\csname py@modplat@#1\endcsname}
-
-\InputIfFileExists{\jobname.pla}{}{}
-
% \moduleauthor{name}{email}
\newcommand{\moduleauthor}[2]{}
% \sectionauthor{name}{email}
\newcommand{\sectionauthor}[2]{}
-% Ignore module synopsis.
-\newcommand{\modulesynopsis}[1]{}
-
-% Reset "current" objects.
-\newcommand{\resetcurrentobjects}{
- \renewcommand{\py@thisclass}{}
- \renewcommand{\py@thismodule}{}
- \renewcommand{\py@thismodulekey}{}
- \renewcommand{\py@thismoduletype}{}
-}
-
% Augment the sectioning commands used to get our own font family in place,
% and reset some internal data items:
\titleformat{\section}{\Large\py@HeaderFamily}%
@@ -267,14 +194,7 @@
\titleformat{\paragraph}{\large\py@HeaderFamily}%
{\py@TitleColor}{0em}{\py@TitleColor}{\py@NormalColor}
-
-% Now for a lot of semantically-loaded environments that do a ton of magical
-% things to get the right formatting and index entries for the stuff in
-% Python modules and C API.
-
-
-% {fulllineitems} is used in one place in libregex.tex, but is really for
-% internal use in this file.
+% {fulllineitems} is the main environment for object descriptions.
%
\newcommand{\py@itemnewline}[1]{%
\@tempdima\linewidth%
@@ -288,223 +208,19 @@
\let\makelabel=\py@itemnewline}
}{\end{list}}
-% \optional is mostly for use in the arguments parameters to the various
-% {*desc} environments defined below, but may be used elsewhere. Known to
-% be used in the debugger chapter.
-%
-% Typical usage:
-%
-% \begin{funcdesc}{myfunc}{reqparm\optional{, optparm}}
-% ^^^ ^^^
-% No space here No space here
-%
-% When a function has multiple optional parameters, \optional should be
-% nested, not chained. This is right:
-%
-% \begin{funcdesc}{myfunc}{\optional{parm1\optional{, parm2}}}
-%
-\let\py@badkey=\@undefined
-
+% \optional is used for ``[, arg]``, i.e. desc_optional nodes.
\newcommand{\optional}[1]{%
{\textnormal{\Large[}}{#1}\hspace{0.5mm}{\textnormal{\Large]}}}
-% This can be used when a function or method accepts an varying number
-% of arguments, such as by using the *args syntax in the parameter list.
-\newcommand{\py@moreargs}{...}
-
-% This can be used when you don't want to document the parameters to a
-% function or method, but simply state that it's an alias for
-% something else.
-\newcommand{\py@unspecified}{...}
-
-\newcommand{\py@varvars}[1]{{%
- {\let\unspecified=\py@unspecified%
- \let\moreargs=\py@moreargs%
- \emph{#1}}}}
-
\newlength{\py@argswidth}
-\newcommand{\py@sigparams}[1]{%
- \parbox[t]{\py@argswidth}{\py@varvars{#1}\code{)}}}
-\newcommand{\py@sigline}[2]{%
+\newcommand{\py@sigparams}[2]{%
+ \parbox[t]{\py@argswidth}{#1\code{)}#2}}
+\newcommand{\pysigline}[1]{\item[#1]\nopagebreak}
+\newcommand{\pysiglinewithargsret}[3]{%
\settowidth{\py@argswidth}{#1\code{(}}%
\addtolength{\py@argswidth}{-2\py@argswidth}%
- \addtolength{\py@argswidth}{\textwidth}%
- \item[#1\code{(}\py@sigparams{#2}]}
-
-% C functions ------------------------------------------------------------
-% \begin{cfuncdesc}[refcount]{type}{name}{arglist}
-% Note that the [refcount] slot should only be filled in by
-% tools/anno-api.py; it pulls the value from the refcounts database.
-\newcommand{\cfuncline}[3]{
- \py@sigline{\code{#1 \bfcode{#2}}}{#3}%
-}
-\newenvironment{cfuncdesc}[3]{
- \begin{fulllineitems}
- \cfuncline{#1}{#2}{#3}
-}{\end{fulllineitems}}
-
-% C variables ------------------------------------------------------------
-% \begin{cvardesc}{type}{name}
-\newenvironment{cvardesc}[2]{
- \begin{fulllineitems}
- \item[\code{#1 \bfcode{#2}}]
-}{\end{fulllineitems}}
-
-% C data types -----------------------------------------------------------
-% \begin{ctypedesc}[index name]{typedef name}
-\newenvironment{ctypedesc}[2][\py@badkey]{
- \begin{fulllineitems}
- \item[\bfcode{#2}]
-}{\end{fulllineitems}}
-
-% C type fields ----------------------------------------------------------
-% \begin{cmemberdesc}{container type}{ctype}{membername}
-\newcommand{\cmemberline}[3]{
- \item[\code{#2 \bfcode{#3}}]
-}
-\newenvironment{cmemberdesc}[3]{
- \begin{fulllineitems}
- \cmemberline{#1}{#2}{#3}
-}{\end{fulllineitems}}
-
-% Funky macros -----------------------------------------------------------
-% \begin{csimplemacrodesc}{name}
-% -- "simple" because it has no args; NOT for constant definitions!
-\newenvironment{csimplemacrodesc}[1]{
- \begin{fulllineitems}
- \item[\bfcode{#1}]
-}{\end{fulllineitems}}
-
-% simple functions (not methods) -----------------------------------------
-% \begin{funcdesc}{name}{args}
-\newcommand{\funcline}[2]{%
- \py@sigline{\bfcode{#1}}{#2}}
-\newenvironment{funcdesc}[2]{
- \begin{fulllineitems}
- \funcline{#1}{#2}
-}{\end{fulllineitems}}
-
-% classes ----------------------------------------------------------------
-% \begin{classdesc}{name}{constructor args}
-\newcommand{\classline}[2]{
- \py@sigline{\strong{class }\bfcode{#1}}{#2}}
-\newenvironment{classdesc}[2]{
- % Using \renewcommand doesn't work for this, for unknown reasons:
- \global\def\py@thisclass{#1}
- \begin{fulllineitems}
- \classline{#1}{#2}
-}{\end{fulllineitems}}
-
-% \begin{excclassdesc}{name}{constructor args}
-% but indexes as an exception
-\newenvironment{excclassdesc}[2]{
- % Using \renewcommand doesn't work for this, for unknown reasons:
- \global\def\py@thisclass{#1}
- \begin{fulllineitems}
- \py@sigline{\strong{exception }\bfcode{#1}}{#2}%
-}{\end{fulllineitems}}
-
-% There is no corresponding {excclassdesc*} environment. To describe
-% a class exception without parameters, use the {excdesc} environment.
-
-
-\let\py@classbadkey=\@undefined
-
-% object method ----------------------------------------------------------
-% \begin{methoddesc}[classname]{methodname}{args}
-\newcommand{\methodline}[3][\@undefined]{
- \py@sigline{\bfcode{#2}}{#3}}
-\newenvironment{methoddesc}[3][\@undefined]{
- \begin{fulllineitems}
- \ifx\@undefined#1\relax
- \methodline{#2}{#3}
- \else
- \def\py@thisclass{#1}
- \methodline{#2}{#3}
- \fi
-}{\end{fulllineitems}}
-
-% static method ----------------------------------------------------------
-% \begin{staticmethoddesc}[classname]{methodname}{args}
-\newcommand{\staticmethodline}[3][\@undefined]{
- \py@sigline{static \bfcode{#2}}{#3}}
-\newenvironment{staticmethoddesc}[3][\@undefined]{
- \begin{fulllineitems}
- \ifx\@undefined#1\relax
- \staticmethodline{#2}{#3}
- \else
- \def\py@thisclass{#1}
- \staticmethodline{#2}{#3}
- \fi
-}{\end{fulllineitems}}
-
-% class method ----------------------------------------------------------
-% \begin{classmethoddesc}[classname]{methodname}{args}
-\newcommand{\classmethodline}[3][\@undefined]{
-\py@sigline{class \bfcode{#2}}{#3}}
-\newenvironment{classmethoddesc}[3][\@undefined]{
- \begin{fulllineitems}
- \ifx\@undefined#1\relax
- \classmethodline{#2}{#3}
- \else
- \def\py@thisclass{#1}
- \classmethodline{#2}{#3}
- \fi
-}{\end{fulllineitems}}
-
-
-% object data attribute --------------------------------------------------
-% \begin{memberdesc}[classname]{membername}
-\newcommand{\memberline}[2][\py@classbadkey]{%
- \ifx\@undefined#1\relax
- \item[\bfcode{#2}]
- \else
- \item[\bfcode{#2}]
- \fi
-}
-\newenvironment{memberdesc}[2][\py@classbadkey]{
- \begin{fulllineitems}
- \ifx\@undefined#1\relax
- \memberline{#2}
- \else
- \def\py@thisclass{#1}
- \memberline{#2}
- \fi
-}{\end{fulllineitems}}
-
-% For exceptions: --------------------------------------------------------
-% \begin{excdesc}{name}
-% -- for constructor information, use excclassdesc instead
-\newenvironment{excdesc}[1]{
- \begin{fulllineitems}
- \item[\strong{exception }\bfcode{#1}]
-}{\end{fulllineitems}}
-
-% Module data or constants: ----------------------------------------------
-% \begin{datadesc}{name}
-\newcommand{\dataline}[1]{%
- \item[\bfcode{#1}]\nopagebreak}
-\newenvironment{datadesc}[1]{
- \begin{fulllineitems}
- \dataline{#1}
-}{\end{fulllineitems}}
-
-% bytecode instruction ---------------------------------------------------
-% \begin{opcodedesc}{name}{var}
-% -- {var} may be {}
-\newenvironment{opcodedesc}[2]{
- \begin{fulllineitems}
- \item[\bfcode{#1}\quad\emph{#2}]
-}{\end{fulllineitems}}
-
-% generic description ----------------------------------------------------
-\newcommand{\descline}[1]{%
- \item[\bfcode{#1}]\nopagebreak%
-}
-\newenvironment{describe}[1]{
- \begin{fulllineitems}
- \descline{#1}
-}{\end{fulllineitems}}
+ \addtolength{\py@argswidth}{\linewidth}%
+ \item[#1\code{(}\py@sigparams{#2}{#3}]}
% This version is being checked in for the historical record; it shows
% how I've managed to get some aspects of this to work. It will not
@@ -514,9 +230,10 @@
% the example completely.
%
\newcommand{\grammartoken}[1]{\texttt{#1}}
-\newenvironment{productionlist}[1][\py@badkey]{
+\newenvironment{productionlist}[1][\@undefined]{
\def\optional##1{{\Large[}##1{\Large]}}
- \def\production##1##2{\code{##1}&::=&\code{##2}\\}
+ \def\production##1##2{\hypertarget{grammar-token-##1}{}%
+ \code{##1}&::=&\code{##2}\\}
\def\productioncont##1{& &\code{##1}\\}
\def\token##1{##1}
\let\grammartoken=\token
@@ -676,8 +393,10 @@
\image@width\wd\image@box%
\ifdim \image@width>\linewidth%
\setbox\image@box=\hbox{\py@Oldincludegraphics[width=\linewidth]{#2}}%
+ \box\image@box%
+ \else%
+ \py@Oldincludegraphics{#2}%
\fi%
- \box\image@box%
\else%
\py@Oldincludegraphics[#1]{#2}%
\fi%
@@ -707,5 +426,21 @@
% Include hyperref last.
\RequirePackage[colorlinks,breaklinks,
linkcolor=InnerLinkColor,filecolor=OuterLinkColor,
- menucolor=OuterLinkColor,pagecolor=OuterLinkColor,
- urlcolor=OuterLinkColor]{hyperref}
+ menucolor=OuterLinkColor,urlcolor=OuterLinkColor,
+ citecolor=InnerLinkColor]{hyperref}
+% Fix anchor placement for figures with captions.
+% (Note: we don't use a package option here; instead, we give an explicit
+% \capstart for figures that actually have a caption.)
+\RequirePackage{hypcap}
+
+% From docutils.writers.latex2e
+\providecommand{\DUspan}[2]{%
+ {% group ("span") to limit the scope of styling commands
+ \@for\node@class@name:=#1\do{%
+ \ifcsname docutilsrole\node@class@name\endcsname%
+ \csname docutilsrole\node@class@name\endcsname%
+ \fi%
+ }%
+ {#2}% node content
+ }% close "span"
+}
diff --git a/examples/derived_attributes/attributes.py b/examples/derived_attributes/attributes.py
index ade2a4ed3..f36cbd541 100644
--- a/examples/derived_attributes/attributes.py
+++ b/examples/derived_attributes/attributes.py
@@ -1,27 +1,38 @@
-
+from functools import update_wrapper
import new
-class hybrid(object):
- def __init__(self, func):
+class method(object):
+ def __init__(self, func, expr=None):
self.func = func
+ self.expr = expr or func
+
def __get__(self, instance, owner):
if instance is None:
- return new.instancemethod(self.func, owner, owner.__class__)
+ return new.instancemethod(self.expr, owner, owner.__class__)
else:
return new.instancemethod(self.func, instance, owner)
-class hybrid_property(object):
- def __init__(self, fget, fset=None, fdel=None):
+ def expression(self, expr):
+ self.expr = expr
+ return self
+
+class property_(object):
+ def __init__(self, fget, fset=None, fdel=None, expr=None):
self.fget = fget
self.fset = fset
self.fdel = fdel
+ self.expr = expr or fget
+ update_wrapper(self, fget)
+
def __get__(self, instance, owner):
if instance is None:
- return self.fget(owner)
+ return self.expr(owner)
else:
return self.fget(instance)
+
def __set__(self, instance, value):
self.fset(instance, value)
+
def __delete__(self, instance):
self.fdel(instance)
@@ -29,67 +40,97 @@ class hybrid_property(object):
self.fset = fset
return self
-### Example code
-
-from sqlalchemy import MetaData, Table, Column, Integer
-from sqlalchemy.orm import mapper, sessionmaker
-
-metadata = MetaData('sqlite://')
-metadata.bind.echo = True
-
-print "Set up database metadata"
-
-interval_table1 = Table('interval1', metadata,
- Column('id', Integer, primary_key=True),
- Column('start', Integer, nullable=False),
- Column('end', Integer, nullable=False))
+ def deleter(self, fdel):
+ self.fdel = fdel
+ return self
+
+ def expression(self, expr):
+ self.expr = expr
+ return self
-interval_table2 = Table('interval2', metadata,
- Column('id', Integer, primary_key=True),
- Column('start', Integer, nullable=False),
- Column('length', Integer, nullable=False))
+### Example code
-metadata.create_all()
+from sqlalchemy import Table, Column, Integer, create_engine, func
+from sqlalchemy.orm import sessionmaker, aliased
+from sqlalchemy.ext.declarative import declarative_base
-# A base class for intervals
+Base = declarative_base()
class BaseInterval(object):
- @hybrid
+ @method
def contains(self,point):
+        """Return true if the interval contains the given point."""
+
return (self.start <= point) & (point < self.end)
- @hybrid
+ @method
def intersects(self, other):
+ """Return true if the interval intersects the given interval."""
+
return (self.start < other.end) & (self.end > other.start)
-
+
+ @method
+ def _max(self, x, y):
+ """Return the max of two values."""
+
+ return max(x, y)
+
+ @_max.expression
+ def _max(cls, x, y):
+ """Return the SQL max of two values."""
+
+ return func.max(x, y)
+
+ @method
+ def max_length(self, other):
+ """Return the longer length of this interval and another."""
+
+ return self._max(self.length, other.length)
+
def __repr__(self):
return "%s(%s..%s)" % (self.__class__.__name__, self.start, self.end)
+
+class Interval1(BaseInterval, Base):
+ """Interval stored as endpoints"""
+
+ __table__ = Table('interval1', Base.metadata,
+ Column('id', Integer, primary_key=True),
+ Column('start', Integer, nullable=False),
+ Column('end', Integer, nullable=False)
+ )
-# Interval stored as endpoints
-
-class Interval1(BaseInterval):
def __init__(self, start, end):
self.start = start
self.end = end
-
- length = hybrid_property(lambda s: s.end - s.start)
-mapper(Interval1, interval_table1)
+ @property_
+ def length(self):
+ return self.end - self.start
-# Interval stored as start and length
+class Interval2(BaseInterval, Base):
+ """Interval stored as start and length"""
+
+ __table__ = Table('interval2', Base.metadata,
+ Column('id', Integer, primary_key=True),
+ Column('start', Integer, nullable=False),
+ Column('length', Integer, nullable=False)
+ )
-class Interval2(BaseInterval):
def __init__(self, start, length):
self.start = start
self.length = length
- end = hybrid_property(lambda s: s.start + s.length)
+ @property_
+ def end(self):
+ return self.start + self.length
+
+
-mapper(Interval2, interval_table2)
+engine = create_engine('sqlite://', echo=True)
-print "Create the data"
+Base.metadata.create_all(engine)
-session = sessionmaker()()
+session = sessionmaker(engine)()
intervals = [Interval1(1,4), Interval1(3,15), Interval1(11,16)]
@@ -99,7 +140,6 @@ for interval in intervals:
session.commit()
-
for Interval in (Interval1, Interval2):
print "Querying using interval class %s" % Interval.__name__
@@ -119,4 +159,10 @@ for Interval in (Interval1, Interval2):
filter(Interval.intersects(other)).\
order_by(Interval.length).all()
print [(interval, interval.intersects(other)) for interval in result]
-
+
+ print
+ print '-- longer length'
+ interval_alias = aliased(Interval)
+ print session.query(Interval.length,
+ interval_alias.length,
+ Interval.max_length(interval_alias)).all()
diff --git a/examples/inheritance/polymorph.py b/examples/inheritance/polymorph.py
index 872318060..87c2de10e 100644
--- a/examples/inheritance/polymorph.py
+++ b/examples/inheritance/polymorph.py
@@ -112,6 +112,14 @@ c = session.query(Company).get(1)
for e in c.employees:
print e
+# illustrate querying using direct table access:
+
+print session.query(Engineer.engineer_name).\
+ select_from(engineers).\
+ filter(Engineer.primary_language=='python').\
+ all()
+
+
session.delete(c)
session.commit()
diff --git a/examples/versioning/history_meta.py b/examples/versioning/history_meta.py
index c2b283f1a..0a631e849 100644
--- a/examples/versioning/history_meta.py
+++ b/examples/versioning/history_meta.py
@@ -123,7 +123,7 @@ def create_version(obj, session, deleted = False):
# mapped column. this will allow usage of MapperProperties
# that have a different keyname than that of the mapped column.
try:
- prop = obj_mapper._get_col_to_prop(obj_col)
+ prop = obj_mapper.get_property_by_column(obj_col)
except UnmappedColumnError:
# in the case of single table inheritance, there may be
# columns on the mapped table intended for the subclass only.
@@ -144,7 +144,9 @@ def create_version(obj, session, deleted = False):
elif u:
attr[hist_col.key] = u[0]
else:
- raise Exception("TODO: what makes us arrive here ?")
+ # if the attribute had no value.
+ attr[hist_col.key] = a[0]
+ obj_changed = True
if not obj_changed and not deleted:
return
diff --git a/examples/versioning/test_versioning.py b/examples/versioning/test_versioning.py
index 2a7a2ca66..031d7ca26 100644
--- a/examples/versioning/test_versioning.py
+++ b/examples/versioning/test_versioning.py
@@ -86,8 +86,23 @@ class TestVersioning(TestBase):
]
)
-
-
+ def test_from_null(self):
+ class SomeClass(Base, ComparableEntity):
+ __tablename__ = 'sometable'
+
+ id = Column(Integer, primary_key=True)
+ name = Column(String(50))
+
+ self.create_tables()
+ sess = Session()
+ sc = SomeClass()
+ sess.add(sc)
+ sess.commit()
+
+ sc.name = 'sc1'
+ sess.commit()
+
+ assert sc.version == 2
def test_deferred(self):
"""test versioning of unloaded, deferred columns."""
diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py
index 2d809e339..cb4e8e10b 100644
--- a/lib/sqlalchemy/__init__.py
+++ b/lib/sqlalchemy/__init__.py
@@ -114,6 +114,6 @@ from sqlalchemy.engine import create_engine, engine_from_config
__all__ = sorted(name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)))
-__version__ = '0.6.4'
+__version__ = '0.6.5'
del inspect, sys
diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py
index 04439afb9..1eb2549c9 100644
--- a/lib/sqlalchemy/dialects/firebird/base.py
+++ b/lib/sqlalchemy/dialects/firebird/base.py
@@ -571,9 +571,10 @@ class FBDialect(default.DefaultDialect):
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
+ # and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
- assert defexpr[:8].rstrip() == \
+ assert defexpr[:8].rstrip().upper() == \
'DEFAULT', "Unrecognized default value: %s" % \
defexpr
defvalue = defexpr[8:].strip()
diff --git a/lib/sqlalchemy/dialects/informix/base.py b/lib/sqlalchemy/dialects/informix/base.py
index bc7b6c3e7..242b8a328 100644
--- a/lib/sqlalchemy/dialects/informix/base.py
+++ b/lib/sqlalchemy/dialects/informix/base.py
@@ -105,29 +105,20 @@ class InfoSQLCompiler(compiler.SQLCompiler):
s += ""
return s
- def visit_select(self, select):
- # the column in order by clause must in select too
-
- def __label(c):
- try:
- return c._label.lower()
- except:
- return ''
-
- # TODO: dont modify the original select, generate a new one
- a = [__label(c) for c in select._raw_columns]
- for c in select._order_by_clause.clauses:
- if __label(c) not in a:
- select.append_column(c)
-
- return compiler.SQLCompiler.visit_select(self, select)
+ def visit_select(self, select, asfrom=False, parens=True, **kw):
+ text = compiler.SQLCompiler.visit_select(self, select, asfrom, parens, **kw)
+ if asfrom and parens and self.dialect.server_version_info < (11,):
+            #assuming that version 11 doesn't need this; not tested
+ return "table(multiset" + text + ")"
+ else:
+ return text
def limit_clause(self, select):
if select._offset is not None and select._offset > 0:
raise NotImplementedError("Informix does not support OFFSET")
return ""
- def visit_function(self, func):
+ def visit_function(self, func, **kw):
if func.name.lower() == 'current_date':
return "today"
elif func.name.lower() == 'current_time':
@@ -135,7 +126,7 @@ class InfoSQLCompiler(compiler.SQLCompiler):
elif func.name.lower() in ('current_timestamp', 'now'):
return "CURRENT YEAR TO SECOND"
else:
- return compiler.SQLCompiler.visit_function(self, func)
+ return compiler.SQLCompiler.visit_function(self, func, **kw)
class InfoDDLCompiler(compiler.DDLCompiler):
diff --git a/lib/sqlalchemy/dialects/informix/informixdb.py b/lib/sqlalchemy/dialects/informix/informixdb.py
index 54e5a994a..8edcc953b 100644
--- a/lib/sqlalchemy/dialects/informix/informixdb.py
+++ b/lib/sqlalchemy/dialects/informix/informixdb.py
@@ -31,10 +31,13 @@ class InformixDialect_informixdb(InformixDialect):
def _get_server_version_info(self, connection):
# http://informixdb.sourceforge.net/manual.html#inspecting-version-numbers
- vers = connection.dbms_version
-
- # TODO: not tested
- return tuple([int(x) for x in vers.split('.')])
+ version = []
+ for n in connection.connection.dbms_version.split('.'):
+ try:
+ version.append(int(n))
+ except ValueError:
+ version.append(n)
+ return tuple(version)
def is_disconnect(self, e):
if isinstance(e, self.dbapi.OperationalError):
diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py
index 089d2f71d..a17b2484d 100644
--- a/lib/sqlalchemy/dialects/mssql/base.py
+++ b/lib/sqlalchemy/dialects/mssql/base.py
@@ -104,7 +104,7 @@ Compatibility Levels
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatible with SQL2000 while running on a SQL2005 database
-server. ``server_version_info`` will always retrun the database
+server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py
index 4c153dac2..0aa348953 100644
--- a/lib/sqlalchemy/dialects/oracle/base.py
+++ b/lib/sqlalchemy/dialects/oracle/base.py
@@ -899,11 +899,22 @@ class OracleDialect(default.DefaultDialect):
uniqueness = dict(NONUNIQUE=False, UNIQUE=True)
oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE)
+
+ def upper_name_set(names):
+ return set([i.upper() for i in names])
+
+ pk_names = upper_name_set(pkeys)
+
+ def remove_if_primary_key(index):
+ # don't include the primary key index
+ if index is not None and \
+ upper_name_set(index['column_names']) == pk_names:
+ indexes.pop()
+
+ index = None
for rset in rp:
- # don't include the primary key columns
- if rset.column_name in [s.upper() for s in pkeys]:
- continue
if rset.index_name != last_index_name:
+ remove_if_primary_key(index)
index = dict(name=self.normalize_name(rset.index_name), column_names=[])
indexes.append(index)
index['unique'] = uniqueness.get(rset.uniqueness, False)
@@ -913,6 +924,7 @@ class OracleDialect(default.DefaultDialect):
if not oracle_sys_col.match(rset.column_name):
index['column_names'].append(self.normalize_name(rset.column_name))
last_index_name = rset.index_name
+ remove_if_primary_key(index)
return indexes
@reflection.cache
@@ -945,7 +957,6 @@ class OracleDialect(default.DefaultDialect):
constraint_data = rp.fetchall()
return constraint_data
- @reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
"""
@@ -956,7 +967,10 @@ class OracleDialect(default.DefaultDialect):
dblink
"""
+ return self._get_primary_keys(connection, table_name, schema, **kw)[0]
+ @reflection.cache
+ def _get_primary_keys(self, connection, table_name, schema=None, **kw):
resolve_synonyms = kw.get('oracle_resolve_synonyms', False)
dblink = kw.get('dblink', '')
info_cache = kw.get('info_cache')
@@ -966,6 +980,7 @@ class OracleDialect(default.DefaultDialect):
resolve_synonyms, dblink,
info_cache=info_cache)
pkeys = []
+ constraint_name = None
constraint_data = self._get_constraint_data(connection, table_name,
schema, dblink,
info_cache=kw.get('info_cache'))
@@ -975,8 +990,18 @@ class OracleDialect(default.DefaultDialect):
(cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \
row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]])
if cons_type == 'P':
+ if constraint_name is None:
+ constraint_name = self.normalize_name(cons_name)
pkeys.append(local_column)
- return pkeys
+ return pkeys, constraint_name
+
+ def get_pk_constraint(self, connection, table_name, schema=None, **kw):
+ cols, name = self._get_primary_keys(connection, table_name, schema=schema, **kw)
+
+ return {
+ 'constrained_columns':cols,
+ 'name':name
+ }
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py
index b4894250f..43d3dd038 100644
--- a/lib/sqlalchemy/engine/__init__.py
+++ b/lib/sqlalchemy/engine/__init__.py
@@ -132,12 +132,19 @@ def create_engine(*args, **kwargs):
additional keyword arguments.
:param convert_unicode=False: if set to True, all
- String/character based types will convert Unicode values to raw
- byte values going into the database, and all raw byte values to
+ String/character based types will convert Python Unicode values to raw
+ byte values sent to the DBAPI as bind parameters, and all raw byte values to
Python Unicode coming out in result sets. This is an
- engine-wide method to provide unicode conversion across the
- board. For unicode conversion on a column-by-column level, use
- the ``Unicode`` column type instead, described in `types`.
+ engine-wide method to provide Unicode conversion across the
+ board for those DBAPIs that do not accept Python Unicode objects
+ as input. For Unicode conversion on a column-by-column level, use
+ the ``Unicode`` column type instead, described in :ref:`types_toplevel`. Note that
+ many DBAPIs have the ability to return Python Unicode objects in
+ result sets directly - SQLAlchemy will use these modes of operation
+ if possible and will also attempt to detect "Unicode returns"
+ behavior by the DBAPI upon first connect by the
+ :class:`.Engine`. When this is detected, string values in
+ result sets are passed through without further processing.
:param creator: a callable which returns a DBAPI connection.
This creation function will be passed to the underlying
@@ -188,10 +195,13 @@ def create_engine(*args, **kwargs):
opened above and beyond the pool_size setting, which defaults
to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`.
- :param module=None: used by database implementations which
- support multiple DBAPI modules, this is a reference to a DBAPI2
- module to be used instead of the engine's default module. For
- PostgreSQL, the default is psycopg2. For Oracle, it's cx_Oracle.
+ :param module=None: reference to a Python module object (the module itself, not
+ its string name). Specifies an alternate DBAPI module to be used
+ by the engine's dialect. Each sub-dialect references a specific DBAPI which
+ will be imported before first connect. This parameter causes the
+ import to be bypassed, and the given module to be used instead.
+ Can be used for testing of DBAPIs as well as to inject "mock"
+ DBAPI implementations into the :class:`.Engine`.
:param pool=None: an already-constructed instance of
:class:`~sqlalchemy.pool.Pool`, such as a
@@ -199,7 +209,7 @@ def create_engine(*args, **kwargs):
pool will be used directly as the underlying connection pool
for the engine, bypassing whatever connection parameters are
present in the URL argument. For information on constructing
- connection pools manually, see `pooling`.
+ connection pools manually, see :ref:`pooling_toplevel`.
:param poolclass=None: a :class:`~sqlalchemy.pool.Pool`
subclass, which will be used to create a connection pool
@@ -224,7 +234,7 @@ def create_engine(*args, **kwargs):
connections after the given number of seconds has passed. It
defaults to -1, or no timeout. For example, setting to 3600
means connections will be recycled after one hour. Note that
- MySQL in particular will ``disconnect automatically`` if no
+ MySQL in particular will disconnect automatically if no
activity is detected on a connection for eight hours (although
this is configurable with the MySQLDB connection itself and the
server configuration as well).
@@ -233,8 +243,8 @@ def create_engine(*args, **kwargs):
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
- :param strategy='plain': used to invoke alternate :class:`~sqlalchemy.engine.base.Engine.`
- implementations. Currently available is the ``threadlocal``
+ :param strategy='plain': selects alternate engine implementations.
+ Currently available is the ``threadlocal``
strategy, which is described in :ref:`threadlocal_strategy`.
"""
diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py
index 14ebf916b..4d6912ce4 100644
--- a/lib/sqlalchemy/engine/base.py
+++ b/lib/sqlalchemy/engine/base.py
@@ -797,11 +797,22 @@ class Connectable(object):
class Connection(Connectable):
"""Provides high-level functionality for a wrapped DB-API connection.
- Provides execution support for string-based SQL statements as well
- as ClauseElement, Compiled and DefaultGenerator objects. Provides
- a :meth:`begin` method to return Transaction objects.
-
- The Connection object is **not** thread-safe.
+ Provides execution support for string-based SQL statements as well as
+ :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator`
+ objects. Provides a :meth:`begin` method to return :class:`.Transaction`
+ objects.
+
+ The Connection object is **not** thread-safe. While a Connection can be
+ shared among threads using properly synchronized access, it is still
+ possible that the underlying DBAPI connection may not support shared
+ access between threads. Check the DBAPI documentation for details.
+
+ The Connection object represents a single dbapi connection checked out
+ from the connection pool. In this state, the connection pool has no effect
+ upon the connection, including its expiration or timeout state. For the
+ connection pool to properly manage connections, connections should be
+ returned to the connection pool (i.e. ``connection.close()``) whenever the
+ connection is not in use.
.. index::
single: thread safety; Connection
@@ -812,9 +823,9 @@ class Connection(Connectable):
_branch=False, _execution_options=None):
"""Construct a new Connection.
- Connection objects are typically constructed by an
- :class:`~sqlalchemy.engine.Engine`, see the ``connect()`` and
- ``contextual_connect()`` methods of Engine.
+ The constructor here is not public and is called only by an
+ :class:`.Engine`. See :meth:`.Engine.connect` and
+ :meth:`.Engine.contextual_connect` methods.
"""
self.engine = engine
@@ -1154,7 +1165,22 @@ class Connection(Connectable):
return self.execute(object, *multiparams, **params).scalar()
def execute(self, object, *multiparams, **params):
- """Executes and returns a ResultProxy."""
+ """Executes the given construct and returns a :class:`.ResultProxy`.
+
+ The construct can be one of:
+
+ * a textual SQL string
+ * any :class:`.ClauseElement` construct that is also
+ a subclass of :class:`.Executable`, such as a
+ :func:`.select` construct
+ * a :class:`.FunctionElement`, such as that generated
+ by :attr:`.func`, will be automatically wrapped in
+ a SELECT statement, which is then executed.
+ * a :class:`.DDLElement` object
+ * a :class:`.DefaultGenerator` object
+ * a :class:`.Compiled` object
+
+ """
for c in type(object).__mro__:
if c in Connection.executors:
@@ -1451,6 +1477,12 @@ class Connection(Connectable):
class Transaction(object):
"""Represent a Transaction in progress.
+ The object provides :meth:`.rollback` and :meth:`.commit`
+ methods in order to control transaction boundaries. It
+ also implements a context manager interface so that
+ the Python ``with`` statement can be used with the
+ :meth:`.Connection.begin` method.
+
The Transaction object is **not** threadsafe.
.. index::
@@ -1458,12 +1490,17 @@ class Transaction(object):
"""
def __init__(self, connection, parent):
+ """The constructor for :class:`.Transaction` is private
+ and is called from within the :class:`.Connection.begin`
+ implementation.
+
+ """
self.connection = connection
self._parent = parent or self
self.is_active = True
def close(self):
- """Close this transaction.
+ """Close this :class:`.Transaction`.
If this transaction is the base transaction in a begin/commit
nesting, the transaction will rollback(). Otherwise, the
@@ -1471,6 +1508,7 @@ class Transaction(object):
This is used to cancel a Transaction without affecting the scope of
an enclosing transaction.
+
"""
if not self._parent.is_active:
return
@@ -1478,6 +1516,9 @@ class Transaction(object):
self.rollback()
def rollback(self):
+ """Roll back this :class:`.Transaction`.
+
+ """
if not self._parent.is_active:
return
self._do_rollback()
@@ -1487,6 +1528,8 @@ class Transaction(object):
self._parent.rollback()
def commit(self):
+ """Commit this :class:`.Transaction`."""
+
if not self._parent.is_active:
raise exc.InvalidRequestError("This transaction is inactive")
self._do_commit()
@@ -1726,7 +1769,20 @@ class Engine(Connectable, log.Identified):
conn.close()
def execute(self, statement, *multiparams, **params):
- """Executes and returns a ResultProxy."""
+ """Executes the given construct and returns a :class:`.ResultProxy`.
+
+ The arguments are the same as those used by
+ :meth:`.Connection.execute`.
+
+ Here, a :class:`.Connection` is acquired using the
+ :meth:`~.Engine.contextual_connect` method, and the statement executed
+ with that connection. The returned :class:`.ResultProxy` is flagged
+ such that when the :class:`.ResultProxy` is exhausted and its
+ underlying cursor is closed, the :class:`.Connection` created here
+ will also be closed, which allows its associated DBAPI connection
+ resource to be returned to the connection pool.
+
+ """
connection = self.contextual_connect(close_with_result=True)
return connection.execute(statement, *multiparams, **params)
@@ -1743,16 +1799,30 @@ class Engine(Connectable, log.Identified):
return connection._execute_compiled(compiled, multiparams, params)
def connect(self, **kwargs):
- """Return a newly allocated Connection object."""
+ """Return a new :class:`.Connection` object.
+
+ The :class:`.Connection`, upon construction, will procure a DBAPI connection
+ from the :class:`.Pool` referenced by this :class:`.Engine`,
+ returning it back to the :class:`.Pool` after the :meth:`.Connection.close`
+ method is called.
+
+ """
return self.Connection(self, **kwargs)
def contextual_connect(self, close_with_result=False, **kwargs):
- """Return a Connection object which may be newly allocated,
- or may be part of some ongoing context.
+ """Return a :class:`.Connection` object which may be part of some ongoing context.
+
+ By default, this method does the same thing as :meth:`.Engine.connect`.
+ Subclasses of :class:`.Engine` may override this method
+ to provide contextual behavior.
- This Connection is meant to be used by the various
- "auto-connecting" operations.
+ :param close_with_result: When True, the first :class:`.ResultProxy` created
+ by the :class:`.Connection` will call the :meth:`.Connection.close` method
+ of that connection as soon as any pending result rows are exhausted.
+ This is used to supply the "connectionless execution" behavior provided
+ by the :meth:`.Engine.execute` method.
+
"""
return self.Connection(self,
@@ -2179,7 +2249,7 @@ class ResultProxy(object):
self.context = context
self.dialect = context.dialect
self.closed = False
- self.cursor = context.cursor
+ self.cursor = self._saved_cursor = context.cursor
self.connection = context.root_connection
self._echo = self.connection._echo and \
context.engine._should_log_debug()
@@ -2234,12 +2304,12 @@ class ResultProxy(object):
regardless of database backend.
"""
- return self.cursor.lastrowid
+ return self._saved_cursor.lastrowid
def _cursor_description(self):
"""May be overridden by subclasses."""
- return self.cursor.description
+ return self._saved_cursor.description
def _autoclose(self):
"""called by the Connection to autoclose cursors that have no pending
diff --git a/lib/sqlalchemy/engine/reflection.py b/lib/sqlalchemy/engine/reflection.py
index 4a34ef1c6..def889e6d 100644
--- a/lib/sqlalchemy/engine/reflection.py
+++ b/lib/sqlalchemy/engine/reflection.py
@@ -50,27 +50,27 @@ class Inspector(object):
consistent interface as well as caching support for previously
fetched metadata.
- The preferred method to construct an :class:`Inspector` is via the
+ The preferred method to construct an :class:`.Inspector` is via the
:meth:`Inspector.from_engine` method. I.e.::
engine = create_engine('...')
insp = Inspector.from_engine(engine)
Where above, the :class:`~sqlalchemy.engine.base.Dialect` may opt
- to return an :class:`Inspector` subclass that provides additional
+ to return an :class:`.Inspector` subclass that provides additional
methods specific to the dialect's target database.
"""
def __init__(self, bind):
- """Initialize a new :class:`Inspector`.
+ """Initialize a new :class:`.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.base.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.base.Engine` or
:class:`~sqlalchemy.engine.base.Connection`.
- For a dialect-specific instance of :class:`Inspector`, see
+ For a dialect-specific instance of :class:`.Inspector`, see
:meth:`Inspector.from_engine`
"""
@@ -98,12 +98,12 @@ class Inspector(object):
:class:`~sqlalchemy.engine.base.Engine` or
:class:`~sqlalchemy.engine.base.Connection`.
- This method differs from direct a direct constructor call of :class:`Inspector`
+ This method differs from a direct constructor call of :class:`.Inspector`
in that the :class:`~sqlalchemy.engine.base.Dialect` is given a chance to provide
- a dialect-specific :class:`Inspector` instance, which may provide additional
+ a dialect-specific :class:`.Inspector` instance, which may provide additional
methods.
- See the example at :class:`Inspector`.
+ See the example at :class:`.Inspector`.
"""
if hasattr(bind.dialect, 'inspector'):
diff --git a/lib/sqlalchemy/exc.py b/lib/sqlalchemy/exc.py
index 1c412824c..003969f56 100644
--- a/lib/sqlalchemy/exc.py
+++ b/lib/sqlalchemy/exc.py
@@ -5,9 +5,9 @@
"""Exceptions used with SQLAlchemy.
-The base exception class is SQLAlchemyError. Exceptions which are raised as a
+The base exception class is :class:`.SQLAlchemyError`. Exceptions which are raised as a
result of DBAPI exceptions are all subclasses of
-:class:`~sqlalchemy.exc.DBAPIError`.
+:class:`.DBAPIError`.
"""
diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py
index 12f1e443d..1bf4b1447 100644
--- a/lib/sqlalchemy/ext/compiler.py
+++ b/lib/sqlalchemy/ext/compiler.py
@@ -119,6 +119,8 @@ overriding routine and cause an endless loop. Such as, to add "prefix" to all
The above compiler will prefix all INSERT statements with "some prefix" when compiled.
+.. _type_compilation_extension:
+
Changing Compilation of Types
=============================
diff --git a/lib/sqlalchemy/ext/declarative.py b/lib/sqlalchemy/ext/declarative.py
index 3201433f4..40263c7b8 100755
--- a/lib/sqlalchemy/ext/declarative.py
+++ b/lib/sqlalchemy/ext/declarative.py
@@ -251,7 +251,8 @@ Similarly, :func:`comparable_using` is a front end for the
Defining SQL Expressions
========================
-The usage of :func:`.column_property` with Declarative is
+The usage of :func:`.column_property` with Declarative to define
+load-time, mapped SQL expressions is
pretty much the same as that described in
:ref:`mapper_sql_expressions`. Local columns within the same
class declaration can be referenced directly::
@@ -391,6 +392,8 @@ class declaration::
'version_id_generator': lambda v:datetime.now()
}
+.. _declarative_inheritance:
+
Inheritance Configuration
=========================
@@ -1189,7 +1192,7 @@ def _deferred_relationship(cls, prop):
return x
except NameError, n:
raise exceptions.InvalidRequestError(
- "When compiling mapper %s, expression %r failed to "
+ "When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
diff --git a/lib/sqlalchemy/ext/sqlsoup.py b/lib/sqlalchemy/ext/sqlsoup.py
index e54cde3ed..e8234e7c7 100644
--- a/lib/sqlalchemy/ext/sqlsoup.py
+++ b/lib/sqlalchemy/ext/sqlsoup.py
@@ -3,7 +3,15 @@ Introduction
============
SqlSoup provides a convenient way to access existing database tables without
-having to declare table or mapper classes ahead of time. It is built on top of the SQLAlchemy ORM and provides a super-minimalistic interface to an existing database.
+having to declare table or mapper classes ahead of time. It is built on top of
+the SQLAlchemy ORM and provides a super-minimalistic interface to an existing
+database.
+
+SqlSoup effectively provides a coarse grained, alternative interface to
+working with the SQLAlchemy ORM, providing a "self configuring" interface
+for extremely rudimental operations. It's somewhat akin to a
+"super novice mode" version of the ORM. While SqlSoup can be very handy,
+users are strongly encouraged to use the full ORM for non-trivial applications.
Suppose we have a database with users, books, and loans tables
(corresponding to the PyWebOff dataset, if you're curious).
diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py
index 50ea4c283..39c68f0aa 100644
--- a/lib/sqlalchemy/orm/__init__.py
+++ b/lib/sqlalchemy/orm/__init__.py
@@ -51,7 +51,7 @@ from sqlalchemy.orm.mapper import reconstructor, validates
from sqlalchemy.orm import strategies
from sqlalchemy.orm.query import AliasOption, Query
from sqlalchemy.sql import util as sql_util
-from sqlalchemy.orm.session import Session as _Session
+from sqlalchemy.orm.session import Session
from sqlalchemy.orm.session import object_session, sessionmaker, \
make_transient
from sqlalchemy.orm.scoping import ScopedSession
@@ -66,6 +66,7 @@ __all__ = (
'Validator',
'PropComparator',
'Query',
+ 'Session',
'aliased',
'backref',
'class_mapper',
@@ -109,17 +110,19 @@ __all__ = (
def scoped_session(session_factory, scopefunc=None):
- """Provides thread-local management of Sessions.
+ """Provides thread-local or scoped management of :class:`.Session` objects.
This is a front-end function to
- :class:`~sqlalchemy.orm.scoping.ScopedSession`.
+ :class:`.ScopedSession`.
:param session_factory: a callable function that produces
:class:`Session` instances, such as :func:`sessionmaker`.
- :param scopefunc: optional, TODO
+ :param scopefunc: Optional "scope" function which would be
+ passed to the :class:`.ScopedRegistry`. If None, the
+ :class:`.ThreadLocalRegistry` is used by default.
- :returns: an :class:`~sqlalchemy.orm.scoping.ScopedSession` instance
+ :returns: an :class:`.ScopedSession` instance
Usage::
@@ -173,17 +176,13 @@ def create_session(bind=None, **kwargs):
kwargs.setdefault('autoflush', False)
kwargs.setdefault('autocommit', True)
kwargs.setdefault('expire_on_commit', False)
- return _Session(bind=bind, **kwargs)
+ return Session(bind=bind, **kwargs)
def relationship(argument, secondary=None, **kwargs):
"""Provide a relationship of a primary Mapper to a secondary Mapper.
- .. note:: This function is known as :func:`relation` in all versions
- of SQLAlchemy prior to version 0.6beta2, including the 0.5 and 0.4
- series. :func:`~sqlalchemy.orm.relationship()` is only available
- starting with SQLAlchemy 0.6beta2. The :func:`relation` name will
- remain available for the foreseeable future in order to enable
- cross-compatibility.
+ .. note:: :func:`relationship` is historically known as
+ :func:`relation` prior to version 0.6.
This corresponds to a parent-child or associative table relationship. The
constructed class is an instance of :class:`RelationshipProperty`.
@@ -260,6 +259,8 @@ def relationship(argument, secondary=None, **kwargs):
:param collection_class:
a class or callable that returns a new list-holding object. will
be used in place of a plain list for storing elements.
+ Behavior of this attribute is described in detail at
+ :ref:`custom_collections`.
:param comparator_factory:
a class which extends :class:`RelationshipProperty.Comparator` which
@@ -317,25 +318,28 @@ def relationship(argument, secondary=None, **kwargs):
which is already higher up in the chain. This option applies
both to joined- and subquery- eager loaders.
- :param lazy=('select'|'joined'|'subquery'|'noload'|'dynamic'): specifies
- how the related items should be loaded. Values include:
+ :param lazy='select': specifies
+ how the related items should be loaded. Default value is
+ ``select``. Values include:
- * 'select' - items should be loaded lazily when the property is first
- accessed.
+ * ``select`` - items should be loaded lazily when the property is first
+ accessed, using a separate SELECT statement.
- * 'joined' - items should be loaded "eagerly" in the same query as
- that of the parent, using a JOIN or LEFT OUTER JOIN.
+ * ``joined`` - items should be loaded "eagerly" in the same query as
+ that of the parent, using a JOIN or LEFT OUTER JOIN. Whether
+ the join is "outer" or not is determined by the ``innerjoin``
+ parameter.
- * 'subquery' - items should be loaded "eagerly" within the same
+ * ``subquery`` - items should be loaded "eagerly" within the same
query as that of the parent, using a second SQL statement
which issues a JOIN to a subquery of the original
statement.
- * 'noload' - no loading should occur at any time. This is to
+ * ``noload`` - no loading should occur at any time. This is to
support "write-only" attributes, or attributes which are
populated in some manner specific to the application.
- * 'dynamic' - the attribute will return a pre-configured
+ * ``dynamic`` - the attribute will return a pre-configured
:class:`~sqlalchemy.orm.query.Query` object for all read
operations, onto which further filtering operations can be
applied before iterating the results. The dynamic
@@ -350,6 +354,33 @@ def relationship(argument, secondary=None, **kwargs):
* None - a synonym for 'noload'
+ Detailed discussion of loader strategies is at :ref:`loading_toplevel`.
+
+ :param load_on_pending=False:
+ Indicates loading behavior for transient or pending parent objects.
+
+ When set to ``True``, causes the lazy-loader to
+ issue a query for a parent object that is not persistent, meaning it has
+ never been flushed. This may take effect for a pending object when
+ autoflush is disabled, or for a transient object that has been
+ "attached" to a :class:`.Session` but is not part of its pending
+ collection. Attachment of transient objects to the session without
+ moving to the "pending" state is not a supported behavior at this time.
+
+ Note that the load of related objects on a pending or transient object
+ also does not trigger any attribute change events - no user-defined
+ events will be emitted for these attributes, and if and when the
+ object is ultimately flushed, only the user-specific foreign key
+ attributes will be part of the modified state.
+
+ The load_on_pending flag does not improve behavior
+ when the ORM is used normally - object references should be constructed
+ at the object level, not at the foreign key level, so that they
+ are present in an ordinary way before flush() proceeds. This flag
+ is not intended for general use.
+
+ New in 0.6.5.
+
:param order_by:
indicates the ordering that should be applied when loading these
items.
@@ -628,7 +659,7 @@ def deferred(*columns, **kwargs):
return ColumnProperty(deferred=True, *columns, **kwargs)
def mapper(class_, local_table=None, *args, **params):
- """Return a new :class:`~sqlalchemy.orm.Mapper` object.
+ """Return a new :class:`~.Mapper` object.
:param class\_: The class to be mapped.
@@ -670,22 +701,27 @@ def mapper(class_, local_table=None, *args, **params):
:param concrete: If True, indicates this mapper should use concrete
table inheritance with its parent mapper.
- :param exclude_properties: A list of properties not to map. Columns
- present in the mapped table and present in this list will not be
- automatically converted into properties. Note that neither this
- option nor include_properties will allow an end-run around Python
- inheritance. If mapped class ``B`` inherits from mapped class
- ``A``, no combination of includes or excludes will allow ``B`` to
- have fewer properties than its superclass, ``A``.
+ :param exclude_properties: A list or set of string column names to
+ be excluded from mapping. As of SQLAlchemy 0.6.4, this collection
+ may also include :class:`.Column` objects. Columns named or present
+ in this list will not be automatically mapped. Note that neither
+ this option nor include_properties will allow one to circumvent plain
+ Python inheritance - if mapped class ``B`` inherits from mapped
+ class ``A``, no combination of includes or excludes will allow ``B``
+ to have fewer properties than its superclass, ``A``.
:param extension: A :class:`.MapperExtension` instance or
list of :class:`~sqlalchemy.orm.interfaces.MapperExtension`
instances which will be applied to all operations by this
:class:`~sqlalchemy.orm.mapper.Mapper`.
- :param include_properties: An inclusive list of properties to map.
- Columns present in the mapped table but not present in this list
- will not be automatically converted into properties.
+ :param include_properties: An inclusive list or set of string column
+ names to map. As of SQLAlchemy 0.6.4, this collection may also
+ include :class:`.Column` objects in order to disambiguate between
+ same-named columns in a selectable (such as a
+ :func:`~.expression.join()`). If this list is not ``None``, columns
+ present in the mapped table but not named or present in this list
+ will not be automatically mapped. See also "exclude_properties".
:param inherits: Another :class:`~sqlalchemy.orm.Mapper` for which
this :class:`~sqlalchemy.orm.Mapper` will have an inheritance
@@ -714,7 +750,7 @@ def mapper(class_, local_table=None, *args, **params):
when a primary key changes on a joined-table inheritance or other
joined table mapping.
- When True, it is assumed that ON UPDATE CASCADE is configured on
+ When True, it is assumed that ON UPDATE CASCADE is configured on
the foreign key in the database, and that the database will handle
propagation of an UPDATE from a source column to dependent rows.
Note that with databases which enforce referential integrity (i.e.
@@ -723,20 +759,20 @@ def mapper(class_, local_table=None, *args, **params):
value of the attribute on related items which are locally present
in the session during a flush.
- When False, it is assumed that the database does not enforce
+ When False, it is assumed that the database does not enforce
referential integrity and will not be issuing its own CASCADE
operation for an update. The relationship() will issue the
appropriate UPDATE statements to the database in response to the
change of a referenced key, and items locally present in the
session during a flush will also be refreshed.
- This flag should probably be set to False if primary key changes
+ This flag should probably be set to False if primary key changes
are expected and the database in use doesn't support CASCADE (i.e.
SQLite, MySQL MyISAM tables).
Also see the passive_updates flag on :func:`relationship()`.
- A future SQLAlchemy release will provide a "detect" feature for
+ A future SQLAlchemy release will provide a "detect" feature for
this flag.
:param polymorphic_on: Used with mappers in an inheritance
@@ -852,7 +888,8 @@ def synonym(name, map_column=False, descriptor=None,
doc=doc)
def comparable_property(comparator_factory, descriptor=None):
- """Provide query semantics for an unmanaged attribute.
+ """Provides a method of applying a :class:`.PropComparator`
+ to any Python descriptor attribute.
Allows a regular Python @property (descriptor) to be used in Queries and
SQL constructs like a managed attribute. comparable_property wraps a
@@ -1035,8 +1072,6 @@ def subqueryload(*keys):
"""Return a ``MapperOption`` that will convert the property
of the given name into an subquery eager load.
- .. note:: This function is new as of SQLAlchemy version 0.6beta3.
-
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
examples::
@@ -1061,8 +1096,6 @@ def subqueryload_all(*keys):
"""Return a ``MapperOption`` that will convert all properties along the
given dot-separated path into a subquery eager load.
- .. note:: This function is new as of SQLAlchemy version 0.6beta3.
-
Used with :meth:`~sqlalchemy.orm.query.Query.options`.
For example::
@@ -1147,6 +1180,9 @@ def contains_eager(*keys, **kwargs):
See also :func:`eagerload` for the "automatic" version of this
functionality.
+ For additional examples of :func:`contains_eager` see
+ :ref:`contains_eager`.
+
"""
alias = kwargs.pop('alias', None)
if kwargs:
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
index 800f3889f..b56de5f05 100644
--- a/lib/sqlalchemy/orm/attributes.py
+++ b/lib/sqlalchemy/orm/attributes.py
@@ -35,18 +35,25 @@ NEVER_SET = util.symbol('NEVER_SET')
# "passive" get settings
# TODO: the True/False values need to be factored out
-# of the rest of ORM code
-# don't fire off any callables, and don't initialize the attribute to
-# an empty value
PASSIVE_NO_INITIALIZE = True #util.symbol('PASSIVE_NO_INITIALIZE')
+"""Symbol indicating that loader callables should
+ not be fired off, and a non-initialized attribute
+ should remain that way."""
-# don't fire off any callables, but if no callables present
-# then initialize to an empty value/collection
# this is used by backrefs.
PASSIVE_NO_FETCH = util.symbol('PASSIVE_NO_FETCH')
+"""Symbol indicating that loader callables should not be fired off.
+ Non-initialized attributes should be initialized to an empty value."""
+
+PASSIVE_ONLY_PERSISTENT = util.symbol('PASSIVE_ONLY_PERSISTENT')
+"""Symbol indicating that loader callables should only fire off for
+persistent objects.
+
+Loads of "previous" values during change events use this flag.
+"""
-# fire callables/initialize as needed
PASSIVE_OFF = False #util.symbol('PASSIVE_OFF')
+"""Symbol indicating that loader callables should be executed."""
INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__'
"""Attribute, elects custom instrumentation when present on a mapped class.
@@ -448,7 +455,7 @@ class ScalarAttributeImpl(AttributeImpl):
self, state, dict_.get(self.key, NO_VALUE))
def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
if self.active_history:
@@ -527,7 +534,7 @@ class MutableScalarAttributeImpl(ScalarAttributeImpl):
state.mutable_dict.pop(self.key)
def set(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
if self.extensions:
@@ -589,14 +596,14 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
setter operation.
"""
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
if self.active_history:
- old = self.get(state, dict_)
+ old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
else:
old = self.get(state, dict_, passive=PASSIVE_NO_FETCH)
-
+
value = self.fire_replace_event(state, dict_, value, old, initiator)
dict_[self.key] = value
@@ -615,7 +622,7 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
previous is not None and
previous is not PASSIVE_NO_RESULT):
self.sethasparent(instance_state(previous), False)
-
+
for ext in self.extensions:
value = ext.set(state, value, previous, initiator or self)
@@ -719,7 +726,7 @@ class CollectionAttributeImpl(AttributeImpl):
self.key, state, self.collection_factory)
def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
collection = self.get_collection(state, dict_, passive=passive)
@@ -732,7 +739,7 @@ class CollectionAttributeImpl(AttributeImpl):
collection.append_with_event(value, initiator)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
collection = self.get_collection(state, state.dict, passive=passive)
@@ -752,7 +759,7 @@ class CollectionAttributeImpl(AttributeImpl):
setter operation.
"""
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
self._set_iterable(
@@ -777,15 +784,16 @@ class CollectionAttributeImpl(AttributeImpl):
else:
new_values = list(iterable)
- old = self.get(state, dict_)
-
- # ignore re-assignment of the current collection, as happens
- # implicitly with in-place operators (foo.collection |= other)
- if old is iterable:
+ old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT)
+ if old is PASSIVE_NO_RESULT:
+ old = self.initialize(state, dict_)
+ elif old is iterable:
+ # ignore re-assignment of the current collection, as happens
+ # implicitly with in-place operators (foo.collection |= other)
return
state.modified_event(dict_, self, True, old)
-
+
old_collection = self.get_collection(state, dict_, old)
dict_[self.key] = user_data
@@ -855,7 +863,7 @@ class GenericBackrefExtension(interfaces.AttributeExtension):
def set(self, state, child, oldchild, initiator):
if oldchild is child:
return child
-
+
if oldchild is not None and oldchild is not PASSIVE_NO_RESULT:
# With lazy=None, there's no guarantee that the full collection is
# present when updating via a backref.
@@ -1293,8 +1301,10 @@ class _ClassInstrumentationAdapter(ClassManager):
return self._get_dict
class History(tuple):
- """A 3-tuple of added, unchanged and deleted values.
-
+ """A 3-tuple of added, unchanged and deleted values,
+ representing the changes which have occurred on an instrumented
+ attribute.
+
Each tuple member is an iterable sequence.
"""
@@ -1302,9 +1312,18 @@ class History(tuple):
__slots__ = ()
added = property(itemgetter(0))
+ """Return the collection of items added to the attribute (the first tuple
+ element)."""
+
unchanged = property(itemgetter(1))
+ """Return the collection of items that have not changed on the attribute
+ (the second tuple element)."""
+
+
deleted = property(itemgetter(2))
-
+ """Return the collection of items that have been removed from the
+ attribute (the third tuple element)."""
+
def __new__(cls, added, unchanged, deleted):
return tuple.__new__(cls, (added, unchanged, deleted))
@@ -1312,25 +1331,38 @@ class History(tuple):
return self != HISTORY_BLANK
def empty(self):
+ """Return True if this :class:`History` has no changes
+ and no existing, unchanged state.
+
+ """
+
return not bool(
(self.added or self.deleted)
or self.unchanged and self.unchanged != [None]
)
def sum(self):
+ """Return a collection of added + unchanged + deleted."""
+
return (self.added or []) +\
(self.unchanged or []) +\
(self.deleted or [])
def non_deleted(self):
+ """Return a collection of added + unchanged."""
+
return (self.added or []) +\
(self.unchanged or [])
def non_added(self):
+ """Return a collection of unchanged + deleted."""
+
return (self.unchanged or []) +\
(self.deleted or [])
def has_changes(self):
+ """Return True if this :class:`History` has changes."""
+
return bool(self.added or self.deleted)
def as_state(self):
@@ -1391,11 +1423,19 @@ class History(tuple):
HISTORY_BLANK = History(None, None, None)
def get_history(obj, key, **kwargs):
- """Return a History record for the given object and attribute key.
+ """Return a :class:`.History` record for the given object
+ and attribute key.
- obj is an instrumented object instance. An InstanceState
- is accepted directly for backwards compatibility but
- this usage is deprecated.
+ :param obj: an object whose class is instrumented by the
+ attributes package.
+
+ :param key: string attribute name.
+
+ :param kwargs: Optional keyword arguments currently
+ include the ``passive`` flag, which indicates if the attribute should be
+ loaded from the database if not already present (:attr:`PASSIVE_NO_FETCH`), and
+ if the attribute should be not initialized to a blank value otherwise
+ (:attr:`PASSIVE_NO_INITIALIZE`). Default is :attr:`PASSIVE_OFF`.
"""
return get_state_history(instance_state(obj), key, **kwargs)
diff --git a/lib/sqlalchemy/orm/collections.py b/lib/sqlalchemy/orm/collections.py
index a9ad34239..b52329523 100644
--- a/lib/sqlalchemy/orm/collections.py
+++ b/lib/sqlalchemy/orm/collections.py
@@ -189,7 +189,7 @@ class collection(object):
The recipe decorators all require parens, even those that take no
arguments::
- @collection.adds('entity'):
+ @collection.adds('entity')
def insert(self, position, entity): ...
@collection.removes_return()
@@ -253,7 +253,7 @@ class collection(object):
The remover method is called with one positional argument: the value
to remove. The method will be automatically decorated with
- 'removes_return()' if not already decorated::
+ :meth:`removes_return` if not already decorated::
@collection.remover
def zap(self, entity): ...
@@ -293,7 +293,7 @@ class collection(object):
"""Tag the method as instrumented.
This tag will prevent any decoration from being applied to the method.
- Use this if you are orchestrating your own calls to collection_adapter
+ Use this if you are orchestrating your own calls to :func:`.collection_adapter`
in one of the basic SQLAlchemy interface methods, or to prevent
an automatic ABC method decoration from wrapping your implementation::
@@ -339,7 +339,7 @@ class collection(object):
The default converter implementation will use duck-typing to do the
conversion. A dict-like collection will be convert into an iterable
- of dictionary values, and other types will simply be iterated.
+ of dictionary values, and other types will simply be iterated::
@collection.converter
def convert(self, other): ...
@@ -442,7 +442,8 @@ class collection(object):
# public instrumentation interface for 'internally instrumented'
# implementations
def collection_adapter(collection):
- """Fetch the CollectionAdapter for a collection."""
+ """Fetch the :class:`.CollectionAdapter` for a collection."""
+
return getattr(collection, '_sa_adapter', None)
def collection_iter(collection):
@@ -545,6 +546,7 @@ class CollectionAdapter(object):
def append_with_event(self, item, initiator=None):
"""Add an entity to the collection, firing mutation events."""
+
getattr(self._data(), '_sa_appender')(item, _sa_initiator=initiator)
def append_without_event(self, item):
@@ -585,7 +587,7 @@ class CollectionAdapter(object):
def fire_append_event(self, item, initiator=None):
"""Notify that a entity has entered the collection.
- Initiator is the InstrumentedAttribute that initiated the membership
+ Initiator is a token owned by the InstrumentedAttribute that initiated the membership
mutation, and should be left as None unless you are passing along
an initiator value from a chained operation.
diff --git a/lib/sqlalchemy/orm/dependency.py b/lib/sqlalchemy/orm/dependency.py
index 376afd88d..4458a8547 100644
--- a/lib/sqlalchemy/orm/dependency.py
+++ b/lib/sqlalchemy/orm/dependency.py
@@ -734,7 +734,12 @@ class DetectKeySwitch(DependencyProcessor):
def per_property_preprocessors(self, uow):
if self.prop._reverse_property:
- return
+ if self.passive_updates:
+ return
+ else:
+ if False in (prop.passive_updates for \
+ prop in self.prop._reverse_property):
+ return
uow.register_preprocessor(self, False)
@@ -797,14 +802,14 @@ class DetectKeySwitch(DependencyProcessor):
if switchers:
# if primary key values have actually changed somewhere, perform
# a linear search through the UOW in search of a parent.
- # note that this handler isn't used if the many-to-one
- # relationship has a backref.
for state in uowcommit.session.identity_map.all_states():
if not issubclass(state.class_, self.parent.class_):
continue
dict_ = state.dict
- related = dict_.get(self.key)
- if related is not None:
+ related = state.get_impl(self.key).get(state, dict_,
+ passive=self.passive_updates)
+ if related is not attributes.PASSIVE_NO_RESULT and \
+ related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
uowcommit.register_object(state,
diff --git a/lib/sqlalchemy/orm/dynamic.py b/lib/sqlalchemy/orm/dynamic.py
index d55838011..c5ddaca40 100644
--- a/lib/sqlalchemy/orm/dynamic.py
+++ b/lib/sqlalchemy/orm/dynamic.py
@@ -112,7 +112,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
def set(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
- if initiator is self:
+ if initiator and initiator.parent_token is self.parent_token:
return
self._set_iterable(state, dict_, value)
diff --git a/lib/sqlalchemy/orm/evaluator.py b/lib/sqlalchemy/orm/evaluator.py
index 3ee70782d..e3cbffe98 100644
--- a/lib/sqlalchemy/orm/evaluator.py
+++ b/lib/sqlalchemy/orm/evaluator.py
@@ -35,7 +35,8 @@ class EvaluatorCompiler(object):
def visit_column(self, clause):
if 'parentmapper' in clause._annotations:
- key = clause._annotations['parentmapper']._get_col_to_prop(clause).key
+ key = clause._annotations['parentmapper'].\
+ _columntoproperty[clause].key
else:
key = clause.key
get_corresponding_attr = operator.attrgetter(key)
diff --git a/lib/sqlalchemy/orm/exc.py b/lib/sqlalchemy/orm/exc.py
index 3f28a3dd3..8f257bdd5 100644
--- a/lib/sqlalchemy/orm/exc.py
+++ b/lib/sqlalchemy/orm/exc.py
@@ -38,7 +38,7 @@ class FlushError(sa.exc.SQLAlchemyError):
class UnmappedError(sa.exc.InvalidRequestError):
- """TODO"""
+ """Base for exceptions that involve expected mappings not present."""
class DetachedInstanceError(sa.exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
diff --git a/lib/sqlalchemy/orm/identity.py b/lib/sqlalchemy/orm/identity.py
index 4650b066f..30c3a06b7 100644
--- a/lib/sqlalchemy/orm/identity.py
+++ b/lib/sqlalchemy/orm/identity.py
@@ -15,7 +15,7 @@ class IdentityMap(dict):
self._mutable_attrs = set()
self._modified = set()
self._wr = weakref.ref(self)
-
+
def replace(self, state):
raise NotImplementedError()
@@ -61,7 +61,7 @@ class IdentityMap(dict):
def has_key(self, key):
return key in self
-
+
def popitem(self):
raise NotImplementedError("IdentityMap uses remove() to remove data")
@@ -81,6 +81,9 @@ class IdentityMap(dict):
raise NotImplementedError("IdentityMap uses remove() to remove data")
class WeakInstanceDict(IdentityMap):
+ def __init__(self):
+ IdentityMap.__init__(self)
+ self._remove_mutex = base_util.threading.Lock()
def __getitem__(self, key):
state = dict.__getitem__(self, key)
@@ -134,8 +137,13 @@ class WeakInstanceDict(IdentityMap):
self.remove(state)
def remove(self, state):
- if dict.pop(self, state.key) is not state:
- raise AssertionError("State %s is not present in this identity map" % state)
+ self._remove_mutex.acquire()
+ try:
+ if dict.pop(self, state.key) is not state:
+ raise AssertionError("State %s is not present in this identity map" % state)
+ finally:
+ self._remove_mutex.release()
+
self._manage_removed_state(state)
def discard(self, state):
@@ -153,43 +161,56 @@ class WeakInstanceDict(IdentityMap):
if o is None:
return default
return o
-
- # Py2K
+
+
def items(self):
+ # Py2K
return list(self.iteritems())
-
+
def iteritems(self):
- for state in dict.itervalues(self):
# end Py2K
- # Py3K
- #def items(self):
- # for state in dict.values(self):
- value = state.obj()
- if value is not None:
- yield state.key, value
+ self._remove_mutex.acquire()
+ try:
+ result = []
+ for state in dict.values(self):
+ value = state.obj()
+ if value is not None:
+ result.append((state.key, value))
- # Py2K
+ return iter(result)
+ finally:
+ self._remove_mutex.release()
+
def values(self):
+ # Py2K
return list(self.itervalues())
def itervalues(self):
- for state in dict.itervalues(self):
# end Py2K
- # Py3K
- #def values(self):
- # for state in dict.values(self):
- instance = state.obj()
- if instance is not None:
- yield instance
+ self._remove_mutex.acquire()
+ try:
+ result = []
+ for state in dict.values(self):
+ value = state.obj()
+ if value is not None:
+ result.append(value)
+ return iter(result)
+ finally:
+ self._remove_mutex.release()
+
def all_states(self):
- # Py3K
- # return list(dict.values(self))
+ self._remove_mutex.acquire()
+ try:
+ # Py3K
+ # return list(dict.values(self))
- # Py2K
- return dict.values(self)
- # end Py2K
-
+ # Py2K
+ return dict.values(self)
+ # end Py2K
+ finally:
+ self._remove_mutex.release()
+
def prune(self):
return 0
diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py
index edd0558ac..0a65c8a44 100644
--- a/lib/sqlalchemy/orm/interfaces.py
+++ b/lib/sqlalchemy/orm/interfaces.py
@@ -51,7 +51,7 @@ MANYTOMANY = util.symbol('MANYTOMANY')
class MapperExtension(object):
"""Base implementation for customizing ``Mapper`` behavior.
-
+
New extension classes subclass ``MapperExtension`` and are specified
using the ``extension`` mapper() argument, which is a single
``MapperExtension`` or a list of such. A single mapper
@@ -74,8 +74,9 @@ class MapperExtension(object):
when this symbol is returned. Like EXT_CONTINUE, it also
has additional significance in some cases that a default
mapper activity will not be performed.
-
+
"""
+
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
@@ -185,7 +186,7 @@ class MapperExtension(object):
\**flags
extra information about the row, same as criterion in
``create_row_processor()`` method of
- :class:`~sqlalchemy.orm.interfaces.MapperProperty`
+ :class:`~sqlalchemy.orm.interfaces.MapperProperty`
"""
return EXT_CONTINUE
@@ -324,10 +325,10 @@ class MapperExtension(object):
def after_delete(self, mapper, connection, instance):
"""Receive an object instance after that instance is deleted.
-
- The return value is only significant within the ``MapperExtension``
+
+ The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
-
+
"""
return EXT_CONTINUE
@@ -552,11 +553,29 @@ class MapperProperty(object):
return operator(self.comparator, value)
class PropComparator(expression.ColumnOperators):
- """defines comparison operations for MapperProperty objects.
+ """Defines comparison operations for MapperProperty objects.
+
+ User-defined subclasses of :class:`.PropComparator` may be created. The
+ built-in Python comparison and math operator methods, such as
+ ``__eq__()``, ``__lt__()``, ``__add__()``, can be overridden to provide
+ new operator behavior. The custom :class:`.PropComparator` is passed to
+ the mapper property via the ``comparator_factory`` argument. In each case,
+ the appropriate subclass of :class:`.PropComparator` should be used::
+
+ from sqlalchemy.orm.properties import \\
+ ColumnProperty,\\
+ CompositeProperty,\\
+ RelationshipProperty
- PropComparator instances should also define an accessor 'property'
- which returns the MapperProperty associated with this
- PropComparator.
+ class MyColumnComparator(ColumnProperty.Comparator):
+ pass
+
+ class MyCompositeComparator(CompositeProperty.Comparator):
+ pass
+
+ class MyRelationshipComparator(RelationshipProperty.Comparator):
+ pass
+
"""
def __init__(self, prop, mapper, adapter=None):
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index c3e4b042e..49f5d2190 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -30,6 +30,7 @@ from sqlalchemy.orm.util import (
ExtensionCarrier, _INSTRUMENTOR, _class_to_mapper,
_state_mapper, class_mapper, instance_str, state_str,
)
+import sys
__all__ = (
'Mapper',
@@ -43,6 +44,7 @@ _new_mappers = False
_already_compiling = False
_none_set = frozenset([None])
+_memoized_compiled_property = util.group_expirable_memoized_property()
# a list of MapperExtensions that will be installed in all mappers by default
global_extensions = []
@@ -102,7 +104,7 @@ class Mapper(object):
"""Construct a new mapper.
Mappers are normally constructed via the
- :func:`~sqlalchemy.orm.mapper` function. See for details.
+ :func:`~sqlalchemy.orm.mapper` function. See that function for details.
"""
@@ -192,8 +194,14 @@ class Mapper(object):
else:
self.polymorphic_map = _polymorphic_map
- self.include_properties = include_properties
- self.exclude_properties = exclude_properties
+ if include_properties is not None:
+ self.include_properties = util.to_set(include_properties)
+ else:
+ self.include_properties = None
+ if exclude_properties:
+ self.exclude_properties = util.to_set(exclude_properties)
+ else:
+ self.exclude_properties = None
self.compiled = False
@@ -210,6 +218,7 @@ class Mapper(object):
global _new_mappers
_new_mappers = True
self._log("constructed")
+ self._expire_memoizations()
finally:
_COMPILE_MUTEX.release()
@@ -274,11 +283,6 @@ class Mapper(object):
self.version_id_col = self.inherits.version_id_col
self.version_id_generator = self.inherits.version_id_generator
- for mapper in self.iterate_to_root():
- util.reset_memoized(mapper, '_equivalent_columns')
- util.reset_memoized(mapper, '_sorted_tables')
- util.reset_memoized(mapper, '_compiled_cache')
-
if self.order_by is False and \
not self.concrete and \
self.inherits.order_by is not False:
@@ -471,7 +475,7 @@ class Mapper(object):
for col in self._columntoproperty
if not hasattr(col, 'table') or
col.table not in self._cols_by_table)
-
+
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self.primary_key_argument:
@@ -479,13 +483,14 @@ class Mapper(object):
if k.table not in self._pks_by_table:
self._pks_by_table[k.table] = util.OrderedSet()
self._pks_by_table[k.table].add(k)
-
- if self.mapped_table not in self._pks_by_table or \
- len(self._pks_by_table[self.mapped_table]) == 0:
- raise sa_exc.ArgumentError(
- "Mapper %s could not assemble any primary "
- "key columns for mapped table '%s'" %
- (self, self.mapped_table.description))
+
+ # otherwise, see that we got a full PK for the mapped table
+ elif self.mapped_table not in self._pks_by_table or \
+ len(self._pks_by_table[self.mapped_table]) == 0:
+ raise sa_exc.ArgumentError(
+ "Mapper %s could not assemble any primary "
+ "key columns for mapped table '%s'" %
+ (self, self.mapped_table.description))
if self.inherits and \
not self.concrete and \
@@ -526,7 +531,7 @@ class Mapper(object):
# table columns mapped to lists of MapperProperty objects
# using a list allows a single column to be defined as
# populating multiple object attributes
- self._columntoproperty = util.column_dict()
+ self._columntoproperty = _ColumnMapping(self)
# load custom properties
if self._init_properties:
@@ -537,7 +542,7 @@ class Mapper(object):
if self.inherits:
for key, prop in self.inherits._props.iteritems():
if key not in self._props and \
- not self._should_exclude(key, key, local=False):
+ not self._should_exclude(key, key, local=False, column=None):
self._adapt_inherited_property(key, prop, False)
# create properties for each column in the mapped table,
@@ -550,7 +555,8 @@ class Mapper(object):
if self._should_exclude(
column.key, column_key,
- local=self.local_table.c.contains_column(column)
+ local=self.local_table.c.contains_column(column),
+ column=column
):
continue
@@ -583,7 +589,7 @@ class Mapper(object):
% col.description)
else:
instrument = True
- if self._should_exclude(col.key, col.key, local=False):
+ if self._should_exclude(col.key, col.key, local=False, column=col):
raise sa_exc.InvalidRequestError(
"Cannot exclude or override the discriminator column %r" %
col.key)
@@ -625,8 +631,18 @@ class Mapper(object):
# existing ColumnProperty from an inheriting mapper.
# make a copy and append our column to it
prop = prop.copy()
+ else:
+ util.warn(
+ "Implicitly combining column %s with column "
+ "%s under attribute '%s'. This usage will be "
+ "prohibited in 0.7. Please configure one "
+ "or more attributes for these same-named columns "
+ "explicitly."
+ % (prop.columns[-1], column, key))
+
prop.columns.append(column)
self._log("appending to existing ColumnProperty %s" % (key))
+
elif prop is None or isinstance(prop, ConcreteInheritedProperty):
mapped_column = []
for c in columns:
@@ -772,12 +788,13 @@ class Mapper(object):
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_compile_failed', False):
- raise sa_exc.InvalidRequestError(
- "One or more mappers failed to compile. "
- "Exception was probably "
- "suppressed within a hasattr() call. "
- "Message was: %s" %
- mapper._compile_failed)
+ e = sa_exc.InvalidRequestError(
+ "One or more mappers failed to initialize - "
+ "can't proceed with initialization of other "
+ "mappers. Original exception was: %s"
+ % mapper._compile_failed)
+ e._compile_failed = mapper._compile_failed
+ raise e
if not mapper.compiled:
mapper._post_configure_properties()
@@ -786,11 +803,12 @@ class Mapper(object):
finally:
_already_compiling = False
except:
- import sys
exc = sys.exc_info()[1]
- self._compile_failed = exc
+ if not hasattr(exc, '_compile_failed'):
+ self._compile_failed = exc
raise
finally:
+ self._expire_memoizations()
_COMPILE_MUTEX.release()
def _post_configure_properties(self):
@@ -835,7 +853,11 @@ class Mapper(object):
"""
self._init_properties[key] = prop
self._configure_property(key, prop, init=self.compiled)
+ self._expire_memoizations()
+ def _expire_memoizations(self):
+ for mapper in self.iterate_to_root():
+ _memoized_compiled_property.expire_instance(mapper)
def _log(self, msg, *args):
self.logger.info(
@@ -911,7 +933,19 @@ class Mapper(object):
"Mapper '%s' has no property '%s'" % (self, key))
else:
return None
+
+ @util.deprecated('0.6.4',
+ 'Call to deprecated function mapper._get_col_to_pr'
+ 'op(). Use mapper.get_property_by_column()')
+ def _get_col_to_prop(self, col):
+ return self._columntoproperty[col]
+
+ def get_property_by_column(self, column):
+ """Given a :class:`.Column` object, return the
+ :class:`.MapperProperty` which maps this column."""
+ return self._columntoproperty[column]
+
@property
def iterate_properties(self):
"""return an iterator of all MapperProperty objects."""
@@ -928,7 +962,7 @@ class Mapper(object):
"""
if spec == '*':
- mappers = list(self.polymorphic_iterator())
+ mappers = list(self.self_and_descendants)
elif spec:
mappers = [_class_to_mapper(m) for m in util.to_list(spec)]
for m in mappers:
@@ -967,7 +1001,7 @@ class Mapper(object):
return from_obj
- @property
+ @_memoized_compiled_property
def _single_table_criterion(self):
if self.single and \
self.inherits and \
@@ -975,18 +1009,17 @@ class Mapper(object):
self.polymorphic_identity is not None:
return self.polymorphic_on.in_(
m.polymorphic_identity
- for m in self.polymorphic_iterator())
+ for m in self.self_and_descendants)
else:
return None
-
-
- @util.memoized_property
+
+ @_memoized_compiled_property
def _with_polymorphic_mappers(self):
if not self.with_polymorphic:
return [self]
return self._mappers_from_spec(*self.with_polymorphic)
- @util.memoized_property
+ @_memoized_compiled_property
def _with_polymorphic_selectable(self):
if not self.with_polymorphic:
return self.mapped_table
@@ -1011,6 +1044,11 @@ class Mapper(object):
else:
return mappers, self._selectable_from_mappers(mappers)
+ @_memoized_compiled_property
+ def _polymorphic_properties(self):
+ return tuple(self._iterate_polymorphic_properties(
+ self._with_polymorphic_mappers))
+
def _iterate_polymorphic_properties(self, mappers=None):
"""Return an iterator of MapperProperty objects which will render into
a SELECT."""
@@ -1042,7 +1080,7 @@ class Mapper(object):
"provided by the get_property() and iterate_properties "
"accessors.")
- @util.memoized_property
+ @_memoized_compiled_property
def _get_clause(self):
"""create a "get clause" based on the primary key. this is used
by query.get() and many-to-one lazyloads to load this item
@@ -1054,7 +1092,7 @@ class Mapper(object):
return sql.and_(*[k==v for (k, v) in params]), \
util.column_dict(params)
- @util.memoized_property
+ @_memoized_compiled_property
def _equivalent_columns(self):
"""Create a map of all *equivalent* columns, based on
the determination of column pairs that are equated to
@@ -1086,7 +1124,7 @@ class Mapper(object):
result[binary.right].add(binary.left)
else:
result[binary.right] = util.column_set((binary.left,))
- for mapper in self.base_mapper.polymorphic_iterator():
+ for mapper in self.base_mapper.self_and_descendants:
if mapper.inherit_condition is not None:
visitors.traverse(
mapper.inherit_condition, {},
@@ -1099,7 +1137,7 @@ class Mapper(object):
(MapperProperty, attributes.InstrumentedAttribute)) and \
hasattr(obj, '__get__')
- def _should_exclude(self, name, assigned_name, local):
+ def _should_exclude(self, name, assigned_name, local, column):
"""determine whether a particular property should be implicitly
present on the class.
@@ -1121,13 +1159,17 @@ class Mapper(object):
getattr(self.class_, assigned_name)):
return True
- if (self.include_properties is not None and
- name not in self.include_properties):
+ if self.include_properties is not None and \
+ name not in self.include_properties and \
+ (column is None or column not in self.include_properties):
self._log("not including property %s" % (name))
return True
- if (self.exclude_properties is not None and
- name in self.exclude_properties):
+ if self.exclude_properties is not None and \
+ (
+ name in self.exclude_properties or \
+ (column is not None and column in self.exclude_properties)
+ ):
self._log("excluding property %s" % (name))
return True
@@ -1160,6 +1202,22 @@ class Mapper(object):
yield m
m = m.inherits
+ @_memoized_compiled_property
+ def self_and_descendants(self):
+ """The collection including this mapper and all descendant mappers.
+
+ This includes not just the immediately inheriting mappers but
+ all their inheriting mappers as well.
+
+ """
+ descendants = []
+ stack = deque([self])
+ while stack:
+ item = stack.popleft()
+ descendants.append(item)
+ stack.extend(item._inheriting_mappers)
+ return tuple(descendants)
+
def polymorphic_iterator(self):
"""Iterate through the collection including this mapper and
all descendant mappers.
@@ -1169,14 +1227,9 @@ class Mapper(object):
To iterate through an entire hierarchy, use
``mapper.base_mapper.polymorphic_iterator()``.
-
+
"""
- stack = deque([self])
- while stack:
- item = stack.popleft()
- yield item
- stack.extend(item._inheriting_mappers)
-
+ return iter(self.self_and_descendants)
def primary_mapper(self):
"""Return the primary mapper corresponding to this mapper's class key
@@ -1240,33 +1293,15 @@ class Mapper(object):
def _primary_key_from_state(self, state):
dict_ = state.dict
- return [
- self._get_state_attr_by_column(state, dict_, column) for
- column in self.primary_key]
-
- def _get_col_to_prop(self, column):
- try:
- return self._columntoproperty[column]
- except KeyError:
- prop = self._props.get(column.key, None)
- if prop:
- raise orm_exc.UnmappedColumnError(
- "Column '%s.%s' is not available, due to "
- "conflicting property '%s':%r" %
- (column.table.name, column.name,
- column.key, prop))
- else:
- raise orm_exc.UnmappedColumnError(
- "No column %s is configured on mapper %s..." %
- (column, self))
+ return [self._get_state_attr_by_column(state, dict_, column) for
+ column in self.primary_key]
# TODO: improve names?
- def _get_state_attr_by_column(self, state, dict_, column):
- return self._get_col_to_prop(column)._getattr(state, dict_, column)
+ def _get_state_attr_by_column(self, state, dict_, column, passive=False):
+ return self._columntoproperty[column]._getattr(state, dict_, column, passive=passive)
def _set_state_attr_by_column(self, state, dict_, column, value):
- return self._get_col_to_prop(column).\
- _setattr(state, dict_, value, column)
+ return self._columntoproperty[column]._setattr(state, dict_, value, column)
def _get_committed_attr_by_column(self, obj, column):
state = attributes.instance_state(obj)
@@ -1275,9 +1310,8 @@ class Mapper(object):
def _get_committed_state_attr_by_column(self, state, dict_, column,
passive=False):
- return self._get_col_to_prop(column).\
- _getcommitted(state, dict_,
- column, passive=passive)
+ return self._columntoproperty[column]._getcommitted(
+ state, dict_, column, passive=passive)
def _optimized_get_statement(self, state, attribute_names):
"""assemble a WHERE clause which retrieves a given state by primary
@@ -1354,11 +1388,11 @@ class Mapper(object):
"""Iterate each element and its mapper in an object graph,
for all relationships that meet the given cascade rule.
- ``type\_``:
+ :param type_:
The name of the cascade rule (i.e. save-update, delete,
etc.)
- ``state``:
+ :param state:
The lead InstanceState. child items will be processed per
the relationships defined for this object's mapper.
@@ -1387,14 +1421,14 @@ class Mapper(object):
except StopIteration:
visitables.pop()
- @util.memoized_property
+ @_memoized_compiled_property
def _compiled_cache(self):
return util.LRUCache(self._compiled_cache_size)
- @util.memoized_property
+ @_memoized_compiled_property
def _sorted_tables(self):
table_to_mapper = {}
- for mapper in self.base_mapper.polymorphic_iterator():
+ for mapper in self.base_mapper.self_and_descendants:
for t in mapper.tables:
table_to_mapper[t] = mapper
@@ -2341,6 +2375,11 @@ def validates(*names):
can then raise validation exceptions to halt the process from continuing,
or can modify or replace the value before proceeding. The function
should otherwise return the given value.
+
+ Note that a validator for a collection **cannot** issue a load of that
+ collection within the validation routine - this usage raises
+ an assertion to avoid recursion overflows. This is a reentrant
+ condition which is not supported.
"""
def wrap(fn):
@@ -2411,7 +2450,8 @@ def _load_scalar_attributes(state, attribute_names):
# this codepath is rare - only valid when inside a flush, and the
# object is becoming persistent but hasn't yet been assigned an identity_key.
# check here to ensure we have the attrs we need.
- pk_attrs = [mapper._get_col_to_prop(col).key for col in mapper.primary_key]
+ pk_attrs = [mapper._columntoproperty[col].key
+ for col in mapper.primary_key]
if state.expired_attributes.intersection(pk_attrs):
raise sa_exc.InvalidRequestError("Instance %s cannot be refreshed - it's not "
" persistent and does not "
@@ -2438,3 +2478,21 @@ def _load_scalar_attributes(state, attribute_names):
raise orm_exc.ObjectDeletedError(
"Instance '%s' has been deleted." %
state_str(state))
+
+
+class _ColumnMapping(util.py25_dict):
+ """Error reporting helper for mapper._columntoproperty."""
+
+ def __init__(self, mapper):
+ self.mapper = mapper
+
+ def __missing__(self, column):
+ prop = self.mapper._props.get(column)
+ if prop:
+ raise orm_exc.UnmappedColumnError(
+ "Column '%s.%s' is not available, due to "
+ "conflicting property '%s':%r" % (
+ column.table.name, column.name, column.key, prop))
+ raise orm_exc.UnmappedColumnError(
+ "No column %s is configured on mapper %s..." %
+ (column, self.mapper))
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
index 7e19d7b16..80443a7f3 100644
--- a/lib/sqlalchemy/orm/properties.py
+++ b/lib/sqlalchemy/orm/properties.py
@@ -60,7 +60,17 @@ class ColumnProperty(StrategizedProperty):
self.__class__.Comparator)
self.descriptor = kwargs.pop('descriptor', None)
self.extension = kwargs.pop('extension', None)
- self.doc = kwargs.pop('doc', getattr(columns[0], 'doc', None))
+
+ if 'doc' in kwargs:
+ self.doc = kwargs.pop('doc')
+ else:
+ for col in reversed(self.columns):
+ doc = getattr(col, 'doc', None)
+ if doc is not None:
+ self.doc = doc
+ break
+ else:
+ self.doc = None
if kwargs:
raise TypeError(
@@ -106,8 +116,8 @@ class ColumnProperty(StrategizedProperty):
group=self.group,
*self.columns)
- def _getattr(self, state, dict_, column):
- return state.get_impl(self.key).get(state, dict_)
+ def _getattr(self, state, dict_, column, passive=False):
+ return state.get_impl(self.key).get(state, dict_, passive=passive)
def _getcommitted(self, state, dict_, column, passive=False):
return state.get_impl(self.key).\
@@ -181,8 +191,8 @@ class CompositeProperty(ColumnProperty):
# which issues assertions that do not apply to CompositeColumnProperty
super(ColumnProperty, self).do_init()
- def _getattr(self, state, dict_, column):
- obj = state.get_impl(self.key).get(state, dict_)
+ def _getattr(self, state, dict_, column, passive=False):
+ obj = state.get_impl(self.key).get(state, dict_, passive=passive)
return self.get_col_value(column, obj)
def _getcommitted(self, state, dict_, column, passive=False):
@@ -434,6 +444,7 @@ class RelationshipProperty(StrategizedProperty):
comparator_factory=None,
single_parent=False, innerjoin=False,
doc=None,
+ load_on_pending=False,
strategy_class=None, _local_remote_pairs=None, query_class=None):
self.uselist = uselist
@@ -458,6 +469,7 @@ class RelationshipProperty(StrategizedProperty):
self.join_depth = join_depth
self.local_remote_pairs = _local_remote_pairs
self.extension = extension
+ self.load_on_pending = load_on_pending
self.comparator_factory = comparator_factory or \
RelationshipProperty.Comparator
self.comparator = self.comparator_factory(self, None)
@@ -710,7 +722,9 @@ class RelationshipProperty(StrategizedProperty):
self.prop.parent.compile()
return self.prop
- def compare(self, op, value, value_is_parent=False, alias_secondary=True):
+ def compare(self, op, value,
+ value_is_parent=False,
+ alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
@@ -720,14 +734,15 @@ class RelationshipProperty(StrategizedProperty):
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
- return self._optimized_compare(value,
- value_is_parent=value_is_parent,
- alias_secondary=alias_secondary)
+ return self._optimized_compare(value,
+ value_is_parent=value_is_parent,
+ alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
- adapt_source=None, alias_secondary=True):
+ adapt_source=None,
+ alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
return self._get_strategy(strategies.LazyLoader).lazy_clause(value,
@@ -1197,6 +1212,10 @@ class RelationshipProperty(StrategizedProperty):
'when single_parent is not set. Set '
'single_parent=True on the relationship().'
% self)
+ if self.direction is MANYTOONE and self.passive_deletes:
+ util.warn("On %s, 'passive_deletes' is normally configured "
+ "on one-to-many, one-to-one, many-to-many relationships only."
+ % self)
def _determine_local_remote_pairs(self):
if not self.local_remote_pairs:
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 18ffd108a..b22a10b55 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -32,7 +32,7 @@ from sqlalchemy.orm import (
from sqlalchemy.orm.util import (
AliasedClass, ORMAdapter, _entity_descriptor, _entity_info,
_is_aliased_class, _is_mapped_class, _orm_columns, _orm_selectable,
- join as orm_join,
+ join as orm_join,with_parent
)
@@ -98,6 +98,7 @@ class Query(object):
_attributes = util.frozendict()
_with_options = ()
_with_hints = ()
+ _enable_single_crit = True
def __init__(self, entities, session=None):
self.session = session
@@ -633,44 +634,41 @@ class Query(object):
@_generative()
def populate_existing(self):
- """Return a Query that will refresh all instances loaded.
-
- This includes all entities accessed from the database, including
- secondary entities, eagerly-loaded collection items.
-
- All changes present on entities which are already present in the
- session will be reset and the entities will all be marked "clean".
-
- An alternative to populate_existing() is to expire the Session
- fully using session.expire_all().
+ """Return a :class:`Query` that will expire and refresh all instances
+ as they are loaded, or reused from the current :class:`.Session`.
+
+ :meth:`.populate_existing` does not improve behavior when
+ the ORM is used normally - the :class:`.Session` object's usual
+ behavior of maintaining a transaction and expiring all attributes
+ after rollback or commit handles object state automatically.
+ This method is not intended for general use.
"""
self._populate_existing = True
def with_parent(self, instance, property=None):
- """Add a join criterion corresponding to a relationship to the given
- parent instance.
-
- instance
- a persistent or detached instance which is related to class
- represented by this query.
-
- property
- string name of the property which relates this query's class to the
- instance. if None, the method will attempt to find a suitable
- property.
-
- Currently, this method only works with immediate parent relationships,
- but in the future may be enhanced to work across a chain of parent
- mappers.
-
+ """Add filtering criterion that relates the given instance
+ to a child object or collection, using its attribute state
+ as well as an established :func:`.relationship()`
+ configuration.
+
+ The method uses the :func:`.with_parent` function to generate
+ the clause, the result of which is passed to :meth:`.Query.filter`.
+
+ Parameters are the same as :func:`.with_parent`, with the exception
+ that the given property can be None, in which case a search is
+ performed against this :class:`.Query` object's target mapper.
+
"""
- from sqlalchemy.orm import properties
- mapper = object_mapper(instance)
+
if property is None:
+ from sqlalchemy.orm import properties
+ mapper = object_mapper(instance)
+
for prop in mapper.iterate_properties:
if isinstance(prop, properties.PropertyLoader) and \
prop.mapper is self._mapper_zero():
+ property = prop
break
else:
raise sa_exc.InvalidRequestError(
@@ -680,11 +678,8 @@ class Query(object):
self._mapper_zero().class_.__name__,
instance.__class__.__name__)
)
- else:
- prop = mapper.get_property(property, resolve_synonyms=True)
- return self.filter(prop.compare(
- operators.eq,
- instance, value_is_parent=True))
+
+ return self.filter(with_parent(instance, property))
@_generative()
def add_entity(self, entity, alias=None):
@@ -707,12 +702,17 @@ class Query(object):
"""
fromclause = self.with_labels().enable_eagerloads(False).\
+ _enable_single_crit(False).\
statement.correlate(None)
q = self._from_selectable(fromclause)
if entities:
q._set_entities(entities)
return q
-
+
+ @_generative()
+ def _enable_single_crit(self, val):
+ self._enable_single_crit = val
+
@_generative()
def _from_selectable(self, fromclause):
for attr in ('_statement', '_criterion', '_order_by', '_group_by',
@@ -779,8 +779,13 @@ class Query(object):
def options(self, *args):
"""Return a new Query object, applying the given list of
- MapperOptions.
-
+ mapper options.
+
+ Most supplied options regard changing how column- and
+ relationship-mapped attributes are loaded. See the sections
+ :ref:`deferred` and :ref:`loading_toplevel` for reference
+ documentation.
+
"""
return self._options(False, *args)
@@ -1937,7 +1942,8 @@ class Query(object):
else:
from_obj = context.froms
- self._adjust_for_single_inheritance(context)
+ if self._enable_single_crit:
+ self._adjust_for_single_inheritance(context)
whereclause = context.whereclause
@@ -2274,7 +2280,8 @@ class Query(object):
# i.e. when each _MappedEntity has its own FROM
froms = context.froms
- self._adjust_for_single_inheritance(context)
+ if self._enable_single_crit:
+ self._adjust_for_single_inheritance(context)
if not context.primary_columns:
if self._only_load_props:
@@ -2406,6 +2413,7 @@ class Query(object):
selected from the total results.
"""
+
for entity, (mapper, adapter, s, i, w) in \
self._mapper_adapter_map.iteritems():
single_crit = mapper._single_table_criterion
@@ -2567,8 +2575,12 @@ class _MapperEntity(_QueryEntity):
)
)
- for value in self.mapper._iterate_polymorphic_properties(
- self._with_polymorphic):
+ if self._with_polymorphic:
+ poly_properties = self.mapper._iterate_polymorphic_properties(
+ self._with_polymorphic)
+ else:
+ poly_properties = self.mapper._polymorphic_properties
+ for value in poly_properties:
if query._only_load_props and \
value.key not in query._only_load_props:
continue
diff --git a/lib/sqlalchemy/orm/scoping.py b/lib/sqlalchemy/orm/scoping.py
index af518e407..c1a5fd577 100644
--- a/lib/sqlalchemy/orm/scoping.py
+++ b/lib/sqlalchemy/orm/scoping.py
@@ -22,9 +22,13 @@ class ScopedSession(object):
Usage::
- Session = scoped_session(sessionmaker(autoflush=True))
+ Session = scoped_session(sessionmaker())
- ... use session normally.
+ ... use Session normally.
+
+ The internal registry is accessible as well,
+ and by default is an instance of :class:`.ThreadLocalRegistry`.
+
"""
@@ -89,6 +93,7 @@ class ScopedSession(object):
class when called.
e.g.::
+
Session = scoped_session(sessionmaker())
class MyClass(object):
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index 06d5b89a1..bab98f4fa 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -22,6 +22,7 @@ from sqlalchemy.orm.util import (
from sqlalchemy.orm.mapper import Mapper, _none_set
from sqlalchemy.orm.unitofwork import UOWTransaction
from sqlalchemy.orm import identity
+import sys
__all__ = ['Session', 'SessionTransaction', 'SessionExtension']
@@ -206,7 +207,9 @@ class SessionTransaction(object):
single: thread safety; SessionTransaction
"""
-
+
+ _rollback_exception = None
+
def __init__(self, session, parent=None, nested=False):
self.session = session
self._connections = {}
@@ -229,9 +232,21 @@ class SessionTransaction(object):
def _assert_is_active(self):
self._assert_is_open()
if not self._active:
- raise sa_exc.InvalidRequestError(
- "The transaction is inactive due to a rollback in a "
- "subtransaction. Issue rollback() to cancel the transaction.")
+ if self._rollback_exception:
+ raise sa_exc.InvalidRequestError(
+ "This Session's transaction has been rolled back "
+ "due to a previous exception during flush."
+ " To begin a new transaction with this Session, "
+ "first issue Session.rollback()."
+ " Original exception was: %s"
+ % self._rollback_exception
+ )
+ else:
+ raise sa_exc.InvalidRequestError(
+ "This Session's transaction has been rolled back "
+ "by a nested rollback() call. To begin a new "
+ "transaction, issue Session.rollback() first."
+ )
def _assert_is_open(self, error_msg="The transaction is closed"):
if self.session is None:
@@ -288,14 +303,16 @@ class SessionTransaction(object):
assert not self.session._deleted
for s in self.session.identity_map.all_states():
- _expire_state(s, s.dict, None, instance_dict=self.session.identity_map)
+ _expire_state(s, s.dict, None,
+ instance_dict=self.session.identity_map)
def _remove_snapshot(self):
assert self._is_transaction_boundary
if not self.nested and self.session.expire_on_commit:
for s in self.session.identity_map.all_states():
- _expire_state(s, s.dict, None, instance_dict=self.session.identity_map)
+ _expire_state(s, s.dict, None,
+ instance_dict=self.session.identity_map)
def _connection_for_bind(self, bind):
self._assert_is_active()
@@ -379,7 +396,7 @@ class SessionTransaction(object):
self.close()
return self._parent
- def rollback(self):
+ def rollback(self, _capture_exception=False):
self._assert_is_open()
stx = self.session.transaction
@@ -397,6 +414,8 @@ class SessionTransaction(object):
transaction._deactivate()
self.close()
+ if self._parent and _capture_exception:
+ self._parent._rollback_exception = sys.exc_info()[1]
return self._parent
def _rollback_impl(self):
@@ -415,7 +434,8 @@ class SessionTransaction(object):
def close(self):
self.session.transaction = self._parent
if self._parent is None:
- for connection, transaction, autoclose in set(self._connections.values()):
+ for connection, transaction, autoclose in \
+ set(self._connections.values()):
if autoclose:
connection.close()
else:
@@ -446,71 +466,8 @@ class SessionTransaction(object):
class Session(object):
"""Manages persistence operations for ORM-mapped objects.
- The Session is the front end to SQLAlchemy's **Unit of Work**
- implementation. The concept behind Unit of Work is to track modifications
- to a field of objects, and then be able to flush those changes to the
- database in a single operation.
-
- SQLAlchemy's unit of work includes these functions:
-
- * The ability to track in-memory changes on scalar- and collection-based
- object attributes, such that database persistence operations can be
- assembled based on those changes.
-
- * The ability to organize individual SQL queries and population of newly
- generated primary and foreign key-holding attributes during a persist
- operation such that referential integrity is maintained at all times.
-
- * The ability to maintain insert ordering against the order in which new
- instances were added to the session.
-
- * An Identity Map, which is a dictionary keying instances to their unique
- primary key identity. This ensures that only one copy of a particular
- entity is ever present within the session, even if repeated load
- operations for the same entity occur. This allows many parts of an
- application to get a handle to a particular object without any chance of
- modifications going to two different places.
-
- When dealing with instances of mapped classes, an instance may be
- *attached* to a particular Session, else it is *unattached* . An instance
- also may or may not correspond to an actual row in the database. These
- conditions break up into four distinct states:
-
- * *Transient* - an instance that's not in a session, and is not saved to
- the database; i.e. it has no database identity. The only relationship
- such an object has to the ORM is that its class has a ``mapper()``
- associated with it.
-
- * *Pending* - when you ``add()`` a transient instance, it becomes
- pending. It still wasn't actually flushed to the database yet, but it
- will be when the next flush occurs.
-
- * *Persistent* - An instance which is present in the session and has a
- record in the database. You get persistent instances by either flushing
- so that the pending instances become persistent, or by querying the
- database for existing instances (or moving persistent instances from
- other sessions into your local session).
-
- * *Detached* - an instance which has a record in the database, but is not
- in any session. Theres nothing wrong with this, and you can use objects
- normally when they're detached, **except** they will not be able to
- issue any SQL in order to load collections or attributes which are not
- yet loaded, or were marked as "expired".
-
- The session methods which control instance state include ``add()``,
- ``delete()``, ``merge()``, and ``expunge()``.
-
- The Session object is generally **not** threadsafe. A session which is
- set to ``autocommit`` and is only read from may be used by concurrent
- threads if it's acceptable that some object instances may be loaded twice.
-
- The typical pattern to managing Sessions in a multi-threaded environment
- is either to use mutexes to limit concurrent access to one thread at a
- time, or more commonly to establish a unique session for every thread,
- using a threadlocal variable. SQLAlchemy provides a thread-managed
- Session adapter, provided by the :func:`~sqlalchemy.orm.scoped_session`
- function.
-
+ The Session's usage paradigm is described at :ref:`session_toplevel`.
+
"""
public_methods = (
@@ -529,8 +486,9 @@ class Session(object):
query_cls=query.Query):
"""Construct a new Session.
- Arguments to ``Session`` are described using the
- :func:`~sqlalchemy.orm.sessionmaker` function.
+ Arguments to :class:`.Session` are described using the
+ :func:`.sessionmaker` function, which is the
+ typical point of entry.
"""
@@ -1080,6 +1038,14 @@ class Session(object):
if obj is not None:
instance_key = mapper._identity_key_from_state(state)
+
+ if _none_set.issubset(instance_key[1]) and \
+ not mapper.allow_partial_pks or \
+ _none_set.issuperset(instance_key[1]):
+ raise exc.FlushError('Instance %s has a NULL identity '
+ 'key. Check if this flush is occurring at an '
+ 'inappropriate time, such as during a load '
+ 'operation.' % mapperutil.state_str(state))
if state.key is None:
state.key = instance_key
@@ -1505,7 +1471,7 @@ class Session(object):
ext.after_flush(self, flush_context)
transaction.commit()
except:
- transaction.rollback()
+ transaction.rollback(_capture_exception=True)
raise
flush_context.finalize_flush_changes()
diff --git a/lib/sqlalchemy/orm/state.py b/lib/sqlalchemy/orm/state.py
index f6828f5a9..e6502df8c 100644
--- a/lib/sqlalchemy/orm/state.py
+++ b/lib/sqlalchemy/orm/state.py
@@ -333,7 +333,7 @@ class InstanceState(object):
previous = dict_[attr.key]
else:
previous = attr.get(self, dict_)
-
+
if should_copy and previous not in (None, NO_VALUE, NEVER_SET):
previous = attr.copy(previous)
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
index 1c4571aed..3e6b6a21f 100644
--- a/lib/sqlalchemy/orm/strategies.py
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -48,7 +48,7 @@ def _register_attribute(strategy, mapper, useobject,
attribute_ext.append(sessionlib.UOWEventHandler(prop.key))
- for m in mapper.polymorphic_iterator():
+ for m in mapper.self_and_descendants:
if prop is m._props.get(prop.key):
attributes.register_attribute_impl(
@@ -118,17 +118,20 @@ class ColumnLoader(LoaderStrategy):
)
def create_row_processor(self, selectcontext, path, mapper, row, adapter):
- key, col = self.key, self.columns[0]
- if adapter:
- col = adapter.columns[col]
-
- if col is not None and col in row:
- def new_execute(state, dict_, row):
- dict_[key] = row[col]
+ key = self.key
+ # look through list of columns represented here
+ # to see which, if any, is present in the row.
+ for col in self.columns:
+ if adapter:
+ col = adapter.columns[col]
+ if col is not None and col in row:
+ def new_execute(state, dict_, row):
+ dict_[key] = row[col]
+ return new_execute, None
else:
def new_execute(state, dict_, row):
state.expire_attribute_pre_commit(dict_, key)
- return new_execute, None
+ return new_execute, None
log.class_logger(ColumnLoader)
@@ -253,8 +256,8 @@ class LoadDeferredColumns(object):
def __init__(self, state, key):
self.state, self.key = state, key
- def __call__(self, **kw):
- if kw.get('passive') is attributes.PASSIVE_NO_FETCH:
+ def __call__(self, passive=False):
+ if passive is attributes.PASSIVE_NO_FETCH:
return attributes.PASSIVE_NO_RESULT
state = self.state
@@ -401,7 +404,8 @@ class LazyLoader(AbstractRelationshipLoader):
)
def lazy_clause(self, state, reverse_direction=False,
- alias_secondary=False, adapt_source=None):
+ alias_secondary=False,
+ adapt_source=None):
if state is None:
return self._lazy_none_clause(
reverse_direction,
@@ -423,18 +427,26 @@ class LazyLoader(AbstractRelationshipLoader):
else:
mapper = self.parent_property.parent
- def visit_bindparam(bindparam):
- if bindparam.key in bind_to_col:
- # use the "committed" (database) version to get
- # query column values
- # also its a deferred value; so that when used
- # by Query, the committed value is used
- # after an autoflush occurs
- o = state.obj() # strong ref
- bindparam.value = \
- lambda: mapper._get_committed_attr_by_column(
- o, bind_to_col[bindparam.key])
-
+ o = state.obj() # strong ref
+ dict_ = attributes.instance_dict(o)
+
+ # use the "committed state" only if we're in a flush
+ # for this state.
+
+ sess = sessionlib._state_session(state)
+ if sess is not None and sess._flushing:
+ def visit_bindparam(bindparam):
+ if bindparam.key in bind_to_col:
+ bindparam.value = \
+ lambda: mapper._get_committed_state_attr_by_column(
+ state, dict_, bind_to_col[bindparam.key])
+ else:
+ def visit_bindparam(bindparam):
+ if bindparam.key in bind_to_col:
+ bindparam.value = lambda: mapper._get_state_attr_by_column(
+ state, dict_, bind_to_col[bindparam.key])
+
+
if self.parent_property.secondary is not None and alias_secondary:
criterion = sql_util.ClauseAdapter(
self.parent_property.secondary.alias()).\
@@ -442,6 +454,7 @@ class LazyLoader(AbstractRelationshipLoader):
criterion = visitors.cloned_traverse(
criterion, {}, {'bindparam':visit_bindparam})
+
if adapt_source:
criterion = adapt_source(criterion)
return criterion
@@ -465,7 +478,8 @@ class LazyLoader(AbstractRelationshipLoader):
return criterion
def _class_level_loader(self, state):
- if not state.has_identity:
+ if not state.has_identity and \
+ (not self.parent_property.load_on_pending or not state.session_id):
return None
return LoadLazyAttribute(state, self.key)
@@ -555,16 +569,22 @@ class LoadLazyAttribute(object):
def __setstate__(self, state):
self.state, self.key = state
- def __call__(self, **kw):
+ def __call__(self, passive=False):
state = self.state
instance_mapper = mapper._state_mapper(state)
prop = instance_mapper.get_property(self.key)
strategy = prop._get_strategy(LazyLoader)
-
- if kw.get('passive') is attributes.PASSIVE_NO_FETCH and \
- not strategy.use_get:
+ pending = not state.key
+
+ if (
+ passive is attributes.PASSIVE_NO_FETCH and
+ not strategy.use_get
+ ) or (
+ passive is attributes.PASSIVE_ONLY_PERSISTENT and
+ pending
+ ):
return attributes.PASSIVE_NO_RESULT
-
+
if strategy._should_log_debug():
strategy.logger.debug("loading %s",
mapperutil.state_attribute_str(
@@ -580,21 +600,35 @@ class LoadLazyAttribute(object):
q = session.query(prop.mapper)._adapt_all_clauses()
+ # don't autoflush on pending
+ # this would be something that's prominent in the
+ # docs and such
+ if pending:
+ q = q.autoflush(False)
+
if state.load_path:
q = q._with_current_path(state.load_path + (self.key,))
-
+
# if we have a simple primary key load, use mapper.get()
# to possibly save a DB round trip
if strategy.use_get:
ident = []
allnulls = True
+ if session._flushing:
+ get_attr = instance_mapper._get_committed_state_attr_by_column
+ else:
+ get_attr = instance_mapper._get_state_attr_by_column
+
+ # The many-to-one get is intended to be very fast. Note
+ # that we don't want to autoflush() if the get() doesn't
+ # actually have to hit the DB. It is not necessary
+ # now that we use the pending attribute state.
for primary_key in prop.mapper.primary_key:
- val = instance_mapper.\
- _get_committed_state_attr_by_column(
+ val = get_attr(
state,
state.dict,
strategy._equated_columns[primary_key],
- **kw)
+ passive=passive)
if val is attributes.PASSIVE_NO_RESULT:
return val
allnulls = allnulls and val is None
@@ -607,7 +641,7 @@ class LoadLazyAttribute(object):
q = q._conditional_options(*state.load_options)
key = prop.mapper.identity_key_from_primary_key(ident)
- return q._get(key, ident, **kw)
+ return q._get(key, ident, passive=passive)
if prop.order_by:
@@ -623,8 +657,15 @@ class LoadLazyAttribute(object):
if state.load_options:
q = q._conditional_options(*state.load_options)
+
+ lazy_clause = strategy.lazy_clause(state)
+
+ if pending:
+ bind_values = sql_util.bind_values(lazy_clause)
+ if None in bind_values:
+ return None
- q = q.filter(strategy.lazy_clause(state))
+ q = q.filter(lazy_clause)
result = q.all()
if strategy.uselist:
@@ -693,7 +734,7 @@ class SubqueryLoader(AbstractRelationshipLoader):
leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop)
leftmost_attr = [
- leftmost_mapper._get_col_to_prop(c).class_attribute
+ leftmost_mapper._columntoproperty[c].class_attribute
for c in leftmost_cols
]
@@ -724,6 +765,7 @@ class SubqueryLoader(AbstractRelationshipLoader):
("orig_query", SubqueryLoader): orig_query,
('subquery_path', None) : subq_path
}
+ q = q._enable_single_crit(False)
# figure out what's being joined. a.k.a. the fun part
to_join = [
@@ -740,7 +782,7 @@ class SubqueryLoader(AbstractRelationshipLoader):
self._local_remote_columns(self.parent_property)
local_attr = [
- getattr(parent_alias, self.parent._get_col_to_prop(c).key)
+ getattr(parent_alias, self.parent._columntoproperty[c].key)
for c in local_cols
]
q = q.order_by(*local_attr)
@@ -822,7 +864,7 @@ class SubqueryLoader(AbstractRelationshipLoader):
local_cols, remote_cols = self._local_remote_columns(self.parent_property)
remote_attr = [
- self.mapper._get_col_to_prop(c).key
+ self.mapper._columntoproperty[c].key
for c in remote_cols]
q = context.attributes[('subquery', path)]
@@ -940,7 +982,7 @@ class EagerLoader(AbstractRelationshipLoader):
("eager_row_processor", reduced_path)
] = clauses
- for value in self.mapper._iterate_polymorphic_properties():
+ for value in self.mapper._polymorphic_properties:
value.setup(
context,
entity,
diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py
index 3b2a291bd..05298767d 100644
--- a/lib/sqlalchemy/orm/sync.py
+++ b/lib/sqlalchemy/orm/sync.py
@@ -71,7 +71,7 @@ def source_modified(uowcommit, source, source_mapper, synchronize_pairs):
"""
for l, r in synchronize_pairs:
try:
- prop = source_mapper._get_col_to_prop(l)
+ prop = source_mapper._columntoproperty[l]
except exc.UnmappedColumnError:
_raise_col_to_prop(False, source_mapper, l, None, r)
history = uowcommit.get_attribute_history(source, prop.key, passive=True)
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py
index e10891924..830ac3c0c 100644
--- a/lib/sqlalchemy/orm/unitofwork.py
+++ b/lib/sqlalchemy/orm/unitofwork.py
@@ -219,7 +219,7 @@ class UOWTransaction(object):
def states_for_mapper_hierarchy(self, mapper, isdelete, listonly):
checktup = (isdelete, listonly)
- for mapper in mapper.base_mapper.polymorphic_iterator():
+ for mapper in mapper.base_mapper.self_and_descendants:
for state in self.mappers[mapper]:
if self.states[state] == checktup:
yield state
@@ -318,11 +318,11 @@ class IterateMappersMixin(object):
def _mappers(self, uow):
if self.fromparent:
return iter(
- m for m in self.dependency_processor.parent.polymorphic_iterator()
+ m for m in self.dependency_processor.parent.self_and_descendants
if uow._mapper_for_dep[(m, self.dependency_processor)]
)
else:
- return self.dependency_processor.mapper.polymorphic_iterator()
+ return self.dependency_processor.mapper.self_and_descendants
class Preprocess(IterateMappersMixin):
def __init__(self, dependency_processor, fromparent):
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
index 0f4adec00..d68ff4473 100644
--- a/lib/sqlalchemy/orm/util.py
+++ b/lib/sqlalchemy/orm/util.py
@@ -482,18 +482,30 @@ def outerjoin(left, right, onclause=None, join_to_left=True):
return _ORMJoin(left, right, onclause, True, join_to_left)
def with_parent(instance, prop):
- """Return criterion which selects instances with a given parent.
-
- :param instance: a parent instance, which should be persistent
- or detached.
-
- :param property: a class-attached descriptor, MapperProperty or
- string property name
- attached to the parent instance.
-
- :param \**kwargs: all extra keyword arguments are propagated
- to the constructor of Query.
-
+ """Create filtering criterion that relates this query's primary entity
+ to the given related instance, using established :func:`.relationship()`
+ configuration.
+
+ The SQL rendered is the same as that rendered when a lazy loader
+ would fire off from the given parent on that attribute, meaning
+ that the appropriate state is taken from the parent object in
+ Python without the need to render joins to the parent table
+ in the rendered statement.
+
+ As of 0.6.4, this method accepts parent instances in all
+ persistence states, including transient, persistent, and detached.
+ Only the requisite primary key/foreign key attributes need to
+ be populated. Previous versions didn't work with transient
+ instances.
+
+ :param instance:
+ An instance which has some :func:`.relationship`.
+
+ :param property:
+ String property name, or class-bound attribute, which indicates
+ what relationship from the instance should be used to reconcile the
+ parent/child relationship.
+
"""
if isinstance(prop, basestring):
mapper = object_mapper(instance)
@@ -501,7 +513,9 @@ def with_parent(instance, prop):
elif isinstance(prop, attributes.QueryableAttribute):
prop = prop.property
- return prop.compare(operators.eq, instance, value_is_parent=True)
+ return prop.compare(operators.eq,
+ instance,
+ value_is_parent=True)
def _entity_info(entity, compile=True):
diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py
index 9d37b1838..c70a41069 100644
--- a/lib/sqlalchemy/pool.py
+++ b/lib/sqlalchemy/pool.py
@@ -132,13 +132,30 @@ class Pool(log.Identified):
self.add_listener(l)
def unique_connection(self):
+ """Produce a DBAPI connection that is not referenced by any
+ thread-local context.
+
+ This method is different from :meth:`.Pool.connect` only if the
+ ``use_threadlocal`` flag has been set to ``True``.
+
+ """
+
return _ConnectionFairy(self).checkout()
def create_connection(self):
+ """Called by subclasses to create a new ConnectionRecord."""
+
return _ConnectionRecord(self)
def recreate(self):
- """Return a new instance with identical creation arguments."""
+ """Return a new :class:`.Pool`, of the same class as this one
+ and configured with identical creation arguments.
+
+ This method is used in conjunction with :meth:`dispose`
+ to close out an entire :class:`.Pool` and create a new one in
+ its place.
+
+ """
raise NotImplementedError()
@@ -149,11 +166,19 @@ class Pool(log.Identified):
remaining open, It is advised to not reuse the pool once dispose()
is called, and to instead use a new pool constructed by the
recreate() method.
+
"""
raise NotImplementedError()
def connect(self):
+ """Return a DBAPI connection from the pool.
+
+ The connection is instrumented such that when its
+ ``close()`` method is called, the connection will be returned to
+ the pool.
+
+ """
if not self._use_threadlocal:
return _ConnectionFairy(self).checkout()
@@ -169,17 +194,33 @@ class Pool(log.Identified):
return agent.checkout()
def return_conn(self, record):
+ """Given a _ConnectionRecord, return it to the :class:`.Pool`.
+
+ This method is called when an instrumented DBAPI connection
+ has its ``close()`` method called.
+
+ """
if self._use_threadlocal and hasattr(self._threadconns, "current"):
del self._threadconns.current
self.do_return_conn(record)
def get(self):
+ """Return a non-instrumented DBAPI connection from this :class:`.Pool`.
+
+ This is called by ConnectionRecord in order to get its DBAPI
+ resource.
+
+ """
return self.do_get()
def do_get(self):
+ """Implementation for :meth:`get`, supplied by subclasses."""
+
raise NotImplementedError()
def do_return_conn(self, conn):
+ """Implementation for :meth:`return_conn`, supplied by subclasses."""
+
raise NotImplementedError()
def status(self):
diff --git a/lib/sqlalchemy/schema.py b/lib/sqlalchemy/schema.py
index 5b5983858..59f9fae7f 100644
--- a/lib/sqlalchemy/schema.py
+++ b/lib/sqlalchemy/schema.py
@@ -118,7 +118,7 @@ class Table(SchemaItem, expression.TableClause):
:param \*args: Additional positional arguments are used primarily
to add the list of :class:`Column` objects contained within this
table. Similar to the style of a CREATE TABLE statement, other
- :class:`SchemaItem` constructs may be added here, including
+ :class:`.SchemaItem` constructs may be added here, including
:class:`PrimaryKeyConstraint`, and :class:`ForeignKeyConstraint`.
:param autoload: Defaults to False: the Columns for this table should
@@ -450,21 +450,51 @@ class Table(SchemaItem, expression.TableClause):
def tometadata(self, metadata, schema=RETAIN_SCHEMA):
- """Return a copy of this ``Table`` associated with a different
- ``MetaData``."""
+ """Return a copy of this :class:`Table` associated with a different
+ :class:`MetaData`.
+
+ E.g.::
+
+ # create two metadata
+ meta1 = MetaData('sqlite:///querytest.db')
+ meta2 = MetaData()
- try:
- if schema is RETAIN_SCHEMA:
- schema = self.schema
- key = _get_table_key(self.name, schema)
+ # load 'users' from the sqlite engine
+ users_table = Table('users', meta1, autoload=True)
+
+ # create the same Table object for the plain metadata
+ users_table_2 = users_table.tometadata(meta2)
+
+ """
+
+ if schema is RETAIN_SCHEMA:
+ schema = self.schema
+ key = _get_table_key(self.name, schema)
+ if key in metadata.tables:
+ util.warn("Table '%s' already exists within the given "
+ "MetaData - not copying." % self.description)
return metadata.tables[key]
- except KeyError:
- args = []
- for c in self.columns:
- args.append(c.copy(schema=schema))
- for c in self.constraints:
- args.append(c.copy(schema=schema))
- return Table(self.name, metadata, schema=schema, *args)
+
+ args = []
+ for c in self.columns:
+ args.append(c.copy(schema=schema))
+ for c in self.constraints:
+ args.append(c.copy(schema=schema))
+ table = Table(
+ self.name, metadata, schema=schema,
+ *args, **self.kwargs
+ )
+ for index in self.indexes:
+ # skip indexes that would be generated
+ # by the 'index' flag on Column
+ if len(index.columns) == 1 and \
+ list(index.columns)[0].index:
+ continue
+ Index(index.name,
+ unique=index.unique,
+ *[table.c[col] for col in index.columns.keys()],
+ **index.kwargs)
+ return table
class Column(SchemaItem, expression.ColumnClause):
"""Represents a column in a database table."""
@@ -512,7 +542,7 @@ class Column(SchemaItem, expression.ColumnClause):
may not function in all cases.
:param \*args: Additional positional arguments include various
- :class:`SchemaItem` derived constructs which will be applied
+ :class:`.SchemaItem` derived constructs which will be applied
as options to the column. These include instances of
:class:`Constraint`, :class:`ForeignKey`, :class:`ColumnDefault`,
and :class:`Sequence`. In some cases an equivalent keyword
@@ -892,6 +922,10 @@ class Column(SchemaItem, expression.ColumnClause):
"""
fk = [ForeignKey(f.column) for f in self.foreign_keys]
+ if name is None and self.name is None:
+ raise exc.InvalidRequestError("Cannot initialize a sub-selectable"
+ " with this Column object until its 'name' has "
+ "been assigned.")
c = self._constructor(
name or self.name,
self.type,
@@ -1243,7 +1277,23 @@ class DefaultGenerator(SchemaItem):
class ColumnDefault(DefaultGenerator):
"""A plain default value on a column.
- This could correspond to a constant, a callable function, or a SQL clause.
+ This could correspond to a constant, a callable function,
+ or a SQL clause.
+
+ :class:`.ColumnDefault` is generated automatically
+ whenever the ``default``, ``onupdate`` arguments of
+ :class:`.Column` are used. A :class:`.ColumnDefault`
+ can be passed positionally as well.
+
+ For example, the following::
+
+ Column('foo', Integer, default=50)
+
+ Is equivalent to::
+
+ Column('foo', Integer, ColumnDefault(50))
+
+
"""
def __init__(self, arg, **kwargs):
@@ -1374,7 +1424,20 @@ class Sequence(DefaultGenerator):
class FetchedValue(object):
- """A default that takes effect on the database side."""
+ """A marker for a transparent database-side default.
+
+ Use :class:`.FetchedValue` when the database is configured
+ to provide some automatic default for a column.
+
+ E.g.::
+
+ Column('foo', Integer, FetchedValue())
+
+ Would indicate that some trigger or default generator
+ will create a new value for the ``foo`` column during an
+ INSERT.
+
+ """
def __init__(self, for_update=False):
self.for_update = for_update
@@ -1391,7 +1454,26 @@ class FetchedValue(object):
class DefaultClause(FetchedValue):
- """A DDL-specified DEFAULT column value."""
+ """A DDL-specified DEFAULT column value.
+
+ :class:`.DefaultClause` is a :class:`.FetchedValue`
+ that also generates a "DEFAULT" clause when
+ "CREATE TABLE" is emitted.
+
+ :class:`.DefaultClause` is generated automatically
+ whenever the ``server_default``, ``server_onupdate`` arguments of
+ :class:`.Column` are used. A :class:`.DefaultClause`
+ can be passed positionally as well.
+
+ For example, the following::
+
+ Column('foo', Integer, server_default="50")
+
+ Is equivalent to::
+
+ Column('foo', Integer, DefaultClause("50"))
+
+ """
def __init__(self, arg, for_update=False):
util.assert_arg_type(arg, (basestring,
diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py
index 584e43a88..154ede1bf 100644
--- a/lib/sqlalchemy/sql/compiler.py
+++ b/lib/sqlalchemy/sql/compiler.py
@@ -332,6 +332,10 @@ class SQLCompiler(engine.Compiled):
def visit_column(self, column, result_map=None, **kwargs):
name = column.name
+        if name is None:
+            raise exc.CompileError("Cannot compile Column object until "
+                        "its 'name' is assigned.")
+
if not column.is_literal and isinstance(name, sql._generated_label):
name = self._truncated_identifier("colident", name)
diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py
index d184816ab..5df3b8794 100644
--- a/lib/sqlalchemy/sql/expression.py
+++ b/lib/sqlalchemy/sql/expression.py
@@ -1269,10 +1269,15 @@ class ClauseElement(Visitable):
return engine
else:
return None
-
+
+ @util.pending_deprecation('0.7',
+ 'Only SQL expressions which subclass '
+ ':class:`.Executable` may provide the '
+ ':func:`.execute` method.')
def execute(self, *multiparams, **params):
- """Compile and execute this :class:`ClauseElement`."""
-
+ """Compile and execute this :class:`ClauseElement`.
+
+ """
e = self.bind
if e is None:
label = getattr(self, 'description', self.__class__.__name__)
@@ -1284,9 +1289,13 @@ class ClauseElement(Visitable):
raise exc.UnboundExecutionError(msg)
return e._execute_clauseelement(self, multiparams, params)
+ @util.pending_deprecation('0.7',
+ 'Only SQL expressions which subclass '
+ ':class:`.Executable` may provide the '
+ ':func:`.scalar` method.')
def scalar(self, *multiparams, **params):
- """Compile and execute this :class:`ClauseElement`, returning the
- result's scalar representation.
+ """Compile and execute this :class:`ClauseElement`, returning
+ the result's scalar representation.
"""
return self.execute(*multiparams, **params).scalar()
@@ -1843,17 +1852,19 @@ class ColumnElement(ClauseElement, _CompareMixin):
descending selectable.
"""
-
- if name:
- co = ColumnClause(name, selectable, type_=getattr(self,
- 'type', None))
+ if name is None:
+ name = self.anon_label
+ # TODO: may want to change this to anon_label,
+ # or some value that is more useful than the
+ # compiled form of the expression
+ key = str(self)
else:
- name = str(self)
- co = ColumnClause(self.anon_label, selectable,
- type_=getattr(self, 'type', None))
-
+ key = name
+
+ co = ColumnClause(name, selectable, type_=getattr(self,
+ 'type', None))
co.proxies = [self]
- selectable.columns[name] = co
+ selectable.columns[key] = co
return co
def compare(self, other, use_proxies=False, equivalents=None, **kw):
@@ -2401,7 +2412,7 @@ class Executable(_Generative):
COMMIT will be invoked in order to provide its "autocommit" feature.
Typically, all INSERT/UPDATE/DELETE statements as well as
CREATE/DROP statements have autocommit behavior enabled; SELECT
- constructs do not. Use this option when invokving a SELECT or other
+ constructs do not. Use this option when invoking a SELECT or other
specific SQL construct where COMMIT is desired (typically when
calling stored procedures and such).
@@ -2436,6 +2447,27 @@ class Executable(_Generative):
"""
self._execution_options = self._execution_options.union(kw)
+ def execute(self, *multiparams, **params):
+ """Compile and execute this :class:`.Executable`."""
+
+ e = self.bind
+ if e is None:
+ label = getattr(self, 'description', self.__class__.__name__)
+ msg = ('This %s is not bound and does not support direct '
+ 'execution. Supply this statement to a Connection or '
+ 'Engine for execution. Or, assign a bind to the statement '
+ 'or the Metadata of its underlying tables to enable '
+ 'implicit execution via this method.' % label)
+ raise exc.UnboundExecutionError(msg)
+ return e._execute_clauseelement(self, multiparams, params)
+
+ def scalar(self, *multiparams, **params):
+ """Compile and execute this :class:`.Executable`, returning the
+ result's scalar representation.
+
+ """
+ return self.execute(*multiparams, **params).scalar()
+
# legacy, some outside users may be calling this
_Executable = Executable
@@ -3653,8 +3685,7 @@ class _ScalarSelect(_Grouping):
def __init__(self, element):
self.element = element
- cols = list(element.c)
- self.type = cols[0].type
+ self.type = element._scalar_type()
@property
def columns(self):
@@ -3705,7 +3736,10 @@ class CompoundSelect(_SelectBaseMixin, FromClause):
self.selects.append(s.self_group(self))
_SelectBaseMixin.__init__(self, **kwargs)
-
+
+ def _scalar_type(self):
+ return self.selects[0]._scalar_type()
+
def self_group(self, against=None):
return _FromGrouping(self)
@@ -3878,6 +3912,11 @@ class Select(_SelectBaseMixin, FromClause):
return froms
+ def _scalar_type(self):
+ elem = self._raw_columns[0]
+ cols = list(elem._select_iterable)
+ return cols[0].type
+
@property
def froms(self):
"""Return the displayed list of FromClause elements."""
diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py
index c999ab786..bd4f70247 100644
--- a/lib/sqlalchemy/sql/util.py
+++ b/lib/sqlalchemy/sql/util.py
@@ -92,6 +92,31 @@ def find_columns(clause):
visitors.traverse(clause, {}, {'column':cols.add})
return cols
+def bind_values(clause):
+ """Return an ordered list of "bound" values in the given clause.
+
+ E.g.::
+
+ >>> expr = and_(
+ ... table.c.foo==5, table.c.foo==7
+ ... )
+ >>> bind_values(expr)
+ [5, 7]
+ """
+
+ v = []
+ def visit_bindparam(bind):
+ value = bind.value
+
+ # evaluate callables
+ if callable(value):
+ value = value()
+
+ v.append(value)
+
+ visitors.traverse(clause, {}, {'bindparam':visit_bindparam})
+ return v
+
def _quote_ddl_expr(element):
if isinstance(element, basestring):
element = element.replace("'", "''")
diff --git a/lib/sqlalchemy/test/requires.py b/lib/sqlalchemy/test/requires.py
index fefb00330..a9b84e85d 100644
--- a/lib/sqlalchemy/test/requires.py
+++ b/lib/sqlalchemy/test/requires.py
@@ -247,6 +247,12 @@ def sane_rowcount(fn):
skip_if(lambda: not testing.db.dialect.supports_sane_rowcount)
)
+def dbapi_lastrowid(fn):
+ return _chain_decorators_on(
+ fn,
+ fails_on_everything_except('mysql+mysqldb', 'mysql+oursql', 'sqlite+pysqlite')
+ )
+
def sane_multi_rowcount(fn):
return _chain_decorators_on(
fn,
@@ -257,7 +263,7 @@ def reflects_pk_names(fn):
"""Target driver reflects the name of primary key constraints."""
return _chain_decorators_on(
fn,
- fails_on_everything_except('postgresql')
+ fails_on_everything_except('postgresql', 'oracle')
)
def python2(fn):
diff --git a/lib/sqlalchemy/test/testing.py b/lib/sqlalchemy/test/testing.py
index 78cd74d22..41ba3038f 100644
--- a/lib/sqlalchemy/test/testing.py
+++ b/lib/sqlalchemy/test/testing.py
@@ -398,6 +398,7 @@ def uses_deprecated(*messages):
verbiage emitted by the sqlalchemy.util.deprecated decorator.
"""
+
def decorate(fn):
def safe(*args, **kw):
# todo: should probably be strict about this, too
@@ -435,8 +436,6 @@ def resetwarnings():
# warnings.simplefilter('error')
- if sys.version_info < (2, 4):
- warnings.filterwarnings('ignore', category=FutureWarning)
def global_cleanup_assertions():
"""Check things that have to be finalized at the end of a test suite.
diff --git a/lib/sqlalchemy/types.py b/lib/sqlalchemy/types.py
index 777353714..af7ef22e6 100644
--- a/lib/sqlalchemy/types.py
+++ b/lib/sqlalchemy/types.py
@@ -16,9 +16,9 @@ __all__ = [ 'TypeEngine', 'TypeDecorator', 'AbstractType', 'UserDefinedType',
'FLOAT', 'NUMERIC', 'DECIMAL', 'TIMESTAMP', 'DATETIME', 'CLOB',
'BLOB', 'BOOLEAN', 'SMALLINT', 'INTEGER', 'DATE', 'TIME',
'String', 'Integer', 'SmallInteger', 'BigInteger', 'Numeric',
- 'Float', 'DateTime', 'Date', 'Time', 'LargeBinary', 'Binary', 'Boolean',
- 'Unicode', 'MutableType', 'Concatenable', 'UnicodeText',
- 'PickleType', 'Interval', 'type_map', 'Enum' ]
+            'Float', 'DateTime', 'Date', 'Time', 'LargeBinary', 'Binary',
+            'Boolean', 'Unicode', 'MutableType', 'Concatenable',
+            'UnicodeText', 'PickleType', 'Interval', 'type_map', 'Enum' ]
import inspect
import datetime as dt
@@ -35,18 +35,13 @@ from sqlalchemy import util
from sqlalchemy import processors
import collections
+DefaultDialect = None
NoneType = type(None)
if util.jython:
import array
class AbstractType(Visitable):
- def __init__(self, *args, **kwargs):
- pass
-
- def compile(self, dialect):
- return dialect.type_compiler.process(self)
-
def copy_value(self, value):
return value
@@ -80,12 +75,14 @@ class AbstractType(Visitable):
This allows systems like the ORM to know if a column value can
be considered 'not changed' by comparing the identity of
- objects alone.
-
- Use the :class:`MutableType` mixin or override this method to
- return True in custom types that hold mutable values such as
- ``dict``, ``list`` and custom objects.
-
+ objects alone. Values such as dicts, lists which
+ are serialized into strings are examples of "mutable"
+ column structures.
+
+ When this method is overridden, :meth:`copy_value` should
+ also be supplied. The :class:`.MutableType` mixin
+ is recommended as a helper.
+
"""
return False
@@ -107,7 +104,8 @@ class AbstractType(Visitable):
@util.memoized_property
def _type_affinity(self):
- """Return a rudimental 'affinity' value expressing the general class of type."""
+ """Return a rudimental 'affinity' value expressing the general class
+ of type."""
typ = None
for t in self.__class__.__mro__:
@@ -120,7 +118,8 @@ class AbstractType(Visitable):
def _coerce_compared_value(self, op, value):
_coerced_type = type_map.get(type(value), NULLTYPE)
- if _coerced_type is NULLTYPE or _coerced_type._type_affinity is self._type_affinity:
+ if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
+ is self._type_affinity:
return self
else:
return _coerced_type
@@ -128,6 +127,30 @@ class AbstractType(Visitable):
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
+ def compile(self, dialect=None):
+ # arg, return value is inconsistent with
+ # ClauseElement.compile()....this is a mistake.
+
+ if not dialect:
+ global DefaultDialect
+ if DefaultDialect is None:
+ from sqlalchemy.engine.default import DefaultDialect
+ dialect = DefaultDialect()
+
+ return dialect.type_compiler.process(self)
+
+ def __str__(self):
+ # Py3K
+ #return unicode(self.compile())
+ # Py2K
+ return unicode(self.compile()).encode('ascii', 'backslashreplace')
+ # end Py2K
+
+ def __init__(self, *args, **kwargs):
+ # supports getargspec of the __init__ method
+ # used by generic __repr__
+ pass
+
def __repr__(self):
return "%s(%s)" % (
self.__class__.__name__,
@@ -142,12 +165,12 @@ class TypeEngine(AbstractType):
return {}
def dialect_impl(self, dialect, **kwargs):
- key = (dialect.__class__, dialect.server_version_info)
-
+ key = dialect.__class__, dialect.server_version_info
try:
return self._impl_dict[key]
except KeyError:
- return self._impl_dict.setdefault(key, dialect.type_descriptor(self))
+ return self._impl_dict.setdefault(key,
+ dialect.type_descriptor(self))
def __getstate__(self):
d = self.__dict__.copy()
@@ -268,11 +291,11 @@ class TypeDecorator(AbstractType):
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
- Types that receive a Python type that isn't similar to the
- ultimate type used may want to define the :meth:`TypeDecorator.coerce_compared_value`
- method. This is used to give the expression system a hint
- when coercing Python objects into bind parameters within expressions.
- Consider this expression::
+ Types that receive a Python type that isn't similar to the ultimate type
+ used may want to define the :meth:`TypeDecorator.coerce_compared_value`
+ method. This is used to give the expression system a hint when coercing
+ Python objects into bind parameters within expressions. Consider this
+ expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
@@ -282,10 +305,10 @@ class TypeDecorator(AbstractType):
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
- However, in the case of ``TypeDecorator``, we are usually changing
- an incoming Python type to something new - ``TypeDecorator`` by
- default will "coerce" the non-typed side to be the same type as itself.
- Such as below, we define an "epoch" type that stores a date value as an integer::
+ However, in the case of ``TypeDecorator``, we are usually changing an
+ incoming Python type to something new - ``TypeDecorator`` by default will
+ "coerce" the non-typed side to be the same type as itself. Such as below,
+ we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
@@ -301,10 +324,11 @@ class TypeDecorator(AbstractType):
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
- This behavior can be overridden via the :meth:`~TypeDecorator.coerce_compared_value`
- method, which returns a type that should be used for the value of the expression.
- Below we set it such that an integer value will be treated as an ``Integer``,
- and any other value is assumed to be a date and will be treated as a ``MyEpochType``::
+ This behavior can be overridden via the
+ :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
+ that should be used for the value of the expression. Below we set it such
+ that an integer value will be treated as an ``Integer``, and any other
+ value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
@@ -318,8 +342,10 @@ class TypeDecorator(AbstractType):
def __init__(self, *args, **kwargs):
if not hasattr(self.__class__, 'impl'):
- raise AssertionError("TypeDecorator implementations require a class-level "
- "variable 'impl' which refers to the class of type being decorated")
+ raise AssertionError("TypeDecorator implementations "
+ "require a class-level variable "
+ "'impl' which refers to the class of "
+ "type being decorated")
self.impl = self.__class__.impl(*args, **kwargs)
def adapt(self, cls):
@@ -346,8 +372,10 @@ class TypeDecorator(AbstractType):
typedesc = self.load_dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
- raise AssertionError("Type object %s does not properly implement the copy() "
- "method, it must return an object of type %s" % (self, self.__class__))
+ raise AssertionError('Type object %s does not properly '
+ 'implement the copy() method, it must '
+ 'return an object of type %s' % (self,
+ self.__class__))
tt.impl = typedesc
self._impl_dict[key] = tt
return tt
@@ -376,7 +404,8 @@ class TypeDecorator(AbstractType):
return dialect.type_descriptor(self.impl)
def __getattr__(self, key):
- """Proxy all other undefined accessors to the underlying implementation."""
+ """Proxy all other undefined accessors to the underlying
+ implementation."""
return getattr(self.impl, key)
@@ -387,29 +416,36 @@ class TypeDecorator(AbstractType):
raise NotImplementedError()
def bind_processor(self, dialect):
- if self.__class__.process_bind_param.func_code is not TypeDecorator.process_bind_param.func_code:
+ if self.__class__.process_bind_param.func_code \
+ is not TypeDecorator.process_bind_param.func_code:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
+
else:
def process(value):
return process_param(value, dialect)
+
return process
else:
return self.impl.bind_processor(dialect)
def result_processor(self, dialect, coltype):
- if self.__class__.process_result_value.func_code is not TypeDecorator.process_result_value.func_code:
+ if self.__class__.process_result_value.func_code \
+ is not TypeDecorator.process_result_value.func_code:
process_value = self.process_result_value
- impl_processor = self.impl.result_processor(dialect, coltype)
+ impl_processor = self.impl.result_processor(dialect,
+ coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
+
else:
def process(value):
return process_value(value, dialect)
+
return process
else:
return self.impl.result_processor(dialect, coltype)
@@ -451,6 +487,19 @@ class TypeDecorator(AbstractType):
return self.impl.compare_values(x, y)
def is_mutable(self):
+ """Return True if the target Python type is 'mutable'.
+
+ This allows systems like the ORM to know if a column value can
+ be considered 'not changed' by comparing the identity of
+ objects alone. Values such as dicts, lists which
+ are serialized into strings are examples of "mutable"
+ column structures.
+
+ When this method is overridden, :meth:`copy_value` should
+ also be supplied. The :class:`.MutableType` mixin
+ is recommended as a helper.
+
+ """
return self.impl.is_mutable()
def _adapt_expression(self, op, othertype):
@@ -528,7 +577,12 @@ class MutableType(object):
"""
def is_mutable(self):
- """Return True, mutable."""
+ """Return True if the target Python type is 'mutable'.
+
+ For :class:`.MutableType`, this method is set to
+ return ``True``.
+
+ """
return True
def copy_value(self, value):
@@ -593,10 +647,12 @@ class NullType(TypeEngine):
NullTypeEngine = NullType
class Concatenable(object):
- """A mixin that marks a type as supporting 'concatenation', typically strings."""
+ """A mixin that marks a type as supporting 'concatenation',
+ typically strings."""
def _adapt_expression(self, op, othertype):
- if op is operators.add and issubclass(othertype._type_affinity, (Concatenable, NullType)):
+ if op is operators.add and issubclass(othertype._type_affinity,
+ (Concatenable, NullType)):
return operators.concat_op, self
else:
return op, self
@@ -604,8 +660,9 @@ class Concatenable(object):
class _DateAffinity(object):
"""Mixin date/time specific expression adaptations.
- Rules are implemented within Date,Time,Interval,DateTime, Numeric, Integer.
- Based on http://www.postgresql.org/docs/current/static/functions-datetime.html.
+    Rules are implemented within Date, Time, Interval, DateTime,
+    Numeric, Integer. Based on
+    http://www.postgresql.org/docs/current/static/functions-datetime.html.
"""
@@ -673,11 +730,11 @@ class String(Concatenable, TypeEngine):
set convert_unicode='force'. This will incur significant
performance overhead when fetching unicode result columns.
- :param assert_unicode: Deprecated. A warning is raised in all cases when a non-Unicode
- object is passed when SQLAlchemy would coerce into an encoding
- (note: but **not** when the DBAPI handles unicode objects natively).
- To suppress or raise this warning to an
- error, use the Python warnings filter documented at:
+ :param assert_unicode: Deprecated. A warning is raised in all cases
+ when a non-Unicode object is passed when SQLAlchemy would coerce
+ into an encoding (note: but **not** when the DBAPI handles unicode
+ objects natively). To suppress or raise this warning to an error,
+ use the Python warnings filter documented at:
http://docs.python.org/library/warnings.html
:param unicode_error: Optional, a method to use to handle Unicode
@@ -699,12 +756,14 @@ class String(Concatenable, TypeEngine):
"when unicode_error is set.")
if assert_unicode:
- util.warn_deprecated("assert_unicode is deprecated. "
- "SQLAlchemy emits a warning in all cases where it "
- "would otherwise like to encode a Python unicode object "
- "into a specific encoding but a plain bytestring is received. "
- "This does *not* apply to DBAPIs that coerce Unicode natively."
- )
+ util.warn_deprecated('assert_unicode is deprecated. '
+ 'SQLAlchemy emits a warning in all '
+ 'cases where it would otherwise like '
+ 'to encode a Python unicode object '
+ 'into a specific encoding but a plain '
+ 'bytestring is received. This does '
+ '*not* apply to DBAPIs that coerce '
+ 'Unicode natively.')
self.length = length
self.convert_unicode = convert_unicode
self.unicode_error = unicode_error
@@ -720,7 +779,8 @@ class String(Concatenable, TypeEngine):
def bind_processor(self, dialect):
if self.convert_unicode or dialect.convert_unicode:
- if dialect.supports_unicode_binds and self.convert_unicode != 'force':
+ if dialect.supports_unicode_binds and \
+ self.convert_unicode != 'force':
if self._warn_on_bytestring:
def process(value):
# Py3K
@@ -948,7 +1008,8 @@ class Numeric(_DateAffinity, TypeEngine):
"""
Construct a Numeric.
- :param precision: the numeric precision for use in DDL ``CREATE TABLE``.
+ :param precision: the numeric precision for use in DDL ``CREATE
+ TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
@@ -1000,18 +1061,21 @@ class Numeric(_DateAffinity, TypeEngine):
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
- util.warn("Dialect %s+%s does *not* support Decimal objects natively, "
- "and SQLAlchemy must convert from floating point - "
- "rounding errors and other issues may occur. "
- "Please consider storing Decimal numbers as strings or "
- "integers on this platform for lossless storage." %
- (dialect.name, dialect.driver))
+ util.warn('Dialect %s+%s does *not* support Decimal '
+ 'objects natively, and SQLAlchemy must '
+ 'convert from floating point - rounding '
+ 'errors and other issues may occur. Please '
+ 'consider storing Decimal numbers as strings '
+ 'or integers on this platform for lossless '
+ 'storage.' % (dialect.name, dialect.driver))
# we're a "numeric", DBAPI returns floats, convert.
if self.scale is not None:
- return processors.to_decimal_processor_factory(_python_Decimal, self.scale)
+ return processors.to_decimal_processor_factory(
+ _python_Decimal, self.scale)
else:
- return processors.to_decimal_processor_factory(_python_Decimal)
+ return processors.to_decimal_processor_factory(
+ _python_Decimal)
else:
if dialect.supports_native_decimal:
return processors.to_float
@@ -1060,7 +1124,8 @@ class Float(Numeric):
"""
Construct a Float.
- :param precision: the numeric precision for use in DDL ``CREATE TABLE``.
+ :param precision: the numeric precision for use in DDL ``CREATE
+ TABLE``.
:param asdecimal: the same flag as that of :class:`Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
@@ -1277,7 +1342,8 @@ class Binary(LargeBinary):
"""Deprecated. Renamed to LargeBinary."""
def __init__(self, *arg, **kw):
- util.warn_deprecated("The Binary type has been renamed to LargeBinary.")
+ util.warn_deprecated('The Binary type has been renamed to '
+ 'LargeBinary.')
LargeBinary.__init__(self, *arg, **kw)
class SchemaType(object):
@@ -1295,36 +1361,26 @@ class SchemaType(object):
self.schema = kw.pop('schema', None)
self.metadata = kw.pop('metadata', None)
if self.metadata:
- self.metadata.append_ddl_listener(
- 'before-create',
- util.portable_instancemethod(self._on_metadata_create)
- )
- self.metadata.append_ddl_listener(
- 'after-drop',
- util.portable_instancemethod(self._on_metadata_drop)
- )
+ self.metadata.append_ddl_listener('before-create',
+ util.portable_instancemethod(self._on_metadata_create))
+ self.metadata.append_ddl_listener('after-drop',
+ util.portable_instancemethod(self._on_metadata_drop))
def _set_parent(self, column):
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _set_table(self, table, column):
- table.append_ddl_listener(
- 'before-create',
- util.portable_instancemethod(self._on_table_create)
- )
- table.append_ddl_listener(
- 'after-drop',
- util.portable_instancemethod(self._on_table_drop)
- )
+ table.append_ddl_listener('before-create',
+ util.portable_instancemethod(
+ self._on_table_create))
+ table.append_ddl_listener('after-drop',
+ util.portable_instancemethod(
+ self._on_table_drop))
if self.metadata is None:
- table.metadata.append_ddl_listener(
- 'before-create',
- util.portable_instancemethod(self._on_metadata_create)
- )
- table.metadata.append_ddl_listener(
- 'after-drop',
- util.portable_instancemethod(self._on_metadata_drop)
- )
+ table.metadata.append_ddl_listener('before-create',
+ util.portable_instancemethod(self._on_metadata_create))
+ table.metadata.append_ddl_listener('after-drop',
+ util.portable_instancemethod(self._on_metadata_drop))
@property
def bind(self):
@@ -1386,40 +1442,42 @@ class Enum(String, SchemaType):
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
- :param \*enums: string or unicode enumeration labels. If unicode labels
- are present, the `convert_unicode` flag is auto-enabled.
-
- :param convert_unicode: Enable unicode-aware bind parameter and result-set
- processing for this Enum's data. This is set automatically based on
- the presence of unicode label strings.
-
- :param metadata: Associate this type directly with a ``MetaData`` object.
- For types that exist on the target database as an independent schema
- construct (Postgresql), this type will be created and dropped within
- ``create_all()`` and ``drop_all()`` operations. If the type is not
- associated with any ``MetaData`` object, it will associate itself with
- each ``Table`` in which it is used, and will be created when any of
- those individual tables are created, after a check is performed for
- it's existence. The type is only dropped when ``drop_all()`` is called
- for that ``Table`` object's metadata, however.
-
- :param name: The name of this type. This is required for Postgresql and
- any future supported database which requires an explicitly named type,
- or an explicitly named constraint in order to generate the type and/or
- a table that uses it.
-
- :param native_enum: Use the database's native ENUM type when available.
- Defaults to True. When False, uses VARCHAR + check constraint
- for all backends.
-
- :param schema: Schemaname of this type. For types that exist on the target
- database as an independent schema construct (Postgresql), this
- parameter specifies the named schema in which the type is present.
-
- :param quote: Force quoting to be on or off on the type's name. If left as
- the default of `None`, the usual schema-level "case
- sensitive"/"reserved name" rules are used to determine if this type's
- name should be quoted.
+ :param \*enums: string or unicode enumeration labels. If unicode
+ labels are present, the `convert_unicode` flag is auto-enabled.
+
+ :param convert_unicode: Enable unicode-aware bind parameter and
+ result-set processing for this Enum's data. This is set
+ automatically based on the presence of unicode label strings.
+
+ :param metadata: Associate this type directly with a ``MetaData``
+ object. For types that exist on the target database as an
+ independent schema construct (Postgresql), this type will be
+ created and dropped within ``create_all()`` and ``drop_all()``
+ operations. If the type is not associated with any ``MetaData``
+ object, it will associate itself with each ``Table`` in which it is
+ used, and will be created when any of those individual tables are
+          created, after a check is performed for its existence. The type is
+ only dropped when ``drop_all()`` is called for that ``Table``
+ object's metadata, however.
+
+ :param name: The name of this type. This is required for Postgresql
+ and any future supported database which requires an explicitly
+ named type, or an explicitly named constraint in order to generate
+ the type and/or a table that uses it.
+
+ :param native_enum: Use the database's native ENUM type when
+ available. Defaults to True. When False, uses VARCHAR + check
+ constraint for all backends.
+
+        :param schema: Schema name of this type. For types that exist on the
+ target database as an independent schema construct (Postgresql),
+ this parameter specifies the named schema in which the type is
+ present.
+
+ :param quote: Force quoting to be on or off on the type's name. If
+ left as the default of `None`, the usual schema-level "case
+ sensitive"/"reserved name" rules are used to determine if this
+ type's name should be quoted.
"""
self.enums = enums
@@ -1455,7 +1513,8 @@ class Enum(String, SchemaType):
e = schema.CheckConstraint(
column.in_(self.enums),
name=self.name,
- _create_rule=util.portable_instancemethod(self._should_create_constraint)
+ _create_rule=util.portable_instancemethod(
+ self._should_create_constraint)
)
table.append_constraint(e)
@@ -1487,7 +1546,8 @@ class PickleType(MutableType, TypeDecorator):
impl = LargeBinary
- def __init__(self, protocol=pickle.HIGHEST_PROTOCOL, pickler=None, mutable=True, comparator=None):
+ def __init__(self, protocol=pickle.HIGHEST_PROTOCOL,
+ pickler=None, mutable=True, comparator=None):
"""
Construct a PickleType.
@@ -1548,7 +1608,8 @@ class PickleType(MutableType, TypeDecorator):
def copy_value(self, value):
if self.mutable:
- return self.pickler.loads(self.pickler.dumps(value, self.protocol))
+ return self.pickler.loads(
+ self.pickler.dumps(value, self.protocol))
else:
return value
@@ -1559,6 +1620,13 @@ class PickleType(MutableType, TypeDecorator):
return x == y
def is_mutable(self):
+ """Return True if the target Python type is 'mutable'.
+
+ When this method is overridden, :meth:`copy_value` should
+ also be supplied. The :class:`.MutableType` mixin
+ is recommended as a helper.
+
+ """
return self.mutable
@@ -1596,7 +1664,8 @@ class Boolean(TypeEngine, SchemaType):
e = schema.CheckConstraint(
column.in_([0, 1]),
name=self.name,
- _create_rule=util.portable_instancemethod(self._should_create_constraint)
+ _create_rule=util.portable_instancemethod(
+ self._should_create_constraint)
)
table.append_constraint(e)
@@ -1614,12 +1683,12 @@ class Interval(_DateAffinity, TypeDecorator):
value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
- Note that the ``Interval`` type does not currently provide
- date arithmetic operations on platforms which do not support
- interval types natively. Such operations usually require
- transformation of both sides of the expression (such as, conversion
- of both sides into integer epoch values first) which currently
- is a manual procedure (such as via :attr:`~sqlalchemy.sql.expression.func`).
+ Note that the ``Interval`` type does not currently provide date arithmetic
+ operations on platforms which do not support interval types natively. Such
+ operations usually require transformation of both sides of the expression
+ (such as, conversion of both sides into integer epoch values first) which
+ currently is a manual procedure (such as via
+ :attr:`~sqlalchemy.sql.expression.func`).
"""
@@ -1842,6 +1911,9 @@ STRINGTYPE = String()
# using VARCHAR/NCHAR so that we dont get the genericized "String"
# type which usually resolves to TEXT/CLOB
+# NOTE: this dict is not meant to be public and will be underscored
+# in 0.7, see [ticket:1870].
+
type_map = {
str: String(),
# Py3K
diff --git a/lib/sqlalchemy/util.py b/lib/sqlalchemy/util.py
index 7eb0a522f..10931be5e 100644
--- a/lib/sqlalchemy/util.py
+++ b/lib/sqlalchemy/util.py
@@ -176,6 +176,32 @@ class frozendict(dict):
def __repr__(self):
return "frozendict(%s)" % dict.__repr__(self)
+
+# find or create a dict implementation that supports __missing__
+class _probe(dict):
+ def __missing__(self, key):
+ return 1
+
+try:
+ try:
+ _probe()['missing']
+ py25_dict = dict
+ except KeyError:
+ class py25_dict(dict):
+ def __getitem__(self, key):
+ try:
+ return dict.__getitem__(self, key)
+ except KeyError:
+ try:
+ missing = self.__missing__
+ except AttributeError:
+ raise KeyError(key)
+ else:
+ return missing(key)
+finally:
+ del _probe
+
+
def to_list(x, default=None):
if x is None:
return default
@@ -1242,16 +1268,30 @@ class UniqueAppender(object):
class ScopedRegistry(object):
"""A Registry that can store one or multiple instances of a single
- class on a per-thread scoped basis, or on a customized scope.
+ class on the basis of a "scope" function.
+
+ The object implements ``__call__`` as the "getter", so by
+ calling ``myregistry()`` the contained object is returned
+ for the current scope.
- createfunc
+ :param createfunc:
a callable that returns a new object to be placed in the registry
- scopefunc
+ :param scopefunc:
a callable that will return a key to store/retrieve an object.
"""
def __init__(self, createfunc, scopefunc):
+ """Construct a new :class:`.ScopedRegistry`.
+
+ :param createfunc: A creation function that will generate
+ a new value for the current scope, if none is present.
+
+ :param scopefunc: A function that returns a hashable
+ token representing the current scope (such as, current
+ thread identifier).
+
+ """
self.createfunc = createfunc
self.scopefunc = scopefunc
self.registry = {}
@@ -1264,18 +1304,28 @@ class ScopedRegistry(object):
return self.registry.setdefault(key, self.createfunc())
def has(self):
+ """Return True if an object is present in the current scope."""
+
return self.scopefunc() in self.registry
def set(self, obj):
+ """Set the value for the current scope."""
+
self.registry[self.scopefunc()] = obj
def clear(self):
+ """Clear the current scope, if any."""
+
try:
del self.registry[self.scopefunc()]
except KeyError:
pass
class ThreadLocalRegistry(ScopedRegistry):
+ """A :class:`.ScopedRegistry` that uses a ``threading.local()``
+ variable for storage.
+
+ """
def __init__(self, createfunc):
self.createfunc = createfunc
self.registry = threading.local()
@@ -1434,6 +1484,7 @@ def function_named(fn, name):
fn.func_defaults, fn.func_closure)
return fn
+
class memoized_property(object):
"""A read-only @property that is only evaluated once."""
def __init__(self, fget, doc=None):
@@ -1478,6 +1529,24 @@ class memoized_instancemethod(object):
def reset_memoized(instance, name):
instance.__dict__.pop(name, None)
+
+class group_expirable_memoized_property(object):
+ """A family of @memoized_properties that can be expired in tandem."""
+
+ def __init__(self):
+ self.attributes = []
+
+ def expire_instance(self, instance):
+ """Expire all memoized properties for *instance*."""
+ stash = instance.__dict__
+ for attribute in self.attributes:
+ stash.pop(attribute, None)
+
+ def __call__(self, fn):
+ self.attributes.append(fn.__name__)
+ return memoized_property(fn)
+
+
class WeakIdentityMapping(weakref.WeakKeyDictionary):
"""A WeakKeyDictionary with an object identity index.
diff --git a/setup.py b/setup.py
index 3b64bc7f7..76cba0584 100644
--- a/setup.py
+++ b/setup.py
@@ -56,7 +56,8 @@ elif BUILD_CEXTENSIONS:
def find_packages(dir_):
packages = []
- for _dir, subdirectories, files in os.walk(os.path.join(dir_, 'sqlalchemy')):
+ for _dir, subdirectories, files in os.walk(os.path.join(dir_,
+ 'sqlalchemy')):
if '__init__.py' in files:
lib, fragment = _dir.split(os.sep, 1)
packages.append(fragment.replace(os.sep, '.'))
@@ -65,8 +66,10 @@ def find_packages(dir_):
if sys.version_info < (2, 4):
raise Exception("SQLAlchemy requires Python 2.4 or higher.")
-v = open(os.path.join(os.path.dirname(__file__), 'lib', 'sqlalchemy', '__init__.py'))
-VERSION = re.compile(r".*__version__ = '(.*?)'", re.S).match(v.read()).group(1)
+v = open(os.path.join(os.path.dirname(__file__), 'lib', 'sqlalchemy',
+ '__init__.py'))
+VERSION = re.compile(r".*__version__ = '(.*?)'",
+ re.S).match(v.read()).group(1)
v.close()
setup(name = "SQLAlchemy",
@@ -94,28 +97,123 @@ setup(name = "SQLAlchemy",
long_description = """\
SQLAlchemy is:
- * The Python SQL toolkit and Object Relational Mapper that gives application developers the full power and flexibility of SQL. SQLAlchemy provides a full suite of well known enterprise-level persistence patterns, designed for efficient and high-performing database access, adapted into a simple and Pythonic domain language.
- * extremely easy to use for all the basic tasks, such as: accessing pooled connections, constructing SQL from Python expressions, finding object instances, and commiting object modifications back to the database.
- * powerful enough for complicated tasks, such as: eager load a graph of objects and their dependencies via joins; map recursive adjacency structures automatically; map objects to not just tables but to any arbitrary join or select statement; combine multiple tables together to load whole sets of otherwise unrelated objects from a single result set; commit entire graphs of object changes in one step.
- * built to conform to what DBAs demand, including the ability to swap out generated SQL with hand-optimized statements, full usage of bind parameters for all literal values, fully transactionalized and consistent updates using Unit of Work.
- * modular. Different parts of SQLAlchemy can be used independently of the rest, including the connection pool, SQL construction, and ORM. SQLAlchemy is constructed in an open style that allows plenty of customization, with an architecture that supports custom datatypes, custom SQL extensions, and ORM plugins which can augment or extend mapping functionality.
+ * The Python SQL toolkit and Object Relational Mapper
+ that gives application developers the full power and
+ flexibility of SQL. SQLAlchemy provides a full suite
+ of well known enterprise-level persistence patterns,
+ designed for efficient and high-performing database
+ access, adapted into a simple and Pythonic domain
+ language.
+ * extremely easy to use for all the basic tasks, such
+ as: accessing pooled connections, constructing SQL
+ from Python expressions, finding object instances, and
+     committing object modifications back to the database.
+ * powerful enough for complicated tasks, such as: eager
+ load a graph of objects and their dependencies via
+ joins; map recursive adjacency structures
+ automatically; map objects to not just tables but to
+ any arbitrary join or select statement; combine
+ multiple tables together to load whole sets of
+ otherwise unrelated objects from a single result set;
+ commit entire graphs of object changes in one step.
+ * built to conform to what DBAs demand, including the
+ ability to swap out generated SQL with hand-optimized
+ statements, full usage of bind parameters for all
+ literal values, fully transactionalized and consistent
+ updates using Unit of Work.
+ * modular. Different parts of SQLAlchemy can be used
+ independently of the rest, including the connection
+ pool, SQL construction, and ORM. SQLAlchemy is
+ constructed in an open style that allows plenty of
+ customization, with an architecture that supports
+ custom datatypes, custom SQL extensions, and ORM
+ plugins which can augment or extend mapping
+ functionality.
SQLAlchemy's Philosophy:
- * SQL databases behave less and less like object collections the more size and performance start to matter; object collections behave less and less like tables and rows the more abstraction starts to matter. SQLAlchemy aims to accomodate both of these principles.
- * Your classes aren't tables, and your objects aren't rows. Databases aren't just collections of tables; they're relational algebra engines. You don't have to select from just tables, you can select from joins, subqueries, and unions. Database and domain concepts should be visibly decoupled from the beginning, allowing both sides to develop to their full potential.
- * For example, table metadata (objects that describe tables) are declared distinctly from the classes theyre designed to store. That way database relationship concepts don't interfere with your object design concepts, and vice-versa; the transition from table-mapping to selectable-mapping is seamless; a class can be mapped against the database in more than one way. SQLAlchemy provides a powerful mapping layer that can work as automatically or as manually as you choose, determining relationships based on foreign keys or letting you define the join conditions explicitly, to bridge the gap between database and domain.
+ * SQL databases behave less and less like object
+ collections the more size and performance start to
+ matter; object collections behave less and less like
+ tables and rows the more abstraction starts to matter.
+     SQLAlchemy aims to accommodate both of these
+ principles.
+ * Your classes aren't tables, and your objects aren't
+ rows. Databases aren't just collections of tables;
+ they're relational algebra engines. You don't have to
+ select from just tables, you can select from joins,
+ subqueries, and unions. Database and domain concepts
+ should be visibly decoupled from the beginning,
+ allowing both sides to develop to their full
+ potential.
+ * For example, table metadata (objects that describe
+ tables) are declared distinctly from the classes
+     they're designed to store. That way database
+ relationship concepts don't interfere with your object
+ design concepts, and vice-versa; the transition from
+ table-mapping to selectable-mapping is seamless; a
+ class can be mapped against the database in more than
+ one way. SQLAlchemy provides a powerful mapping layer
+ that can work as automatically or as manually as you
+ choose, determining relationships based on foreign
+ keys or letting you define the join conditions
+ explicitly, to bridge the gap between database and
+ domain.
SQLAlchemy's Advantages:
- * The Unit Of Work system organizes pending CRUD operations into queues and commits them all in one batch. It then performs a topological "dependency sort" of all items to be committed and deleted and groups redundant statements together. This produces the maxiumum efficiency and transaction safety, and minimizes chances of deadlocks. Modeled after Fowler's "Unit of Work" pattern as well as Java Hibernate.
- * Function-based query construction allows boolean expressions, operators, functions, table aliases, selectable subqueries, create/update/insert/delete queries, correlated updates, correlated EXISTS clauses, UNION clauses, inner and outer joins, bind parameters, free mixing of literal text within expressions, as little or as much as desired. Query-compilation is vendor-specific; the same query object can be compiled into any number of resulting SQL strings depending on its compilation algorithm.
- * Database mapping and class design are totally separate. Persisted objects have no subclassing requirement (other than 'object') and are POPO's : plain old Python objects. They retain serializability (pickling) for usage in various caching systems and session objects. SQLAlchemy "decorates" classes with non-intrusive property accessors to automatically log object creates and modifications with the UnitOfWork engine, to lazyload related data, as well as to track attribute change histories.
- * Custom list classes can be used with eagerly or lazily loaded child object lists, allowing rich relationships to be created on the fly as SQLAlchemy appends child objects to an object attribute.
- * Composite (multiple-column) primary keys are supported, as are "association" objects that represent the middle of a "many-to-many" relationship.
- * Self-referential tables and mappers are supported. Adjacency list structures can be created, saved, and deleted with proper cascading, with no extra programming.
- * Data mapping can be used in a row-based manner. Any bizarre hyper-optimized query that you or your DBA can cook up, you can run in SQLAlchemy, and as long as it returns the expected columns within a rowset, you can get your objects from it. For a rowset that contains more than one kind of object per row, multiple mappers can be chained together to return multiple object instance lists from a single database round trip.
- * The type system allows pre- and post- processing of data, both at the bind parameter and the result set level. User-defined types can be freely mixed with built-in types. Generic types as well as SQL-specific types are available.
+ * The Unit Of Work system organizes pending CRUD
+ operations into queues and commits them all in one
+ batch. It then performs a topological "dependency
+ sort" of all items to be committed and deleted and
+ groups redundant statements together. This produces
+     the maximum efficiency and transaction safety, and
+ minimizes chances of deadlocks. Modeled after Fowler's
+ "Unit of Work" pattern as well as Java Hibernate.
+ * Function-based query construction allows boolean
+ expressions, operators, functions, table aliases,
+ selectable subqueries, create/update/insert/delete
+ queries, correlated updates, correlated EXISTS
+ clauses, UNION clauses, inner and outer joins, bind
+ parameters, free mixing of literal text within
+ expressions, as little or as much as desired.
+ Query-compilation is vendor-specific; the same query
+ object can be compiled into any number of resulting
+ SQL strings depending on its compilation algorithm.
+ * Database mapping and class design are totally
+ separate. Persisted objects have no subclassing
+ requirement (other than 'object') and are POPO's :
+ plain old Python objects. They retain serializability
+ (pickling) for usage in various caching systems and
+ session objects. SQLAlchemy "decorates" classes with
+ non-intrusive property accessors to automatically log
+ object creates and modifications with the UnitOfWork
+ engine, to lazyload related data, as well as to track
+ attribute change histories.
+ * Custom list classes can be used with eagerly or lazily
+ loaded child object lists, allowing rich relationships
+ to be created on the fly as SQLAlchemy appends child
+ objects to an object attribute.
+ * Composite (multiple-column) primary keys are
+ supported, as are "association" objects that represent
+ the middle of a "many-to-many" relationship.
+ * Self-referential tables and mappers are supported.
+ Adjacency list structures can be created, saved, and
+ deleted with proper cascading, with no extra
+ programming.
+ * Data mapping can be used in a row-based manner. Any
+ bizarre hyper-optimized query that you or your DBA can
+ cook up, you can run in SQLAlchemy, and as long as it
+ returns the expected columns within a rowset, you can
+ get your objects from it. For a rowset that contains
+ more than one kind of object per row, multiple mappers
+ can be chained together to return multiple object
+ instance lists from a single database round trip.
+ * The type system allows pre- and post- processing of
+ data, both at the bind parameter and the result set
+ level. User-defined types can be freely mixed with
+ built-in types. Generic types as well as SQL-specific
+ types are available.
""",
classifiers = [
diff --git a/test/aaa_profiling/test_zoomark_orm.py b/test/aaa_profiling/test_zoomark_orm.py
index 0b699eead..3e30efa24 100644
--- a/test/aaa_profiling/test_zoomark_orm.py
+++ b/test/aaa_profiling/test_zoomark_orm.py
@@ -366,7 +366,7 @@ class ZooMarkTest(TestBase):
def test_profile_5_aggregates(self):
self.test_baseline_5_aggregates()
- @profiling.function_call_count(3172)
+ @profiling.function_call_count(2929)
def test_profile_6_editing(self):
self.test_baseline_6_editing()
diff --git a/test/dialect/test_firebird.py b/test/dialect/test_firebird.py
index a9b9fa262..41a50e6a3 100644
--- a/test/dialect/test_firebird.py
+++ b/test/dialect/test_firebird.py
@@ -94,7 +94,8 @@ class DomainReflectionTest(TestBase, AssertsExecutionResults):
class BuggyDomainReflectionTest(TestBase, AssertsExecutionResults):
- "Test Firebird domains, see [ticket:1663] and http://tracker.firebirdsql.org/browse/CORE-356"
+ """Test Firebird domains (and some other reflection bumps),
+ see [ticket:1663] and http://tracker.firebirdsql.org/browse/CORE-356"""
__only_on__ = 'firebird'
@@ -168,6 +169,12 @@ CREATE DOMAIN DOM_ID INTEGER NOT NULL
CREATE TABLE A (
ID DOM_ID /* INTEGER NOT NULL */ DEFAULT 0 )
"""
+
+ # the 'default' keyword is lower case here
+ TABLE_B = """\
+CREATE TABLE B (
+ID DOM_ID /* INTEGER NOT NULL */ default 0 )
+"""
@classmethod
def setup_class(cls):
@@ -181,11 +188,13 @@ ID DOM_ID /* INTEGER NOT NULL */ DEFAULT 0 )
con.execute(cls.DOM_ID)
con.execute(cls.TABLE_A)
+ con.execute(cls.TABLE_B)
@classmethod
def teardown_class(cls):
con = testing.db.connect()
con.execute('DROP TABLE a')
+ con.execute("DROP TABLE b")
con.execute('DROP DOMAIN dom_id')
con.execute('DROP TABLE def_error_nodom')
con.execute('DROP TABLE def_error')
@@ -213,7 +222,14 @@ ID DOM_ID /* INTEGER NOT NULL */ DEFAULT 0 )
table_a = Table('a', metadata, autoload=True)
eq_(table_a.c.id.server_default.arg.text, "0")
+
+ def test_lowercase_default_name(self):
+ metadata = MetaData(testing.db)
+
+ table_b = Table('b', metadata, autoload=True)
+ eq_(table_b.c.id.server_default.arg.text, "0")
+
class CompileTest(TestBase, AssertsCompiledSQL):
diff --git a/test/dialect/test_oracle.py b/test/dialect/test_oracle.py
index 384066c41..29d18b988 100644
--- a/test/dialect/test_oracle.py
+++ b/test/dialect/test_oracle.py
@@ -1121,7 +1121,80 @@ class UnsupportedIndexReflectTest(TestBase):
'TEST_INDEX_REFLECT (UPPER(DATA))')
m2 = MetaData(testing.db)
t2 = Table('test_index_reflect', m2, autoload=True)
-
+
+class RoundTripIndexTest(TestBase):
+ __only_on__ = 'oracle'
+
+ def test_basic(self):
+ engine = testing.db
+ metadata = MetaData(engine)
+
+ table=Table("sometable", metadata,
+ Column("id_a", Unicode(255), primary_key=True),
+ Column("id_b", Unicode(255), primary_key=True, unique=True),
+ Column("group", Unicode(255), primary_key=True),
+ Column("col", Unicode(255)),
+ UniqueConstraint('col','group'),
+ )
+
+ # "group" is a keyword, so lower case
+ normalind = Index('tableind', table.c.id_b, table.c.group)
+
+ # create
+ metadata.create_all()
+ try:
+ # round trip, create from reflection
+ mirror = MetaData(engine)
+ mirror.reflect()
+ metadata.drop_all()
+ mirror.create_all()
+
+ # inspect the reflected creation
+ inspect = MetaData(engine)
+ inspect.reflect()
+
+ def obj_definition(obj):
+ return obj.__class__, tuple([c.name for c in
+ obj.columns]), getattr(obj, 'unique', None)
+
+ # find what the primary k constraint name should be
+ primaryconsname = engine.execute(
+ text("""SELECT constraint_name
+ FROM all_constraints
+ WHERE table_name = :table_name
+ AND owner = :owner
+ AND constraint_type = 'P' """),
+ table_name=table.name.upper(),
+ owner=engine.url.username.upper()).fetchall()[0][0]
+
+ reflectedtable = inspect.tables[table.name]
+
+ # make a dictionary of the reflected objects:
+
+ reflected = dict([(obj_definition(i), i) for i in
+ reflectedtable.indexes
+ | reflectedtable.constraints])
+
+ # assert we got primary key constraint and its name, Error
+ # if not in dict
+
+ assert reflected[(PrimaryKeyConstraint, ('id_a', 'id_b',
+ 'group'), None)].name.upper() \
+ == primaryconsname.upper()
+
+ # Error if not in dict
+
+ assert reflected[(Index, ('id_b', 'group'), False)].name \
+ == normalind.name
+ assert (Index, ('id_b', ), True) in reflected
+ assert (Index, ('col', 'group'), True) in reflected
+ assert len(reflectedtable.constraints) == 1
+ assert len(reflectedtable.indexes) == 3
+
+ finally:
+ metadata.drop_all()
+
+
class SequenceTest(TestBase, AssertsCompiledSQL):
diff --git a/test/dialect/test_postgresql.py b/test/dialect/test_postgresql.py
index a605594d4..9ad46c189 100644
--- a/test/dialect/test_postgresql.py
+++ b/test/dialect/test_postgresql.py
@@ -1725,7 +1725,7 @@ class ServerSideCursorsTest(TestBase, AssertsExecutionResults):
result.close()
result = \
sess.query(Foo).execution_options(stream_results=True).\
- subquery().execute()
+ statement.execute()
assert result.cursor.name
result.close()
finally:
diff --git a/test/engine/test_metadata.py b/test/engine/test_metadata.py
index 7ea753621..528d56244 100644
--- a/test/engine/test_metadata.py
+++ b/test/engine/test_metadata.py
@@ -1,8 +1,11 @@
-from sqlalchemy.test.testing import assert_raises, assert_raises_message
+from sqlalchemy.test.testing import assert_raises
+from sqlalchemy.test.testing import assert_raises_message
+from sqlalchemy.test.testing import emits_warning
+
import pickle
from sqlalchemy import Integer, String, UniqueConstraint, \
CheckConstraint, ForeignKey, MetaData, Sequence, \
- ForeignKeyConstraint, ColumnDefault
+ ForeignKeyConstraint, ColumnDefault, Index
from sqlalchemy.test.schema import Table, Column
from sqlalchemy import schema, exc
import sqlalchemy as tsa
@@ -246,6 +249,62 @@ class MetaDataTest(TestBase, ComparesTables):
eq_(str(table_c.join(table2_c).onclause),
'someschema.mytable.myid = someschema.othertable.myid')
+ def test_tometadata_kwargs(self):
+ meta = MetaData()
+
+ table = Table('mytable', meta,
+ Column('myid', Integer, primary_key=True),
+ mysql_engine='InnoDB',
+ )
+
+ meta2 = MetaData()
+ table_c = table.tometadata(meta2)
+
+ eq_(table.kwargs,table_c.kwargs)
+
+ def test_tometadata_indexes(self):
+ meta = MetaData()
+
+ table = Table('mytable', meta,
+ Column('id', Integer, primary_key=True),
+ Column('data1', Integer, index=True),
+ Column('data2', Integer),
+ )
+ Index('multi',table.c.data1,table.c.data2),
+
+ meta2 = MetaData()
+ table_c = table.tometadata(meta2)
+
+ def _get_key(i):
+ return [i.name,i.unique] + \
+ sorted(i.kwargs.items()) + \
+ i.columns.keys()
+
+ eq_(
+ sorted([_get_key(i) for i in table.indexes]),
+ sorted([_get_key(i) for i in table_c.indexes])
+ )
+
+ @emits_warning("Table '.+' already exists within the given MetaData")
+ def test_tometadata_already_there(self):
+
+ meta1 = MetaData()
+ table1 = Table('mytable', meta1,
+ Column('myid', Integer, primary_key=True),
+ )
+ meta2 = MetaData()
+ table2 = Table('mytable', meta2,
+ Column('yourid', Integer, primary_key=True),
+ )
+
+ meta3 = MetaData()
+
+ table_c = table1.tometadata(meta2)
+ table_d = table2.tometadata(meta2)
+
+ # d'oh!
+ assert table_c is table_d
+
def test_tometadata_default_schema(self):
meta = MetaData()
diff --git a/test/ext/test_declarative.py b/test/ext/test_declarative.py
index 71e31233b..65ec92ca2 100644
--- a/test/ext/test_declarative.py
+++ b/test/ext/test_declarative.py
@@ -369,11 +369,14 @@ class DeclarativeTest(DeclarativeTestBase):
hasattr(User.addresses, 'property')
- # the exeption is preserved
-
- assert_raises_message(sa.exc.InvalidRequestError,
- r"suppressed within a hasattr\(\)",
- compile_mappers)
+ # the exception is preserved. Remains the
+ # same through repeated calls.
+ for i in range(3):
+ assert_raises_message(sa.exc.InvalidRequestError,
+ "^One or more mappers failed to initialize - "
+ "can't proceed with initialization of other "
+ "mappers. Original exception was: When initializing.*",
+ compile_mappers)
def test_custom_base(self):
class MyBase(object):
@@ -1316,7 +1319,42 @@ class DeclarativeInheritanceTest(DeclarativeTestBase):
primary_language = Column('primary_language', String(50))
assert class_mapper(Engineer).inherits is class_mapper(Person)
+
+ @testing.fails_if(lambda: True, "Not implemented until 0.7")
+ def test_foreign_keys_with_col(self):
+ """Test that foreign keys that reference a literal 'id' subclass
+ 'id' attribute behave intuitively.
+
+ See ticket 1892.
+
+ """
+ class Booking(Base):
+ __tablename__ = 'booking'
+ id = Column(Integer, primary_key=True)
+
+ class PlanBooking(Booking):
+ __tablename__ = 'plan_booking'
+ id = Column(Integer, ForeignKey(Booking.id),
+ primary_key=True)
+
+ # referencing PlanBooking.id gives us the column
+ # on plan_booking, not booking
+ class FeatureBooking(Booking):
+ __tablename__ = 'feature_booking'
+ id = Column(Integer, ForeignKey(Booking.id),
+ primary_key=True)
+ plan_booking_id = Column(Integer,
+ ForeignKey(PlanBooking.id))
+
+ plan_booking = relationship(PlanBooking,
+ backref='feature_bookings')
+
+ assert FeatureBooking.__table__.c.plan_booking_id.\
+ references(PlanBooking.__table__.c.id)
+ assert FeatureBooking.__table__.c.id.\
+ references(Booking.__table__.c.id)
+
def test_with_undefined_foreignkey(self):
class Parent(Base):
diff --git a/test/orm/inheritance/test_basic.py b/test/orm/inheritance/test_basic.py
index b4aaf13ba..c6dec16b7 100644
--- a/test/orm/inheritance/test_basic.py
+++ b/test/orm/inheritance/test_basic.py
@@ -981,7 +981,8 @@ class OverrideColKeyTest(_base.MappedTest):
# s2 gets a new id, base_id is overwritten by the ultimate
# PK col
assert s2.id == s2.base_id != 15
-
+
+ @testing.emits_warning(r'Implicit')
def test_override_implicit(self):
# this is how the pattern looks intuitively when
# using declarative.
@@ -1143,7 +1144,9 @@ class OptimizedLoadTest(_base.MappedTest):
# redefine Sub's "id" to favor the "id" col in the subtable.
# "id" is also part of the primary join condition
- mapper(Sub, sub, inherits=Base, polymorphic_identity='sub', properties={'id':sub.c.id})
+ mapper(Sub, sub, inherits=Base,
+ polymorphic_identity='sub',
+ properties={'id':[sub.c.id, base.c.id]})
sess = sessionmaker()()
s1 = Sub(data='s1data', sub='s1sub')
sess.add(s1)
diff --git a/test/orm/inheritance/test_single.py b/test/orm/inheritance/test_single.py
index 4b7078eb5..2efde2b32 100644
--- a/test/orm/inheritance/test_single.py
+++ b/test/orm/inheritance/test_single.py
@@ -8,7 +8,7 @@ from test.orm._base import MappedTest, ComparableEntity
from sqlalchemy.test.schema import Table, Column
-class SingleInheritanceTest(MappedTest):
+class SingleInheritanceTest(testing.AssertsCompiledSQL, MappedTest):
@classmethod
def define_tables(cls, metadata):
Table('employees', metadata,
@@ -26,6 +26,7 @@ class SingleInheritanceTest(MappedTest):
@classmethod
def setup_classes(cls):
+ global Employee, Manager, Engineer, JuniorEngineer
class Employee(ComparableEntity):
pass
class Manager(Employee):
@@ -114,6 +115,31 @@ class SingleInheritanceTest(MappedTest):
# session.query(Employee.name, Manager.manager_data, Engineer.engineer_info).all(),
# []
# )
+
+ @testing.resolve_artifact_names
+ def test_from_self(self):
+ sess = create_session()
+ self.assert_compile(sess.query(Engineer).from_self(),
+ 'SELECT anon_1.employees_employee_id AS '
+ 'anon_1_employees_employee_id, '
+ 'anon_1.employees_name AS '
+ 'anon_1_employees_name, '
+ 'anon_1.employees_manager_data AS '
+ 'anon_1_employees_manager_data, '
+ 'anon_1.employees_engineer_info AS '
+ 'anon_1_employees_engineer_info, '
+ 'anon_1.employees_type AS '
+ 'anon_1_employees_type FROM (SELECT '
+ 'employees.employee_id AS '
+ 'employees_employee_id, employees.name AS '
+ 'employees_name, employees.manager_data AS '
+ 'employees_manager_data, '
+ 'employees.engineer_info AS '
+ 'employees_engineer_info, employees.type '
+ 'AS employees_type FROM employees) AS '
+ 'anon_1 WHERE anon_1.employees_type IN '
+ '(:type_1, :type_2)',
+ use_default_dialect=True)
@testing.resolve_artifact_names
def test_select_from(self):
@@ -182,6 +208,56 @@ class SingleInheritanceTest(MappedTest):
assert len(rq.join(Report.employee.of_type(Manager)).all()) == 1
assert len(rq.join(Report.employee.of_type(Engineer)).all()) == 0
+class RelationshipFromSingleTest(testing.AssertsCompiledSQL, MappedTest):
+ @classmethod
+ def define_tables(cls, metadata):
+ Table('employee', metadata,
+ Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('name', String(50)),
+ Column('type', String(20)),
+ )
+
+ Table('employee_stuff', metadata,
+ Column('id', Integer, primary_key=True, test_needs_autoincrement=True),
+ Column('employee_id', Integer, ForeignKey('employee.id')),
+ Column('name', String(50)),
+ )
+
+ @classmethod
+ def setup_classes(cls):
+ class Employee(ComparableEntity):
+ pass
+ class Manager(Employee):
+ pass
+ class Stuff(ComparableEntity):
+ pass
+
+ @testing.resolve_artifact_names
+ def test_subquery_load(self):
+ mapper(Employee, employee, polymorphic_on=employee.c.type, polymorphic_identity='employee')
+ mapper(Manager, inherits=Employee, polymorphic_identity='manager', properties={
+ 'stuff':relationship(Stuff)
+ })
+ mapper(Stuff, employee_stuff)
+
+ sess = create_session()
+ context = sess.query(Manager).options(subqueryload('stuff'))._compile_context()
+ subq = context.attributes[('subquery', (class_mapper(Employee), 'stuff'))]
+
+ self.assert_compile(subq,
+ 'SELECT employee_stuff.id AS '
+ 'employee_stuff_id, employee_stuff.employee'
+ '_id AS employee_stuff_employee_id, '
+ 'employee_stuff.name AS '
+ 'employee_stuff_name, anon_1.employee_id '
+ 'AS anon_1_employee_id FROM (SELECT '
+ 'employee.id AS employee_id FROM employee '
+ 'WHERE employee.type IN (:type_1)) AS anon_1 '
+ 'JOIN employee_stuff ON anon_1.employee_id '
+ '= employee_stuff.employee_id ORDER BY '
+ 'anon_1.employee_id',
+ use_default_dialect=True
+ )
class RelationshipToSingleTest(MappedTest):
@classmethod
diff --git a/test/orm/test_assorted_eager.py b/test/orm/test_assorted_eager.py
index 20736b8fe..0e389b74b 100644
--- a/test/orm/test_assorted_eager.py
+++ b/test/orm/test_assorted_eager.py
@@ -324,7 +324,7 @@ class EagerTest3(_base.MappedTest):
arb_data = sa.select(
[stats.c.data_id, sa.func.max(stats.c.somedata).label('max')],
stats.c.data_id <= 5,
- group_by=[stats.c.data_id]).alias('arb')
+ group_by=[stats.c.data_id])
arb_result = arb_data.execute().fetchall()
@@ -334,6 +334,8 @@ class EagerTest3(_base.MappedTest):
# extract just the "data_id" from it
arb_result = [row['data_id'] for row in arb_result]
+ arb_data = arb_data.alias('arb')
+
# now query for Data objects using that above select, adding the
# "order by max desc" separately
q = (session.query(Data).
diff --git a/test/orm/test_attributes.py b/test/orm/test_attributes.py
index 3a8a320e3..742e9d874 100644
--- a/test/orm/test_attributes.py
+++ b/test/orm/test_attributes.py
@@ -693,14 +693,18 @@ class UtilTest(_base.ORMTest):
class BackrefTest(_base.ORMTest):
- def test_manytomany(self):
+ def test_m2m(self):
class Student(object):pass
class Course(object):pass
attributes.register_class(Student)
attributes.register_class(Course)
- attributes.register_attribute(Student, 'courses', uselist=True, extension=attributes.GenericBackrefExtension('students'), useobject=True)
- attributes.register_attribute(Course, 'students', uselist=True, extension=attributes.GenericBackrefExtension('courses'), useobject=True)
+ attributes.register_attribute(Student, 'courses', uselist=True,
+ extension=attributes.GenericBackrefExtension('students'
+ ), useobject=True)
+ attributes.register_attribute(Course, 'students', uselist=True,
+ extension=attributes.GenericBackrefExtension('courses'
+ ), useobject=True)
s = Student()
c = Course()
@@ -717,14 +721,18 @@ class BackrefTest(_base.ORMTest):
s1.courses.remove(c)
self.assert_(c.students == [s2,s3])
- def test_onetomany(self):
+ def test_o2m(self):
class Post(object):pass
class Blog(object):pass
attributes.register_class(Post)
attributes.register_class(Blog)
- attributes.register_attribute(Post, 'blog', uselist=False, extension=attributes.GenericBackrefExtension('posts'), trackparent=True, useobject=True)
- attributes.register_attribute(Blog, 'posts', uselist=True, extension=attributes.GenericBackrefExtension('blog'), trackparent=True, useobject=True)
+ attributes.register_attribute(Post, 'blog', uselist=False,
+ extension=attributes.GenericBackrefExtension('posts'),
+ trackparent=True, useobject=True)
+ attributes.register_attribute(Blog, 'posts', uselist=True,
+ extension=attributes.GenericBackrefExtension('blog'),
+ trackparent=True, useobject=True)
b = Blog()
(p1, p2, p3) = (Post(), Post(), Post())
b.posts.append(p1)
@@ -748,13 +756,17 @@ class BackrefTest(_base.ORMTest):
p5.blog = None
del p5.blog
- def test_onetoone(self):
+ def test_o2o(self):
class Port(object):pass
class Jack(object):pass
attributes.register_class(Port)
attributes.register_class(Jack)
- attributes.register_attribute(Port, 'jack', uselist=False, extension=attributes.GenericBackrefExtension('port'), useobject=True)
- attributes.register_attribute(Jack, 'port', uselist=False, extension=attributes.GenericBackrefExtension('jack'), useobject=True)
+ attributes.register_attribute(Port, 'jack', uselist=False,
+ extension=attributes.GenericBackrefExtension('port'),
+ useobject=True)
+ attributes.register_attribute(Jack, 'port', uselist=False,
+ extension=attributes.GenericBackrefExtension('jack'),
+ useobject=True)
p = Port()
j = Jack()
p.jack = j
@@ -764,6 +776,96 @@ class BackrefTest(_base.ORMTest):
j.port = None
self.assert_(p.jack is None)
+ def test_symmetric_o2o_inheritance(self):
+ """Test that backref 'initiator' catching goes against
+ a token that is global to all InstrumentedAttribute objects
+ within a particular class, not just the indvidual IA object
+ since we use distinct objects in an inheritance scenario.
+
+ """
+ class Parent(object):
+ pass
+ class Child(object):
+ pass
+ class SubChild(Child):
+ pass
+
+ p_token = object()
+ c_token = object()
+
+ attributes.register_class(Parent)
+ attributes.register_class(Child)
+ attributes.register_class(SubChild)
+ attributes.register_attribute(Parent, 'child', uselist=False,
+ extension=attributes.GenericBackrefExtension('parent'),
+ parent_token = p_token,
+ useobject=True)
+ attributes.register_attribute(Child, 'parent', uselist=False,
+ extension=attributes.GenericBackrefExtension('child'),
+ parent_token = c_token,
+ useobject=True)
+ attributes.register_attribute(SubChild, 'parent',
+ uselist=False,
+ extension=attributes.GenericBackrefExtension('child'),
+ parent_token = c_token,
+ useobject=True)
+
+ p1 = Parent()
+ c1 = Child()
+ p1.child = c1
+
+ c2 = SubChild()
+ c2.parent = p1
+
+ def test_symmetric_o2m_inheritance(self):
+ class Parent(object):
+ pass
+ class SubParent(Parent):
+ pass
+ class Child(object):
+ pass
+
+ p_token = object()
+ c_token = object()
+
+ attributes.register_class(Parent)
+ attributes.register_class(SubParent)
+ attributes.register_class(Child)
+ attributes.register_attribute(Parent, 'children', uselist=True,
+ extension=attributes.GenericBackrefExtension('parent'),
+ parent_token = p_token,
+ useobject=True)
+ attributes.register_attribute(SubParent, 'children', uselist=True,
+ extension=attributes.GenericBackrefExtension('parent'),
+ parent_token = p_token,
+ useobject=True)
+ attributes.register_attribute(Child, 'parent', uselist=False,
+ extension=attributes.GenericBackrefExtension('children'),
+ parent_token = c_token,
+ useobject=True)
+
+ p1 = Parent()
+ p2 = SubParent()
+ c1 = Child()
+
+ p1.children.append(c1)
+
+ assert c1.parent is p1
+ assert c1 in p1.children
+
+ p2.children.append(c1)
+ assert c1.parent is p2
+
+ # note its still in p1.children -
+ # the event model currently allows only
+ # one level deep. without the parent_token,
+ # it keeps going until a ValueError is raised
+ # and this condition changes.
+ assert c1 in p1.children
+
+
+
+
class PendingBackrefTest(_base.ORMTest):
def setup(self):
global Post, Blog, called, lazy_load
diff --git a/test/orm/test_dynamic.py b/test/orm/test_dynamic.py
index 5d822fa3d..c06f6918a 100644
--- a/test/orm/test_dynamic.py
+++ b/test/orm/test_dynamic.py
@@ -21,6 +21,7 @@ class DynamicTest(_fixtures.FixtureTest, AssertsCompiledSQL):
q = sess.query(User)
u = q.filter(User.id==7).first()
+
eq_([User(id=7,
addresses=[Address(id=1, email_address='jack@bean.com')])],
q.filter(User.id==7).all())
diff --git a/test/orm/test_load_on_fks.py b/test/orm/test_load_on_fks.py
new file mode 100644
index 000000000..8e4f53b0d
--- /dev/null
+++ b/test/orm/test_load_on_fks.py
@@ -0,0 +1,273 @@
+from sqlalchemy import *
+from sqlalchemy.orm import *
+
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy.test.testing import TestBase, eq_, AssertsExecutionResults, assert_raises
+from sqlalchemy.test import testing
+from sqlalchemy.orm.attributes import instance_state
+from sqlalchemy.orm.exc import FlushError
+from sqlalchemy.test.schema import Table, Column
+
+engine = testing.db
+
+
+class FlushOnPendingTest(AssertsExecutionResults, TestBase):
+ def setUp(self):
+ global Parent, Child, Base
+ Base= declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+
+ id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
+ name = Column(String(50), nullable=False)
+ children = relationship("Child", load_on_pending=True)
+
+ class Child(Base):
+ __tablename__ = 'child'
+ id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
+ parent_id = Column(Integer, ForeignKey('parent.id'))
+
+ Base.metadata.create_all(engine)
+
+ def tearDown(self):
+ Base.metadata.drop_all(engine)
+
+ def test_annoying_autoflush_one(self):
+ sess = Session(engine)
+
+ p1 = Parent()
+ sess.add(p1)
+ p1.children = []
+
+ def test_annoying_autoflush_two(self):
+ sess = Session(engine)
+
+ p1 = Parent()
+ sess.add(p1)
+ assert p1.children == []
+
+ def test_dont_load_if_no_keys(self):
+ sess = Session(engine)
+
+ p1 = Parent()
+ sess.add(p1)
+
+ def go():
+ assert p1.children == []
+ self.assert_sql_count(testing.db, go, 0)
+
+class LoadOnFKsTest(AssertsExecutionResults, TestBase):
+
+ def setUp(self):
+ global Parent, Child, Base
+ Base= declarative_base()
+
+ class Parent(Base):
+ __tablename__ = 'parent'
+ __table_args__ = {'mysql_engine':'InnoDB'}
+
+ id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
+
+ class Child(Base):
+ __tablename__ = 'child'
+ __table_args__ = {'mysql_engine':'InnoDB'}
+
+ id= Column(Integer, primary_key=True, test_needs_autoincrement=True)
+ parent_id = Column(Integer, ForeignKey('parent.id'))
+
+ parent = relationship(Parent, backref=backref("children"))
+
+ Base.metadata.create_all(engine)
+
+ global sess, p1, p2, c1, c2
+ sess = Session(bind=engine)
+
+ p1 = Parent()
+ p2 = Parent()
+ c1, c2 = Child(), Child()
+ c1.parent = p1
+ sess.add_all([p1, p2])
+ assert c1 in sess
+
+ sess.commit()
+
+ def tearDown(self):
+ sess.rollback()
+ Base.metadata.drop_all(engine)
+
+ def test_load_on_pending_disallows_backref_event(self):
+ Child.parent.property.load_on_pending = True
+ sess.autoflush = False
+ c3 = Child()
+ sess.add(c3)
+ c3.parent_id = p1.id
+ c3.parent = p1
+
+ # a side effect of load-on-pending with no autoflush.
+ # a change to the backref event handler to check
+ # collection membership before assuming "old == new so return"
+ # would fix this - but this is wasteful and autoflush
+ # should be turned on.
+ assert c3 not in p1.children
+
+ def test_load_on_persistent_allows_backref_event(self):
+ Child.parent.property.load_on_pending = True
+ c3 = Child()
+ sess.add(c3)
+ c3.parent_id = p1.id
+ c3.parent = p1
+
+ assert c3 in p1.children
+
+ def test_no_load_on_pending_allows_backref_event(self):
+ # users who stick with the program and don't use
+ # 'load_on_pending' get expected behavior
+
+ sess.autoflush = False
+ c3 = Child()
+ sess.add(c3)
+ c3.parent_id = p1.id
+
+ c3.parent = p1
+
+ assert c3 in p1.children
+
+ def test_autoflush_on_pending(self):
+ c3 = Child()
+ sess.add(c3)
+ c3.parent_id = p1.id
+
+ # pendings don't autoflush
+ assert c3.parent is None
+
+ def test_autoflush_on_pending(self):
+ Child.parent.property.load_on_pending = True
+ c3 = Child()
+ sess.add(c3)
+ c3.parent_id = p1.id
+
+ # ...unless the flag is on
+ assert c3.parent is p1
+
+ def test_load_on_pending_with_set(self):
+ Child.parent.property.load_on_pending = True
+
+ p1.children
+
+ c3 = Child()
+ sess.add(c3)
+
+ c3.parent_id = p1.id
+
+ def go():
+ c3.parent = p1
+ self.assert_sql_count(testing.db, go, 0)
+
+ def test_backref_doesnt_double(self):
+ Child.parent.property.load_on_pending = True
+ sess.autoflush = False
+ p1.children
+ c3 = Child()
+ sess.add(c3)
+ c3.parent = p1
+ c3.parent = p1
+ c3.parent = p1
+ c3.parent = p1
+ assert len(p1.children)== 2
+
+ def test_m2o_lazy_loader_on_persistent(self):
+ """Compare the behaviors from the lazyloader using
+ the "committed" state in all cases, vs. the lazyloader
+ using the "current" state in all cases except during flush.
+
+ """
+ for loadfk in (True, False):
+ for loadrel in (True, False):
+ for autoflush in (True, False):
+ for manualflush in (True, False):
+ for fake_autoexpire in (True, False):
+ sess.autoflush = autoflush
+
+ if loadfk:
+ c1.parent_id
+ if loadrel:
+ c1.parent
+
+ c1.parent_id = p2.id
+
+ if manualflush:
+ sess.flush()
+
+ # fake_autoexpire refers to the eventual
+ # auto-expire of 'parent' when c1.parent_id
+ # is altered.
+ if fake_autoexpire:
+ sess.expire(c1, ['parent'])
+
+ # old 0.6 behavior
+ #if manualflush and (not loadrel or fake_autoexpire):
+ # # a flush occurs, we get p2
+ # assert c1.parent is p2
+ #elif not loadrel and not loadfk:
+ # # problematically - we get None since committed state
+ # # is empty when c1.parent_id was mutated, since we want
+ # # to save on selects. this is
+ # # why the patch goes in in 0.6 - this is mostly a bug.
+ # assert c1.parent is None
+ #else:
+ # # if things were loaded, autoflush doesn't even
+ # # happen.
+ # assert c1.parent is p1
+
+ # new behavior
+ if loadrel and not fake_autoexpire:
+ assert c1.parent is p1
+ else:
+ assert c1.parent is p2
+
+ sess.rollback()
+
+ def test_m2o_lazy_loader_on_pending(self):
+ for loadonpending in (False, True):
+ for autoflush in (False, True):
+ for manualflush in (False, True):
+ Child.parent.property.load_on_pending = loadonpending
+ sess.autoflush = autoflush
+ c2 = Child()
+ sess.add(c2)
+ c2.parent_id = p2.id
+
+ if manualflush:
+ sess.flush()
+
+ if loadonpending or manualflush:
+ assert c2.parent is p2
+ else:
+ assert c2.parent is None
+
+ sess.rollback()
+
+ def test_m2o_lazy_loader_on_transient(self):
+ for loadonpending in (False, True):
+ for attach in (False, True):
+ for autoflush in (False, True):
+ for manualflush in (False, True):
+ Child.parent.property.load_on_pending = loadonpending
+ sess.autoflush = autoflush
+ c2 = Child()
+
+ if attach:
+ sess._attach(instance_state(c2))
+
+ c2.parent_id = p2.id
+
+ if manualflush:
+ sess.flush()
+
+ if loadonpending and attach:
+ assert c2.parent is p2
+ else:
+ assert c2.parent is None
+
+ sess.rollback()
diff --git a/test/orm/test_mapper.py b/test/orm/test_mapper.py
index e24906a1f..f041c8896 100644
--- a/test/orm/test_mapper.py
+++ b/test/orm/test_mapper.py
@@ -88,14 +88,25 @@ class MapperTest(_fixtures.FixtureTest):
@testing.resolve_artifact_names
def test_exceptions_sticky(self):
- """test preservation of mapper compile errors raised during hasattr()."""
+ """test preservation of mapper compile errors raised during hasattr(),
+ as well as for redundant mapper compile calls. Test that
+ repeated calls don't stack up error messages.
+
+ """
mapper(Address, addresses, properties={
'user':relationship(User)
})
hasattr(Address.user, 'property')
- assert_raises_message(sa.exc.InvalidRequestError, r"suppressed within a hasattr\(\)", compile_mappers)
+ for i in range(3):
+ assert_raises_message(sa.exc.InvalidRequestError,
+ "^One or more mappers failed to "
+ "initialize - can't proceed with "
+ "initialization of other mappers. "
+ "Original exception was: Class "
+ "'test.orm._fixtures.User' is not mapped$"
+ , compile_mappers)
@testing.resolve_artifact_names
def test_column_prefix(self):
@@ -157,13 +168,15 @@ class MapperTest(_fixtures.FixtureTest):
@testing.resolve_artifact_names
def test_column_not_present(self):
- assert_raises_message(sa.exc.ArgumentError, "not represented in the mapper's table", mapper, User, users, properties={
- 'foo':addresses.c.user_id
- })
+ assert_raises_message(sa.exc.ArgumentError,
+ "not represented in the mapper's table",
+ mapper, User, users, properties={'foo'
+ : addresses.c.user_id})
@testing.resolve_artifact_names
def test_bad_constructor(self):
"""If the construction of a mapped class fails, the instance does not get placed in the session"""
+
class Foo(object):
def __init__(self, one, two, _sa_session=None):
pass
@@ -482,18 +495,22 @@ class MapperTest(_fixtures.FixtureTest):
class Manager(Employee): pass
class Hoho(object): pass
class Lala(object): pass
-
+ class Fub(object):pass
+ class Frob(object):pass
class HasDef(object):
def name(self):
pass
-
+ class Empty(object):pass
+
+ empty = mapper(Empty, t, properties={'empty_id' : t.c.id},
+ include_properties=[])
p_m = mapper(Person, t, polymorphic_on=t.c.type,
include_properties=('id', 'type', 'name'))
- e_m = mapper(Employee, inherits=p_m, polymorphic_identity='employee',
- properties={
- 'boss': relationship(Manager, backref=backref('peon', ), remote_side=t.c.id)
- },
- exclude_properties=('vendor_id',))
+ e_m = mapper(Employee, inherits=p_m,
+ polymorphic_identity='employee', properties={'boss'
+ : relationship(Manager, backref=backref('peon'),
+ remote_side=t.c.id)},
+ exclude_properties=('vendor_id', ))
m_m = mapper(Manager, inherits=e_m, polymorphic_identity='manager',
include_properties=('id', 'type'))
@@ -506,8 +523,12 @@ class MapperTest(_fixtures.FixtureTest):
hd_m = mapper(HasDef, t, column_prefix="h_")
+ fb_m = mapper(Fub, t, include_properties=(t.c.id, t.c.type))
+ frb_m = mapper(Frob, t, column_prefix='f_',
+ exclude_properties=(t.c.boss_id,
+ 'employee_number', t.c.vendor_id))
+
p_m.compile()
- #sa.orm.compile_mappers()
def assert_props(cls, want):
have = set([n for n in dir(cls) if not n.startswith('_')])
@@ -519,7 +540,8 @@ class MapperTest(_fixtures.FixtureTest):
want = set(want)
eq_(have, want)
- assert_props(HasDef, ['h_boss_id', 'h_employee_number', 'h_id', 'name', 'h_name', 'h_vendor_id', 'h_type'])
+ assert_props(HasDef, ['h_boss_id', 'h_employee_number', 'h_id',
+ 'name', 'h_name', 'h_vendor_id', 'h_type'])
assert_props(Person, ['id', 'name', 'type'])
assert_instrumented(Person, ['id', 'name', 'type'])
assert_props(Employee, ['boss', 'boss_id', 'employee_number',
@@ -535,24 +557,58 @@ class MapperTest(_fixtures.FixtureTest):
assert_props(Vendor, ['vendor_id', 'id', 'name', 'type'])
assert_props(Hoho, ['id', 'name', 'type'])
assert_props(Lala, ['p_employee_number', 'p_id', 'p_name', 'p_type'])
-
+ assert_props(Fub, ['id', 'type'])
+ assert_props(Frob, ['f_id', 'f_type', 'f_name', ])
# excluding the discriminator column is currently not allowed
class Foo(Person):
pass
- assert_raises(sa.exc.InvalidRequestError, mapper, Foo, inherits=Person, polymorphic_identity='foo', exclude_properties=('type',) )
+ assert_props(Empty, ['empty_id'])
+
+ assert_raises(
+ sa.exc.InvalidRequestError,
+ mapper,
+ Foo, inherits=Person, polymorphic_identity='foo',
+ exclude_properties=('type', ),
+ )
@testing.resolve_artifact_names
- def test_mapping_to_join(self):
- """Mapping to a join"""
+ def test_mapping_to_join_raises(self):
+ """Test implicit merging of two cols warns."""
+
usersaddresses = sa.join(users, addresses,
users.c.id == addresses.c.user_id)
- mapper(User, usersaddresses, primary_key=[users.c.id])
+ assert_raises_message(
+ sa.exc.SAWarning,
+ "Implicitly",
+ mapper, User, usersaddresses, primary_key=[users.c.id]
+ )
+ sa.orm.clear_mappers()
+
+ @testing.emits_warning(r'Implicitly')
+ def go():
+ # but it works despite the warning
+ mapper(User, usersaddresses, primary_key=[users.c.id])
+ l = create_session().query(User).order_by(users.c.id).all()
+ eq_(l, self.static.user_result[:3])
+ go()
+
+ @testing.resolve_artifact_names
+ def test_mapping_to_join(self):
+ """Mapping to a join"""
+
+ usersaddresses = sa.join(users, addresses, users.c.id
+ == addresses.c.user_id)
+ mapper(User, usersaddresses, primary_key=[users.c.id],
+ exclude_properties=[addresses.c.id])
l = create_session().query(User).order_by(users.c.id).all()
eq_(l, self.static.user_result[:3])
@testing.resolve_artifact_names
def test_mapping_to_join_no_pk(self):
- m = mapper(Address, addresses.join(email_bounces))
+ m = mapper(Address,
+ addresses.join(email_bounces),
+ properties={'id':[addresses.c.id, email_bounces.c.id]}
+ )
m.compile()
assert addresses in m._pks_by_table
assert email_bounces not in m._pks_by_table
diff --git a/test/orm/test_naturalpks.py b/test/orm/test_naturalpks.py
index b305375da..d02ecb707 100644
--- a/test/orm/test_naturalpks.py
+++ b/test/orm/test_naturalpks.py
@@ -8,7 +8,7 @@ import sqlalchemy as sa
from sqlalchemy.test import testing
from sqlalchemy import Integer, String, ForeignKey, Unicode
from sqlalchemy.test.schema import Table, Column
-from sqlalchemy.orm import mapper, relationship, create_session, backref
+from sqlalchemy.orm import mapper, relationship, create_session, backref, Session
from sqlalchemy.orm.session import make_transient
from sqlalchemy.test.testing import eq_
from test.orm import _base, _fixtures
@@ -499,24 +499,49 @@ class SelfReferentialTest(_base.MappedTest):
pass
@testing.resolve_artifact_names
- def test_one_to_many(self):
+ def test_one_to_many_on_m2o(self):
mapper(Node, nodes, properties={
'children': relationship(Node,
backref=sa.orm.backref('parentnode',
remote_side=nodes.c.name,
passive_updates=False),
- passive_updates=False)})
+ )})
- sess = create_session()
+ sess = Session()
+ n1 = Node(name='n1')
+ sess.add(n1)
+ n2 = Node(name='n11', parentnode=n1)
+ n3 = Node(name='n12', parentnode=n1)
+ n4 = Node(name='n13', parentnode=n1)
+ sess.add_all([n2, n3, n4])
+ sess.commit()
+
+ n1.name = 'new n1'
+ sess.commit()
+ eq_(['new n1', 'new n1', 'new n1'],
+ [n.parent
+ for n in sess.query(Node).filter(
+ Node.name.in_(['n11', 'n12', 'n13']))])
+
+ @testing.resolve_artifact_names
+ def test_one_to_many_on_o2m(self):
+ mapper(Node, nodes, properties={
+ 'children': relationship(Node,
+ backref=sa.orm.backref('parentnode',
+ remote_side=nodes.c.name),
+ passive_updates=False
+ )})
+
+ sess = Session()
n1 = Node(name='n1')
n1.children.append(Node(name='n11'))
n1.children.append(Node(name='n12'))
n1.children.append(Node(name='n13'))
sess.add(n1)
- sess.flush()
+ sess.commit()
n1.name = 'new n1'
- sess.flush()
+ sess.commit()
eq_(n1.children[1].parent, 'new n1')
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
@@ -540,18 +565,16 @@ class SelfReferentialTest(_base.MappedTest):
}
)
- sess = create_session()
+ sess = Session()
n1 = Node(name='n1')
n11 = Node(name='n11', parentnode=n1)
n12 = Node(name='n12', parentnode=n1)
n13 = Node(name='n13', parentnode=n1)
sess.add_all([n1, n11, n12, n13])
- sess.flush()
+ sess.commit()
n1.name = 'new n1'
- sess.flush()
- if passive:
- sess.expire_all()
+ sess.commit()
eq_(['new n1', 'new n1', 'new n1'],
[n.parent
for n in sess.query(Node).filter(
diff --git a/test/orm/test_query.py b/test/orm/test_query.py
index 65be1e00a..8facd4e69 100644
--- a/test/orm/test_query.py
+++ b/test/orm/test_query.py
@@ -1375,6 +1375,9 @@ class ParentTest(QueryTest):
o = sess.query(Order).with_parent(u1, property='orders').all()
assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o
+ o = sess.query(Order).with_parent(u1, property=User.orders).all()
+ assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o
+
o = sess.query(Order).filter(with_parent(u1, User.orders)).all()
assert [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")] == o
@@ -1396,7 +1399,9 @@ class ParentTest(QueryTest):
q = sess.query(Item).with_parent(u1)
assert False
except sa_exc.InvalidRequestError, e:
- assert str(e) == "Could not locate a property which relates instances of class 'Item' to instances of class 'User'"
+ assert str(e) \
+ == "Could not locate a property which relates "\
+ "instances of class 'Item' to instances of class 'User'"
def test_m2m(self):
sess = create_session()
@@ -1404,6 +1409,50 @@ class ParentTest(QueryTest):
k = sess.query(Keyword).with_parent(i1).all()
assert [Keyword(name='red'), Keyword(name='small'), Keyword(name='square')] == k
+ def test_with_transient(self):
+ sess = Session()
+
+ q = sess.query(User)
+ u1 = q.filter_by(name='jack').one()
+ utrans = User(id=u1.id)
+ o = sess.query(Order).with_parent(utrans, 'orders')
+ eq_(
+ [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")],
+ o.all()
+ )
+
+ o = sess.query(Order).filter(with_parent(utrans, 'orders'))
+ eq_(
+ [Order(description="order 1"), Order(description="order 3"), Order(description="order 5")],
+ o.all()
+ )
+
+ def test_with_pending_autoflush(self):
+ sess = Session()
+
+ o1 = sess.query(Order).first()
+ opending = Order(id=20, user_id=o1.user_id)
+ sess.add(opending)
+ eq_(
+ sess.query(User).with_parent(opending, 'user').one(),
+ User(id=o1.user_id)
+ )
+ eq_(
+ sess.query(User).filter(with_parent(opending, 'user')).one(),
+ User(id=o1.user_id)
+ )
+
+ def test_with_pending_no_autoflush(self):
+ sess = Session(autoflush=False)
+
+ o1 = sess.query(Order).first()
+ opending = Order(user_id=o1.user_id)
+ sess.add(opending)
+ eq_(
+ sess.query(User).with_parent(opending, 'user').one(),
+ User(id=o1.user_id)
+ )
+
class InheritedJoinTest(_base.MappedTest, AssertsCompiledSQL):
run_setup_mappers = 'once'
diff --git a/test/orm/test_session.py b/test/orm/test_session.py
index 4976db131..6ac42a6b3 100644
--- a/test/orm/test_session.py
+++ b/test/orm/test_session.py
@@ -4,7 +4,7 @@ from sqlalchemy.test.util import gc_collect
import inspect
import pickle
from sqlalchemy.orm import create_session, sessionmaker, attributes, \
- make_transient
+ make_transient, Session
from sqlalchemy.orm.attributes import instance_state
import sqlalchemy as sa
from sqlalchemy.test import engines, testing, config
@@ -713,12 +713,39 @@ class SessionTest(_fixtures.FixtureTest):
sess.flush()
sess.rollback()
assert_raises_message(sa.exc.InvalidRequestError,
- 'inactive due to a rollback in a '
- 'subtransaction', sess.begin,
- subtransactions=True)
+ "This Session's transaction has been "
+ r"rolled back by a nested rollback\(\) "
+ "call. To begin a new transaction, "
+ r"issue Session.rollback\(\) first.",
+ sess.begin, subtransactions=True)
sess.close()
@testing.resolve_artifact_names
+ def test_preserve_flush_error(self):
+ mapper(User, users)
+ sess = Session()
+
+ sess.add(User(id=5))
+ assert_raises(
+ sa.exc.DBAPIError,
+ sess.commit
+ )
+
+ for i in range(5):
+ assert_raises_message(sa.exc.InvalidRequestError,
+ "^This Session's transaction has been "
+ r"rolled back due to a previous exception during flush. To "
+ "begin a new transaction with this "
+ "Session, first issue "
+ r"Session.rollback\(\). Original exception "
+ "was:",
+ sess.commit)
+ sess.rollback()
+ sess.add(User(id=5, name='some name'))
+ sess.commit()
+
+
+ @testing.resolve_artifact_names
def test_no_autocommit_with_explicit_commit(self):
mapper(User, users)
session = create_session(autocommit=False)
@@ -1383,6 +1410,22 @@ class SessionTest(_fixtures.FixtureTest):
assert b in sess
assert len(list(sess)) == 1
+ @testing.resolve_artifact_names
+ def test_identity_map_mutate(self):
+ mapper(User, users)
+
+ sess = Session()
+
+ sess.add_all([User(name='u1'), User(name='u2'), User(name='u3')])
+ sess.commit()
+
+ u1, u2, u3 = sess.query(User).all()
+ for i, (key, value) in enumerate(sess.identity_map.iteritems()):
+ if i == 2:
+ del u3
+ gc_collect()
+
+
class DisposedStates(_base.MappedTest):
run_setup_mappers = 'once'
run_inserts = 'once'
diff --git a/test/orm/test_unitofwork.py b/test/orm/test_unitofwork.py
index ea6397517..c95836055 100644
--- a/test/orm/test_unitofwork.py
+++ b/test/orm/test_unitofwork.py
@@ -11,7 +11,8 @@ from sqlalchemy.test import engines, testing, pickleable
from sqlalchemy import Integer, String, ForeignKey, literal_column
from sqlalchemy.test.schema import Table
from sqlalchemy.test.schema import Column
-from sqlalchemy.orm import mapper, relationship, create_session, column_property, attributes
+from sqlalchemy.orm import mapper, relationship, create_session, \
+ column_property, attributes, Session, reconstructor, object_session
from sqlalchemy.test.testing import eq_, ne_
from test.orm import _base, _fixtures
from test.engine import _base as engine_base
@@ -698,10 +699,15 @@ class PassiveDeletesTest(_base.MappedTest):
assert mytable.count().scalar() == 0
assert myothertable.count().scalar() == 0
+ @testing.emits_warning(r".*'passive_deletes' is normally configured on one-to-many")
@testing.resolve_artifact_names
def test_backwards_pd(self):
- # the unusual scenario where a trigger or something might be deleting
- # a many-to-one on deletion of the parent row
+ """Test that passive_deletes=True disables a delete from an m2o.
+
+ This is not the usual usage and it now raises a warning, but test
+ that it works nonetheless.
+
+ """
mapper(MyOtherClass, myothertable, properties={
'myclass':relationship(MyClass, cascade="all, delete", passive_deletes=True)
})
@@ -721,8 +727,17 @@ class PassiveDeletesTest(_base.MappedTest):
session.delete(mco)
session.flush()
+ # mytable wasn't deleted, is the point.
assert mytable.count().scalar() == 1
assert myothertable.count().scalar() == 0
+
+ @testing.resolve_artifact_names
+ def test_aaa_m2o_emits_warning(self):
+ mapper(MyOtherClass, myothertable, properties={
+ 'myclass':relationship(MyClass, cascade="all, delete", passive_deletes=True)
+ })
+ mapper(MyClass, mytable)
+ assert_raises(sa.exc.SAWarning, sa.orm.compile_mappers)
class ExtraPassiveDeletesTest(_base.MappedTest):
__requires__ = ('foreign_keys',)
@@ -2111,7 +2126,68 @@ class BooleanColTest(_base.MappedTest):
sess.flush()
eq_(sess.query(T).filter(T.value==True).all(), [T(value=True, name="t1"),T(value=True, name="t3")])
-
+class DontAllowFlushOnLoadingObjectTest(_base.MappedTest):
+ """Test that objects with NULL identity keys aren't permitted to complete a flush.
+
+ User-defined callables that execute during a load may modify state
+ on instances which results in their being autoflushed, before attributes
+ are populated. If the primary key identifiers are missing, an explicit assertion
+ is needed to check that the object doesn't go through the flush process with
+ no net changes and gets placed in the identity map with an incorrect
+ identity key.
+
+ """
+ @classmethod
+ def define_tables(cls, metadata):
+ t1 = Table('t1', metadata,
+ Column('id', Integer, primary_key=True),
+ Column('data', String(30)),
+ )
+
+ @testing.resolve_artifact_names
+ def test_flush_raises(self):
+ class T1(_base.ComparableEntity):
+ @reconstructor
+ def go(self):
+ # blow away 'id', no change event.
+ # this simulates a callable occuring
+ # before 'id' was even populated, i.e. a callable
+ # within an attribute_mapped_collection
+ self.__dict__.pop('id', None)
+
+ # generate a change event, perhaps this occurs because
+ # someone wrote a broken attribute_mapped_collection that
+ # inappropriately fires off change events when it should not,
+ # now we're dirty
+ self.data = 'foo bar'
+
+ # blow away that change, so an UPDATE does not occur
+ # (since it would break)
+ self.__dict__.pop('data', None)
+
+ # flush ! any lazyloader here would trigger
+ # autoflush, for example.
+ sess.flush()
+
+ mapper(T1, t1)
+
+ sess = Session()
+ sess.add(T1(data='test', id=5))
+ sess.commit()
+ sess.close()
+
+ # make sure that invalid state doesn't get into the session
+ # with the wrong key. If the identity key is not NULL, at least
+ # the population process would continue after the erroneous flush
+ # and thing would right themselves.
+ assert_raises_message(sa.orm.exc.FlushError,
+ 'has a NULL identity key. Check if this '
+ 'flush is occuring at an inappropriate '
+ 'time, such as during a load operation.',
+ sess.query(T1).first)
+
+
+
class RowSwitchTest(_base.MappedTest):
@classmethod
def define_tables(cls, metadata):
diff --git a/test/sql/test_case_statement.py b/test/sql/test_case_statement.py
index 645822fa7..1a106ee5e 100644
--- a/test/sql/test_case_statement.py
+++ b/test/sql/test_case_statement.py
@@ -32,14 +32,14 @@ class CaseTest(TestBase, AssertsCompiledSQL):
@testing.fails_on('firebird', 'FIXME: unknown')
@testing.fails_on('maxdb', 'FIXME: unknown')
@testing.requires.subqueries
- def testcase(self):
+ def test_case(self):
inner = select([case([
[info_table.c.pk < 3,
'lessthan3'],
[and_(info_table.c.pk >= 3, info_table.c.pk < 7),
'gt3']]).label('x'),
info_table.c.pk, info_table.c.info],
- from_obj=[info_table]).alias('q_inner')
+ from_obj=[info_table])
inner_result = inner.execute().fetchall()
@@ -59,7 +59,7 @@ class CaseTest(TestBase, AssertsCompiledSQL):
('gt3', 6, 'pk_6_data')
]
- outer = select([inner])
+ outer = select([inner.alias('q_inner')])
outer_result = outer.execute().fetchall()
@@ -79,7 +79,7 @@ class CaseTest(TestBase, AssertsCompiledSQL):
6]],
else_ = 0).label('x'),
info_table.c.pk, info_table.c.info],
- from_obj=[info_table]).alias('q_inner')
+ from_obj=[info_table])
else_result = w_else.execute().fetchall()
diff --git a/test/sql/test_compiler.py b/test/sql/test_compiler.py
index 09432e1d4..07ceb9767 100644
--- a/test/sql/test_compiler.py
+++ b/test/sql/test_compiler.py
@@ -89,7 +89,7 @@ class SelectTest(TestBase, AssertsCompiledSQL):
def test_invalid_col_argument(self):
assert_raises(exc.ArgumentError, select, table1)
assert_raises(exc.ArgumentError, select, table1.c.myid)
-
+
def test_from_subquery(self):
"""tests placing select statements in the column clause of another select, for the
purposes of selecting from the exported columns of that select."""
@@ -263,13 +263,27 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
s3 = select([s2], use_labels=True)
s4 = s3.alias()
s5 = select([s4], use_labels=True)
- self.assert_compile(s5, "SELECT anon_1.anon_2_myid AS anon_1_anon_2_myid, anon_1.anon_2_name AS anon_1_anon_2_name, "\
- "anon_1.anon_2_description AS anon_1_anon_2_description FROM (SELECT anon_2.myid AS anon_2_myid, anon_2.name AS anon_2_name, "\
- "anon_2.description AS anon_2_description FROM (SELECT mytable.myid AS myid, mytable.name AS name, mytable.description "\
- "AS description FROM mytable) AS anon_2) AS anon_1")
+ self.assert_compile(s5,
+ 'SELECT anon_1.anon_2_myid AS '
+ 'anon_1_anon_2_myid, anon_1.anon_2_name AS '
+ 'anon_1_anon_2_name, anon_1.anon_2_descript'
+ 'ion AS anon_1_anon_2_description FROM '
+ '(SELECT anon_2.myid AS anon_2_myid, '
+ 'anon_2.name AS anon_2_name, '
+ 'anon_2.description AS anon_2_description '
+ 'FROM (SELECT mytable.myid AS myid, '
+ 'mytable.name AS name, mytable.description '
+ 'AS description FROM mytable) AS anon_2) '
+ 'AS anon_1')
def test_dont_overcorrelate(self):
- self.assert_compile(select([table1], from_obj=[table1, table1.select()]), """SELECT mytable.myid, mytable.name, mytable.description FROM mytable, (SELECT mytable.myid AS myid, mytable.name AS name, mytable.description AS description FROM mytable)""")
+ self.assert_compile(select([table1], from_obj=[table1,
+ table1.select()]),
+ "SELECT mytable.myid, mytable.name, "
+ "mytable.description FROM mytable, (SELECT "
+ "mytable.myid AS myid, mytable.name AS "
+ "name, mytable.description AS description "
+ "FROM mytable)")
def test_full_correlate(self):
# intentional
@@ -301,31 +315,56 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
"EXISTS (SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)"
)
- self.assert_compile(exists([table1.c.myid], table1.c.myid==5).select(), "SELECT EXISTS (SELECT mytable.myid FROM mytable WHERE mytable.myid = :myid_1)", params={'mytable_myid':5})
-
- self.assert_compile(select([table1, exists([1], from_obj=table2)]), "SELECT mytable.myid, mytable.name, mytable.description, EXISTS (SELECT 1 FROM myothertable) FROM mytable", params={})
-
- self.assert_compile(select([table1, exists([1], from_obj=table2).label('foo')]), "SELECT mytable.myid, mytable.name, mytable.description, EXISTS (SELECT 1 FROM myothertable) AS foo FROM mytable", params={})
-
- self.assert_compile(
- table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE EXISTS (SELECT * FROM myothertable WHERE myothertable.otherid = mytable.myid)"
- )
-
- self.assert_compile(
- table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE EXISTS (SELECT * FROM myothertable WHERE myothertable.otherid = mytable.myid)"
- )
-
- self.assert_compile(
- table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)).replace_selectable(table2, table2.alias()),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE EXISTS (SELECT * FROM myothertable AS myothertable_1 WHERE myothertable_1.otherid = mytable.myid)"
- )
-
- self.assert_compile(
- table1.select(exists().where(table2.c.otherid == table1.c.myid).correlate(table1)).select_from(table1.join(table2, table1.c.myid==table2.c.otherid)).replace_selectable(table2, table2.alias()),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable JOIN myothertable AS myothertable_1 ON mytable.myid = myothertable_1.otherid WHERE EXISTS (SELECT * FROM myothertable AS myothertable_1 WHERE myothertable_1.otherid = mytable.myid)"
- )
+ self.assert_compile(exists([table1.c.myid], table1.c.myid
+ == 5).select(),
+ 'SELECT EXISTS (SELECT mytable.myid FROM '
+ 'mytable WHERE mytable.myid = :myid_1)',
+ params={'mytable_myid': 5})
+ self.assert_compile(select([table1, exists([1],
+ from_obj=table2)]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description, EXISTS (SELECT 1 '
+ 'FROM myothertable) FROM mytable',
+ params={})
+ self.assert_compile(select([table1, exists([1],
+ from_obj=table2).label('foo')]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description, EXISTS (SELECT 1 '
+ 'FROM myothertable) AS foo FROM mytable',
+ params={})
+
+ self.assert_compile(table1.select(exists().where(table2.c.otherid
+ == table1.c.myid).correlate(table1)),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'EXISTS (SELECT * FROM myothertable WHERE '
+ 'myothertable.otherid = mytable.myid)')
+ self.assert_compile(table1.select(exists().where(table2.c.otherid
+ == table1.c.myid).correlate(table1)),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'EXISTS (SELECT * FROM myothertable WHERE '
+ 'myothertable.otherid = mytable.myid)')
+ self.assert_compile(table1.select(exists().where(table2.c.otherid
+ == table1.c.myid).correlate(table1)).replace_selectable(table2,
+ table2.alias()),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'EXISTS (SELECT * FROM myothertable AS '
+ 'myothertable_1 WHERE myothertable_1.otheri'
+ 'd = mytable.myid)')
+ self.assert_compile(table1.select(exists().where(table2.c.otherid
+ == table1.c.myid).correlate(table1)).select_from(table1.join(table2,
+ table1.c.myid
+ == table2.c.otherid)).replace_selectable(table2,
+ table2.alias()),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable JOIN '
+ 'myothertable AS myothertable_1 ON '
+ 'mytable.myid = myothertable_1.otherid '
+ 'WHERE EXISTS (SELECT * FROM myothertable '
+ 'AS myothertable_1 WHERE '
+ 'myothertable_1.otherid = mytable.myid)')
self.assert_compile(
select([
@@ -334,62 +373,93 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
exists().where(table2.c.otherid=='bar')
)
]),
- "SELECT (EXISTS (SELECT * FROM myothertable WHERE myothertable.otherid = :otherid_1)) "\
- "OR (EXISTS (SELECT * FROM myothertable WHERE myothertable.otherid = :otherid_2)) AS anon_1"
+ "SELECT (EXISTS (SELECT * FROM myothertable "
+ "WHERE myothertable.otherid = :otherid_1)) "
+ "OR (EXISTS (SELECT * FROM myothertable WHERE "
+ "myothertable.otherid = :otherid_2)) AS anon_1"
)
def test_where_subquery(self):
- s = select([addresses.c.street], addresses.c.user_id==users.c.user_id, correlate=True).alias('s')
- self.assert_compile(
- select([users, s.c.street], from_obj=s),
- """SELECT users.user_id, users.user_name, users.password, s.street FROM users, (SELECT addresses.street AS street FROM addresses WHERE addresses.user_id = users.user_id) AS s""")
-
- self.assert_compile(
- table1.select(table1.c.myid == select([table1.c.myid], table1.c.name=='jack')),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE mytable.myid = (SELECT mytable.myid FROM mytable WHERE mytable.name = :name_1)"
- )
-
- self.assert_compile(
- table1.select(table1.c.myid == select([table2.c.otherid], table1.c.name == table2.c.othername)),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE mytable.myid = (SELECT myothertable.otherid FROM myothertable WHERE mytable.name = myothertable.othername)"
- )
-
- self.assert_compile(
- table1.select(exists([1], table2.c.otherid == table1.c.myid)),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE EXISTS (SELECT 1 FROM myothertable WHERE myothertable.otherid = mytable.myid)"
- )
-
-
+ s = select([addresses.c.street], addresses.c.user_id
+ == users.c.user_id, correlate=True).alias('s')
+ self.assert_compile(select([users, s.c.street], from_obj=s),
+ "SELECT users.user_id, users.user_name, "
+ "users.password, s.street FROM users, "
+ "(SELECT addresses.street AS street FROM "
+ "addresses WHERE addresses.user_id = "
+ "users.user_id) AS s")
+ self.assert_compile(table1.select(table1.c.myid
+ == select([table1.c.myid], table1.c.name
+ == 'jack')),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'mytable.myid = (SELECT mytable.myid FROM '
+ 'mytable WHERE mytable.name = :name_1)')
+ self.assert_compile(table1.select(table1.c.myid
+ == select([table2.c.otherid], table1.c.name
+ == table2.c.othername)),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'mytable.myid = (SELECT '
+ 'myothertable.otherid FROM myothertable '
+ 'WHERE mytable.name = myothertable.othernam'
+ 'e)')
+ self.assert_compile(table1.select(exists([1], table2.c.otherid
+ == table1.c.myid)),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'EXISTS (SELECT 1 FROM myothertable WHERE '
+ 'myothertable.otherid = mytable.myid)')
talias = table1.alias('ta')
- s = subquery('sq2', [talias], exists([1], table2.c.otherid == talias.c.myid))
- self.assert_compile(
- select([s, table1])
- ,"SELECT sq2.myid, sq2.name, sq2.description, mytable.myid, mytable.name, mytable.description FROM (SELECT ta.myid AS myid, ta.name AS name, ta.description AS description FROM mytable AS ta WHERE EXISTS (SELECT 1 FROM myothertable WHERE myothertable.otherid = ta.myid)) AS sq2, mytable")
-
- s = select([addresses.c.street], addresses.c.user_id==users.c.user_id, correlate=True).alias('s')
- self.assert_compile(
- select([users, s.c.street], from_obj=s),
- """SELECT users.user_id, users.user_name, users.password, s.street FROM users, (SELECT addresses.street AS street FROM addresses WHERE addresses.user_id = users.user_id) AS s""")
-
- # test constructing the outer query via append_column(), which occurs in the ORM's Query object
- s = select([], exists([1], table2.c.otherid==table1.c.myid), from_obj=table1)
+ s = subquery('sq2', [talias], exists([1], table2.c.otherid
+ == talias.c.myid))
+ self.assert_compile(select([s, table1]),
+ 'SELECT sq2.myid, sq2.name, '
+ 'sq2.description, mytable.myid, '
+ 'mytable.name, mytable.description FROM '
+ '(SELECT ta.myid AS myid, ta.name AS name, '
+ 'ta.description AS description FROM '
+ 'mytable AS ta WHERE EXISTS (SELECT 1 FROM '
+ 'myothertable WHERE myothertable.otherid = '
+ 'ta.myid)) AS sq2, mytable')
+ s = select([addresses.c.street], addresses.c.user_id
+ == users.c.user_id, correlate=True).alias('s')
+ self.assert_compile(select([users, s.c.street], from_obj=s),
+ "SELECT users.user_id, users.user_name, "
+ "users.password, s.street FROM users, "
+ "(SELECT addresses.street AS street FROM "
+ "addresses WHERE addresses.user_id = "
+ "users.user_id) AS s")
+
+ # test constructing the outer query via append_column(), which
+ # occurs in the ORM's Query object
+
+ s = select([], exists([1], table2.c.otherid == table1.c.myid),
+ from_obj=table1)
s.append_column(table1)
- self.assert_compile(
- s,
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable WHERE EXISTS (SELECT 1 FROM myothertable WHERE myothertable.otherid = mytable.myid)"
- )
+ self.assert_compile(s,
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable WHERE '
+ 'EXISTS (SELECT 1 FROM myothertable WHERE '
+ 'myothertable.otherid = mytable.myid)')
def test_orderby_subquery(self):
- self.assert_compile(
- table1.select(order_by=[select([table2.c.otherid], table1.c.myid==table2.c.otherid)]),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable ORDER BY (SELECT myothertable.otherid FROM myothertable WHERE mytable.myid = myothertable.otherid)"
- )
- self.assert_compile(
- table1.select(order_by=[desc(select([table2.c.otherid], table1.c.myid==table2.c.otherid))]),
- "SELECT mytable.myid, mytable.name, mytable.description FROM mytable ORDER BY (SELECT myothertable.otherid FROM myothertable WHERE mytable.myid = myothertable.otherid) DESC"
- )
+ self.assert_compile(table1.select(order_by=[select([table2.c.otherid],
+ table1.c.myid == table2.c.otherid)]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable ORDER BY '
+ '(SELECT myothertable.otherid FROM '
+ 'myothertable WHERE mytable.myid = '
+ 'myothertable.otherid)')
+ self.assert_compile(table1.select(order_by=[desc(select([table2.c.otherid],
+ table1.c.myid == table2.c.otherid))]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description FROM mytable ORDER BY '
+ '(SELECT myothertable.otherid FROM '
+ 'myothertable WHERE mytable.myid = '
+ 'myothertable.otherid) DESC')
@testing.uses_deprecated('scalar option')
def test_scalar_select(self):
@@ -401,41 +471,76 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
)
s = select([table1.c.myid], correlate=False).as_scalar()
- self.assert_compile(select([table1, s]), "SELECT mytable.myid, mytable.name, mytable.description, (SELECT mytable.myid FROM mytable) AS anon_1 FROM mytable")
-
+ self.assert_compile(select([table1, s]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description, (SELECT mytable.myid '
+ 'FROM mytable) AS anon_1 FROM mytable')
s = select([table1.c.myid]).as_scalar()
- self.assert_compile(select([table2, s]), "SELECT myothertable.otherid, myothertable.othername, (SELECT mytable.myid FROM mytable) AS anon_1 FROM myothertable")
-
+ self.assert_compile(select([table2, s]),
+ 'SELECT myothertable.otherid, '
+ 'myothertable.othername, (SELECT '
+ 'mytable.myid FROM mytable) AS anon_1 FROM '
+ 'myothertable')
s = select([table1.c.myid]).correlate(None).as_scalar()
- self.assert_compile(select([table1, s]), "SELECT mytable.myid, mytable.name, mytable.description, (SELECT mytable.myid FROM mytable) AS anon_1 FROM mytable")
-
- # test that aliases use as_scalar() when used in an explicitly scalar context
- s = select([table1.c.myid]).alias()
- self.assert_compile(select([table1.c.myid]).where(table1.c.myid==s), "SELECT mytable.myid FROM mytable WHERE mytable.myid = (SELECT mytable.myid FROM mytable)")
- self.assert_compile(select([table1.c.myid]).where(s > table1.c.myid), "SELECT mytable.myid FROM mytable WHERE mytable.myid < (SELECT mytable.myid FROM mytable)")
+ self.assert_compile(select([table1, s]),
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description, (SELECT mytable.myid '
+ 'FROM mytable) AS anon_1 FROM mytable')
+ # test that aliases use as_scalar() when used in an explicitly
+ # scalar context
+ s = select([table1.c.myid]).alias()
+ self.assert_compile(select([table1.c.myid]).where(table1.c.myid
+ == s),
+ 'SELECT mytable.myid FROM mytable WHERE '
+ 'mytable.myid = (SELECT mytable.myid FROM '
+ 'mytable)')
+ self.assert_compile(select([table1.c.myid]).where(s
+ > table1.c.myid),
+ 'SELECT mytable.myid FROM mytable WHERE '
+ 'mytable.myid < (SELECT mytable.myid FROM '
+ 'mytable)')
s = select([table1.c.myid]).as_scalar()
- self.assert_compile(select([table2, s]), "SELECT myothertable.otherid, myothertable.othername, (SELECT mytable.myid FROM mytable) AS anon_1 FROM myothertable")
+ self.assert_compile(select([table2, s]),
+ 'SELECT myothertable.otherid, '
+ 'myothertable.othername, (SELECT '
+ 'mytable.myid FROM mytable) AS anon_1 FROM '
+ 'myothertable')
# test expressions against scalar selects
- self.assert_compile(select([s - literal(8)]), "SELECT (SELECT mytable.myid FROM mytable) - :param_1 AS anon_1")
- self.assert_compile(select([select([table1.c.name]).as_scalar() + literal('x')]), "SELECT (SELECT mytable.name FROM mytable) || :param_1 AS anon_1")
- self.assert_compile(select([s > literal(8)]), "SELECT (SELECT mytable.myid FROM mytable) > :param_1 AS anon_1")
- self.assert_compile(select([select([table1.c.name]).label('foo')]), "SELECT (SELECT mytable.name FROM mytable) AS foo")
+ self.assert_compile(select([s - literal(8)]),
+ 'SELECT (SELECT mytable.myid FROM mytable) '
+ '- :param_1 AS anon_1')
+ self.assert_compile(select([select([table1.c.name]).as_scalar()
+ + literal('x')]),
+ 'SELECT (SELECT mytable.name FROM mytable) '
+ '|| :param_1 AS anon_1')
+ self.assert_compile(select([s > literal(8)]),
+ 'SELECT (SELECT mytable.myid FROM mytable) '
+ '> :param_1 AS anon_1')
+ self.assert_compile(select([select([table1.c.name]).label('foo'
+ )]),
+ 'SELECT (SELECT mytable.name FROM mytable) '
+ 'AS foo')
+
+ # scalar selects should not have any attributes on their 'c' or
+ # 'columns' attribute
- # scalar selects should not have any attributes on their 'c' or 'columns' attribute
s = select([table1.c.myid]).as_scalar()
try:
s.c.foo
except exc.InvalidRequestError, err:
- assert str(err) == 'Scalar Select expression has no columns; use this object directly within a column-level expression.'
-
+ assert str(err) \
+ == 'Scalar Select expression has no columns; use this '\
+ 'object directly within a column-level expression.'
try:
s.columns.foo
except exc.InvalidRequestError, err:
- assert str(err) == 'Scalar Select expression has no columns; use this object directly within a column-level expression.'
+ assert str(err) \
+ == 'Scalar Select expression has no columns; use this '\
+ 'object directly within a column-level expression.'
zips = table('zips',
column('zipcode'),
@@ -455,29 +560,55 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
order_by = ['dist', places.c.nm]
)
- self.assert_compile(q,"SELECT places.id, places.nm, zips.zipcode, latlondist((SELECT zips.latitude FROM zips WHERE "
- "zips.zipcode = :zipcode_1), (SELECT zips.longitude FROM zips WHERE zips.zipcode = :zipcode_2)) AS dist "
- "FROM places, zips WHERE zips.zipcode = :zipcode_3 ORDER BY dist, places.nm")
+ self.assert_compile(q,
+ 'SELECT places.id, places.nm, '
+ 'zips.zipcode, latlondist((SELECT '
+ 'zips.latitude FROM zips WHERE '
+ 'zips.zipcode = :zipcode_1), (SELECT '
+ 'zips.longitude FROM zips WHERE '
+ 'zips.zipcode = :zipcode_2)) AS dist FROM '
+ 'places, zips WHERE zips.zipcode = '
+ ':zipcode_3 ORDER BY dist, places.nm')
zalias = zips.alias('main_zip')
qlat = select([zips.c.latitude], zips.c.zipcode == zalias.c.zipcode).as_scalar()
qlng = select([zips.c.longitude], zips.c.zipcode == zalias.c.zipcode).as_scalar()
- q = select([places.c.id, places.c.nm, zalias.c.zipcode, func.latlondist(qlat, qlng).label('dist')],
- order_by = ['dist', places.c.nm]
- )
- self.assert_compile(q, "SELECT places.id, places.nm, main_zip.zipcode, latlondist((SELECT zips.latitude FROM zips WHERE zips.zipcode = main_zip.zipcode), (SELECT zips.longitude FROM zips WHERE zips.zipcode = main_zip.zipcode)) AS dist FROM places, zips AS main_zip ORDER BY dist, places.nm")
+ q = select([places.c.id, places.c.nm, zalias.c.zipcode,
+ func.latlondist(qlat, qlng).label('dist')],
+ order_by=['dist', places.c.nm])
+ self.assert_compile(q,
+ 'SELECT places.id, places.nm, '
+ 'main_zip.zipcode, latlondist((SELECT '
+ 'zips.latitude FROM zips WHERE '
+ 'zips.zipcode = main_zip.zipcode), (SELECT '
+ 'zips.longitude FROM zips WHERE '
+ 'zips.zipcode = main_zip.zipcode)) AS dist '
+ 'FROM places, zips AS main_zip ORDER BY '
+ 'dist, places.nm')
a1 = table2.alias('t2alias')
s1 = select([a1.c.otherid], table1.c.myid==a1.c.otherid).as_scalar()
j1 = table1.join(table2, table1.c.myid==table2.c.otherid)
s2 = select([table1, s1], from_obj=j1)
- self.assert_compile(s2, "SELECT mytable.myid, mytable.name, mytable.description, (SELECT t2alias.otherid FROM myothertable AS t2alias WHERE mytable.myid = t2alias.otherid) AS anon_1 FROM mytable JOIN myothertable ON mytable.myid = myothertable.otherid")
+ self.assert_compile(s2,
+ 'SELECT mytable.myid, mytable.name, '
+ 'mytable.description, (SELECT '
+ 't2alias.otherid FROM myothertable AS '
+ 't2alias WHERE mytable.myid = '
+ 't2alias.otherid) AS anon_1 FROM mytable '
+ 'JOIN myothertable ON mytable.myid = '
+ 'myothertable.otherid')
def test_label_comparison(self):
x = func.lala(table1.c.myid).label('foo')
- self.assert_compile(select([x], x==5), "SELECT lala(mytable.myid) AS foo FROM mytable WHERE lala(mytable.myid) = :param_1")
+ self.assert_compile(select([x], x == 5),
+ 'SELECT lala(mytable.myid) AS foo FROM '
+ 'mytable WHERE lala(mytable.myid) = '
+ ':param_1')
- self.assert_compile(label('bar', column('foo', type_=String)) + "foo", "foo || :param_1")
+ self.assert_compile(
+ label('bar', column('foo', type_=String))+ 'foo',
+ 'foo || :param_1')
def test_conjunctions(self):
@@ -491,7 +622,8 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
)
self.assert_compile(
- and_(table1.c.myid == 12, table1.c.name=='asdf', table2.c.othername == 'foo', "sysdate() = today()"),
+ and_(table1.c.myid == 12, table1.c.name=='asdf',
+ table2.c.othername == 'foo', "sysdate() = today()"),
"mytable.myid = :myid_1 AND mytable.name = :name_1 "\
"AND myothertable.othername = :othername_1 AND sysdate() = today()"
)
@@ -499,11 +631,14 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
self.assert_compile(
and_(
table1.c.myid == 12,
- or_(table2.c.othername=='asdf', table2.c.othername == 'foo', table2.c.otherid == 9),
+ or_(table2.c.othername=='asdf',
+ table2.c.othername == 'foo', table2.c.otherid == 9),
"sysdate() = today()",
),
- "mytable.myid = :myid_1 AND (myothertable.othername = :othername_1 OR "\
- "myothertable.othername = :othername_2 OR myothertable.otherid = :otherid_1) AND sysdate() = today()",
+ 'mytable.myid = :myid_1 AND (myothertable.othername = '
+ ':othername_1 OR myothertable.othername = :othername_2 OR '
+ 'myothertable.otherid = :otherid_1) AND sysdate() = '
+ 'today()',
checkparams = {'othername_1': 'asdf', 'othername_2':'foo', 'otherid_1': 9, 'myid_1': 12}
)
@@ -1766,18 +1901,74 @@ sq.myothertable_othername AS sq_myothertable_othername FROM (" + sqstring + ") A
self.assert_compile(table.select(between((table.c.field == table.c.field), False, True)),
"SELECT op.field FROM op WHERE (op.field = op.field) BETWEEN :param_1 AND :param_2")
+ def test_delayed_col_naming(self):
+ my_str = Column(String)
+
+ sel1 = select([my_str])
+
+ assert_raises_message(
+ exc.InvalidRequestError,
+ "Cannot initialize a sub-selectable with this Column",
+ lambda: sel1.c
+ )
+
+ # calling label or as_scalar doesn't compile
+ # anything.
+ sel2 = select([func.substr(my_str, 2, 3)]).label('my_substr')
+
+ assert_raises_message(
+ exc.CompileError,
+ "Cannot compile Column object until it's 'name' is assigned.",
+ str, sel2
+ )
+
+ sel3 = select([my_str]).as_scalar()
+ assert_raises_message(
+ exc.CompileError,
+ "Cannot compile Column object until it's 'name' is assigned.",
+ str, sel3
+ )
+
+ my_str.name = 'foo'
+
+ self.assert_compile(
+ sel1,
+ "SELECT foo",
+ )
+ self.assert_compile(
+ sel2,
+ '(SELECT substr(foo, :substr_2, :substr_3) AS substr_1)',
+ )
+
+ self.assert_compile(
+ sel3,
+ "(SELECT foo)"
+ )
+
def test_naming(self):
- s1 = select([table1.c.myid, table1.c.myid.label('foobar'), func.hoho(table1.c.name), func.lala(table1.c.name).label('gg')])
- assert s1.c.keys() == ['myid', 'foobar', 'hoho(mytable.name)', 'gg']
+ f1 = func.hoho(table1.c.name)
+ s1 = select([table1.c.myid, table1.c.myid.label('foobar'),
+ f1,
+ func.lala(table1.c.name).label('gg')])
+
+ eq_(
+ s1.c.keys(),
+ ['myid', 'foobar', str(f1), 'gg']
+ )
meta = MetaData()
t1 = Table('mytable', meta, Column('col1', Integer))
+ exprs = (
+ table1.c.myid==12,
+ func.hoho(table1.c.myid),
+ cast(table1.c.name, Numeric)
+ )
for col, key, expr, label in (
(table1.c.name, 'name', 'mytable.name', None),
- (table1.c.myid==12, 'mytable.myid = :myid_1', 'mytable.myid = :myid_1', 'anon_1'),
- (func.hoho(table1.c.myid), 'hoho(mytable.myid)', 'hoho(mytable.myid)', 'hoho_1'),
- (cast(table1.c.name, Numeric), 'CAST(mytable.name AS NUMERIC)', 'CAST(mytable.name AS NUMERIC)', 'anon_1'),
+ (exprs[0], str(exprs[0]), 'mytable.myid = :myid_1', 'anon_1'),
+ (exprs[1], str(exprs[1]), 'hoho(mytable.myid)', 'hoho_1'),
+ (exprs[2], str(exprs[2]), 'CAST(mytable.name AS NUMERIC)', 'anon_1'),
(t1.c.col1, 'col1', 'mytable.col1', None),
(column('some wacky thing'), 'some wacky thing', '"some wacky thing"', '')
):
diff --git a/test/sql/test_query.py b/test/sql/test_query.py
index 0a496906d..a87931bb3 100644
--- a/test/sql/test_query.py
+++ b/test/sql/test_query.py
@@ -216,7 +216,7 @@ class QueryTest(TestBase):
{'user_name':'jack'},
)
assert r.closed
-
+
def test_row_iteration(self):
users.insert().execute(
{'user_id':7, 'user_name':'jack'},
@@ -619,6 +619,16 @@ class QueryTest(TestBase):
eq_(r[users.c.user_name], 'jack')
eq_(r.user_name, 'jack')
+ @testing.requires.dbapi_lastrowid
+ def test_native_lastrowid(self):
+ r = testing.db.execute(
+ users.insert(),
+ {'user_id':1, 'user_name':'ed'}
+ )
+
+ eq_(r.lastrowid, 1)
+
+
def test_graceful_fetch_on_non_rows(self):
"""test that calling fetchone() etc. on a result that doesn't
return rows fails gracefully.
@@ -743,6 +753,7 @@ class QueryTest(TestBase):
r = testing.db.execute('select user_name from query_users').first()
eq_(len(r), 1)
+ @testing.uses_deprecated(r'.*which subclass Executable')
def test_cant_execute_join(self):
try:
users.join(addresses).execute()
@@ -784,7 +795,10 @@ class QueryTest(TestBase):
)
shadowed.create(checkfirst=True)
try:
- shadowed.insert().execute(shadow_id=1, shadow_name='The Shadow', parent='The Light', row='Without light there is no shadow', _parent='Hidden parent', _row='Hidden row')
+ shadowed.insert().execute(shadow_id=1, shadow_name='The Shadow', parent='The Light',
+ row='Without light there is no shadow',
+ _parent='Hidden parent',
+ _row='Hidden row')
r = shadowed.select(shadowed.c.shadow_id==1).execute().first()
self.assert_(r.shadow_id == r['shadow_id'] == r[shadowed.c.shadow_id] == 1)
self.assert_(r.shadow_name == r['shadow_name'] == r[shadowed.c.shadow_name] == 'The Shadow')
diff --git a/test/sql/test_types.py b/test/sql/test_types.py
index a80e761d7..af460628e 100644
--- a/test/sql/test_types.py
+++ b/test/sql/test_types.py
@@ -17,11 +17,13 @@ from sqlalchemy.test.util import round_decimal
class AdaptTest(TestBase):
def test_uppercase_rendering(self):
- """Test that uppercase types from types.py always render as their type.
+ """Test that uppercase types from types.py always render as their
+ type.
- As of SQLA 0.6, using an uppercase type means you want specifically that
- type. If the database in use doesn't support that DDL, it (the DB backend)
- should raise an error - it means you should be using a lowercased (genericized) type.
+ As of SQLA 0.6, using an uppercase type means you want specifically
+ that type. If the database in use doesn't support that DDL, it (the DB
+ backend) should raise an error - it means you should be using a
+ lowercased (genericized) type.
"""
@@ -30,20 +32,21 @@ class AdaptTest(TestBase):
mysql.dialect(),
postgresql.dialect(),
sqlite.dialect(),
- mssql.dialect()]: # TODO when dialects are complete: engines.all_dialects():
+ mssql.dialect()]:
for type_, expected in (
(FLOAT, "FLOAT"),
(NUMERIC, "NUMERIC"),
(DECIMAL, "DECIMAL"),
(INTEGER, "INTEGER"),
(SMALLINT, "SMALLINT"),
- (TIMESTAMP, "TIMESTAMP"),
+ (TIMESTAMP, ("TIMESTAMP", "TIMESTAMP WITHOUT TIME ZONE")),
(DATETIME, "DATETIME"),
(DATE, "DATE"),
- (TIME, "TIME"),
+ (TIME, ("TIME", "TIME WITHOUT TIME ZONE")),
(CLOB, "CLOB"),
(VARCHAR(10), ("VARCHAR(10)","VARCHAR(10 CHAR)")),
- (NVARCHAR(10), ("NVARCHAR(10)", "NATIONAL VARCHAR(10)", "NVARCHAR2(10)")),
+ (NVARCHAR(10), ("NVARCHAR(10)", "NATIONAL VARCHAR(10)",
+ "NVARCHAR2(10)")),
(CHAR, "CHAR"),
(NCHAR, ("NCHAR", "NATIONAL CHAR")),
(BLOB, "BLOB"),
@@ -51,14 +54,18 @@ class AdaptTest(TestBase):
):
if isinstance(expected, str):
expected = (expected, )
- for exp in expected:
- compiled = types.to_instance(type_).compile(dialect=dialect)
- if exp in compiled:
- break
- else:
- assert False, "%r matches none of %r for dialect %s" % \
- (compiled, expected, dialect.name)
-
+
+ compiled = types.to_instance(type_).\
+ compile(dialect=dialect)
+
+ assert compiled in expected, \
+ "%r matches none of %r for dialect %s" % \
+ (compiled, expected, dialect.name)
+
+ assert str(types.to_instance(type_)) in expected, \
+ "default str() of type %r not expected, %r" % \
+ (type_, expected)
+
class TypeAffinityTest(TestBase):
def test_type_affinity(self):
for type_, affin in [
diff --git a/test/zblog/test_zblog.py b/test/zblog/test_zblog.py
index 5e46c1ceb..8103cde8b 100644
--- a/test/zblog/test_zblog.py
+++ b/test/zblog/test_zblog.py
@@ -52,11 +52,10 @@ class SavePostTest(ZBlogTest):
clear_mappers()
super(SavePostTest, cls).teardown_class()
- def testattach(self):
- """test that a transient/pending instance has proper bi-directional behavior.
-
- this requires that lazy loaders do not fire off for a transient/pending instance."""
- s = create_session(bind=testing.db)
+ def test_attach_noautoflush(self):
+ """Test pending backref behavior."""
+
+ s = create_session(bind=testing.db, autoflush=False)
s.begin()
try:
@@ -69,6 +68,21 @@ class SavePostTest(ZBlogTest):
finally:
s.rollback()
+ def test_attach_autoflush(self):
+ s = create_session(bind=testing.db, autoflush=True)
+
+ s.begin()
+ try:
+ blog = s.query(Blog).get(blog_id)
+ user = s.query(User).get(user_id)
+ post = Post(headline="asdf asdf", summary="asdfasfd", user=user)
+ s.add(post)
+ post.blog_id=blog_id
+ post.blog = blog
+ assert post in blog.posts
+ finally:
+ s.rollback()
+
def testoptimisticorphans(self):
"""test that instances in the session with un-loaded parents will not
get marked as "orphans" and then deleted """