From b31a17c521ec1160a653774e2a4b99b01d27a644 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 17:37:43 -0400 Subject: removed all dialect table_names() methods and standardized on get_table_names(). [ticket:1739] --- lib/sqlalchemy/dialects/access/base.py | 5 +++-- lib/sqlalchemy/dialects/firebird/base.py | 7 ++---- lib/sqlalchemy/dialects/informix/base.py | 3 ++- lib/sqlalchemy/dialects/maxdb/base.py | 5 +++-- lib/sqlalchemy/dialects/mssql/base.py | 13 ++++------- .../dialects/mssql/information_schema.py | 4 ++-- lib/sqlalchemy/dialects/mysql/base.py | 12 ++++------- lib/sqlalchemy/dialects/mysql/oursql.py | 4 ++-- lib/sqlalchemy/dialects/oracle/base.py | 25 +++++++++++----------- lib/sqlalchemy/dialects/postgresql/base.py | 24 ++++++++++----------- lib/sqlalchemy/dialects/sqlite/base.py | 9 +++----- lib/sqlalchemy/dialects/sybase/base.py | 3 --- lib/sqlalchemy/engine/base.py | 2 +- 13 files changed, 49 insertions(+), 67 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/access/base.py b/lib/sqlalchemy/dialects/access/base.py index 7dfb3153e..2b76b93d0 100644 --- a/lib/sqlalchemy/dialects/access/base.py +++ b/lib/sqlalchemy/dialects/access/base.py @@ -16,7 +16,7 @@ This dialect is *not* tested on SQLAlchemy 0.6. 
""" from sqlalchemy import sql, schema, types, exc, pool from sqlalchemy.sql import compiler, expression -from sqlalchemy.engine import default, base +from sqlalchemy.engine import default, base, reflection from sqlalchemy import processors class AcNumeric(types.Numeric): @@ -299,7 +299,8 @@ class AccessDialect(default.DefaultDialect): finally: dtbs.Close() - def table_names(self, connection, schema): + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): # A fresh DAO connection is opened for each reflection # This is necessary, so we get the latest updates dtbs = daoEngine.OpenDatabase(connection.engine.url.database) diff --git a/lib/sqlalchemy/dialects/firebird/base.py b/lib/sqlalchemy/dialects/firebird/base.py index a2da132da..70318157c 100644 --- a/lib/sqlalchemy/dialects/firebird/base.py +++ b/lib/sqlalchemy/dialects/firebird/base.py @@ -378,7 +378,8 @@ class FBDialect(default.DefaultDialect): c = connection.execute(genqry, [self.denormalize_name(sequence_name)]) return c.first() is not None - def table_names(self, connection, schema): + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): s = """ SELECT DISTINCT rdb$relation_name FROM rdb$relation_fields @@ -386,10 +387,6 @@ class FBDialect(default.DefaultDialect): """ return [self.normalize_name(row[0]) for row in connection.execute(s)] - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - return self.table_names(connection, schema) - @reflection.cache def get_view_names(self, connection, schema=None, **kw): s = """ diff --git a/lib/sqlalchemy/dialects/informix/base.py b/lib/sqlalchemy/dialects/informix/base.py index 54aae6eb3..266a74a7b 100644 --- a/lib/sqlalchemy/dialects/informix/base.py +++ b/lib/sqlalchemy/dialects/informix/base.py @@ -193,7 +193,8 @@ class InformixDialect(default.DefaultDialect): cu.execute('SET LOCK MODE TO WAIT') #cu.execute('SET ISOLATION TO REPEATABLE READ') - def table_names(self, connection, schema): + 
@reflection.cache + def get_table_names(self, connection, schema=None, **kw): s = "select tabname from systables" return [row[0] for row in connection.execute(s)] diff --git a/lib/sqlalchemy/dialects/maxdb/base.py b/lib/sqlalchemy/dialects/maxdb/base.py index 758cfaf05..2e1d6a58f 100644 --- a/lib/sqlalchemy/dialects/maxdb/base.py +++ b/lib/sqlalchemy/dialects/maxdb/base.py @@ -63,7 +63,7 @@ import datetime, itertools, re from sqlalchemy import exc, schema, sql, util, processors from sqlalchemy.sql import operators as sql_operators, expression as sql_expr from sqlalchemy.sql import compiler, visitors -from sqlalchemy.engine import base as engine_base, default +from sqlalchemy.engine import base as engine_base, default, reflection from sqlalchemy import types as sqltypes @@ -880,7 +880,8 @@ class MaxDBDialect(default.DefaultDialect): rp = connection.execute(sql, bind) return bool(rp.first()) - def table_names(self, connection, schema): + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): if schema is None: sql = (" SELECT TABLENAME FROM TABLES WHERE " " SCHEMANAME=CURRENT_SCHEMA ") diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 7660fe9f7..57b468083 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -1149,11 +1149,6 @@ class MSDialect(default.DefaultDialect): pass return self.schema_name - def table_names(self, connection, schema): - s = select([ischema.tables.c.table_name], - ischema.tables.c.table_schema==schema) - return [row[0] for row in connection.execute(s)] - def has_table(self, connection, tablename, schema=None): current_schema = schema or self.default_schema_name @@ -1182,7 +1177,7 @@ class MSDialect(default.DefaultDialect): s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, - tables.c.table_type == 'BASE TABLE' + tables.c.table_type == u'BASE TABLE' ), order_by=[tables.c.table_name] ) @@ -1196,7 
+1191,7 @@ class MSDialect(default.DefaultDialect): s = sql.select([tables.c.table_name], sql.and_( tables.c.table_schema == current_schema, - tables.c.table_type == 'VIEW' + tables.c.table_type == u'VIEW' ), order_by=[tables.c.table_name] ) @@ -1320,11 +1315,11 @@ class MSDialect(default.DefaultDialect): table_fullname = "%s.%s" % (current_schema, tablename) cursor = connection.execute( "select ident_seed('%s'), ident_incr('%s')" - % (tablename, tablename) + % (table_fullname, table_fullname) ) row = cursor.first() - if not row is None: + if row is not None and row[0] is not None: colmap[ic]['sequence'].update({ 'start' : int(row[0]), 'increment' : int(row[1]) diff --git a/lib/sqlalchemy/dialects/mssql/information_schema.py b/lib/sqlalchemy/dialects/mssql/information_schema.py index bb6ff315a..312e83cb1 100644 --- a/lib/sqlalchemy/dialects/mssql/information_schema.py +++ b/lib/sqlalchemy/dialects/mssql/information_schema.py @@ -21,7 +21,7 @@ tables = Table("TABLES", ischema, Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("TABLE_TYPE", String, key="table_type"), + Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"), schema="INFORMATION_SCHEMA") columns = Table("COLUMNS", ischema, @@ -42,7 +42,7 @@ constraints = Table("TABLE_CONSTRAINTS", ischema, Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), Column("TABLE_NAME", CoerceUnicode, key="table_name"), Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), - Column("CONSTRAINT_TYPE", String, key="constraint_type"), + Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"), schema="INFORMATION_SCHEMA") column_constraints = Table("CONSTRAINT_COLUMN_USAGE", ischema, diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 2311b06df..df4a666fb 100644 --- 
a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1766,24 +1766,20 @@ class MySQLDialect(default.DefaultDialect): @reflection.cache def get_table_names(self, connection, schema=None, **kw): + """Return a Unicode SHOW TABLES from a given schema.""" if schema is not None: current_schema = schema else: current_schema = self.default_schema_name - table_names = self.table_names(connection, current_schema) - return table_names - - def table_names(self, connection, schema): - """Return a Unicode SHOW TABLES from a given schema.""" charset = self._connection_charset if self.server_version_info < (5, 0, 2): rp = connection.execute("SHOW TABLES FROM %s" % - self.identifier_preparer.quote_identifier(schema)) + self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)] else: rp = connection.execute("SHOW FULL TABLES FROM %s" % - self.identifier_preparer.quote_identifier(schema)) + self.identifier_preparer.quote_identifier(current_schema)) return [row[0] for row in self._compat_fetchall(rp, charset=charset)\ if row[1] == 'BASE TABLE'] @@ -1796,7 +1792,7 @@ class MySQLDialect(default.DefaultDialect): if schema is None: schema = self.default_schema_name if self.server_version_info < (5, 0, 2): - return self.table_names(connection, schema) + return self.get_table_names(connection, schema) charset = self._connection_charset rp = connection.execute("SHOW FULL TABLES FROM %s" % self.identifier_preparer.quote_identifier(schema)) diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index f26bc4da2..9e38993f2 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -151,8 +151,8 @@ class MySQLDialect_oursql(MySQLDialect): **kw ) - def table_names(self, connection, schema): - return MySQLDialect.table_names(self, + def get_table_names(self, connection, schema=None, **kw): + return 
MySQLDialect.get_table_names(self, connection.connect().\ execution_options(_oursql_plain_query=True), schema diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index f76edabf2..b270f38cf 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -631,18 +631,6 @@ class OracleDialect(default.DefaultDialect): def _get_default_schema_name(self, connection): return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar()) - def table_names(self, connection, schema): - # note that table_names() isnt loading DBLINKed or synonym'ed tables - if schema is None: - schema = self.default_schema_name - s = sql.text( - "SELECT table_name FROM all_tables " - "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') " - "AND OWNER = :owner " - "AND IOT_NAME IS NULL") - cursor = connection.execute(s, owner=self.denormalize_name(schema)) - return [self.normalize_name(row[0]) for row in cursor] - def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None): """search for a local synonym matching the given desired owner/name. 
@@ -712,7 +700,18 @@ class OracleDialect(default.DefaultDialect): @reflection.cache def get_table_names(self, connection, schema=None, **kw): schema = self.denormalize_name(schema or self.default_schema_name) - return self.table_names(connection, schema) + + # note that table_names() isnt loading DBLINKed or synonym'ed tables + if schema is None: + schema = self.default_schema_name + s = sql.text( + "SELECT table_name FROM all_tables " + "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') " + "AND OWNER = :owner " + "AND IOT_NAME IS NULL") + cursor = connection.execute(s, owner=schema) + return [self.normalize_name(row[0]) for row in cursor] + @reflection.cache def get_view_names(self, connection, schema=None, **kw): diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index cbd92ccfe..f45fc9671 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -725,17 +725,6 @@ class PGDialect(default.DefaultDialect): cursor = connection.execute(sql.text(query, bindparams=bindparams)) return bool(cursor.scalar()) - def table_names(self, connection, schema): - result = connection.execute( - sql.text(u"SELECT relname FROM pg_class c " - "WHERE relkind = 'r' " - "AND '%s' = (select nspname from pg_namespace n where n.oid = c.relnamespace) " % - schema, - typemap = {'relname':sqltypes.Unicode} - ) - ) - return [row[0] for row in result] - def _get_server_version_info(self, connection): v = connection.execute("select version()").scalar() m = re.match('PostgreSQL (\d+)\.(\d+)(?:\.(\d+))?(?:devel)?', v) @@ -805,8 +794,17 @@ class PGDialect(default.DefaultDialect): current_schema = schema else: current_schema = self.default_schema_name - table_names = self.table_names(connection, current_schema) - return table_names + + result = connection.execute( + sql.text(u"SELECT relname FROM pg_class c " + "WHERE relkind = 'r' " + "AND '%s' = (select nspname from 
pg_namespace n where n.oid = c.relnamespace) " % + current_schema, + typemap = {'relname':sqltypes.Unicode} + ) + ) + return [row[0] for row in result] + @reflection.cache def get_view_names(self, connection, schema=None, **kw): diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index d7637e71b..0d9827322 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -360,8 +360,9 @@ class SQLiteDialect(default.DefaultDialect): return connect else: return None - - def table_names(self, connection, schema): + + @reflection.cache + def get_table_names(self, connection, schema=None, **kw): if schema is not None: qschema = self.identifier_preparer.quote_identifier(schema) master = '%s.sqlite_master' % qschema @@ -400,10 +401,6 @@ class SQLiteDialect(default.DefaultDialect): return (row is not None) - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - return self.table_names(connection, schema) - @reflection.cache def get_view_names(self, connection, schema=None, **kw): if schema is not None: diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index bdaab2eb7..aaec7a504 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -382,9 +382,6 @@ class SybaseDialect(default.DefaultDialect): def get_table_names(self, connection, schema=None, **kw): if schema is None: schema = self.default_schema_name - return self.table_names(connection, schema) - - def table_names(self, connection, schema): result = connection.execute( text("select sysobjects.name from sysobjects, sysusers " diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 095f7a960..5490169c6 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1569,7 +1569,7 @@ class Engine(Connectable, log.Identified): if not schema: schema = self.dialect.default_schema_name try: - return 
self.dialect.table_names(conn, schema) + return self.dialect.get_table_names(conn, schema) finally: if connection is None: conn.close() -- cgit v1.2.1 From f4df21c578ac519bf4436ad7b21246a14786ecdf Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 17:51:50 -0400 Subject: - StaticPool now initializes, disposes and recreates without opening a new connection - the connection is only opened when first requested. dispose() also works on AssertionPool now. [ticket:1728] --- lib/sqlalchemy/pool.py | 39 ++++++++------------------------------- 1 file changed, 8 insertions(+), 31 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/pool.py b/lib/sqlalchemy/pool.py index 3be63ced3..31ab7facc 100644 --- a/lib/sqlalchemy/pool.py +++ b/lib/sqlalchemy/pool.py @@ -747,35 +747,10 @@ class StaticPool(Pool): """ - def __init__(self, creator, **params): - """ - Construct a StaticPool. - - :param creator: a callable function that returns a DB-API - connection object. The function will be called with - parameters. - - :param echo: If True, connections being pulled and retrieved - from the pool will be logged to the standard output, as well - as pool sizing information. Echoing can also be achieved by - enabling logging for the "sqlalchemy.pool" - namespace. Defaults to False. - - :param reset_on_return: If true, reset the database state of - connections returned to the pool. This is typically a - ROLLBACK to release locks and transaction resources. - Disable at your own peril. Defaults to True. - - :param listeners: A list of - :class:`~sqlalchemy.interfaces.PoolListener`-like objects or - dictionaries of callables that receive events when DB-API - connections are created, checked out and checked in to the - pool. 
+ @memoized_property + def _conn(self): + return self._creator() - """ - Pool.__init__(self, creator, **params) - self._conn = creator() - @memoized_property def connection(self): return _ConnectionRecord(self) @@ -784,8 +759,9 @@ class StaticPool(Pool): return "StaticPool" def dispose(self): - self._conn.close() - self._conn = None + if '_conn' in self.__dict__: + self._conn.close() + self._conn = None def recreate(self): self.logger.info("Pool recreating") @@ -837,7 +813,8 @@ class AssertionPool(Pool): def dispose(self): self._checked_out = False - self._conn.close() + if self._conn: + self._conn.close() def recreate(self): self.logger.info("Pool recreating") -- cgit v1.2.1 From cdda68aea3b392670ced210130fd64d7e1598d79 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 18:10:53 -0400 Subject: - Added support for rendering and reflecting TIMESTAMP WITH TIME ZONE, i.e. TIMESTAMP(timezone=True). [ticket:651] - Oracle INTERVAL type can now be reflected. --- lib/sqlalchemy/dialects/oracle/base.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index b270f38cf..2af5bdd7d 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -225,6 +225,8 @@ ischema_names = { 'CLOB' : CLOB, 'NCLOB' : NCLOB, 'TIMESTAMP' : TIMESTAMP, + 'TIMESTAMP WITH TIME ZONE' : TIMESTAMP, + 'INTERVAL DAY TO SECOND' : INTERVAL, 'RAW' : RAW, 'FLOAT' : FLOAT, 'DOUBLE PRECISION' : DOUBLE_PRECISION, @@ -256,7 +258,13 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): "(%d)" % type_.second_precision or "", ) - + + def visit_TIMESTAMP(self, type_): + if type_.timezone: + return "TIMESTAMP WITH TIME ZONE" + else: + return "TIMESTAMP" + def visit_DOUBLE_PRECISION(self, type_): return self._generate_numeric(type_, "DOUBLE PRECISION") @@ -756,6 +764,8 @@ class OracleDialect(default.DefaultDialect): coltype = 
NUMBER(precision, scale) elif coltype=='CHAR' or coltype=='VARCHAR2': coltype = self.ischema_names.get(coltype)(length) + elif 'WITH TIME ZONE' in coltype: + coltype = TIMESTAMP(timezone=True) else: coltype = re.sub(r'\(\d+\)', '', coltype) try: -- cgit v1.2.1 From 04dd671f65cfc9a84996015671f50c38c033fa0c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 18:17:52 -0400 Subject: documented listeners arg on create_engine(), [ticket:1230] --- lib/sqlalchemy/engine/__init__.py | 42 +++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 19 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 9a53545df..0dbb2404f 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -120,11 +120,11 @@ def create_engine(*args, **kwargs): that are common to most ``create_engine()`` usage. :param assert_unicode: Deprecated. A warning is raised in all cases when a non-Unicode - object is passed when SQLAlchemy would coerce into an encoding - (note: but **not** when the DBAPI handles unicode objects natively). - To suppress or raise this warning to an - error, use the Python warnings filter documented at: - http://docs.python.org/library/warnings.html + object is passed when SQLAlchemy would coerce into an encoding + (note: but **not** when the DBAPI handles unicode objects natively). + To suppress or raise this warning to an + error, use the Python warnings filter documented at: + http://docs.python.org/library/warnings.html :param connect_args: a dictionary of options which will be passed directly to the DBAPI's ``connect()`` method as @@ -144,11 +144,6 @@ def create_engine(*args, **kwargs): connections. Usage of this function causes connection parameters specified in the URL argument to be bypassed. 
- :param logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.engine" logger. Defaults to a hexstring of the - object's id. - :param echo=False: if True, the Engine will log all statements as well as a repr() of their parameter lists to the engines logger, which defaults to sys.stdout. The ``echo`` attribute of @@ -158,11 +153,6 @@ def create_engine(*args, **kwargs): controls a Python logger; see :ref:`dbengine_logging` for information on how to configure logging directly. - :param pool_logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.pool" logger. Defaults to a hexstring of the object's - id. - :param echo_pool=False: if True, the connection pool will log all checkouts/checkins to the logging stream, which defaults to sys.stdout. This flag ultimately controls a Python logger; see @@ -178,6 +168,20 @@ def create_engine(*args, **kwargs): characters. If less than 6, labels are generated as "_(counter)". If ``None``, the value of ``dialect.max_identifier_length`` is used instead. + + :param listeners: A list of one or more + :class:`~sqlalchemy.interfaces.PoolListener` objects which will + receive connection pool events. + + :param logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.engine" logger. Defaults to a hexstring of the + object's id. + + :param max_overflow=10: the number of connections to allow in + connection pool "overflow", that is connections that can be + opened above and beyond the pool_size setting, which defaults + to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. 
:param module=None: used by database implementations which support multiple DBAPI modules, this is a reference to a DBAPI2 @@ -199,10 +203,10 @@ def create_engine(*args, **kwargs): instantiate the pool in this case, you just indicate what type of pool to be used. - :param max_overflow=10: the number of connections to allow in - connection pool "overflow", that is connections that can be - opened above and beyond the pool_size setting, which defaults - to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. + :param pool_logging_name: String identifier which will be used within + the "name" field of logging records generated within the + "sqlalchemy.pool" logger. Defaults to a hexstring of the object's + id. :param pool_size=5: the number of connections to keep open inside the connection pool. This used with :class:`~sqlalchemy.pool.QueuePool` as -- cgit v1.2.1 From 960863906680b4a2fd4c27fff58e8a5939ea35f1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 20:20:18 -0400 Subject: - Fixed import error which could occur reflecting tables on a Windows host [ticket:1580] --- lib/sqlalchemy/dialects/mysql/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index df4a666fb..873dfd16c 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1942,7 +1942,7 @@ class MySQLDialect(default.DefaultDialect): # For winxx database hosts. TODO: is this really needed? 
if casing == 1 and table.name != table.name.lower(): table.name = table.name.lower() - lc_alias = schema._get_table_key(table.name, table.schema) + lc_alias = sa_schema._get_table_key(table.name, table.schema) table.metadata.tables[lc_alias] = table def _detect_charset(self, connection): -- cgit v1.2.1 From ddeaa9f0d6ed1c7422a90e5b8e92b717e2671403 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 21:12:29 -0400 Subject: - Fixed bug whereby calling query(A).join(A.bs).add_entity(B) in a joined inheritance scenario would double-add B as a target and produce an invalid query. [ticket:1188] --- lib/sqlalchemy/orm/query.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 682aa2bbf..8d3d7fbb3 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -114,7 +114,8 @@ class Query(object): mapper, selectable, is_aliased_class = _entity_info(entity) if not is_aliased_class and mapper.with_polymorphic: with_polymorphic = mapper._with_polymorphic_mappers - self.__mapper_loads_polymorphically_with(mapper, + if mapper.mapped_table not in self._polymorphic_adapters: + self.__mapper_loads_polymorphically_with(mapper, sql_util.ColumnAdapter(selectable, mapper._equivalent_columns)) adapter = None elif is_aliased_class: -- cgit v1.2.1 From 1398268c42667d5feb148cf5f6e27aeaecfe35e9 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 21:37:42 -0400 Subject: re-split PyODBCNumeric among Sybase and MS-SQL, they can't be shared. MS-SQL really needs the pure string approach else crashes occur on windows. 
--- lib/sqlalchemy/connectors/pyodbc.py | 43 ------------------------ lib/sqlalchemy/dialects/mssql/pyodbc.py | 56 ++++++++++++++++++++++++++++++-- lib/sqlalchemy/dialects/sybase/pyodbc.py | 32 ++++++++++++++---- 3 files changed, 79 insertions(+), 52 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/pyodbc.py b/lib/sqlalchemy/connectors/pyodbc.py index 5cf00bc92..b291f3e16 100644 --- a/lib/sqlalchemy/connectors/pyodbc.py +++ b/lib/sqlalchemy/connectors/pyodbc.py @@ -5,49 +5,6 @@ import sys import re import urllib import decimal -from sqlalchemy import processors, types as sqltypes - -class PyODBCNumeric(sqltypes.Numeric): - """Turns Decimals with adjusted() < -6 into floats, > 7 into strings""" - - convert_large_decimals_to_string = False - - def bind_processor(self, dialect): - super_process = super(PyODBCNumeric, self).bind_processor(dialect) - - def process(value): - if self.asdecimal and \ - isinstance(value, decimal.Decimal): - - if value.adjusted() < -6: - return processors.to_float(value) - elif self.convert_large_decimals_to_string and \ - value.adjusted() > 7: - return self._large_dec_to_string(value) - - if super_process: - return super_process(value) - else: - return value - return process - - def _large_dec_to_string(self, value): - if 'E' in str(value): - result = "%s%s%s" % ( - (value < 0 and '-' or ''), - "".join([str(s) for s in value._int]), - "0" * (value.adjusted() - (len(value._int)-1))) - else: - if (len(value._int) - 1) > value.adjusted(): - result = "%s%s.%s" % ( - (value < 0 and '-' or ''), - "".join([str(s) for s in value._int][0:value.adjusted() + 1]), - "".join([str(s) for s in value._int][value.adjusted() + 1:])) - else: - result = "%s%s" % ( - (value < 0 and '-' or ''), - "".join([str(s) for s in value._int][0:value.adjusted() + 1])) - return result class PyODBCConnector(Connector): driver='pyodbc' diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 
8e7e90629..5625e3cd2 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -12,11 +12,61 @@ Connect strings are of the form:: """ from sqlalchemy.dialects.mssql.base import MSExecutionContext, MSDialect -from sqlalchemy.connectors.pyodbc import PyODBCConnector, PyODBCNumeric +from sqlalchemy.connectors.pyodbc import PyODBCConnector from sqlalchemy import types as sqltypes, util +import decimal -class _MSNumeric_pyodbc(PyODBCNumeric): - convert_large_decimals_to_string = True +class _MSNumeric_pyodbc(sqltypes.Numeric): + """Turns Decimals with adjusted() < -6 or > 7 into strings. + + This is the only method that is proven to work with Pyodbc+MSSQL + without crashing (floats can be used but seem to cause sporadic + crashes). + + """ + + def bind_processor(self, dialect): + super_process = super(_MSNumeric_pyodbc, self).bind_processor(dialect) + + def process(value): + if self.asdecimal and \ + isinstance(value, decimal.Decimal): + + adjusted = value.adjusted() + if adjusted < -6: + return self._small_dec_to_string(value) + elif adjusted > 7: + return self._large_dec_to_string(value) + + if super_process: + return super_process(value) + else: + return value + return process + + def _small_dec_to_string(self, value): + return "%s0.%s%s" % ( + (value < 0 and '-' or ''), + '0' * (abs(value.adjusted()) - 1), + "".join([str(nint) for nint in value._int])) + + def _large_dec_to_string(self, value): + if 'E' in str(value): + result = "%s%s%s" % ( + (value < 0 and '-' or ''), + "".join([str(s) for s in value._int]), + "0" * (value.adjusted() - (len(value._int)-1))) + else: + if (len(value._int) - 1) > value.adjusted(): + result = "%s%s.%s" % ( + (value < 0 and '-' or ''), + "".join([str(s) for s in value._int][0:value.adjusted() + 1]), + "".join([str(s) for s in value._int][value.adjusted() + 1:])) + else: + result = "%s%s" % ( + (value < 0 and '-' or ''), + "".join([str(s) for s in value._int][0:value.adjusted() + 1])) + return 
result class MSExecutionContext_pyodbc(MSExecutionContext): diff --git a/lib/sqlalchemy/dialects/sybase/pyodbc.py b/lib/sqlalchemy/dialects/sybase/pyodbc.py index 19ad70fe8..e34f2605c 100644 --- a/lib/sqlalchemy/dialects/sybase/pyodbc.py +++ b/lib/sqlalchemy/dialects/sybase/pyodbc.py @@ -29,12 +29,34 @@ Currently *not* supported are:: """ from sqlalchemy.dialects.sybase.base import SybaseDialect, SybaseExecutionContext -from sqlalchemy.connectors.pyodbc import PyODBCConnector, PyODBCNumeric +from sqlalchemy.connectors.pyodbc import PyODBCConnector +import decimal +from sqlalchemy import types as sqltypes, util, processors -from sqlalchemy import types as sqltypes, util +class _SybNumeric_pyodbc(sqltypes.Numeric): + """Turns Decimals with adjusted() < -6 into floats. + + It's not yet known how to get decimals with many + significant digits or very large adjusted() into Sybase + via pyodbc. + + """ + + def bind_processor(self, dialect): + super_process = super(_SybNumeric_pyodbc, self).bind_processor(dialect) + + def process(value): + if self.asdecimal and \ + isinstance(value, decimal.Decimal): -class _SybNumeric_pyodbc(PyODBCNumeric): - convert_large_decimals_to_string = False + if value.adjusted() < -6: + return processors.to_float(value) + + if super_process: + return super_process(value) + else: + return value + return process class SybaseExecutionContext_pyodbc(SybaseExecutionContext): def set_ddl_autocommit(self, connection, value): @@ -43,8 +65,6 @@ class SybaseExecutionContext_pyodbc(SybaseExecutionContext): else: connection.autocommit = False - - class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect): execution_ctx_cls = SybaseExecutionContext_pyodbc -- cgit v1.2.1 From 268c64a6aaa64f3e09ec5a37ddffde57eb160d2d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 21:42:35 -0400 Subject: try < 0 --- lib/sqlalchemy/dialects/mssql/pyodbc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/sqlalchemy') diff --git 
a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 5625e3cd2..7f46ec7fb 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -17,7 +17,7 @@ from sqlalchemy import types as sqltypes, util import decimal class _MSNumeric_pyodbc(sqltypes.Numeric): - """Turns Decimals with adjusted() < -6 or > 7 into strings. + """Turns Decimals with adjusted() < 0 or > 7 into strings. This is the only method that is proven to work with Pyodbc+MSSQL without crashing (floats can be used but seem to cause sporadic @@ -33,7 +33,7 @@ class _MSNumeric_pyodbc(sqltypes.Numeric): isinstance(value, decimal.Decimal): adjusted = value.adjusted() - if adjusted < -6: + if adjusted < 0: return self._small_dec_to_string(value) elif adjusted > 7: return self._large_dec_to_string(value) -- cgit v1.2.1 From 7cf74e9ccaa928d3a5227f7a7846b0a25a7ab9b1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 19 Mar 2010 22:29:08 -0400 Subject: oracle cleanup --- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 57 ++++++++++++++++++----------- 1 file changed, 36 insertions(+), 21 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index c6e9cea5d..db1647679 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -207,11 +207,19 @@ class OracleCompiler_cx_oracle(OracleCompiler): class OracleExecutionContext_cx_oracle(OracleExecutionContext): def pre_exec(self): - quoted_bind_names = getattr(self.compiled, '_quoted_bind_names', {}) + quoted_bind_names = \ + getattr(self.compiled, '_quoted_bind_names', None) if quoted_bind_names: + if not self.dialect.supports_unicode_binds: + quoted_bind_names = \ + dict( + (fromname, toname.encode(self.dialect.encoding)) + for fromname, toname in + quoted_bind_names.items() + ) for param in self.parameters: - for fromname, toname in 
self.compiled._quoted_bind_names.iteritems(): - param[toname.encode(self.dialect.encoding)] = param[fromname] + for fromname, toname in quoted_bind_names.items(): + param[toname] = param[fromname] del param[fromname] if self.dialect.auto_setinputsizes: @@ -219,14 +227,12 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): # on String, including that outparams/RETURNING # breaks for varchars self.set_input_sizes(quoted_bind_names, - exclude_types=self.dialect._cx_oracle_string_types + exclude_types=self.dialect._cx_oracle_string_types ) - + + # if a single execute, check for outparams if len(self.compiled_parameters) == 1: - for key in self.compiled.binds: - bindparam = self.compiled.binds[key] - name = self.compiled.bind_names[bindparam] - value = self.compiled_parameters[0][name] + for bindparam in self.compiled.binds.values(): if bindparam.isoutparam: dbtype = bindparam.type.dialect_impl(self.dialect).\ get_dbapi_type(self.dialect.dbapi) @@ -238,6 +244,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): " cx_oracle" % (name, bindparam.type) ) + name = self.compiled.bind_names[bindparam] self.out_parameters[name] = self.cursor.var(dbtype) self.parameters[0][quoted_bind_names.get(name, name)] = \ self.out_parameters[name] @@ -250,7 +257,10 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): def get_result_proxy(self): if hasattr(self, 'out_parameters') and self.compiled.returning: - returning_params = dict((k, v.getvalue()) for k, v in self.out_parameters.items()) + returning_params = dict( + (k, v.getvalue()) + for k, v in self.out_parameters.items() + ) return ReturningResultProxy(self, returning_params) result = None @@ -264,10 +274,11 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): result = base.ResultProxy(self) if hasattr(self, 'out_parameters'): - if self.compiled_parameters is not None and len(self.compiled_parameters) == 1: + if self.compiled_parameters is not None and \ + 
len(self.compiled_parameters) == 1: result.out_parameters = out_parameters = {} - for bind, name in self.compiled.bind_names.iteritems(): + for bind, name in self.compiled.bind_names.items(): if name in self.out_parameters: type = bind.type impl_type = type.dialect_impl(self.dialect) @@ -291,12 +302,14 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext): class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle): """Support WITH_UNICODE in Python 2.xx. - WITH_UNICODE allows cx_Oracle's Python 3 unicode handling behavior under Python 2.x. - This mode in some cases disallows and in other cases silently - passes corrupted data when non-Python-unicode strings (a.k.a. plain old Python strings) - are passed as arguments to connect(), the statement sent to execute(), or any of the bind - parameter keys or values sent to execute(). This optional context - therefore ensures that all statements are passed as Python unicode objects. + WITH_UNICODE allows cx_Oracle's Python 3 unicode handling + behavior under Python 2.x. This mode in some cases disallows + and in other cases silently passes corrupted data when + non-Python-unicode strings (a.k.a. plain old Python strings) + are passed as arguments to connect(), the statement sent to execute(), + or any of the bind parameter keys or values sent to execute(). + This optional context therefore ensures that all statements are + passed as Python unicode objects. 
""" def __init__(self, *arg, **kw): @@ -373,17 +386,19 @@ class OracleDialect_cx_oracle(OracleDialect): if hasattr(self.dbapi, 'version'): cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')]) - self.supports_unicode_binds = cx_oracle_ver >= (5, 0) - self._cx_oracle_native_nvarchar = cx_oracle_ver >= (5, 0) else: cx_oracle_ver = None def types(*names): - return set([getattr(self.dbapi, name, None) for name in names]).difference([None]) + return set([ + getattr(self.dbapi, name, None) for name in names + ]).difference([None]) self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB") self._cx_oracle_unicode_types = types("UNICODE", "NCLOB") self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") + self.supports_unicode_binds = cx_oracle_ver >= (5, 0) + self._cx_oracle_native_nvarchar = cx_oracle_ver >= (5, 0) if cx_oracle_ver is None: # this occurs in tests with mock DBAPIs -- cgit v1.2.1 From eb728389539a5bac4c3f231f578ceb92cb068065 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 20 Mar 2010 11:50:39 -0400 Subject: - pymssql now works again, expecting at least the 1.0 series. --- lib/sqlalchemy/dialects/mssql/adodbapi.py | 4 ++ lib/sqlalchemy/dialects/mssql/base.py | 114 +----------------------------- lib/sqlalchemy/dialects/mssql/pymssql.py | 79 +++++++++++++++++---- lib/sqlalchemy/dialects/mssql/pyodbc.py | 67 ++++++++++++++++-- lib/sqlalchemy/test/requires.py | 1 + 5 files changed, 136 insertions(+), 129 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/mssql/adodbapi.py b/lib/sqlalchemy/dialects/mssql/adodbapi.py index 9e12a944d..502a02acc 100644 --- a/lib/sqlalchemy/dialects/mssql/adodbapi.py +++ b/lib/sqlalchemy/dialects/mssql/adodbapi.py @@ -1,3 +1,7 @@ +""" +The adodbapi dialect is not implemented for 0.6 at this time. 
+ +""" from sqlalchemy import types as sqltypes, util from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect import sys diff --git a/lib/sqlalchemy/dialects/mssql/base.py b/lib/sqlalchemy/dialects/mssql/base.py index 57b468083..066ab8d04 100644 --- a/lib/sqlalchemy/dialects/mssql/base.py +++ b/lib/sqlalchemy/dialects/mssql/base.py @@ -2,119 +2,10 @@ """Support for the Microsoft SQL Server database. -Driver ------- - -The MSSQL dialect will work with three different available drivers: - -* *pyodbc* - http://pyodbc.sourceforge.net/. This is the recommeded - driver. - -* *pymssql* - http://pymssql.sourceforge.net/ - -* *adodbapi* - http://adodbapi.sourceforge.net/ - -Drivers are loaded in the order listed above based on availability. - -If you need to load a specific driver pass ``module_name`` when -creating the engine:: - - engine = create_engine('mssql+module_name://dsn') - -``module_name`` currently accepts: ``pyodbc``, ``pymssql``, and -``adodbapi``. - -Currently the pyodbc driver offers the greatest level of -compatibility. - Connecting ---------- -Connecting with create_engine() uses the standard URL approach of -``mssql://user:pass@host/dbname[?key=value&key=value...]``. - -If the database name is present, the tokens are converted to a -connection string with the specified values. If the database is not -present, then the host token is taken directly as the DSN name. - -Examples of pyodbc connection string URLs: - -* *mssql+pyodbc://mydsn* - connects using the specified DSN named ``mydsn``. - The connection string that is created will appear like:: - - dsn=mydsn;Trusted_Connection=Yes - -* *mssql+pyodbc://user:pass@mydsn* - connects using the DSN named - ``mydsn`` passing in the ``UID`` and ``PWD`` information. 
The - connection string that is created will appear like:: - - dsn=mydsn;UID=user;PWD=pass - -* *mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english* - connects - using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD`` - information, plus the additional connection configuration option - ``LANGUAGE``. The connection string that is created will appear - like:: - - dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english - -* *mssql+pyodbc://user:pass@host/db* - connects using a connection string - dynamically created that would appear like:: - - DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass - -* *mssql+pyodbc://user:pass@host:123/db* - connects using a connection - string that is dynamically created, which also includes the port - information using the comma syntax. If your connection string - requires the port information to be passed as a ``port`` keyword - see the next example. This will create the following connection - string:: - - DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass - -* *mssql+pyodbc://user:pass@host/db?port=123* - connects using a connection - string that is dynamically created that includes the port - information as a separate ``port`` keyword. This will create the - following connection string:: - - DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123 - -If you require a connection string that is outside the options -presented above, use the ``odbc_connect`` keyword to pass in a -urlencoded connection string. What gets passed in will be urldecoded -and passed directly. - -For example:: - - mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb - -would create the following connection string:: - - dsn=mydsn;Database=db - -Encoding your connection string can be easily accomplished through -the python shell. 
For example:: - - >>> import urllib - >>> urllib.quote_plus('dsn=mydsn;Database=db') - 'dsn%3Dmydsn%3BDatabase%3Ddb' - -Additional arguments which may be specified either as query string -arguments on the URL, or as keyword argument to -:func:`~sqlalchemy.create_engine()` are: - -* *query_timeout* - allows you to override the default query timeout. - Defaults to ``None``. This is only supported on pymssql. - -* *use_scope_identity* - allows you to specify that SCOPE_IDENTITY - should be used in place of the non-scoped version @@IDENTITY. - Defaults to True. - -* *max_identifier_length* - allows you to se the maximum length of - identfiers supported by the database. Defaults to 128. For pymssql - the default is 30. - -* *schema_name* - use to set the schema name. Defaults to ``dbo``. +See the individual driver sections below for details on connecting. Auto Increment Behavior ----------------------- @@ -220,9 +111,6 @@ Known Issues * No support for more than one ``IDENTITY`` column per table -* pymssql has problems with binary and unicode data that this module - does **not** work around - """ import datetime, decimal, inspect, operator, sys, re import itertools diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index b3a57d318..36cb5f370 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -1,40 +1,95 @@ """ Support for the pymssql dialect. -Going forward we will be supporting the 1.0 release of pymssql. +This dialect supports pymssql 1.0 and greater. + +pymssql is available at: + + http://pymssql.sourceforge.net/ + +Connect string:: + + mssql+pymssql://:@ + +Adding "?charset=utf8" or similar will cause pymssql to return +strings as Python unicode objects. This can potentially improve +performance in some scenarios as decoding of strings is +handled natively. 
+ +pymssql inherits a lot of limitations from FreeTDS, including: + +* no support for multibyte schema identifiers +* poor support for large decimals +* poor support for binary fields +* poor support for VARCHAR/CHAR fields over 255 characters + +Please consult the pymssql documentation for further information. """ from sqlalchemy.dialects.mssql.base import MSDialect -from sqlalchemy import types as sqltypes +from sqlalchemy import types as sqltypes, util, processors +import re +import decimal +class _MSNumeric_pymssql(sqltypes.Numeric): + def result_processor(self, dialect, type_): + if not self.asdecimal: + return processors.to_float + else: + return sqltypes.Numeric.result_processor(self, dialect, type_) class MSDialect_pymssql(MSDialect): supports_sane_rowcount = False max_identifier_length = 30 driver = 'pymssql' - + + colspecs = util.update_copy( + MSDialect.colspecs, + { + sqltypes.Numeric:_MSNumeric_pymssql, + sqltypes.Float:sqltypes.Float, + } + ) @classmethod def dbapi(cls): - import pymssql as module + module = __import__('pymssql') # pymmsql doesn't have a Binary method. we use string # TODO: monkeypatching here is less than ideal - module.Binary = lambda st: str(st) + module.Binary = str + + client_ver = tuple(int(x) for x in module.__version__.split(".")) + if client_ver < (1, ): + util.warn("The pymssql dialect expects at least " + "the 1.0 series of the pymssql DBAPI.") return module def __init__(self, **params): super(MSDialect_pymssql, self).__init__(**params) self.use_scope_identity = True + def _get_server_version_info(self, connection): + vers = connection.scalar("select @@version") + m = re.match(r"Microsoft SQL Server.*? 
- (\d+).(\d+).(\d+).(\d+)", vers) + if m: + return tuple(int(x) for x in m.group(1, 2, 3, 4)) + else: + return None def create_connect_args(self, url): - keys = url.query - if keys.get('port'): - # pymssql expects port as host:port, not a separate arg - keys['host'] = ''.join([keys.get('host', ''), ':', str(keys['port'])]) - del keys['port'] - return [[], keys] + opts = url.translate_connect_args(username='user') + opts.update(url.query) + opts.pop('port', None) + return [[], opts] def is_disconnect(self, e): - return isinstance(e, self.dbapi.DatabaseError) and "Error 10054" in str(e) + for msg in ( + "Error 10054", + "Not connected to any MS SQL server", + "Connection is closed" + ): + if msg in str(e): + return True + else: + return False dialect = MSDialect_pymssql \ No newline at end of file diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index 7f46ec7fb..eb4bf5cff 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -1,12 +1,71 @@ """ Support for MS-SQL via pyodbc. -http://pypi.python.org/pypi/pyodbc/ +pyodbc is available at: -Connect strings are of the form:: + http://pypi.python.org/pypi/pyodbc/ - mssql+pyodbc://:@/ - mssql+pyodbc://:@/ +Examples of pyodbc connection string URLs: + +* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``. + The connection string that is created will appear like:: + + dsn=mydsn;Trusted_Connection=Yes + +* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named + ``mydsn`` passing in the ``UID`` and ``PWD`` information. The + connection string that is created will appear like:: + + dsn=mydsn;UID=user;PWD=pass + +* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects + using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD`` + information, plus the additional connection configuration option + ``LANGUAGE``. 
The connection string that is created will appear + like:: + + dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english + +* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection string + dynamically created that would appear like:: + + DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass + +* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection + string that is dynamically created, which also includes the port + information using the comma syntax. If your connection string + requires the port information to be passed as a ``port`` keyword + see the next example. This will create the following connection + string:: + + DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass + +* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection + string that is dynamically created that includes the port + information as a separate ``port`` keyword. This will create the + following connection string:: + + DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123 + +If you require a connection string that is outside the options +presented above, use the ``odbc_connect`` keyword to pass in a +urlencoded connection string. What gets passed in will be urldecoded +and passed directly. + +For example:: + + mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb + +would create the following connection string:: + + dsn=mydsn;Database=db + +Encoding your connection string can be easily accomplished through +the python shell. 
For example:: + + >>> import urllib + >>> urllib.quote_plus('dsn=mydsn;Database=db') + 'dsn%3Dmydsn%3BDatabase%3Ddb' """ diff --git a/lib/sqlalchemy/test/requires.py b/lib/sqlalchemy/test/requires.py index c4c745c54..73b212095 100644 --- a/lib/sqlalchemy/test/requires.py +++ b/lib/sqlalchemy/test/requires.py @@ -224,6 +224,7 @@ def unicode_ddl(fn): no_support('maxdb', 'database support flakey'), no_support('oracle', 'FIXME: no support in database?'), no_support('sybase', 'FIXME: guessing, needs confirmation'), + no_support('mssql+pymssql', 'no FreeTDS support'), exclude('mysql', '<', (4, 1, 1), 'no unicode connection support'), ) -- cgit v1.2.1 From aabd00218ba7b6eb3e521b171c90aee4782b480f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 20 Mar 2010 11:57:38 -0400 Subject: fix a python 3 screwup --- lib/sqlalchemy/dialects/oracle/cx_oracle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/oracle/cx_oracle.py b/lib/sqlalchemy/dialects/oracle/cx_oracle.py index db1647679..91af6620b 100644 --- a/lib/sqlalchemy/dialects/oracle/cx_oracle.py +++ b/lib/sqlalchemy/dialects/oracle/cx_oracle.py @@ -387,7 +387,7 @@ class OracleDialect_cx_oracle(OracleDialect): if hasattr(self.dbapi, 'version'): cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')]) else: - cx_oracle_ver = None + cx_oracle_ver = (0, 0, 0) def types(*names): return set([ -- cgit v1.2.1 From b728f2db23ab297c1fe2853ed3e5e69178ab2d0c Mon Sep 17 00:00:00 2001 From: Brad Allen Date: Sat, 20 Mar 2010 21:36:43 -0600 Subject: added comment about alternative way to get server version info --- lib/sqlalchemy/connectors/mxodbc.py | 1 + 1 file changed, 1 insertion(+) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 484c11d49..0c7e5ad06 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -101,6 +101,7 @@ class 
MxODBCConnector(Connector): return False def _get_server_version_info(self, connection): + # eGenix suggests using conn.dbms_version instead of what we're doing here dbapi_con = connection.connection version = [] r = re.compile('[.\-]') -- cgit v1.2.1 From 323f1b358de19cc9cbfd45408636aa58060adf15 Mon Sep 17 00:00:00 2001 From: Brad Allen Date: Sat, 20 Mar 2010 21:58:32 -0600 Subject: now loading mxODBC exception classes into module namespace while still avoiding module imports (hooking into dbapi class method) --- lib/sqlalchemy/connectors/mxodbc.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 0c7e5ad06..ac7075209 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -31,6 +31,9 @@ class MxODBCConnector(Connector): @classmethod def dbapi(cls): + # this classmethod will normally be replaced by an instance + # attribute of the same name, so this is normally only called once. + cls._load_mx_exceptions() platform = sys.platform if platform == 'win32': from mx.ODBC import Windows as module @@ -43,6 +46,16 @@ class MxODBCConnector(Connector): raise ImportError, "Unrecognized platform for mxODBC import" return module + @classmethod + def _load_mx_exceptions(cls): + """ Import mxODBC exception classes into the module namespace, + as if they had been imported normally. This is done here + to avoid requiring all SQLAlchemy users to install mxODBC. 
+ """ + global InterfaceError, ProgrammingError + from mx.ODBC import InterfaceError + from mx.ODBC import ProgrammingError + def on_connect(self): def connect(conn): conn.stringformat = self.dbapi.MIXED_STRINGFORMAT -- cgit v1.2.1 From 619376e7ac5e3a2e698b789fa601d117057ab133 Mon Sep 17 00:00:00 2001 From: Brad Allen Date: Sat, 20 Mar 2010 22:00:51 -0600 Subject: For cases when mxODBC's cursor.execute can't do the job, raise a warning and fall back on cursor.executedirect which is less picky. This causes a drastic improvement in passing tests. --- lib/sqlalchemy/connectors/mxodbc.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index ac7075209..78a1719c1 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -126,4 +126,11 @@ class MxODBCConnector(Connector): version.append(n) return tuple(version) - + def do_execute(self, cursor, statement, parameters, context=None): + # temporary workaround until a more comprehensive solution can + # be found for controlling when to use executedirect + try: + cursor.execute(statement, parameters) + except (InterfaceError, ProgrammingError), e: + warnings.warn("cursor.execute failed; falling back to executedirect") + cursor.executedirect(statement, parameters) -- cgit v1.2.1 From 1a3f424c864d1bbf782db20d0840895c8ae0f35d Mon Sep 17 00:00:00 2001 From: Brad Allen Date: Sat, 20 Mar 2010 22:01:12 -0600 Subject: whitespace tweaks --- lib/sqlalchemy/connectors/mxodbc.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 78a1719c1..4476ffd78 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -65,10 +65,9 @@ class MxODBCConnector(Connector): return connect def _error_handler(self): - """Return a handler 
that adjusts mxODBC's raised Warnings to + """ Return a handler that adjusts mxODBC's raised Warnings to emit Python standard warnings. """ - from mx.ODBC.Error import Warning as MxOdbcWarning def error_handler(connection, cursor, errorclass, errorvalue): -- cgit v1.2.1 From e42aa19cf3293f959274d0972f9abd547783db78 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 21 Mar 2010 12:58:14 -0400 Subject: some apparent refactorings --- lib/sqlalchemy/orm/unitofwork.py | 1 + lib/sqlalchemy/topological.py | 23 +++++++++-------------- 2 files changed, 10 insertions(+), 14 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py index c0a088b01..30b0b61e5 100644 --- a/lib/sqlalchemy/orm/unitofwork.py +++ b/lib/sqlalchemy/orm/unitofwork.py @@ -483,6 +483,7 @@ class UOWTask(object): per-mapper topological structure is found to have cycles. """ + dependencies = {} def set_processor_for_state(state, depprocessor, target_state, isdelete): if state not in dependencies: diff --git a/lib/sqlalchemy/topological.py b/lib/sqlalchemy/topological.py index 76c0c717f..d35213f6b 100644 --- a/lib/sqlalchemy/topological.py +++ b/lib/sqlalchemy/topological.py @@ -172,7 +172,7 @@ def _sort(tuples, allitems, allow_cycles=False, ignore_self_cycles=False): n = nodes[id0] n.cycles = set([n]) elif not ignore_self_cycles: - raise CircularDependencyError("Self-referential dependency detected " + repr(t)) + raise CircularDependencyError("Self-referential dependency detected: %r" % t) continue childnode = nodes[id1] parentnode = nodes[id0] @@ -207,7 +207,7 @@ def _sort(tuples, allitems, allow_cycles=False, ignore_self_cycles=False): continue else: # long cycles not allowed - raise CircularDependencyError("Circular dependency detected " + repr(edges) + repr(queue)) + raise CircularDependencyError("Circular dependency detected: %r %r " % (edges, queue)) node = queue.pop() if not hasattr(node, '_cyclical'): output.append(node) @@ -264,35 
+264,30 @@ def _organize_as_tree(nodes): return (head.item, [n.item for n in head.cycles or []], head.children) def _find_cycles(edges): - involved_in_cycles = set() cycles = {} - def traverse(node, goal=None, cycle=None): - if goal is None: - goal = node - cycle = [] - elif node is goal: - return True + def traverse(node, cycle, goal): for (n, key) in edges.edges_by_parent(node): if key in cycle: continue - cycle.append(key) - if traverse(key, goal, cycle): + cycle.add(key) + if key is goal: cycset = set(cycle) for x in cycle: - involved_in_cycles.add(x) if x in cycles: existing_set = cycles[x] - [existing_set.add(y) for y in cycset] + existing_set.update(cycset) for y in existing_set: cycles[y] = existing_set cycset = existing_set else: cycles[x] = cycset + else: + traverse(key, cycle, goal) cycle.pop() for parent in edges.get_parents(): - traverse(parent) + traverse(parent, set(), parent) unique_cycles = set(tuple(s) for s in cycles.values()) -- cgit v1.2.1 From a57f7c072b15f9af5c1d66abd0a85a502a0c4bc2 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 21 Mar 2010 18:45:54 -0400 Subject: initial subq implementation --- lib/sqlalchemy/orm/__init__.py | 8 +++ lib/sqlalchemy/orm/properties.py | 10 ++-- lib/sqlalchemy/orm/strategies.py | 106 +++++++++++++++++++++++++++++++++++++-- 3 files changed, 117 insertions(+), 7 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 3337287d8..c9ed3cf2e 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -96,6 +96,8 @@ __all__ = ( 'relation', 'scoped_session', 'sessionmaker', + 'subqueryload', + 'subqueryload_all', 'synonym', 'undefer', 'undefer_group', @@ -974,6 +976,12 @@ def eagerload_all(*keys, **kw): else: return strategies.EagerLazyOption(keys, lazy=False, chained=True) +def subqueryload(*keys): + return strategies.EagerLazyOption(keys, _strategy_cls=strategies.SubqueryLoader) + +def subqueryload_all(*keys): + return 
strategies.EagerLazyOption(keys, _strategy_cls=strategies.SubqueryLoader, chained=True) + @sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated') def lazyload(*keys): """Return a ``MapperOption`` that will convert the property of the given diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 5c43fd355..4b6770861 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -397,13 +397,17 @@ class RelationshipProperty(StrategizedProperty): elif self.lazy == 'dynamic': from sqlalchemy.orm import dynamic self.strategy_class = dynamic.DynaLoader - elif self.lazy is False: + elif self.lazy is False or self.lazy == 'joined': self.strategy_class = strategies.EagerLoader - elif self.lazy is None: + elif self.lazy is None or self.lazy == 'noload': self.strategy_class = strategies.NoLoader + elif self.lazy is False or self.lazy == 'select': + self.strategy_class = strategies.LazyLoader + elif self.lazy == 'subquery': + self.strategy_class = strategies.SubqueryLoader else: self.strategy_class = strategies.LazyLoader - + self._reverse_property = set() if cascade is not False: diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index ce19667c6..f6b36f557 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -17,6 +17,7 @@ from sqlalchemy.orm.interfaces import ( ) from sqlalchemy.orm import session as sessionlib from sqlalchemy.orm import util as mapperutil +import itertools def _register_attribute(strategy, mapper, useobject, compare_function=None, @@ -575,6 +576,98 @@ class LoadLazyAttribute(object): else: return None +class SubqueryLoader(AbstractRelationshipLoader): + def init_class_attribute(self, mapper): + self.parent_property._get_strategy(LazyLoader).init_class_attribute(mapper) + + def setup_query(self, context, entity, path, adapter, + column_collection=None, parentmapper=None, **kwargs): + + if not context.query._enable_eagerloads: + 
return + + path = path + (self.key,) + + # TODO: shouldn't have to use getattr() to get at + # InstrumentedAttributes, or alternatively should not need to + # use InstrumentedAttributes with the Query at all (it should accept + # the MapperProperty objects as well). + + local_cols, remote_cols = self._local_remote_columns + + local_attr = [getattr(self.parent.class_, key) + for key in + [self.parent._get_col_to_prop(c).key for c in local_cols] + ] + + attr = getattr(self.parent.class_, self.key) + + # modify the query to just look for parent columns in the join condition + + # TODO. secondary is not supported at all yet. + + # TODO: what happens to options() in the parent query ? are they going + # to get in the way here ? + + q = context.query._clone() + q._set_entities(local_attr) + if self.parent_property.secondary is not None: + q = q.from_self(self.mapper, *local_attr) + else: + q = q.from_self(self.mapper) + q = q.join(attr) + + if self.parent_property.secondary is not None: + q = q.order_by(*local_attr) + else: + q = q.order_by(*remote_cols) + + if self.parent_property.order_by: + q = q.order_by(*self.parent_property.order_by) + + context.attributes[('subquery', path)] = q + + @property + def _local_remote_columns(self): + if self.parent_property.secondary is None: + return zip(*self.parent_property.local_remote_pairs) + else: + return \ + [p[0] for p in self.parent_property.synchronize_pairs],\ + [p[0] for p in self.parent_property.secondary_synchronize_pairs] + + def create_row_processor(self, context, path, mapper, row, adapter): + path = path + (self.key,) + + local_cols, remote_cols = self._local_remote_columns + + local_attr = [self.parent._get_col_to_prop(c).key for c in local_cols] + remote_attr = [self.mapper._get_col_to_prop(c).key for c in remote_cols] + + q = context.attributes[('subquery', path)] + + if self.parent_property.secondary is not None: + collections = dict((k, [v[0] for v in v]) for k, v in itertools.groupby( + q, + lambda x:x[1:] + 
)) + else: + collections = dict((k, list(v)) for k, v in itertools.groupby( + q, + lambda x:tuple([getattr(x, key) for key in remote_attr]) + )) + + + def execute(state, dict_, row): + collection = collections.get( + tuple([row[col] for col in local_cols]), + () + ) + state.get_impl(self.key).set_committed_value(state, dict_, collection) + + return (execute, None) + + class EagerLoader(AbstractRelationshipLoader): """Strategize a relationship() that loads within the process of the parent object being selected.""" @@ -809,24 +902,29 @@ class EagerLoader(AbstractRelationshipLoader): log.class_logger(EagerLoader) class EagerLazyOption(StrategizedOption): - - def __init__(self, key, lazy=True, chained=False, mapper=None, propagate_to_loaders=True): + def __init__(self, key, lazy=True, chained=False, + mapper=None, propagate_to_loaders=True, + _strategy_cls=None + ): super(EagerLazyOption, self).__init__(key, mapper) self.lazy = lazy self.chained = chained self.propagate_to_loaders = propagate_to_loaders + self.strategy_cls = _strategy_cls def is_chained(self): return not self.lazy and self.chained def get_strategy_class(self): - if self.lazy: + if self.strategy_cls: + return self.strategy_cls + elif self.lazy: return LazyLoader elif self.lazy is False: return EagerLoader elif self.lazy is None: return NoLoader - + class EagerJoinOption(PropertyOption): def __init__(self, key, innerjoin, chained=False): -- cgit v1.2.1 From 4475506371342b6e61d8dc236ec1ba9d7877e9e5 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Mar 2010 13:16:21 -0400 Subject: - Fixed bug in Query whereby calling q.join(prop).from_self(...). join(prop) would fail to render the second join outside the subquery, when joining on the same criterion as was on the inside. 
--- lib/sqlalchemy/orm/query.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 8d3d7fbb3..7ae1194c1 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -627,9 +627,11 @@ class Query(object): @_generative() def _from_selectable(self, fromclause): - self._statement = self._criterion = None - self._order_by = self._group_by = self._distinct = False - self._limit = self._offset = None + for attr in ('_statement', '_criterion', '_order_by', '_group_by', + '_limit', '_offset', '_joinpath', '_joinpoint', + '_distinct' + ): + self.__dict__.pop(attr, None) self._set_select_from(fromclause) old_entities = self._entities self._entities = [] -- cgit v1.2.1 From e62b7d408774941184b286e216806074a72ddfdb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Mar 2010 14:03:09 -0400 Subject: - ordering tests - bring all lines in strategies.py to 78 chars --- lib/sqlalchemy/orm/strategies.py | 348 ++++++++++++++++++++++++++------------- 1 file changed, 233 insertions(+), 115 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 00b00be13..828530d7a 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -4,7 +4,8 @@ # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -"""sqlalchemy.orm.interfaces.LoaderStrategy implementations, and related MapperOptions.""" +"""sqlalchemy.orm.interfaces.LoaderStrategy + implementations, and related MapperOptions.""" from sqlalchemy import exc as sa_exc from sqlalchemy import sql, util, log @@ -39,7 +40,9 @@ def _register_attribute(strategy, mapper, useobject, attribute_ext.insert(0, _SingleParentValidator(prop)) if prop.key in prop.parent._validators: - attribute_ext.insert(0, mapperutil.Validator(prop.key, 
prop.parent._validators[prop.key])) + attribute_ext.insert(0, + mapperutil.Validator(prop.key, prop.parent._validators[prop.key]) + ) if useobject: attribute_ext.append(sessionlib.UOWEventHandler(prop.key)) @@ -67,7 +70,7 @@ def _register_attribute(strategy, mapper, useobject, ) class UninstrumentedColumnLoader(LoaderStrategy): - """Represent the strategy for a MapperProperty that doesn't instrument the class. + """Represent the a non-instrumented MapperProperty. The polymorphic_on argument of mapper() often results in this, if the argument is against the with_polymorphic selectable. @@ -76,14 +79,15 @@ class UninstrumentedColumnLoader(LoaderStrategy): def init(self): self.columns = self.parent_property.columns - def setup_query(self, context, entity, path, adapter, column_collection=None, **kwargs): + def setup_query(self, context, entity, path, adapter, + column_collection=None, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] column_collection.append(c) def create_row_processor(self, selectcontext, path, mapper, row, adapter): - return (None, None) + return None, None class ColumnLoader(LoaderStrategy): """Strategize the loading of a plain column-based MapperProperty.""" @@ -92,7 +96,8 @@ class ColumnLoader(LoaderStrategy): self.columns = self.parent_property.columns self.is_composite = hasattr(self.parent_property, 'composite_class') - def setup_query(self, context, entity, path, adapter, column_collection=None, **kwargs): + def setup_query(self, context, entity, path, adapter, + column_collection=None, **kwargs): for c in self.columns: if adapter: c = adapter.columns[c] @@ -136,7 +141,8 @@ class CompositeColumnLoader(ColumnLoader): def copy(obj): if obj is None: return None - return self.parent_property.composite_class(*obj.__composite_values__()) + return self.parent_property.\ + composite_class(*obj.__composite_values__()) def compare(a, b): if a is None or b is None: @@ -157,7 +163,8 @@ class CompositeColumnLoader(ColumnLoader): 
#active_history ? ) - def create_row_processor(self, selectcontext, path, mapper, row, adapter): + def create_row_processor(self, selectcontext, path, mapper, + row, adapter): key = self.key columns = self.columns composite_class = self.parent_property.composite_class @@ -204,7 +211,8 @@ class DeferredColumnLoader(LoaderStrategy): def init(self): if hasattr(self.parent_property, 'composite_class'): - raise NotImplementedError("Deferred loading for composite types not implemented yet") + raise NotImplementedError("Deferred loading for composite " + "types not implemented yet") self.columns = self.parent_property.columns self.group = self.parent_property.group @@ -219,13 +227,15 @@ class DeferredColumnLoader(LoaderStrategy): expire_missing=False ) - def setup_query(self, context, entity, path, adapter, only_load_props=None, **kwargs): - if \ - (self.group is not None and context.attributes.get(('undefer', self.group), False)) or \ - (only_load_props and self.key in only_load_props): - + def setup_query(self, context, entity, path, adapter, + only_load_props=None, **kwargs): + if ( + self.group is not None and + context.attributes.get(('undefer', self.group), False) + ) or (only_load_props and self.key in only_load_props): self.parent_property._get_strategy(ColumnLoader).\ - setup_query(context, entity, path, adapter, **kwargs) + setup_query(context, entity, + path, adapter, **kwargs) def _class_level_loader(self, state): if not mapperutil._state_has_identity(state): @@ -277,14 +287,15 @@ class LoadDeferredColumns(object): session = sessionlib._state_session(state) if session is None: raise orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session; " - "deferred load operation of attribute '%s' cannot proceed" % - (mapperutil.state_str(state), self.key) - ) + "Parent instance %s is not bound to a Session; " + "deferred load operation of attribute '%s' cannot proceed" % + (mapperutil.state_str(state), self.key) + ) query = 
session.query(localparent) ident = state.key[1] - query._get(None, ident=ident, only_load_props=group, refresh_state=state) + query._get(None, ident=ident, + only_load_props=group, refresh_state=state) return attributes.ATTR_WAS_SET class DeferredOption(StrategizedOption): @@ -310,7 +321,7 @@ class UndeferGroupOption(MapperOption): query._attributes[('undefer', self.group)] = True class AbstractRelationshipLoader(LoaderStrategy): - """LoaderStratgies which deal with related objects as opposed to scalars.""" + """LoaderStratgies which deal with related objects.""" def init(self): self.mapper = self.parent_property.mapper @@ -364,31 +375,47 @@ class LazyLoader(AbstractRelationshipLoader): for c in self.mapper._equivalent_columns[col]: self._equated_columns[c] = self._equated_columns[col] - self.logger.info("%s will use query.get() to optimize instance loads" % self) + self.logger.info("%s will use query.get() to " + "optimize instance loads" % self) def init_class_attribute(self, mapper): self.is_class_level = True - # MANYTOONE currently only needs the "old" value for delete-orphan - # cascades. the required _SingleParentValidator will enable active_history - # in that case. otherwise we don't need the "old" value during backref operations. + # MANYTOONE currently only needs the + # "old" value for delete-orphan + # cascades. the required _SingleParentValidator + # will enable active_history + # in that case. otherwise we don't need the + # "old" value during backref operations. 
_register_attribute(self, mapper, useobject=True, callable_=self._class_level_loader, uselist = self.parent_property.uselist, typecallable = self.parent_property.collection_class, - active_history = self.parent_property.direction is not interfaces.MANYTOONE or not self.use_get, + active_history = \ + self.parent_property.direction is not \ + interfaces.MANYTOONE or \ + not self.use_get, ) - def lazy_clause(self, state, reverse_direction=False, alias_secondary=False, adapt_source=None): + def lazy_clause(self, state, reverse_direction=False, + alias_secondary=False, adapt_source=None): if state is None: - return self._lazy_none_clause(reverse_direction, adapt_source=adapt_source) + return self._lazy_none_clause( + reverse_direction, + adapt_source=adapt_source) if not reverse_direction: - (criterion, bind_to_col, rev) = (self.__lazywhere, self.__bind_to_col, self._equated_columns) + criterion, bind_to_col, rev = \ + self.__lazywhere, \ + self.__bind_to_col, \ + self._equated_columns else: - (criterion, bind_to_col, rev) = LazyLoader._create_lazy_clause(self.parent_property, reverse_direction=reverse_direction) + criterion, bind_to_col, rev = \ + LazyLoader._create_lazy_clause( + self.parent_property, + reverse_direction=reverse_direction) if reverse_direction: mapper = self.parent_property.mapper @@ -397,25 +424,38 @@ class LazyLoader(AbstractRelationshipLoader): def visit_bindparam(bindparam): if bindparam.key in bind_to_col: - # use the "committed" (database) version to get query column values - # also its a deferred value; so that when used by Query, the committed value is used + # use the "committed" (database) version to get + # query column values + # also its a deferred value; so that when used + # by Query, the committed value is used # after an autoflush occurs o = state.obj() # strong ref - bindparam.value = lambda: mapper._get_committed_attr_by_column(o, bind_to_col[bindparam.key]) + bindparam.value = \ + lambda: mapper._get_committed_attr_by_column( + o, 
bind_to_col[bindparam.key]) if self.parent_property.secondary is not None and alias_secondary: - criterion = sql_util.ClauseAdapter(self.parent_property.secondary.alias()).traverse(criterion) + criterion = sql_util.ClauseAdapter( + self.parent_property.secondary.alias()).\ + traverse(criterion) - criterion = visitors.cloned_traverse(criterion, {}, {'bindparam':visit_bindparam}) + criterion = visitors.cloned_traverse( + criterion, {}, {'bindparam':visit_bindparam}) if adapt_source: criterion = adapt_source(criterion) return criterion def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): if not reverse_direction: - (criterion, bind_to_col, rev) = (self.__lazywhere, self.__bind_to_col, self._equated_columns) + criterion, bind_to_col, rev = \ + self.__lazywhere, \ + self.__bind_to_col,\ + self._equated_columns else: - (criterion, bind_to_col, rev) = LazyLoader._create_lazy_clause(self.parent_property, reverse_direction=reverse_direction) + criterion, bind_to_col, rev = \ + LazyLoader._create_lazy_clause( + self.parent_property, + reverse_direction=reverse_direction) criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col) @@ -433,22 +473,30 @@ class LazyLoader(AbstractRelationshipLoader): key = self.key if not self.is_class_level: def new_execute(state, dict_, row): - # we are not the primary manager for this attribute on this class - set up a - # per-instance lazyloader, which will override the class-level behavior. - # this currently only happens when using a "lazyload" option on a "no load" - # attribute - "eager" attributes always have a class-level lazyloader - # installed. + # we are not the primary manager for this attribute + # on this class - set up a + # per-instance lazyloader, which will override the + # class-level behavior. + # this currently only happens when using a + # "lazyload" option on a "no load" + # attribute - "eager" attributes always have a + # class-level lazyloader installed. 
state.set_callable(dict_, key, LoadLazyAttribute(state, key)) else: def new_execute(state, dict_, row): - # we are the primary manager for this attribute on this class - reset its - # per-instance attribute state, so that the class-level lazy loader is - # executed when next referenced on this instance. this is needed in - # populate_existing() types of scenarios to reset any existing state. + # we are the primary manager for this attribute on + # this class - reset its + # per-instance attribute state, so that the class-level + # lazy loader is + # executed when next referenced on this instance. + # this is needed in + # populate_existing() types of scenarios to reset + # any existing state. state.reset(dict_, key) return new_execute, None - + + @classmethod def _create_lazy_clause(cls, prop, reverse_direction=False): binds = util.column_dict() lookup = util.column_dict() @@ -478,18 +526,19 @@ class LazyLoader(AbstractRelationshipLoader): lazywhere = prop.primaryjoin if prop.secondaryjoin is None or not reverse_direction: - lazywhere = visitors.replacement_traverse(lazywhere, {}, col_to_bind) + lazywhere = visitors.replacement_traverse( + lazywhere, {}, col_to_bind) if prop.secondaryjoin is not None: secondaryjoin = prop.secondaryjoin if reverse_direction: - secondaryjoin = visitors.replacement_traverse(secondaryjoin, {}, col_to_bind) + secondaryjoin = visitors.replacement_traverse( + secondaryjoin, {}, col_to_bind) lazywhere = sql.and_(lazywhere, secondaryjoin) bind_to_col = dict((binds[col].key, col) for col in binds) - return (lazywhere, bind_to_col, equated_columns) - _create_lazy_clause = classmethod(_create_lazy_clause) + return lazywhere, bind_to_col, equated_columns log.class_logger(LazyLoader) @@ -511,12 +560,14 @@ class LoadLazyAttribute(object): prop = instance_mapper.get_property(self.key) strategy = prop._get_strategy(LazyLoader) - if kw.get('passive') is attributes.PASSIVE_NO_FETCH and not strategy.use_get: + if kw.get('passive') is 
attributes.PASSIVE_NO_FETCH and \ + not strategy.use_get: return attributes.PASSIVE_NO_RESULT if strategy._should_log_debug(): strategy.logger.debug("loading %s", - mapperutil.state_attribute_str(state, self.key)) + mapperutil.state_attribute_str( + state, self.key)) session = sessionlib._state_session(state) if session is None: @@ -537,8 +588,11 @@ class LoadLazyAttribute(object): ident = [] allnulls = True for primary_key in prop.mapper.primary_key: - val = instance_mapper._get_committed_state_attr_by_column( - state, strategy._equated_columns[primary_key], **kw) + val = instance_mapper.\ + _get_committed_state_attr_by_column( + state, + strategy._equated_columns[primary_key], + **kw) if val is attributes.PASSIVE_NO_RESULT: return val allnulls = allnulls and val is None @@ -570,7 +624,8 @@ class LoadLazyAttribute(object): if l > 1: util.warn( "Multiple rows returned with " - "uselist=False for lazily-loaded attribute '%s' " % prop) + "uselist=False for lazily-loaded attribute '%s' " + % prop) return result[0] else: @@ -578,10 +633,13 @@ class LoadLazyAttribute(object): class SubqueryLoader(AbstractRelationshipLoader): def init_class_attribute(self, mapper): - self.parent_property._get_strategy(LazyLoader).init_class_attribute(mapper) + self.parent_property.\ + _get_strategy(LazyLoader).\ + init_class_attribute(mapper) - def setup_query(self, context, entity, path, adapter, - column_collection=None, parentmapper=None, **kwargs): + def setup_query(self, context, entity, + path, adapter, column_collection=None, + parentmapper=None, **kwargs): if not context.query._enable_eagerloads: return @@ -597,16 +655,25 @@ class SubqueryLoader(AbstractRelationshipLoader): attr = self.parent_property.class_attribute - # modify the query to just look for parent columns in the join condition + # modify the query to just look for parent columns in the + # join condition - # TODO: what happens to options() in the parent query ? 
are they going + # TODO: what happens to options() in the parent query ? + # are they going # to get in the way here ? + # set the original query to only look + # for the significant columns, not order + # by anything. q = context.query._clone() q._set_entities(local_attr) + q._order_by = None + # now select from it as a subquery. q = q.from_self(self.mapper, *local_attr) + # and join to the related thing we want + # to load. q = q.join(attr) q = q.order_by(*local_attr) @@ -623,7 +690,10 @@ class SubqueryLoader(AbstractRelationshipLoader): else: return \ [p[0] for p in self.parent_property.synchronize_pairs],\ - [p[0] for p in self.parent_property.secondary_synchronize_pairs] + [ + p[0] for p in self.parent_property. + secondary_synchronize_pairs + ] def create_row_processor(self, context, path, mapper, row, adapter): path = path + (self.key,) @@ -631,38 +701,44 @@ class SubqueryLoader(AbstractRelationshipLoader): local_cols, remote_cols = self._local_remote_columns local_attr = [self.parent._get_col_to_prop(c).key for c in local_cols] - remote_attr = [self.mapper._get_col_to_prop(c).key for c in remote_cols] + remote_attr = [ + self.mapper._get_col_to_prop(c).key + for c in remote_cols] q = context.attributes[('subquery', path)] - collections = dict((k, [v[0] for v in v]) for k, v in itertools.groupby( + collections = dict( + (k, [v[0] for v in v]) + for k, v in itertools.groupby( q, lambda x:x[1:] )) - def execute(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), () ) - state.get_impl(self.key).set_committed_value(state, dict_, collection) + state.get_impl(self.key).\ + set_committed_value(state, dict_, collection) - return (execute, None) - + return execute, None class EagerLoader(AbstractRelationshipLoader): - """Strategize a relationship() that loads within the process of the parent object being selected.""" + """Strategize a relationship() that loads within the process + of the parent object being selected.""" def 
init(self): super(EagerLoader, self).init() self.join_depth = self.parent_property.join_depth def init_class_attribute(self, mapper): - self.parent_property._get_strategy(LazyLoader).init_class_attribute(mapper) + self.parent_property.\ + _get_strategy(LazyLoader).init_class_attribute(mapper) def setup_query(self, context, entity, path, adapter, \ - column_collection=None, parentmapper=None, **kwargs): + column_collection=None, parentmapper=None, + **kwargs): """Add a left outer join to the statement thats being constructed.""" if not context.query._enable_eagerloads: @@ -673,16 +749,21 @@ class EagerLoader(AbstractRelationshipLoader): reduced_path = interfaces._reduce_path(path) # check for user-defined eager alias - if ("user_defined_eager_row_processor", reduced_path) in context.attributes: - clauses = context.attributes[("user_defined_eager_row_processor", reduced_path)] + if ("user_defined_eager_row_processor", reduced_path) in\ + context.attributes: + clauses = context.attributes[ + ("user_defined_eager_row_processor", + reduced_path)] adapter = entity._get_entity_clauses(context.query, context) if adapter and clauses: - context.attributes[("user_defined_eager_row_processor", reduced_path)] = \ - clauses = clauses.wrap(adapter) + context.attributes[ + ("user_defined_eager_row_processor", + reduced_path)] = clauses = clauses.wrap(adapter) elif adapter: - context.attributes[("user_defined_eager_row_processor", reduced_path)] = \ - clauses = adapter + context.attributes[ + ("user_defined_eager_row_processor", + reduced_path)] = clauses = adapter add_to_collection = context.primary_columns @@ -698,18 +779,24 @@ class EagerLoader(AbstractRelationshipLoader): if self.mapper.base_mapper in reduced_path: return - clauses = mapperutil.ORMAdapter(mapperutil.AliasedClass(self.mapper), - equivalents=self.mapper._equivalent_columns, adapt_required=True) + clauses = mapperutil.ORMAdapter( + mapperutil.AliasedClass(self.mapper), + 
equivalents=self.mapper._equivalent_columns, + adapt_required=True) if self.parent_property.direction != interfaces.MANYTOONE: context.multi_row_eager_loaders = True context.create_eager_joins.append( - (self._create_eager_join, context, entity, path, adapter, parentmapper, clauses) + (self._create_eager_join, context, + entity, path, adapter, + parentmapper, clauses) ) add_to_collection = context.secondary_columns - context.attributes[("eager_row_processor", reduced_path)] = clauses + context.attributes[ + ("eager_row_processor", reduced_path) + ] = clauses for value in self.mapper._iterate_polymorphic_properties(): value.setup( @@ -720,7 +807,8 @@ class EagerLoader(AbstractRelationshipLoader): parentmapper=self.mapper, column_collection=add_to_collection) - def _create_eager_join(self, context, entity, path, adapter, parentmapper, clauses): + def _create_eager_join(self, context, entity, + path, adapter, parentmapper, clauses): if parentmapper is None: localparent = entity.mapper @@ -738,12 +826,13 @@ class EagerLoader(AbstractRelationshipLoader): not should_nest_selectable and \ context.from_clause: index, clause = \ - sql_util.find_join_source(context.from_clause, entity.selectable) + sql_util.find_join_source( + context.from_clause, entity.selectable) if clause is not None: # join to an existing FROM clause on the query. # key it to its list index in the eager_joins dict. - # Query._compile_context will adapt as needed and append to the - # FROM clause of the select(). + # Query._compile_context will adapt as needed and + # append to the FROM clause of the select(). 
entity_key, default_towrap = index, clause if entity_key is None: @@ -754,28 +843,38 @@ class EagerLoader(AbstractRelationshipLoader): join_to_left = False if adapter: if getattr(adapter, 'aliased_class', None): - onclause = getattr(adapter.aliased_class, self.key, self.parent_property) + onclause = getattr( + adapter.aliased_class, self.key, + self.parent_property) else: - onclause = getattr(mapperutil.AliasedClass(self.parent, adapter.selectable), - self.key, self.parent_property) + onclause = getattr( + mapperutil.AliasedClass( + self.parent, + adapter.selectable + ), + self.key, self.parent_property + ) if onclause is self.parent_property: - # TODO: this is a temporary hack to account for polymorphic eager loads where + # TODO: this is a temporary hack to + # account for polymorphic eager loads where # the eagerload is referencing via of_type(). join_to_left = True else: onclause = self.parent_property - innerjoin = context.attributes.get(("eager_join_type", path), - self.parent_property.innerjoin) + innerjoin = context.attributes.get( + ("eager_join_type", path), + self.parent_property.innerjoin) - context.eager_joins[entity_key] = eagerjoin = mapperutil.join( - towrap, - clauses.aliased_class, - onclause, - join_to_left=join_to_left, - isouter=not innerjoin - ) + context.eager_joins[entity_key] = eagerjoin = \ + mapperutil.join( + towrap, + clauses.aliased_class, + onclause, + join_to_left=join_to_left, + isouter=not innerjoin + ) # send a hint to the Query as to where it may "splice" this join eagerjoin.stop_on = entity.selectable @@ -783,11 +882,14 @@ class EagerLoader(AbstractRelationshipLoader): if self.parent_property.secondary is None and \ not parentmapper: # for parentclause that is the non-eager end of the join, - # ensure all the parent cols in the primaryjoin are actually in the + # ensure all the parent cols in the primaryjoin are actually + # in the # columns clause (i.e. 
are not deferred), so that aliasing applied - # by the Query propagates those columns outward. This has the effect + # by the Query propagates those columns outward. + # This has the effect # of "undefering" those columns. - for col in sql_util.find_columns(self.parent_property.primaryjoin): + for col in sql_util.find_columns( + self.parent_property.primaryjoin): if localparent.mapped_table.c.contains_column(col): if adapter: col = adapter.columns[col] @@ -797,22 +899,29 @@ class EagerLoader(AbstractRelationshipLoader): context.eager_order_by += \ eagerjoin._target_adapter.\ copy_and_process( - util.to_list(self.parent_property.order_by) + util.to_list( + self.parent_property.order_by + ) ) def _create_eager_adapter(self, context, row, adapter, path): reduced_path = interfaces._reduce_path(path) - if ("user_defined_eager_row_processor", reduced_path) in context.attributes: - decorator = context.attributes[("user_defined_eager_row_processor", reduced_path)] - # user defined eagerloads are part of the "primary" portion of the load. + if ("user_defined_eager_row_processor", reduced_path) in \ + context.attributes: + decorator = context.attributes[ + ("user_defined_eager_row_processor", + reduced_path)] + # user defined eagerloads are part of the "primary" + # portion of the load. # the adapters applied to the Query should be honored. 
if context.adapter and decorator: decorator = decorator.wrap(context.adapter) elif context.adapter: decorator = context.adapter elif ("eager_row_processor", reduced_path) in context.attributes: - decorator = context.attributes[("eager_row_processor", reduced_path)] + decorator = context.attributes[ + ("eager_row_processor", reduced_path)] else: return False @@ -827,7 +936,10 @@ class EagerLoader(AbstractRelationshipLoader): def create_row_processor(self, context, path, mapper, row, adapter): path = path + (self.key,) - eager_adapter = self._create_eager_adapter(context, row, adapter, path) + eager_adapter = self._create_eager_adapter( + context, + row, + adapter, path) if eager_adapter is not False: key = self.key @@ -856,8 +968,8 @@ class EagerLoader(AbstractRelationshipLoader): return new_execute, existing_execute else: def new_execute(state, dict_, row): - collection = attributes.init_state_collection(state, dict_, - key) + collection = attributes.init_state_collection( + state, dict_, key) result_list = util.UniqueAppender(collection, 'append_without_event') context.attributes[(state, key)] = result_list @@ -873,14 +985,18 @@ class EagerLoader(AbstractRelationshipLoader): # distinct sets of result columns collection = attributes.init_state_collection(state, dict_, key) - result_list = util.UniqueAppender(collection, - 'append_without_event') + result_list = util.UniqueAppender( + collection, + 'append_without_event') context.attributes[(state, key)] = result_list _instance(row, result_list) return new_execute, existing_execute else: - return self.parent_property._get_strategy(LazyLoader).\ - create_row_processor(context, path, mapper, row, adapter) + return self.parent_property.\ + _get_strategy(LazyLoader).\ + create_row_processor( + context, path, + mapper, row, adapter) log.class_logger(EagerLoader) @@ -962,8 +1078,10 @@ class _SingleParentValidator(interfaces.AttributeExtension): if value is not None: hasparent = 
initiator.hasparent(attributes.instance_state(value)) if hasparent and oldvalue is not value: - raise sa_exc.InvalidRequestError("Instance %s is already associated with an instance " - "of %s via its %s attribute, and is only allowed a single parent." % + raise sa_exc.InvalidRequestError( + "Instance %s is already associated with an instance " + "of %s via its %s attribute, and is only allowed a " + "single parent." % (mapperutil.instance_str(value), state.class_, self.prop) ) return value -- cgit v1.2.1 From efcc9d782274ee1d5eb08855d50aaf627d76a073 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Mar 2010 16:54:58 -0400 Subject: and here's where it gets *fun* ! so much for being easy --- lib/sqlalchemy/orm/interfaces.py | 19 +++++---- lib/sqlalchemy/orm/query.py | 3 +- lib/sqlalchemy/orm/strategies.py | 83 +++++++++++++++++++++++++++++----------- 3 files changed, 75 insertions(+), 30 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 03ebb97c4..c773e74f6 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -646,7 +646,7 @@ class StrategizedProperty(MapperProperty): """ - def __get_context_strategy(self, context, path): + def _get_context_strategy(self, context, path): cls = context.attributes.get(("loaderstrategy", _reduce_path(path)), None) if cls: try: @@ -668,11 +668,11 @@ class StrategizedProperty(MapperProperty): return strategy def setup(self, context, entity, path, adapter, **kwargs): - self.__get_context_strategy(context, path + (self.key,)).\ + self._get_context_strategy(context, path + (self.key,)).\ setup_query(context, entity, path, adapter, **kwargs) def create_row_processor(self, context, path, mapper, row, adapter): - return self.__get_context_strategy(context, path + (self.key,)).\ + return self._get_context_strategy(context, path + (self.key,)).\ create_row_processor(context, path, mapper, row, adapter) def do_init(self): @@ 
-775,12 +775,17 @@ class PropertyOption(MapperOption): isa = True for ent in query._mapper_entities: - if searchfor is ent.path_entity or (isa and searchfor.common_parent(ent.path_entity)): + if searchfor is ent.path_entity or ( + isa and + searchfor.common_parent(ent.path_entity)): return ent else: if raiseerr: - raise sa_exc.ArgumentError("Can't find entity %s in Query. Current list: %r" - % (searchfor, [str(m.path_entity) for m in query._entities])) + raise sa_exc.ArgumentError( + "Can't find entity %s in Query. Current list: %r" + % (searchfor, [ + str(m.path_entity) for m in query._entities + ])) else: return None @@ -921,7 +926,7 @@ class StrategizedOption(PropertyOption): return False def process_query_property(self, query, paths, mappers): - # __get_context_strategy may receive the path in terms of + # _get_context_strategy may receive the path in terms of # a base mapper - e.g. options(eagerload_all(Company.employees, Engineer.machines)) # in the polymorphic tests leads to "(Person, 'machines')" in # the path due to the mechanics of how the eager strategy builds diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 7ae1194c1..f06717217 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -619,7 +619,8 @@ class Query(object): those being selected. 
""" - fromclause = self.with_labels().enable_eagerloads(False).statement.correlate(None) + fromclause = self.with_labels().enable_eagerloads(False).\ + statement.correlate(None) q = self._from_selectable(fromclause) if entities: q._set_entities(entities) diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 828530d7a..08bb5062a 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -644,28 +644,38 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return - path = path + (self.key,) +# path = path + (self.key,) + - local_cols, remote_cols = self._local_remote_columns + if ("orig_query", SubqueryLoader) not in context.attributes: + context.attributes[("orig_query", SubqueryLoader)] =\ + context.query + + orig_query = context.attributes[("orig_query", SubqueryLoader)] + +# orig_query = context.query + path = context.query._current_path + path + (self.key, ) + + prop = path[0].get_property(path[1]) + + local_cols, remote_cols = self._local_remote_columns(prop) local_attr = [ - self.parent._get_col_to_prop(c).class_attribute + path[0]._get_col_to_prop(c).class_attribute for c in local_cols ] - attr = self.parent_property.class_attribute + #attr = self.parent_property.class_attribute # modify the query to just look for parent columns in the # join condition - # TODO: what happens to options() in the parent query ? - # are they going - # to get in the way here ? - # set the original query to only look # for the significant columns, not order # by anything. - q = context.query._clone() + q = orig_query._clone() #context.query._clone() + q._attributes = q._attributes.copy() + q._attributes[("orig_query", SubqueryLoader)] = orig_query q._set_entities(local_attr) q._order_by = None @@ -674,37 +684,66 @@ class SubqueryLoader(AbstractRelationshipLoader): # and join to the related thing we want # to load. 
- q = q.join(attr) - + for mapper, key in [(path[i], path[i+1]) for i in xrange(0, len(path), 2)]: + prop = mapper.get_property(key) + q = q.join(prop.class_attribute) + q = q.order_by(*local_attr) + q._attributes = q._attributes.copy() + for attr in orig_query._attributes: + strat, opt_path = attr + if strat == "loaderstrategy": + opt_path = opt_path[len(path):] + q._attributes[("loaderstrategy", opt_path)] =\ + context.query._attributes[attr] + + q = q._with_current_path(path) if self.parent_property.order_by: q = q.order_by(*self.parent_property.order_by) - context.attributes[('subquery', path)] = q + context.attributes[('subquery', path)] = \ + q._attributes[('subquery', path)] = \ + q + +# for value in self.mapper._iterate_polymorphic_properties(): +# strat = value._get_context_strategy( +# context, path + +# (self.mapper,value.key) +# ) + #print "VALUE", value, "PATH", path + (self.mapper,), "STRAT", type(strat) +# if isinstance(strat, SubqueryLoader): +# value.setup( +# context, +# entity, +## path + (self.mapper,), +# adapter, +# parentmapper=self.mapper, +# ) - @property - def _local_remote_columns(self): - if self.parent_property.secondary is None: - return zip(*self.parent_property.local_remote_pairs) + def _local_remote_columns(self, prop): + if prop.secondary is None: + return zip(*prop.local_remote_pairs) else: return \ - [p[0] for p in self.parent_property.synchronize_pairs],\ + [p[0] for p in prop.synchronize_pairs],\ [ - p[0] for p in self.parent_property. + p[0] for p in prop. 
secondary_synchronize_pairs ] def create_row_processor(self, context, path, mapper, row, adapter): - path = path + (self.key,) +# path = path + (self.key,) + path = context.query._current_path + path + (self.key,) - local_cols, remote_cols = self._local_remote_columns + local_cols, remote_cols = self._local_remote_columns(self.parent_property) local_attr = [self.parent._get_col_to_prop(c).key for c in local_cols] remote_attr = [ self.mapper._get_col_to_prop(c).key for c in remote_cols] - + + print "STRAT LOOKING FOR SUBQ AT PATH", path q = context.attributes[('subquery', path)] collections = dict( @@ -713,7 +752,7 @@ class SubqueryLoader(AbstractRelationshipLoader): q, lambda x:x[1:] )) - + def execute(state, dict_, row): collection = collections.get( tuple([row[col] for col in local_cols]), -- cgit v1.2.1 From 0197a70343a7d6f4eb387bbd461e0624e2dc436b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Mar 2010 20:15:50 -0400 Subject: - Fixed bug which affected all eagerload() and similar options such that "remote" eager loads, i.e. eagerloads off of a lazy load such as query(A).options(eagerload(A.b, B.c)) wouldn't eagerload anything, but using eagerload("b.c") would work fine. 
- subquery eagerloading very close --- lib/sqlalchemy/orm/interfaces.py | 150 +++++++++++++++++++++------------------ lib/sqlalchemy/orm/strategies.py | 73 ++++++++----------- 2 files changed, 110 insertions(+), 113 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index c773e74f6..255b6b6fe 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -757,14 +757,35 @@ class PropertyOption(MapperOption): self._process(query, False) def _process(self, query, raiseerr): - paths, mappers = self.__get_paths(query, raiseerr) + paths, mappers = self._get_paths(query, raiseerr) if paths: self.process_query_property(query, paths, mappers) def process_query_property(self, query, paths, mappers): pass - def __find_entity(self, query, mapper, raiseerr): + def __getstate__(self): + d = self.__dict__.copy() + d['key'] = ret = [] + for token in util.to_list(self.key): + if isinstance(token, PropComparator): + ret.append((token.mapper.class_, token.key)) + else: + ret.append(token) + return d + + def __setstate__(self, state): + ret = [] + for key in state['key']: + if isinstance(key, tuple): + cls, propkey = key + ret.append(getattr(cls, propkey)) + else: + ret.append(key) + state['key'] = tuple(ret) + self.__dict__ = state + + def _find_entity(self, query, mapper, raiseerr): from sqlalchemy.orm.util import _class_to_mapper, _is_aliased_class if _is_aliased_class(mapper): @@ -773,7 +794,7 @@ class PropertyOption(MapperOption): else: searchfor = _class_to_mapper(mapper) isa = True - + for ent in query._mapper_entities: if searchfor is ent.path_entity or ( isa and @@ -789,28 +810,7 @@ class PropertyOption(MapperOption): else: return None - def __getstate__(self): - d = self.__dict__.copy() - d['key'] = ret = [] - for token in util.to_list(self.key): - if isinstance(token, PropComparator): - ret.append((token.mapper.class_, token.key)) - else: - ret.append(token) - return d - - def 
__setstate__(self, state): - ret = [] - for key in state['key']: - if isinstance(key, tuple): - cls, propkey = key - ret.append(getattr(cls, propkey)) - else: - ret.append(key) - state['key'] = tuple(ret) - self.__dict__ = state - - def __get_paths(self, query, raiseerr): + def _get_paths(self, query, raiseerr): path = None entity = None l = [] @@ -820,61 +820,71 @@ class PropertyOption(MapperOption): # with an existing path current_path = list(query._current_path) - if self.mapper: - entity = self.__find_entity(query, self.mapper, raiseerr) - mapper = entity.mapper - path_element = entity.path_entity - + tokens = [] for key in util.to_list(self.key): if isinstance(key, basestring): - tokens = key.split('.') + tokens += key.split('.') else: - tokens = [key] - for token in tokens: - if isinstance(token, basestring): - if not entity: - entity = query._entity_zero() - path_element = entity.path_entity - mapper = entity.mapper - mappers.append(mapper) - prop = mapper.get_property(token, resolve_synonyms=True, raiseerr=raiseerr) - key = token - elif isinstance(token, PropComparator): - prop = token.property - if not entity: - entity = self.__find_entity(query, token.parententity, raiseerr) - if not entity: - return [], [] - path_element = entity.path_entity - mappers.append(prop.parent) - key = prop.key - else: - raise sa_exc.ArgumentError("mapper option expects string key " - "or list of attributes") - - if current_path and key == current_path[1]: - current_path = current_path[2:] - continue + tokens += [key] + + for token in tokens: + if isinstance(token, basestring): + if not entity: + if current_path: + if current_path[1] == token: + current_path = current_path[2:] + continue - if prop is None: - return [], [] - - path = build_path(path_element, prop.key, path) - l.append(path) - if getattr(token, '_of_type', None): - path_element = mapper = token._of_type - else: - path_element = mapper = getattr(prop, 'mapper', None) - - if path_element: - path_element = 
path_element + entity = query._entity_zero() + path_element = entity.path_entity + mapper = entity.mapper + mappers.append(mapper) + prop = mapper.get_property( + token, + resolve_synonyms=True, + raiseerr=raiseerr) + key = token + elif isinstance(token, PropComparator): + prop = token.property + if not entity: + if current_path: + if current_path[0:2] == [token.parententity, prop.key]: + current_path = current_path[2:] + continue + + entity = self._find_entity( + query, + token.parententity, + raiseerr) + if not entity: + return [], [] + path_element = entity.path_entity + mapper = entity.mapper + mappers.append(prop.parent) + key = prop.key + else: + raise sa_exc.ArgumentError("mapper option expects string key " + "or list of attributes") + + if prop is None: + return [], [] + + path = build_path(path_element, prop.key, path) + l.append(path) + if getattr(token, '_of_type', None): + path_element = mapper = token._of_type + else: + path_element = mapper = getattr(prop, 'mapper', None) + + if path_element: + path_element = path_element # if current_path tokens remain, then # we didn't have an exact path match. 
if current_path: return [], [] - + return l, mappers class AttributeExtension(object): diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 08bb5062a..ccaf1dd7b 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -643,30 +643,30 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return - -# path = path + (self.key,) - - - if ("orig_query", SubqueryLoader) not in context.attributes: - context.attributes[("orig_query", SubqueryLoader)] =\ - context.query - orig_query = context.attributes[("orig_query", SubqueryLoader)] + orig_query = context.attributes.get(("orig_query", SubqueryLoader), + context.query) -# orig_query = context.query - path = context.query._current_path + path + (self.key, ) + path = path + (self.key, ) - prop = path[0].get_property(path[1]) + local_cols, remote_cols = self._local_remote_columns(self.parent_property) + if len(path) > 1: + leftmost_mapper, leftmost_prop = path[0], path[0].get_property(path[1]) + leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) + else: + leftmost_cols = local_cols + leftmost_mapper = self.parent - local_cols, remote_cols = self._local_remote_columns(prop) + leftmost_attr = [ + leftmost_mapper._get_col_to_prop(c).class_attribute + for c in leftmost_cols + ] local_attr = [ - path[0]._get_col_to_prop(c).class_attribute + self.parent._get_col_to_prop(c).class_attribute for c in local_cols ] - #attr = self.parent_property.class_attribute - # modify the query to just look for parent columns in the # join condition @@ -676,7 +676,7 @@ class SubqueryLoader(AbstractRelationshipLoader): q = orig_query._clone() #context.query._clone() q._attributes = q._attributes.copy() q._attributes[("orig_query", SubqueryLoader)] = orig_query - q._set_entities(local_attr) + q._set_entities(leftmost_attr) q._order_by = None # now select from it as a subquery. 
@@ -690,36 +690,19 @@ class SubqueryLoader(AbstractRelationshipLoader): q = q.order_by(*local_attr) - q._attributes = q._attributes.copy() for attr in orig_query._attributes: strat, opt_path = attr if strat == "loaderstrategy": opt_path = opt_path[len(path):] q._attributes[("loaderstrategy", opt_path)] =\ context.query._attributes[attr] - - q = q._with_current_path(path) + if self.parent_property.order_by: q = q.order_by(*self.parent_property.order_by) context.attributes[('subquery', path)] = \ - q._attributes[('subquery', path)] = \ - q - -# for value in self.mapper._iterate_polymorphic_properties(): -# strat = value._get_context_strategy( -# context, path + -# (self.mapper,value.key) -# ) - #print "VALUE", value, "PATH", path + (self.mapper,), "STRAT", type(strat) -# if isinstance(strat, SubqueryLoader): -# value.setup( -# context, -# entity, -## path + (self.mapper,), -# adapter, -# parentmapper=self.mapper, -# ) + q._attributes[('subquery', path)] = q + def _local_remote_columns(self, prop): if prop.secondary is None: @@ -733,9 +716,11 @@ class SubqueryLoader(AbstractRelationshipLoader): ] def create_row_processor(self, context, path, mapper, row, adapter): -# path = path + (self.key,) - path = context.query._current_path + path + (self.key,) - + path = path + (self.key,) + + if ('subquery', path) not in context.attributes: + return None, None + local_cols, remote_cols = self._local_remote_columns(self.parent_property) local_attr = [self.parent._get_col_to_prop(c).key for c in local_cols] @@ -743,7 +728,6 @@ class SubqueryLoader(AbstractRelationshipLoader): self.mapper._get_col_to_prop(c).key for c in remote_cols] - print "STRAT LOOKING FOR SUBQ AT PATH", path q = context.attributes[('subquery', path)] collections = dict( @@ -753,6 +737,9 @@ class SubqueryLoader(AbstractRelationshipLoader): lambda x:x[1:] )) + if adapter: + local_cols = [adapter.columns[c] for c in local_cols] + def execute(state, dict_, row): collection = collections.get( tuple([row[col] for 
col in local_cols]), @@ -1040,11 +1027,11 @@ class EagerLoader(AbstractRelationshipLoader): log.class_logger(EagerLoader) class EagerLazyOption(StrategizedOption): - def __init__(self, key, lazy=True, chained=False, - mapper=None, propagate_to_loaders=True, + def __init__(self, key, lazy=True, chained=False, + propagate_to_loaders=True, _strategy_cls=None ): - super(EagerLazyOption, self).__init__(key, mapper) + super(EagerLazyOption, self).__init__(key) self.lazy = lazy self.chained = chained self.propagate_to_loaders = propagate_to_loaders -- cgit v1.2.1 From 77752f4a3fb496330efaab8b741f3c902ed8c58d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 22 Mar 2010 21:23:31 -0400 Subject: all tests are passing. some more TODOs and more testing needed --- lib/sqlalchemy/orm/strategies.py | 60 ++++++++++++++++++++++++++++------------ 1 file changed, 43 insertions(+), 17 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index ccaf1dd7b..db3d565c4 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -644,18 +644,29 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return - orig_query = context.attributes.get(("orig_query", SubqueryLoader), - context.query) + # the leftmost query we'll be joining from. + # in the case of an end-user query with eager or subq + # loads, this is the user's query. In the case of a lazyload, + # this is the query generated in the LazyLoader. + # this query is passed along to all queries generated for this + # load. + if ("orig_query", SubqueryLoader) not in context.attributes: + context.attributes[("orig_query", SubqueryLoader)] = context.query + + orig_query = context.attributes[("orig_query", SubqueryLoader)] + # build up a path indicating the path from the leftmost + # entity to the thing we're subquery loading. 
+ subq_path = context.attributes.get(('subquery_path', None), ()) + path = path + (self.key, ) local_cols, remote_cols = self._local_remote_columns(self.parent_property) - if len(path) > 1: - leftmost_mapper, leftmost_prop = path[0], path[0].get_property(path[1]) - leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) - else: - leftmost_cols = local_cols - leftmost_mapper = self.parent + + subq_path = subq_path + path + leftmost_mapper, leftmost_prop = \ + subq_path[0], subq_path[0].get_property(subq_path[1]) + leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) leftmost_attr = [ leftmost_mapper._get_col_to_prop(c).class_attribute @@ -673,36 +684,51 @@ class SubqueryLoader(AbstractRelationshipLoader): # set the original query to only look # for the significant columns, not order # by anything. - q = orig_query._clone() #context.query._clone() - q._attributes = q._attributes.copy() + q = orig_query._clone() + q._attributes = {} q._attributes[("orig_query", SubqueryLoader)] = orig_query q._set_entities(leftmost_attr) q._order_by = None + q._attributes[('subquery_path', None)] = subq_path + # now select from it as a subquery. q = q.from_self(self.mapper, *local_attr) # and join to the related thing we want # to load. - for mapper, key in [(path[i], path[i+1]) for i in xrange(0, len(path), 2)]: + for mapper, key in [(subq_path[i], subq_path[i+1]) + for i in xrange(0, len(subq_path), 2)]: prop = mapper.get_property(key) q = q.join(prop.class_attribute) q = q.order_by(*local_attr) - for attr in orig_query._attributes: + # place loaderstrategy tokens in the new query + # so that further loader strategy options take effect. + # TODO: use the actual options in the parent query, + # figure out how to achieve the path-manipulation + # (should probably use _current_path). + # some of these options may be user-defined so they + # must propagate. + # consider adding a new call to MapperOption that is + # specific to subquery loads. 
+ for attr in context.attributes: strat, opt_path = attr if strat == "loaderstrategy": - opt_path = opt_path[len(path):] + # TODO: make sure we understand this part + #opt_path = opt_path[len(path):] # works, i think this + # leaves excess tho + opt_path = opt_path[2:] # also works q._attributes[("loaderstrategy", opt_path)] =\ - context.query._attributes[attr] + context.query._attributes[attr] if self.parent_property.order_by: q = q.order_by(*self.parent_property.order_by) - context.attributes[('subquery', path)] = \ - q._attributes[('subquery', path)] = q - + # this key is for the row_processor to pick up + # within this same loader. + context.attributes[('subquery', path)] = q def _local_remote_columns(self, prop): if prop.secondary is None: -- cgit v1.2.1 From f130b2f04dd2bc94e086d48e9eaa41644ad8850f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 11:14:02 -0400 Subject: that was easy --- lib/sqlalchemy/orm/strategies.py | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index db3d565c4..46cfdbe61 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -704,24 +704,9 @@ class SubqueryLoader(AbstractRelationshipLoader): q = q.order_by(*local_attr) - # place loaderstrategy tokens in the new query - # so that further loader strategy options take effect. - # TODO: use the actual options in the parent query, - # figure out how to achieve the path-manipulation - # (should probably use _current_path). - # some of these options may be user-defined so they - # must propagate. - # consider adding a new call to MapperOption that is - # specific to subquery loads. 
- for attr in context.attributes: - strat, opt_path = attr - if strat == "loaderstrategy": - # TODO: make sure we understand this part - #opt_path = opt_path[len(path):] # works, i think this - # leaves excess tho - opt_path = opt_path[2:] # also works - q._attributes[("loaderstrategy", opt_path)] =\ - context.query._attributes[attr] + # propagate loader options etc. to the new query + q = q._with_current_path(subq_path) + q = q._conditional_options(*orig_query._with_options) if self.parent_property.order_by: q = q.order_by(*self.parent_property.order_by) -- cgit v1.2.1 From 2d6ac273aeb0cec505789716406fcbcc1fa33db0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 12:11:20 -0400 Subject: adding tests, all the features present in joined eager loading --- lib/sqlalchemy/orm/strategies.py | 68 ++++++++++++++++++++++++++++++---------- 1 file changed, 52 insertions(+), 16 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 46cfdbe61..259e0f10c 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -632,6 +632,10 @@ class LoadLazyAttribute(object): return None class SubqueryLoader(AbstractRelationshipLoader): + def init(self): + super(SubqueryLoader, self).init() + self.join_depth = self.parent_property.join_depth + def init_class_attribute(self, mapper): self.parent_property.\ _get_strategy(LazyLoader).\ @@ -643,6 +647,27 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return + + path = path + (self.key, ) + + # build up a path indicating the path from the leftmost + # entity to the thing we're subquery loading. + subq_path = context.attributes.get(('subquery_path', None), ()) + + subq_path = subq_path + path + + reduced_path = interfaces._reduce_path(subq_path) + + # check for join_depth or basic recursion, + # if the current path was not explicitly stated as + # a desired "loaderstrategy" (i.e. 
via query.options()) + if ("loaderstrategy", reduced_path) not in context.attributes: + if self.join_depth: + if len(path) / 2 > self.join_depth: + return + else: + if self.mapper.base_mapper in reduced_path: + return # the leftmost query we'll be joining from. # in the case of an end-user query with eager or subq @@ -655,15 +680,9 @@ class SubqueryLoader(AbstractRelationshipLoader): orig_query = context.attributes[("orig_query", SubqueryLoader)] - # build up a path indicating the path from the leftmost - # entity to the thing we're subquery loading. - subq_path = context.attributes.get(('subquery_path', None), ()) - - path = path + (self.key, ) local_cols, remote_cols = self._local_remote_columns(self.parent_property) - subq_path = subq_path + path leftmost_mapper, leftmost_prop = \ subq_path[0], subq_path[0].get_property(subq_path[1]) leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) @@ -688,7 +707,8 @@ class SubqueryLoader(AbstractRelationshipLoader): q._attributes = {} q._attributes[("orig_query", SubqueryLoader)] = orig_query q._set_entities(leftmost_attr) - q._order_by = None + if q._limit is None and q._offset is None: + q._order_by = None q._attributes[('subquery_path', None)] = subq_path @@ -750,15 +770,31 @@ class SubqueryLoader(AbstractRelationshipLoader): if adapter: local_cols = [adapter.columns[c] for c in local_cols] - - def execute(state, dict_, row): - collection = collections.get( - tuple([row[col] for col in local_cols]), - () - ) - state.get_impl(self.key).\ - set_committed_value(state, dict_, collection) - + + if self.uselist: + def execute(state, dict_, row): + collection = collections.get( + tuple([row[col] for col in local_cols]), + () + ) + state.get_impl(self.key).\ + set_committed_value(state, dict_, collection) + else: + def execute(state, dict_, row): + collection = collections.get( + tuple([row[col] for col in local_cols]), + (None,) + ) + if len(collection) > 1: + util.warn( + "Multiple rows returned with " + 
"uselist=False for eagerly-loaded attribute '%s' " + % self) + + scalar = collection[0] + state.get_impl(self.key).\ + set_committed_value(state, dict_, scalar) + return execute, None class EagerLoader(AbstractRelationshipLoader): -- cgit v1.2.1 From 3308cf8f489d869abe50e91fd83ab3fad43c82a0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 14:02:22 -0400 Subject: - order by secondary works - self referential is killing it, however --- lib/sqlalchemy/orm/strategies.py | 30 +++++++++++++++++++++++++----- 1 file changed, 25 insertions(+), 5 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 259e0f10c..f507bfbe5 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -711,25 +711,45 @@ class SubqueryLoader(AbstractRelationshipLoader): q._order_by = None q._attributes[('subquery_path', None)] = subq_path - + # now select from it as a subquery. q = q.from_self(self.mapper, *local_attr) - + # and join to the related thing we want # to load. for mapper, key in [(subq_path[i], subq_path[i+1]) - for i in xrange(0, len(subq_path), 2)]: + for i in xrange(0, len(subq_path), 2)]: prop = mapper.get_property(key) q = q.join(prop.class_attribute) + + #join_on = [(subq_path[i], subq_path[i+1]) + # for i in xrange(0, len(subq_path), 2)] + #for i, (mapper, key) in enumerate(join_on): + # aliased = i != len(join_on) - 1 + # prop = mapper.get_property(key) + # q = q.join(prop.class_attribute, aliased=aliased) q = q.order_by(*local_attr) # propagate loader options etc. to the new query q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) - + if self.parent_property.order_by: - q = q.order_by(*self.parent_property.order_by) + # if there's an ORDER BY, alias it the same + # way joinedloader does, but we have to pull out + # the "eagerjoin" from the query. + # this really only picks up the "secondary" table + # right now. 
+ eagerjoin = q._from_obj[0] + eager_order_by = \ + eagerjoin._target_adapter.\ + copy_and_process( + util.to_list( + self.parent_property.order_by + ) + ) + q = q.order_by(*eager_order_by) # this key is for the row_processor to pick up # within this same loader. -- cgit v1.2.1 From 0a556d322029651be9018a1b41f13e23edf18388 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 14:54:26 -0400 Subject: - Fixed bug in Query whereby the usage of aliased() constructs would fail if the underlying table (but not the actual alias) were referenced inside the subquery generated by q.from_self() or q.select_from(). --- lib/sqlalchemy/sql/util.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/sql/util.py b/lib/sqlalchemy/sql/util.py index 74651a9d1..d5575e0e7 100644 --- a/lib/sqlalchemy/sql/util.py +++ b/lib/sqlalchemy/sql/util.py @@ -579,7 +579,7 @@ class ClauseAdapter(visitors.ReplacingCloningVisitor): return None elif self.exclude and col in self.exclude: return None - + return self._corresponding_column(col, True) class ColumnAdapter(ClauseAdapter): @@ -587,11 +587,13 @@ class ColumnAdapter(ClauseAdapter): Provides the ability to "wrap" this ClauseAdapter around another, a columns dictionary which returns - cached, adapted elements given an original, and an + adapted elements given an original, and an adapted_row() factory. 
""" - def __init__(self, selectable, equivalents=None, chain_to=None, include=None, exclude=None, adapt_required=False): + def __init__(self, selectable, equivalents=None, + chain_to=None, include=None, + exclude=None, adapt_required=False): ClauseAdapter.__init__(self, selectable, equivalents, include, exclude) if chain_to: self.chain(chain_to) @@ -617,7 +619,7 @@ class ColumnAdapter(ClauseAdapter): return locate def _locate_col(self, col): - c = self._corresponding_column(col, False) + c = self._corresponding_column(col, True) if c is None: c = self.adapt_clause(col) -- cgit v1.2.1 From 4d396e5ff0ea111c81605527d415b251d73629f7 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 18:33:31 -0400 Subject: this version actually works for all existing tests plus simple self-referential. I don't like how difficult it was to get Query() to do it, however. --- lib/sqlalchemy/orm/query.py | 15 +++++------ lib/sqlalchemy/orm/strategies.py | 54 +++++++++++++++++++++++++--------------- 2 files changed, 42 insertions(+), 27 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index f06717217..2dfefc433 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -134,7 +134,7 @@ class Query(object): self._polymorphic_adapters[m.mapped_table] = self._polymorphic_adapters[m.local_table] = adapter def _set_select_from(self, *obj): - + fa = [] for from_obj in obj: if isinstance(from_obj, expression._SelectBaseMixin): @@ -143,9 +143,8 @@ class Query(object): self._from_obj = tuple(fa) - # TODO: only use this adapter for from_self() ? right - # now its usage is somewhat arbitrary. 
- if len(self._from_obj) == 1 and isinstance(self._from_obj[0], expression.Alias): + if len(self._from_obj) == 1 and \ + isinstance(self._from_obj[0], expression.Alias): equivs = self.__all_equivs() self._from_obj_alias = sql_util.ColumnAdapter(self._from_obj[0], equivs) @@ -625,7 +624,7 @@ class Query(object): if entities: q._set_entities(entities) return q - + @_generative() def _from_selectable(self, fromclause): for attr in ('_statement', '_criterion', '_order_by', '_group_by', @@ -2139,6 +2138,7 @@ class _MapperEntity(_QueryEntity): self._with_polymorphic = with_polymorphic self._polymorphic_discriminator = None self.is_aliased_class = is_aliased_class + self.disable_aliasing = False if is_aliased_class: self.path_entity = self.entity = self.entity_zero = entity else: @@ -2170,7 +2170,9 @@ class _MapperEntity(_QueryEntity): query._entities.append(self) def _get_entity_clauses(self, query, context): - + if self.disable_aliasing: + return None + adapter = None if not self.is_aliased_class and query._polymorphic_adapters: adapter = query._polymorphic_adapters.get(self.mapper, None) @@ -2251,7 +2253,6 @@ class _MapperEntity(_QueryEntity): def __str__(self): return str(self.mapper) - class _ColumnEntity(_QueryEntity): """Column/expression based entity.""" diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index f507bfbe5..4431b408f 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -692,11 +692,6 @@ class SubqueryLoader(AbstractRelationshipLoader): for c in leftmost_cols ] - local_attr = [ - self.parent._get_col_to_prop(c).class_attribute - for c in local_cols - ] - # modify the query to just look for parent columns in the # join condition @@ -713,24 +708,44 @@ class SubqueryLoader(AbstractRelationshipLoader): q._attributes[('subquery_path', None)] = subq_path # now select from it as a subquery. 
- q = q.from_self(self.mapper, *local_attr) + local_attr = [ + self.parent._get_col_to_prop(c).class_attribute + for c in local_cols + ] + + q = q.from_self(self.mapper) + q._entities[0].disable_aliasing = True - # and join to the related thing we want - # to load. - for mapper, key in [(subq_path[i], subq_path[i+1]) - for i in xrange(0, len(subq_path), 2)]: + to_join = [(subq_path[i], subq_path[i+1]) + for i in xrange(0, len(subq_path), 2)] + + for i, (mapper, key) in enumerate(to_join): + alias_join = i < len(to_join) - 1 + second_to_last = i == len(to_join) - 2 + prop = mapper.get_property(key) - q = q.join(prop.class_attribute) + q = q.join(prop.class_attribute, aliased=alias_join) - #join_on = [(subq_path[i], subq_path[i+1]) - # for i in xrange(0, len(subq_path), 2)] - #for i, (mapper, key) in enumerate(join_on): - # aliased = i != len(join_on) - 1 - # prop = mapper.get_property(key) - # q = q.join(prop.class_attribute, aliased=aliased) - - q = q.order_by(*local_attr) + if alias_join and second_to_last: + cols = [ + q._adapt_clause(col, True, False) + for col in local_cols + ] + for col in cols: + q = q.add_column(col) + q = q.order_by(*cols) + if len(to_join) < 2: + local_attr = [ + self.parent._get_col_to_prop(c).class_attribute + for c in local_cols + ] + + for col in local_attr: + q = q.add_column(col) + q = q.order_by(*local_attr) + + # propagate loader options etc. 
to the new query q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) @@ -774,7 +789,6 @@ class SubqueryLoader(AbstractRelationshipLoader): local_cols, remote_cols = self._local_remote_columns(self.parent_property) - local_attr = [self.parent._get_col_to_prop(c).key for c in local_cols] remote_attr = [ self.mapper._get_col_to_prop(c).key for c in remote_cols] -- cgit v1.2.1 From ead3d6ab2fd323dad99c310cab7aa12d72f4f4ee Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 18:53:42 -0400 Subject: - added add_columns() to Query - pending deprecates add_column() - refined subquery strategy to use more public Query API --- lib/sqlalchemy/orm/query.py | 15 +++++++++--- lib/sqlalchemy/orm/strategies.py | 52 ++++++++++++++++++---------------------- 2 files changed, 35 insertions(+), 32 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 2dfefc433..5e7a2028e 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -662,16 +662,25 @@ class Query(object): return None @_generative() - def add_column(self, column): - """Add a SQL ColumnElement to the list of result columns to be returned.""" + def add_columns(self, *column): + """Add one or more column expressions to the list + of result columns to be returned.""" self._entities = list(self._entities) l = len(self._entities) - _ColumnEntity(self, column) + for c in column: + _ColumnEntity(self, c) # _ColumnEntity may add many entities if the # given arg is a FROM clause self._setup_aliasizers(self._entities[l:]) + @util.pending_deprecation("add_column() superceded by add_columns()") + def add_column(self, column): + """Add a column expression to the list of result columns + to be returned.""" + + return self.add_columns(column) + def options(self, *args): """Return a new Query object, applying the given list of MapperOptions. 
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 4431b408f..b6ca1090d 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -707,44 +707,38 @@ class SubqueryLoader(AbstractRelationshipLoader): q._attributes[('subquery_path', None)] = subq_path - # now select from it as a subquery. - local_attr = [ - self.parent._get_col_to_prop(c).class_attribute - for c in local_cols - ] - q = q.from_self(self.mapper) q._entities[0].disable_aliasing = True - to_join = [(subq_path[i], subq_path[i+1]) - for i in xrange(0, len(subq_path), 2)] - - for i, (mapper, key) in enumerate(to_join): - alias_join = i < len(to_join) - 1 - second_to_last = i == len(to_join) - 2 - - prop = mapper.get_property(key) - q = q.join(prop.class_attribute, aliased=alias_join) - - if alias_join and second_to_last: - cols = [ - q._adapt_clause(col, True, False) - for col in local_cols + to_join = [ + (subq_path[i], subq_path[i+1]) + for i in xrange(0, len(subq_path), 2) ] - for col in cols: - q = q.add_column(col) - q = q.order_by(*cols) - + if len(to_join) < 2: local_attr = [ self.parent._get_col_to_prop(c).class_attribute for c in local_cols ] - - for col in local_attr: - q = q.add_column(col) - q = q.order_by(*local_attr) - + else: + parent_alias = mapperutil.AliasedClass(self.parent) + local_attr = [ + getattr(parent_alias, self.parent._get_col_to_prop(c).key) + for c in local_cols + ] + q = q.add_columns(*local_attr) + q = q.order_by(*local_attr) + + for i, (mapper, key) in enumerate(to_join): + alias_join = i < len(to_join) - 1 + second_to_last = i == len(to_join) - 2 + + prop = mapper.get_property(key) + + if second_to_last: + q = q.join((parent_alias, prop.class_attribute)) + else: + q = q.join(prop.class_attribute, aliased=alias_join) # propagate loader options etc. 
to the new query q = q._with_current_path(subq_path) -- cgit v1.2.1 From 90b5ac47cddfc97df05cd30e33149f963090c0f0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 20:23:01 -0400 Subject: getting inheritance to work. some complex cases may have to fail for the time being. --- lib/sqlalchemy/orm/query.py | 23 +++++++++++++-------- lib/sqlalchemy/orm/strategies.py | 43 +++++++++++++++++++++++++--------------- 2 files changed, 42 insertions(+), 24 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 5e7a2028e..43b4e6d77 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -198,7 +198,13 @@ class Query(object): @_generative() def _adapt_all_clauses(self): self._disable_orm_filtering = True - + + def _adapt_col_list(self, cols): + return [ + self._adapt_clause(expression._literal_as_text(o), True, True) + for o in cols + ] + def _adapt_clause(self, clause, as_filter, orm_only): adapters = [] if as_filter and self._filter_aliases: @@ -773,7 +779,6 @@ class Query(object): return self.filter(sql.and_(*clauses)) - @_generative(_no_statement_condition, _no_limit_offset) @util.accepts_a_list_as_starargs(list_deprecation='deprecated') def order_by(self, *criterion): @@ -782,7 +787,7 @@ class Query(object): if len(criterion) == 1 and criterion[0] is None: self._order_by = None else: - criterion = [self._adapt_clause(expression._literal_as_text(o), True, True) for o in criterion] + criterion = self._adapt_col_list(criterion) if self._order_by is False or self._order_by is None: self._order_by = criterion @@ -796,7 +801,7 @@ class Query(object): criterion = list(chain(*[_orm_columns(c) for c in criterion])) - criterion = [self._adapt_clause(expression._literal_as_text(o), True, True) for o in criterion] + criterion = self._adapt_col_list(criterion) if self._group_by is False: self._group_by = criterion @@ -2147,7 +2152,7 @@ class _MapperEntity(_QueryEntity): 
self._with_polymorphic = with_polymorphic self._polymorphic_discriminator = None self.is_aliased_class = is_aliased_class - self.disable_aliasing = False + self._subq_aliasing = False if is_aliased_class: self.path_entity = self.entity = self.entity_zero = entity else: @@ -2179,8 +2184,6 @@ class _MapperEntity(_QueryEntity): query._entities.append(self) def _get_entity_clauses(self, query, context): - if self.disable_aliasing: - return None adapter = None if not self.is_aliased_class and query._polymorphic_adapters: @@ -2188,7 +2191,11 @@ class _MapperEntity(_QueryEntity): if not adapter and self.adapter: adapter = self.adapter - + + # special flag set by subquery loader + if self._subq_aliasing: + return adapter + if adapter: if query._from_obj_alias: ret = adapter.wrap(query._from_obj_alias) diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index b6ca1090d..0e5e2efdf 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -647,7 +647,7 @@ class SubqueryLoader(AbstractRelationshipLoader): if not context.query._enable_eagerloads: return - + path = path + (self.key, ) # build up a path indicating the path from the leftmost @@ -657,7 +657,7 @@ class SubqueryLoader(AbstractRelationshipLoader): subq_path = subq_path + path reduced_path = interfaces._reduce_path(subq_path) - + # check for join_depth or basic recursion, # if the current path was not explicitly stated as # a desired "loaderstrategy" (i.e. 
via query.options()) @@ -680,11 +680,14 @@ class SubqueryLoader(AbstractRelationshipLoader): orig_query = context.attributes[("orig_query", SubqueryLoader)] - local_cols, remote_cols = self._local_remote_columns(self.parent_property) - leftmost_mapper, leftmost_prop = \ - subq_path[0], subq_path[0].get_property(subq_path[1]) + if self.parent.isa(subq_path[0]) and self.key==subq_path[1]: + leftmost_mapper, leftmost_prop = \ + self.parent, self.parent_property + else: + leftmost_mapper, leftmost_prop = \ + subq_path[0], subq_path[0].get_property(subq_path[1]) leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) leftmost_attr = [ @@ -692,23 +695,24 @@ class SubqueryLoader(AbstractRelationshipLoader): for c in leftmost_cols ] - # modify the query to just look for parent columns in the - # join condition - # set the original query to only look # for the significant columns, not order # by anything. q = orig_query._clone() q._attributes = {} q._attributes[("orig_query", SubqueryLoader)] = orig_query - q._set_entities(leftmost_attr) + q._set_entities(q._adapt_col_list(leftmost_attr)) if q._limit is None and q._offset is None: q._order_by = None + + q = q.from_self(self.mapper) - q._attributes[('subquery_path', None)] = subq_path + # TODO: this is currently a magic hardcody + # flag on _MapperEntity. we should find + # a way to turn it into public functionality. 
+ q._entities[0]._subq_aliasing = True - q = q.from_self(self.mapper) - q._entities[0].disable_aliasing = True + q._attributes[('subquery_path', None)] = subq_path to_join = [ (subq_path[i], subq_path[i+1]) @@ -726,14 +730,17 @@ class SubqueryLoader(AbstractRelationshipLoader): getattr(parent_alias, self.parent._get_col_to_prop(c).key) for c in local_cols ] - q = q.add_columns(*local_attr) q = q.order_by(*local_attr) - + q = q.add_columns(*local_attr) + for i, (mapper, key) in enumerate(to_join): alias_join = i < len(to_join) - 1 second_to_last = i == len(to_join) - 2 - prop = mapper.get_property(key) + if i == 0: + prop = leftmost_prop + else: + prop = mapper.get_property(key) if second_to_last: q = q.join((parent_alias, prop.class_attribute)) @@ -762,7 +769,7 @@ class SubqueryLoader(AbstractRelationshipLoader): # this key is for the row_processor to pick up # within this same loader. - context.attributes[('subquery', path)] = q + context.attributes[('subquery', interfaces._reduce_path(path))] = q def _local_remote_columns(self, prop): if prop.secondary is None: @@ -777,6 +784,8 @@ class SubqueryLoader(AbstractRelationshipLoader): def create_row_processor(self, context, path, mapper, row, adapter): path = path + (self.key,) + + path = interfaces._reduce_path(path) if ('subquery', path) not in context.attributes: return None, None @@ -825,6 +834,8 @@ class SubqueryLoader(AbstractRelationshipLoader): return execute, None +log.class_logger(SubqueryLoader) + class EagerLoader(AbstractRelationshipLoader): """Strategize a relationship() that loads within the process of the parent object being selected.""" -- cgit v1.2.1 From 1321db6473a45517bbb86203e9cbdd4c02fa8ac0 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 23 Mar 2010 20:41:40 -0400 Subject: - Fixed bug introduced in 0.6beta2 where column labels would render inside of column expressions already assigned a label. 
[ticket:1747] --- lib/sqlalchemy/sql/compiler.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 4e9175ae8..75b3f79f0 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -305,11 +305,13 @@ class SQLCompiler(engine.Compiled): def visit_grouping(self, grouping, asfrom=False, **kwargs): return "(" + self.process(grouping.element, **kwargs) + ")" - def visit_label(self, label, result_map=None, within_columns_clause=False, **kw): + def visit_label(self, label, result_map=None, + within_label_clause=False, + within_columns_clause=False, **kw): # only render labels within the columns clause # or ORDER BY clause of a select. dialect-specific compilers # can modify this behavior. - if within_columns_clause: + if within_columns_clause and not within_label_clause: labelname = isinstance(label.name, sql._generated_label) and \ self._truncated_identifier("colident", label.name) or label.name @@ -318,13 +320,14 @@ class SQLCompiler(engine.Compiled): (label.name, (label, label.element, labelname), label.element.type) return self.process(label.element, - within_columns_clause=within_columns_clause, + within_columns_clause=True, + within_label_clause=True, **kw) + \ OPERATORS[operators.as_] + \ self.preparer.format_label(label, labelname) else: return self.process(label.element, - within_columns_clause=within_columns_clause, + within_columns_clause=False, **kw) def visit_column(self, column, result_map=None, **kwargs): -- cgit v1.2.1 From 4cf01115f669e44d77d46381f16ea5bf57197a1b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 12:15:42 -0400 Subject: - Query.join() will detect if the end result will be "FROM A JOIN A", and will raise an error if so. 
--- lib/sqlalchemy/orm/query.py | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 43b4e6d77..d42a8f863 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1068,6 +1068,13 @@ class Query(object): if left is None: left = self._joinpoint_zero() + if left is right and \ + not create_aliases and \ + not self._entity_zero()._subq_aliasing: # <-- TODO: hack + raise sa_exc.InvalidRequestError( + "Can't construct a join from %s to %s, they are the same entity" % + (left, right)) + left_mapper, left_selectable, left_is_aliased = _entity_info(left) right_mapper, right_selectable, is_aliased_class = _entity_info(right) -- cgit v1.2.1 From 4a794aa5481b3d6e6ab3bae6d12c39d8cb228da8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 12:46:58 -0400 Subject: this version works with *all* the polymorphic scenarios by putting the subquery into an aliased(), so that it can be controlled. self ref breaks now. will move the joining out to use orm.join(). 
--- lib/sqlalchemy/orm/query.py | 3 +-- lib/sqlalchemy/orm/strategies.py | 26 ++++++++++++++++++++------ 2 files changed, 21 insertions(+), 8 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index d42a8f863..c56804847 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1069,8 +1069,7 @@ class Query(object): left = self._joinpoint_zero() if left is right and \ - not create_aliases and \ - not self._entity_zero()._subq_aliasing: # <-- TODO: hack + not create_aliases: raise sa_exc.InvalidRequestError( "Can't construct a join from %s to %s, they are the same entity" % (left, right)) diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 0e5e2efdf..92b560cd9 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -699,18 +699,27 @@ class SubqueryLoader(AbstractRelationshipLoader): # for the significant columns, not order # by anything. q = orig_query._clone() - q._attributes = {} - q._attributes[("orig_query", SubqueryLoader)] = orig_query +# q._attributes = {} +# q._attributes[("orig_query", SubqueryLoader)] = orig_query q._set_entities(q._adapt_col_list(leftmost_attr)) if q._limit is None and q._offset is None: q._order_by = None - - q = q.from_self(self.mapper) + + embed_q = q.with_labels().subquery() + + q = q.session.query(self.mapper) + q._attributes = {} + q._attributes[("orig_query", SubqueryLoader)] = orig_query + + left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) + q = q.select_from(left_alias) + +# q = q.from_self(self.mapper) # TODO: this is currently a magic hardcody # flag on _MapperEntity. we should find # a way to turn it into public functionality. 
- q._entities[0]._subq_aliasing = True +# q._entities[0]._subq_aliasing = True q._attributes[('subquery_path', None)] = subq_path @@ -720,8 +729,12 @@ class SubqueryLoader(AbstractRelationshipLoader): ] if len(to_join) < 2: +# local_attr = [ +# self.parent._get_col_to_prop(c).class_attribute +# for c in local_cols +# ] local_attr = [ - self.parent._get_col_to_prop(c).class_attribute + getattr(left_alias, self.parent._get_col_to_prop(c).key) for c in local_cols ] else: @@ -747,6 +760,7 @@ class SubqueryLoader(AbstractRelationshipLoader): else: q = q.join(prop.class_attribute, aliased=alias_join) + # propagate loader options etc. to the new query q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) -- cgit v1.2.1 From 9c68542116285c94bc2584edcd25fec6cf305eba Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 13:19:34 -0400 Subject: everything everything passes on this one. still want to get rid of that hack tho. --- lib/sqlalchemy/orm/query.py | 3 ++- lib/sqlalchemy/orm/strategies.py | 16 +++------------- 2 files changed, 5 insertions(+), 14 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index c56804847..d0827df84 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1069,7 +1069,8 @@ class Query(object): left = self._joinpoint_zero() if left is right and \ - not create_aliases: + not create_aliases and \ + not self._entity_zero()._subq_aliasing: raise sa_exc.InvalidRequestError( "Can't construct a join from %s to %s, they are the same entity" % (left, right)) diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 92b560cd9..cf1839df7 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -699,8 +699,6 @@ class SubqueryLoader(AbstractRelationshipLoader): # for the significant columns, not order # by anything. 
q = orig_query._clone() -# q._attributes = {} -# q._attributes[("orig_query", SubqueryLoader)] = orig_query q._set_entities(q._adapt_col_list(leftmost_attr)) if q._limit is None and q._offset is None: q._order_by = None @@ -708,19 +706,15 @@ class SubqueryLoader(AbstractRelationshipLoader): embed_q = q.with_labels().subquery() q = q.session.query(self.mapper) + + # magic hardcody thing. TODO: dammit + q._entities[0]._subq_aliasing = True q._attributes = {} q._attributes[("orig_query", SubqueryLoader)] = orig_query left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) q = q.select_from(left_alias) -# q = q.from_self(self.mapper) - - # TODO: this is currently a magic hardcody - # flag on _MapperEntity. we should find - # a way to turn it into public functionality. -# q._entities[0]._subq_aliasing = True - q._attributes[('subquery_path', None)] = subq_path to_join = [ @@ -729,10 +723,6 @@ class SubqueryLoader(AbstractRelationshipLoader): ] if len(to_join) < 2: -# local_attr = [ -# self.parent._get_col_to_prop(c).class_attribute -# for c in local_cols -# ] local_attr = [ getattr(left_alias, self.parent._get_col_to_prop(c).key) for c in local_cols -- cgit v1.2.1 From 769f3eed334a766460ede08a2c1edf527c640db3 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 13:55:07 -0400 Subject: - removed need for _subq_aliasing workaround - removed cruft - all tests pass, now ready for API adjustments ('eagerload'->'joinedload'), docs --- lib/sqlalchemy/orm/query.py | 8 +---- lib/sqlalchemy/orm/strategies.py | 67 ++++++++++++++++++++-------------------- 2 files changed, 35 insertions(+), 40 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index d0827df84..5cbdbe80d 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1069,8 +1069,7 @@ class Query(object): left = self._joinpoint_zero() if left is right and \ - not create_aliases and \ - not 
self._entity_zero()._subq_aliasing: + not create_aliases: raise sa_exc.InvalidRequestError( "Can't construct a join from %s to %s, they are the same entity" % (left, right)) @@ -2159,7 +2158,6 @@ class _MapperEntity(_QueryEntity): self._with_polymorphic = with_polymorphic self._polymorphic_discriminator = None self.is_aliased_class = is_aliased_class - self._subq_aliasing = False if is_aliased_class: self.path_entity = self.entity = self.entity_zero = entity else: @@ -2199,10 +2197,6 @@ class _MapperEntity(_QueryEntity): if not adapter and self.adapter: adapter = self.adapter - # special flag set by subquery loader - if self._subq_aliasing: - return adapter - if adapter: if query._from_obj_alias: ret = adapter.wrap(query._from_obj_alias) diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index cf1839df7..c5d971313 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -658,9 +658,7 @@ class SubqueryLoader(AbstractRelationshipLoader): reduced_path = interfaces._reduce_path(subq_path) - # check for join_depth or basic recursion, - # if the current path was not explicitly stated as - # a desired "loaderstrategy" (i.e. via query.options()) + # join-depth / recursion check if ("loaderstrategy", reduced_path) not in context.attributes: if self.join_depth: if len(path) / 2 > self.join_depth: @@ -669,19 +667,9 @@ class SubqueryLoader(AbstractRelationshipLoader): if self.mapper.base_mapper in reduced_path: return - # the leftmost query we'll be joining from. - # in the case of an end-user query with eager or subq - # loads, this is the user's query. In the case of a lazyload, - # this is the query generated in the LazyLoader. - # this query is passed along to all queries generated for this - # load. 
- if ("orig_query", SubqueryLoader) not in context.attributes: - context.attributes[("orig_query", SubqueryLoader)] = context.query - - orig_query = context.attributes[("orig_query", SubqueryLoader)] + orig_query = context.attributes.get(("orig_query", SubqueryLoader), context.query) - local_cols, remote_cols = self._local_remote_columns(self.parent_property) - + # determine attributes of the leftmost mapper if self.parent.isa(subq_path[0]) and self.key==subq_path[1]: leftmost_mapper, leftmost_prop = \ self.parent, self.parent_property @@ -695,32 +683,36 @@ class SubqueryLoader(AbstractRelationshipLoader): for c in leftmost_cols ] - # set the original query to only look - # for the significant columns, not order - # by anything. + # reformat the original query + # to look only for significant columns q = orig_query._clone() + # TODO: why does polymporphic etc. require hardcoding + # into _adapt_col_list ? Does query.add_columns(...) work + # with polymorphic loading ? q._set_entities(q._adapt_col_list(leftmost_attr)) + + # don't need ORDER BY if no limit/offset if q._limit is None and q._offset is None: q._order_by = None - + + # new query will join to an aliased entity + # of the modified original query embed_q = q.with_labels().subquery() + left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) + # new query, request endpoint columns q = q.session.query(self.mapper) - # magic hardcody thing. TODO: dammit - q._entities[0]._subq_aliasing = True q._attributes = {} q._attributes[("orig_query", SubqueryLoader)] = orig_query - - left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) - q = q.select_from(left_alias) - q._attributes[('subquery_path', None)] = subq_path + # figure out what's being joined. a.k.a. 
the fun part to_join = [ (subq_path[i], subq_path[i+1]) for i in xrange(0, len(subq_path), 2) ] + local_cols, remote_cols = self._local_remote_columns(self.parent_property) if len(to_join) < 2: local_attr = [ @@ -740,16 +732,25 @@ class SubqueryLoader(AbstractRelationshipLoader): alias_join = i < len(to_join) - 1 second_to_last = i == len(to_join) - 2 + # we need to use query.join() here because of the + # rich behavior it brings when dealing with "with_polymorphic" + # mappers, otherwise we get broken aliasing and subquerying if + # using orm.join directly. _joinpoint_zero() is because + # from_joinpoint doesn't seem to be totally working with self-ref, + # and/or we should not use aliased=True, instead use AliasedClass() + # for everything. + # three TODOs: 1. make orm.join() work with rich polymorphic (huge) + # 2. make from_joinpoint work completely 3. use AliasedClass() here + if i == 0: - prop = leftmost_prop + attr = getattr(left_alias, key) else: - prop = mapper.get_property(key) - + attr = getattr(q._joinpoint_zero(), key) + if second_to_last: - q = q.join((parent_alias, prop.class_attribute)) + q = q.join((parent_alias, attr)) else: - q = q.join(prop.class_attribute, aliased=alias_join) - + q = q.join(attr, aliased=alias_join) # propagate loader options etc. to the new query q = q._with_current_path(subq_path) @@ -771,8 +772,8 @@ class SubqueryLoader(AbstractRelationshipLoader): ) q = q.order_by(*eager_order_by) - # this key is for the row_processor to pick up - # within this same loader. + # add new query to attributes to be picked up + # by create_row_processor context.attributes[('subquery', interfaces._reduce_path(path))] = q def _local_remote_columns(self, prop): -- cgit v1.2.1 From 290a1596ce6f806aa6f25dd754cf0d2197f160ff Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 17:54:52 -0400 Subject: - made final refinements to the feature and we are 100% go on subquery loading. 
- Query.join(Cls.propname, from_joinpoint=True) will check more carefully that "Cls" is compatible with the current joinpoint, and act the same way as Query.join("propname", from_joinpoint=True) in that regard. --- lib/sqlalchemy/orm/query.py | 14 +++++++- lib/sqlalchemy/orm/strategies.py | 76 ++++++++++++++++++++++------------------ 2 files changed, 54 insertions(+), 36 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 5cbdbe80d..859333a39 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -1030,6 +1030,18 @@ class Query(object): descriptor, prop = _entity_descriptor(left_entity, onclause) onclause = descriptor + + # check for q.join(Class.propname, from_joinpoint=True) + # and Class is that of the current joinpoint + elif from_joinpoint and isinstance(onclause, interfaces.PropComparator): + left_entity = onclause.parententity + + left_mapper, left_selectable, left_is_aliased = \ + _entity_info(self._joinpoint_zero()) + if left_mapper is left_entity: + left_entity = self._joinpoint_zero() + descriptor, prop = _entity_descriptor(left_entity, onclause.key) + onclause = descriptor if isinstance(onclause, interfaces.PropComparator): if right_entity is None: @@ -1039,7 +1051,7 @@ class Query(object): right_entity = of_type else: right_entity = onclause.property.mapper - + left_entity = onclause.parententity prop = onclause.property diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index c5d971313..0665bdcb3 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -667,15 +667,18 @@ class SubqueryLoader(AbstractRelationshipLoader): if self.mapper.base_mapper in reduced_path: return - orig_query = context.attributes.get(("orig_query", SubqueryLoader), context.query) + orig_query = context.attributes.get( + ("orig_query", SubqueryLoader), + context.query) # determine attributes of the leftmost mapper if 
self.parent.isa(subq_path[0]) and self.key==subq_path[1]: leftmost_mapper, leftmost_prop = \ - self.parent, self.parent_property + self.parent, self.parent_property else: leftmost_mapper, leftmost_prop = \ - subq_path[0], subq_path[0].get_property(subq_path[1]) + subq_path[0], \ + subq_path[0].get_property(subq_path[1]) leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) leftmost_attr = [ @@ -695,64 +698,67 @@ class SubqueryLoader(AbstractRelationshipLoader): if q._limit is None and q._offset is None: q._order_by = None - # new query will join to an aliased entity - # of the modified original query + # the original query now becomes a subquery + # which we'll join onto. embed_q = q.with_labels().subquery() left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) - # new query, request endpoint columns - q = q.session.query(self.mapper) + # q becomes a new query. basically doing a longhand + # "from_self()". (from_self() itself not quite industrial + # strength enough for all contingencies...but very close) - q._attributes = {} - q._attributes[("orig_query", SubqueryLoader)] = orig_query - q._attributes[('subquery_path', None)] = subq_path + q = q.session.query(self.mapper) + q._attributes = { + ("orig_query", SubqueryLoader): orig_query, + ('subquery_path', None) : subq_path + } # figure out what's being joined. a.k.a. 
the fun part to_join = [ (subq_path[i], subq_path[i+1]) for i in xrange(0, len(subq_path), 2) ] - local_cols, remote_cols = self._local_remote_columns(self.parent_property) if len(to_join) < 2: - local_attr = [ - getattr(left_alias, self.parent._get_col_to_prop(c).key) - for c in local_cols - ] + parent_alias = left_alias else: parent_alias = mapperutil.AliasedClass(self.parent) - local_attr = [ - getattr(parent_alias, self.parent._get_col_to_prop(c).key) - for c in local_cols - ] + + local_cols, remote_cols = \ + self._local_remote_columns(self.parent_property) + + local_attr = [ + getattr(parent_alias, self.parent._get_col_to_prop(c).key) + for c in local_cols + ] q = q.order_by(*local_attr) q = q.add_columns(*local_attr) for i, (mapper, key) in enumerate(to_join): - alias_join = i < len(to_join) - 1 - second_to_last = i == len(to_join) - 2 - # we need to use query.join() here because of the - # rich behavior it brings when dealing with "with_polymorphic" - # mappers, otherwise we get broken aliasing and subquerying if - # using orm.join directly. _joinpoint_zero() is because - # from_joinpoint doesn't seem to be totally working with self-ref, - # and/or we should not use aliased=True, instead use AliasedClass() - # for everything. - # three TODOs: 1. make orm.join() work with rich polymorphic (huge) - # 2. make from_joinpoint work completely 3. use AliasedClass() here + # we need to use query.join() as opposed to + # orm.join() here because of the + # rich behavior it brings when dealing with + # "with_polymorphic" mappers. "aliased" + # and "from_joinpoint" take care of most of + # the chaining and aliasing for us. 
+ + first = i == 0 + middle = i < len(to_join) - 1 + second_to_last = i == len(to_join) - 2 - if i == 0: + if first: attr = getattr(left_alias, key) else: - attr = getattr(q._joinpoint_zero(), key) + attr = key if second_to_last: - q = q.join((parent_alias, attr)) + q = q.join((parent_alias, attr), from_joinpoint=True) else: - q = q.join(attr, aliased=alias_join) + q = q.join(attr, aliased=middle, from_joinpoint=True) - # propagate loader options etc. to the new query + # propagate loader options etc. to the new query. + # these will fire relative to subq_path. q = q._with_current_path(subq_path) q = q._conditional_options(*orig_query._with_options) -- cgit v1.2.1 From 1675811029553501bb23084604c64d974dfe739c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 19:11:01 -0400 Subject: - To accomodate the fact that there are now two kinds of eager loading available, the new names for eagerload() and eagerload_all() are joinedload() and joinedload_all(). The old names will remain as synonyms for the foreseeable future. - The "lazy" flag on the relationship() function now accepts a string argument for all kinds of loading: "select", "joined", "subquery", "noload" and "dynamic", where the default is now "select". The old values of True/ False/None still retain their usual meanings and will remain as synonyms for the foreseeable future. - Added documentation to tutorial,mapper doc, api docs for subqueryload, subqueryload_all, and other options. 
--- lib/sqlalchemy/orm/__init__.py | 131 ++++++++++++++++++++++++++++++++------- lib/sqlalchemy/orm/properties.py | 14 +---- lib/sqlalchemy/orm/strategies.py | 30 +++++---- 3 files changed, 128 insertions(+), 47 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index c9ed3cf2e..fb05f4181 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -83,6 +83,8 @@ __all__ = ( 'eagerload_all', 'extension', 'join', + 'joinedload', + 'joinedload_all', 'lazyload', 'mapper', 'make_transient', @@ -296,16 +298,21 @@ def relationship(argument, secondary=None, **kwargs): eager loads will automatically stop chaining joins when they encounter a mapper which is already higher up in the chain. - :param lazy=(True|False|None|'dynamic'): + :param lazy=('select'|'joined'|'subquery'|'noload'|'dynamic'): specifies how the related items should be loaded. Values include: - True - items should be loaded lazily when the property is first + 'select' - items should be loaded lazily when the property is first accessed. - False - items should be loaded "eagerly" in the same query as + 'joined' - items should be loaded "eagerly" in the same query as that of the parent, using a JOIN or LEFT OUTER JOIN. + + 'subquery' - items should be loaded "eagerly" within the same + query as that of the parent, using a second SQL statement + which issues a JOIN to a subquery of the original + statement. - None - no loading should occur at any time. This is to support + 'noload' - no loading should occur at any time. This is to support "write-only" attributes, or attributes which are populated in some manner specific to the application. @@ -315,7 +322,13 @@ def relationship(argument, secondary=None, **kwargs): ``remove()`` for write operations; changes to the dynamic property will not be visible until the data is flushed to the database. 
- + + True - a synonym for 'select' + + False - a synonyn for 'joined' + + None - a synonym for 'noload' + :param order_by: indicates the ordering that should be applied when loading these items. @@ -906,35 +919,42 @@ def extension(ext): return ExtensionOption(ext) @sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated') -def eagerload(*keys, **kw): +def joinedload(*keys, **kw): """Return a ``MapperOption`` that will convert the property of the given - name into an eager load. + name into an joined eager load. + + .. note:: This function is known as :func:`eagerload` in all versions + of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. + :func:`eagerload` will remain available for + the foreseeable future in order to enable cross-compatibility. Used with :meth:`~sqlalchemy.orm.query.Query.options`. examples:: - # eagerload the "orders" colleciton on "User" - query(User).options(eagerload(User.orders)) + # joined-load the "orders" colleciton on "User" + query(User).options(joinedload(User.orders)) - # eagerload the "keywords" collection on each "Item", + # joined-load the "keywords" collection on each "Item", # but not the "items" collection on "Order" - those # remain lazily loaded. 
- query(Order).options(eagerload(Order.items, Item.keywords)) + query(Order).options(joinedload(Order.items, Item.keywords)) - # to eagerload across both, use eagerload_all() - query(Order).options(eagerload_all(Order.items, Item.keywords)) + # to joined-load across both, use joinedload_all() + query(Order).options(joinedload_all(Order.items, Item.keywords)) - :func:`eagerload` also accepts a keyword argument `innerjoin=True` which + :func:`joinedload` also accepts a keyword argument `innerjoin=True` which indicates using an inner join instead of an outer:: - query(Order).options(eagerload(Order.user, innerjoin=True)) + query(Order).options(joinedload(Order.user, innerjoin=True)) - Note that the join created by :func:`eagerload` is aliased such that - no other aspects of the query will affect what it loads. To use eager + Note that the join created by :func:`joinedload` is aliased such that + no other aspects of the query will affect what it loads. To use joined eager loading with a join that is constructed manually using :meth:`~sqlalchemy.orm.query.Query.join` or :func:`~sqlalchemy.orm.join`, see :func:`contains_eager`. + See also: :func:`subqueryload`, :func:`lazyload` + """ innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: @@ -946,26 +966,33 @@ def eagerload(*keys, **kw): return strategies.EagerLazyOption(keys, lazy=False) @sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated') -def eagerload_all(*keys, **kw): +def joinedload_all(*keys, **kw): """Return a ``MapperOption`` that will convert all properties along the - given dot-separated path into an eager load. + given dot-separated path into an joined eager load. + + .. note:: This function is known as :func:`eagerload_all` in all versions + of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 series. + :func:`eagerload_all` will remain available for + the foreseeable future in order to enable cross-compatibility. Used with :meth:`~sqlalchemy.orm.query.Query.options`. 
For example:: - query.options(eagerload_all('orders.items.keywords'))... + query.options(joinedload_all('orders.items.keywords'))... will set all of 'orders', 'orders.items', and 'orders.items.keywords' to - load in one eager load. + load in one joined eager load. Individual descriptors are accepted as arguments as well:: - query.options(eagerload_all(User.orders, Order.items, Item.keywords)) + query.options(joinedload_all(User.orders, Order.items, Item.keywords)) The keyword arguments accept a flag `innerjoin=True|False` which will override the value of the `innerjoin` flag specified on the relationship(). + See also: :func:`subqueryload_all`, :func:`lazyload` + """ innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: @@ -976,11 +1003,63 @@ def eagerload_all(*keys, **kw): else: return strategies.EagerLazyOption(keys, lazy=False, chained=True) +def eagerload(*args, **kwargs): + """A synonym for :func:`joinedload()`.""" + return joinedload(*args, **kwargs) + +def eagerload_all(*args, **kwargs): + """A synonym for :func:`joinedload_all()`""" + return joinedload_all(*args, **kwargs) + def subqueryload(*keys): - return strategies.EagerLazyOption(keys, _strategy_cls=strategies.SubqueryLoader) + """Return a ``MapperOption`` that will convert the property + of the given name into an subquery eager load. + + .. note:: This function is new as of SQLAlchemy version 0.6beta3. + + Used with :meth:`~sqlalchemy.orm.query.Query.options`. + + examples:: + + # subquery-load the "orders" colleciton on "User" + query(User).options(subqueryload(User.orders)) + + # subquery-load the "keywords" collection on each "Item", + # but not the "items" collection on "Order" - those + # remain lazily loaded. 
+ query(Order).options(subqueryload(Order.items, Item.keywords)) + + # to subquery-load across both, use subqueryload_all() + query(Order).options(subqueryload_all(Order.items, Item.keywords)) + + See also: :func:`joinedload`, :func:`lazyload` + + """ + return strategies.EagerLazyOption(keys, lazy="subquery") def subqueryload_all(*keys): - return strategies.EagerLazyOption(keys, _strategy_cls=strategies.SubqueryLoader, chained=True) + """Return a ``MapperOption`` that will convert all properties along the + given dot-separated path into a subquery eager load. + + .. note:: This function is new as of SQLAlchemy version 0.6beta3. + + Used with :meth:`~sqlalchemy.orm.query.Query.options`. + + For example:: + + query.options(subqueryload_all('orders.items.keywords'))... + + will set all of 'orders', 'orders.items', and 'orders.items.keywords' to + load in one subquery eager load. + + Individual descriptors are accepted as arguments as well:: + + query.options(subquryload_all(User.orders, Order.items, Item.keywords)) + + See also: :func:`joinedload_all`, :func:`lazyload` + + """ + return strategies.EagerLazyOption(keys, lazy="subquery", chained=True) @sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated') def lazyload(*keys): @@ -989,6 +1068,8 @@ def lazyload(*keys): Used with :meth:`~sqlalchemy.orm.query.Query.options`. + See also: :func:`eagerload`, :func:`subqueryload` + """ return strategies.EagerLazyOption(keys, lazy=True) @@ -998,6 +1079,8 @@ def noload(*keys): Used with :meth:`~sqlalchemy.orm.query.Query.options`. 
+ See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload` + """ return strategies.EagerLazyOption(keys, lazy=None) diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 4b6770861..80d101b78 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -391,22 +391,14 @@ class RelationshipProperty(StrategizedProperty): self.comparator_factory = comparator_factory or RelationshipProperty.Comparator self.comparator = self.comparator_factory(self, None) util.set_creation_order(self) - + if strategy_class: self.strategy_class = strategy_class - elif self.lazy == 'dynamic': + elif self.lazy== 'dynamic': from sqlalchemy.orm import dynamic self.strategy_class = dynamic.DynaLoader - elif self.lazy is False or self.lazy == 'joined': - self.strategy_class = strategies.EagerLoader - elif self.lazy is None or self.lazy == 'noload': - self.strategy_class = strategies.NoLoader - elif self.lazy is False or self.lazy == 'select': - self.strategy_class = strategies.LazyLoader - elif self.lazy == 'subquery': - self.strategy_class = strategies.SubqueryLoader else: - self.strategy_class = strategies.LazyLoader + self.strategy_class = strategies.factory(self.lazy) self._reverse_property = set() diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 0665bdcb3..f4f3b5821 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -1125,28 +1125,34 @@ log.class_logger(EagerLoader) class EagerLazyOption(StrategizedOption): def __init__(self, key, lazy=True, chained=False, - propagate_to_loaders=True, - _strategy_cls=None + propagate_to_loaders=True ): super(EagerLazyOption, self).__init__(key) self.lazy = lazy self.chained = chained self.propagate_to_loaders = propagate_to_loaders - self.strategy_cls = _strategy_cls + self.strategy_cls = factory(lazy) def is_chained(self): return not self.lazy and self.chained def get_strategy_class(self): - if 
self.strategy_cls: - return self.strategy_cls - elif self.lazy: - return LazyLoader - elif self.lazy is False: - return EagerLoader - elif self.lazy is None: - return NoLoader - + return self.strategy_cls + +def factory(identifier): + if identifier is False or identifier == 'joined': + return EagerLoader + elif identifier is None or identifier == 'noload': + return NoLoader + elif identifier is False or identifier == 'select': + return LazyLoader + elif identifier == 'subquery': + return SubqueryLoader + else: + return LazyLoader + + + class EagerJoinOption(PropertyOption): def __init__(self, key, innerjoin, chained=False): -- cgit v1.2.1 From f10b94dfd89707fd7b69ddfa9ac849b0f092e55b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 20:46:14 -0400 Subject: this is needed for correct pathing, however some tests now fail. need tests for this --- lib/sqlalchemy/orm/strategies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index f4f3b5821..a766aab09 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -656,7 +656,7 @@ class SubqueryLoader(AbstractRelationshipLoader): subq_path = subq_path + path - reduced_path = interfaces._reduce_path(subq_path) + reduced_path = interfaces._reduce_path(path) # join-depth / recursion check if ("loaderstrategy", reduced_path) not in context.attributes: @@ -780,7 +780,7 @@ class SubqueryLoader(AbstractRelationshipLoader): # add new query to attributes to be picked up # by create_row_processor - context.attributes[('subquery', interfaces._reduce_path(path))] = q + context.attributes[('subquery', reduced_path)] = q def _local_remote_columns(self, prop): if prop.secondary is None: -- cgit v1.2.1 From a977894c84bf4351f460499a44c7c11439b8d14b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Wed, 24 Mar 2010 23:51:49 -0400 Subject: - fix some final pathing stuff, we weren't 
getting all the loads in the inheritance examples, now its improved ! - final doc pass --- lib/sqlalchemy/orm/__init__.py | 74 +++++++++++++++++++++------------------- lib/sqlalchemy/orm/interfaces.py | 5 ++- lib/sqlalchemy/orm/query.py | 25 +++++++++----- lib/sqlalchemy/orm/strategies.py | 13 ++++--- 4 files changed, 65 insertions(+), 52 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index fb05f4181..5d4bc2ee4 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -267,7 +267,6 @@ def relationship(argument, secondary=None, **kwargs): change the value used in the operation. :param foreign_keys: - a list of columns which are to be used as "foreign key" columns. this parameter should be used in conjunction with explicit ``primaryjoin`` and ``secondaryjoin`` (if needed) arguments, and @@ -280,7 +279,7 @@ def relationship(argument, secondary=None, **kwargs): the table-defined foreign keys. :param innerjoin=False: - when ``True``, eager loads will use an inner join to join + when ``True``, joined eager loads will use an inner join to join against related tables instead of an outer join. The purpose of this option is strictly one of performance, as inner joins generally perform better than outer joins. This flag can @@ -291,43 +290,46 @@ def relationship(argument, secondary=None, **kwargs): :param join_depth: when non-``None``, an integer value indicating how many levels - deep eagerload joins should be constructed on a self-referring - or cyclical relationship. The number counts how many times the - same Mapper shall be present in the loading condition along a - particular join branch. When left at its default of ``None``, - eager loads will automatically stop chaining joins when they - encounter a mapper which is already higher up in the chain. + deep "eager" loaders should join on a self-referring or cyclical + relationship. 
The number counts how many times the same Mapper + shall be present in the loading condition along a particular join + branch. When left at its default of ``None``, eager loaders + will stop chaining when they encounter a the same target mapper + which is already higher up in the chain. This option applies + both to joined- and subquery- eager loaders. :param lazy=('select'|'joined'|'subquery'|'noload'|'dynamic'): specifies how the related items should be loaded. Values include: - 'select' - items should be loaded lazily when the property is first - accessed. + * 'select' - items should be loaded lazily when the property is first + accessed. - 'joined' - items should be loaded "eagerly" in the same query as - that of the parent, using a JOIN or LEFT OUTER JOIN. + * 'joined' - items should be loaded "eagerly" in the same query as + that of the parent, using a JOIN or LEFT OUTER JOIN. - 'subquery' - items should be loaded "eagerly" within the same - query as that of the parent, using a second SQL statement - which issues a JOIN to a subquery of the original - statement. - - 'noload' - no loading should occur at any time. This is to support - "write-only" attributes, or attributes which are - populated in some manner specific to the application. - - 'dynamic' - a ``DynaLoader`` will be attached, which returns a - ``Query`` object for all read operations. The - dynamic- collection supports only ``append()`` and - ``remove()`` for write operations; changes to the - dynamic property will not be visible until the data - is flushed to the database. + * 'subquery' - items should be loaded "eagerly" within the same + query as that of the parent, using a second SQL statement + which issues a JOIN to a subquery of the original + statement. + + * 'noload' - no loading should occur at any time. This is to support + "write-only" attributes, or attributes which are + populated in some manner specific to the application. 
+ + * 'dynamic' - the attribute will return a pre-configured + :class:`~sqlalchemy.orm.query.Query` object for all read + operations, onto which further filtering operations can be + applied before iterating the results. The dynamic + collection supports a limited set of mutation operations, + allowing ``append()`` and ``remove()``. Changes to the + collection will not be visible until flushed + to the database, where it is then refetched upon iteration. - True - a synonym for 'select' + * True - a synonym for 'select' - False - a synonyn for 'joined' + * False - a synonyn for 'joined' - None - a synonym for 'noload' + * None - a synonym for 'noload' :param order_by: indicates the ordering that should be applied when loading these @@ -959,11 +961,11 @@ def joinedload(*keys, **kw): innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: return ( - strategies.EagerLazyOption(keys, lazy=False), + strategies.EagerLazyOption(keys, lazy='joined'), strategies.EagerJoinOption(keys, innerjoin) ) else: - return strategies.EagerLazyOption(keys, lazy=False) + return strategies.EagerLazyOption(keys, lazy='joined') @sa_util.accepts_a_list_as_starargs(list_deprecation='deprecated') def joinedload_all(*keys, **kw): @@ -997,11 +999,11 @@ def joinedload_all(*keys, **kw): innerjoin = kw.pop('innerjoin', None) if innerjoin is not None: return ( - strategies.EagerLazyOption(keys, lazy=False, chained=True), + strategies.EagerLazyOption(keys, lazy='joined', chained=True), strategies.EagerJoinOption(keys, innerjoin, chained=True) ) else: - return strategies.EagerLazyOption(keys, lazy=False, chained=True) + return strategies.EagerLazyOption(keys, lazy='joined', chained=True) def eagerload(*args, **kwargs): """A synonym for :func:`joinedload()`.""" @@ -1054,7 +1056,7 @@ def subqueryload_all(*keys): Individual descriptors are accepted as arguments as well:: - query.options(subquryload_all(User.orders, Order.items, Item.keywords)) + query.options(subqueryload_all(User.orders, 
Order.items, Item.keywords)) See also: :func:`joinedload_all`, :func:`lazyload` @@ -1132,7 +1134,7 @@ def contains_eager(*keys, **kwargs): raise exceptions.ArgumentError("Invalid kwargs for contains_eager: %r" % kwargs.keys()) return ( - strategies.EagerLazyOption(keys, lazy=False, propagate_to_loaders=False), + strategies.EagerLazyOption(keys, lazy='joined', propagate_to_loaders=False), strategies.LoadEagerFromAliasOption(keys, alias=alias) ) diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py index 255b6b6fe..7fbb0862d 100644 --- a/lib/sqlalchemy/orm/interfaces.py +++ b/lib/sqlalchemy/orm/interfaces.py @@ -932,8 +932,7 @@ class StrategizedOption(PropertyOption): for an operation by a StrategizedProperty. """ - def is_chained(self): - return False + is_chained = False def process_query_property(self, query, paths, mappers): # _get_context_strategy may receive the path in terms of @@ -941,7 +940,7 @@ class StrategizedOption(PropertyOption): # in the polymorphic tests leads to "(Person, 'machines')" in # the path due to the mechanics of how the eager strategy builds # up the path - if self.is_chained(): + if self.is_chained: for path in paths: query._attributes[("loaderstrategy", _reduce_path(path))] = \ self.get_strategy_class() diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 859333a39..5b9169c2e 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -381,7 +381,8 @@ class Query(object): statement._annotate({'_halt_adapt': True}) def subquery(self): - """return the full SELECT statement represented by this Query, embedded within an Alias. + """return the full SELECT statement represented by this Query, + embedded within an Alias. Eager JOIN generation within the query is disabled. @@ -397,11 +398,14 @@ class Query(object): @_generative() def enable_eagerloads(self, value): - """Control whether or not eager joins are rendered. 
+ """Control whether or not eager joins and subqueries are + rendered. When set to False, the returned Query will not render - eager joins regardless of eagerload() options - or mapper-level lazy=False configurations. + eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, + :func:`~sqlalchemy.orm.subqueryload` options + or mapper-level ``lazy='joined'``/``lazy='subquery'`` + configurations. This is used primarily when nesting the Query's statement into a subquery or other @@ -508,13 +512,16 @@ class Query(object): overwritten. In particular, it's usually impossible to use this setting with - eagerly loaded collections (i.e. any lazy=False) since those - collections will be cleared for a new load when encountered in a - subsequent result batch. + eagerly loaded collections (i.e. any lazy='joined' or 'subquery') + since those collections will be cleared for a new load when + encountered in a subsequent result batch. In the case of 'subquery' + loading, the full result for all rows is fetched which generally + defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`. Also note that many DBAPIs do not "stream" results, pre-buffering all rows before making them available, including mysql-python and - psycopg2. yield_per() will also set the ``stream_results`` execution + psycopg2. :meth:`~sqlalchemy.orm.query.Query.yield_per` will also + set the ``stream_results`` execution option to ``True``, which currently is only understood by psycopg2 and causes server side cursors to be used. @@ -1347,7 +1354,7 @@ class Query(object): first() applies a limit of one within the generated SQL, so that only one primary entity row is generated on the server side - (note this may consist of multiple result rows if eagerly loaded + (note this may consist of multiple result rows if join-loaded collections are present). Calling ``first()`` results in an execution of the underlying query. 
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index a766aab09..25c2f83a5 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -664,7 +664,7 @@ class SubqueryLoader(AbstractRelationshipLoader): if len(path) / 2 > self.join_depth: return else: - if self.mapper.base_mapper in reduced_path: + if self.mapper.base_mapper in interfaces._reduce_path(subq_path): return orig_query = context.attributes.get( @@ -1132,10 +1132,15 @@ class EagerLazyOption(StrategizedOption): self.chained = chained self.propagate_to_loaders = propagate_to_loaders self.strategy_cls = factory(lazy) - + + @property + def is_eager(self): + return self.lazy in (False, 'joined', 'subquery') + + @property def is_chained(self): - return not self.lazy and self.chained - + return self.is_eager and self.chained + def get_strategy_class(self): return self.strategy_cls -- cgit v1.2.1 From 3f0bd7269bda6a9fa833c7d6ba2f393688ffd524 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 25 Mar 2010 17:02:50 -0400 Subject: - The psycopg2 dialect will log NOTICE messages via the "sqlalchemy.dialects.postgresql" logger name. [ticket:877] --- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 30 ++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index c239a3ee0..5cbaaffd0 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -34,6 +34,15 @@ Transactions The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations. 
+NOTICE logging +--------------- + +The psycopg2 dialect will log Postgresql NOTICE messages via the +``sqlalchemy.dialects.postgresql`` logger:: + + import logging + logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) + Per-Statement Execution Options ------------------------------- @@ -46,8 +55,10 @@ The following per-statement execution options are respected: """ -import random, re +import random +import re import decimal +import logging from sqlalchemy import util from sqlalchemy import processors @@ -59,6 +70,10 @@ from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \ PGIdentifierPreparer, PGExecutionContext, \ ENUM, ARRAY + +logger = logging.getLogger('sqlalchemy.dialects.postgresql') + + class _PGNumeric(sqltypes.Numeric): def bind_processor(self, dialect): return None @@ -130,11 +145,22 @@ class PGExecutionContext_psycopg2(PGExecutionContext): return self._connection.connection.cursor() def get_result_proxy(self): + if logger.isEnabledFor(logging.INFO): + self._log_notices(self.cursor) + if self.__is_server_side: return base.BufferedRowResultProxy(self) else: return base.ResultProxy(self) + def _log_notices(self, cursor): + for notice in cursor.connection.notices: + # NOTICE messages have a + # newline character at the end + logger.info(notice.rstrip()) + + cursor.connection.notices[:] = [] + class PGCompiler_psycopg2(PGCompiler): def visit_mod(self, binary, **kw): @@ -190,7 +216,7 @@ class PGDialect_psycopg2(PGDialect): return connect else: return base_on_connect - + def create_connect_args(self, url): opts = url.translate_connect_args(username='user') if 'port' in opts: -- cgit v1.2.1 From 03573c0517dc27f90f1a07ef8ad67a0692977a24 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 25 Mar 2010 17:25:32 -0400 Subject: - Using a mixin won't break if the mixin implements an unpredictable __getattribute__(), i.e. Zope interfaces. 
[ticket:1746] --- lib/sqlalchemy/ext/declarative.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/ext/declarative.py b/lib/sqlalchemy/ext/declarative.py index 775efbff1..435d38161 100644 --- a/lib/sqlalchemy/ext/declarative.py +++ b/lib/sqlalchemy/ext/declarative.py @@ -545,7 +545,7 @@ def _as_declarative(cls, classname, dict_): names = dir(base) if not _is_mapped_class(base): for name in names: - obj = getattr(base,name) + obj = getattr(base,name, None) if isinstance(obj, Column): dict_[name]=column_copies[obj]=obj.copy() get_mapper_args = get_mapper_args or getattr(base,'__mapper_args__',None) -- cgit v1.2.1 From 6acc9e6d9e88bc529a5147f6daa93c4c4e6de64c Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Thu, 25 Mar 2010 22:26:11 +0000 Subject: - The Oracle dialect will issue VARCHAR type definitions using character counts, i.e. VARCHAR2(50 CHAR), so that the column is sized in terms of characters and not bytes. Column reflection of character types will also use ALL_TAB_COLUMNS.CHAR_LENGTH instead of ALL_TAB_COLUMNS.DATA_LENGTH. Both of these behaviors take effect when the server version is 9 or higher - for version 8, the old behaviors are used. 
[ticket:1744] --- lib/sqlalchemy/dialects/oracle/base.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 2af5bdd7d..332fa805d 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -286,7 +286,10 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler): return "%(name)s(%(precision)s, %(scale)s)" % {'name':name,'precision': precision, 'scale' : scale} def visit_VARCHAR(self, type_): - return "VARCHAR(%(length)s)" % {'length' : type_.length} + if self.dialect.supports_char_length: + return "VARCHAR(%(length)s CHAR)" % {'length' : type_.length} + else: + return "VARCHAR(%(length)s)" % {'length' : type_.length} def visit_NVARCHAR(self, type_): return "NVARCHAR2(%(length)s)" % {'length' : type_.length} @@ -569,7 +572,8 @@ class OracleDialect(default.DefaultDialect): execution_ctx_cls = OracleExecutionContext reflection_options = ('oracle_resolve_synonyms', ) - + + supports_char_length = True def __init__(self, use_ansi=True, @@ -584,6 +588,8 @@ class OracleDialect(default.DefaultDialect): self.implicit_returning = self.server_version_info > (10, ) and \ self.__dict__.get('implicit_returning', True) + self.supports_char_length = self.server_version_info >= (9, ) + if self.server_version_info < (9,): self.colspecs = self.colspecs.copy() self.colspecs.pop(sqltypes.Interval) @@ -749,11 +755,16 @@ class OracleDialect(default.DefaultDialect): resolve_synonyms, dblink, info_cache=info_cache) columns = [] + if self.supports_char_length: + char_length_col = 'char_length' + else: + char_length_col = 'data_length' + c = connection.execute(sql.text( - "SELECT column_name, data_type, data_length, data_precision, data_scale, " + "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, " "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s " "WHERE table_name 
= :table_name AND owner = :owner " - "ORDER BY column_id" % {'dblink': dblink}), + "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}), table_name=table_name, owner=schema) for row in c: @@ -762,7 +773,7 @@ class OracleDialect(default.DefaultDialect): if coltype == 'NUMBER' : coltype = NUMBER(precision, scale) - elif coltype=='CHAR' or coltype=='VARCHAR2': + elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'): coltype = self.ischema_names.get(coltype)(length) elif 'WITH TIME ZONE' in coltype: coltype = TIMESTAMP(timezone=True) -- cgit v1.2.1 From db052dfba8db282516c353b1dc3984668ce93538 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 12:03:54 -0400 Subject: - the TIME and TIMESTAMP types are now availble from the postgresql dialect directly, which add the PG-specific argument 'precision' to both. 'precision' and 'timezone' are correctly reflected for both TIME and TIMEZONE types. [ticket:997] --- lib/sqlalchemy/dialects/postgresql/base.py | 81 +++++++++++++++++++----------- 1 file changed, 53 insertions(+), 28 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py index f45fc9671..bef2f1c61 100644 --- a/lib/sqlalchemy/dialects/postgresql/base.py +++ b/lib/sqlalchemy/dialects/postgresql/base.py @@ -78,7 +78,7 @@ from sqlalchemy import types as sqltypes from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ CHAR, TEXT, FLOAT, NUMERIC, \ - TIMESTAMP, TIME, DATE, BOOLEAN + DATE, BOOLEAN class REAL(sqltypes.Float): __visit_name__ = "REAL" @@ -101,6 +101,16 @@ class MACADDR(sqltypes.TypeEngine): __visit_name__ = "MACADDR" PGMacAddr = MACADDR +class TIMESTAMP(sqltypes.TIMESTAMP): + def __init__(self, timezone=False, precision=None): + super(TIMESTAMP, self).__init__(timezone=timezone) + self.precision = precision + +class TIME(sqltypes.TIME): + def __init__(self, timezone=False, precision=None): + super(TIME, 
self).__init__(timezone=timezone) + self.precision = precision + class INTERVAL(sqltypes.TypeEngine): __visit_name__ = 'INTERVAL' def __init__(self, precision=None): @@ -466,10 +476,16 @@ class PGTypeCompiler(compiler.GenericTypeCompiler): return self.dialect.identifier_preparer.format_type(type_) def visit_TIMESTAMP(self, type_): - return "TIMESTAMP " + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + return "TIMESTAMP%s %s" % ( + getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + ) def visit_TIME(self, type_): - return "TIME " + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + return "TIME%s %s" % ( + getattr(type_, 'precision', None) and "(%d)" % type_.precision or "", + (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" + ) def visit_INTERVAL(self, type_): if type_.precision is not None: @@ -875,39 +891,48 @@ class PGDialect(default.DefaultDialect): # format columns columns = [] for name, format_type, default, notnull, attnum, table_oid in rows: - ## strip (30) from character varying(30) - attype = re.search('([^\([]+)', format_type).group(1) + ## strip (5) from character varying(5), timestamp(5) with time zone, etc + attype = re.sub(r'\([\d,]+\)', '', format_type) + + # strip '[]' from integer[], etc. 
+ attype = re.sub(r'\[\]', '', attype) + nullable = not notnull is_array = format_type.endswith('[]') - try: - charlen = re.search('\(([\d,]+)\)', format_type).group(1) - except: - charlen = False - numericprec = False - numericscale = False + charlen = re.search('\(([\d,]+)\)', format_type) + if charlen: + charlen = charlen.group(1) + kwargs = {} + if attype == 'numeric': - if charlen is False: - numericprec, numericscale = (None, None) + if charlen: + prec, scale = charlen.split(',') + args = (int(prec), int(scale)) else: - numericprec, numericscale = charlen.split(',') - charlen = False + args = () elif attype == 'double precision': - numericprec, numericscale = (53, False) - charlen = False + args = (53, ) elif attype == 'integer': - numericprec, numericscale = (32, 0) - charlen = False - args = [] - for a in (charlen, numericprec, numericscale): - if a is None: - args.append(None) - elif a is not False: - args.append(int(a)) - kwargs = {} - if attype == 'timestamp with time zone': + args = (32, 0) + elif attype in ('timestamp with time zone', 'time with time zone'): kwargs['timezone'] = True - elif attype == 'timestamp without time zone': + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif attype in ('timestamp without time zone', 'time without time zone', 'time'): kwargs['timezone'] = False + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif attype in ('interval','interval year to month','interval day to second'): + if charlen: + kwargs['precision'] = int(charlen) + args = () + elif charlen: + args = (int(charlen),) + else: + args = () + if attype in self.ischema_names: coltype = self.ischema_names[attype] elif attype in enums: -- cgit v1.2.1 From febf3bf78c74aa99ead0902d926de371904595d8 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 12:54:13 -0400 Subject: fix typo --- lib/sqlalchemy/dialects/postgresql/psycopg2.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib/sqlalchemy') diff 
--git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py index 5cbaaffd0..f21c9a558 100644 --- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py +++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py @@ -12,7 +12,7 @@ Note that psycopg1 is **not** supported. Connecting ---------- -URLs are of the form `postgresql+psycopg2://user@password@host:port/dbname[?key=value&key=value...]`. +URLs are of the form `postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]`. psycopg2-specific keyword arguments which are accepted by :func:`~sqlalchemy.create_engine()` are: -- cgit v1.2.1 From a0af13067d952e05626f1ba240254cfcac58e9cc Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 13:09:17 -0400 Subject: clarify cascade docstring, [ticket:1716] --- lib/sqlalchemy/orm/__init__.py | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 5d4bc2ee4..2b577db87 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ b/lib/sqlalchemy/orm/__init__.py @@ -230,25 +230,33 @@ def relationship(argument, secondary=None, **kwargs): Available cascades are: - ``save-update`` - cascade the "add()" operation (formerly - known as save() and update()) + * ``save-update`` - cascade the :meth:`~sqlalchemy.orm.session.Session.add` + operation. This cascade applies both to future and + past calls to :meth:`~sqlalchemy.orm.session.Session.add`, + meaning new items added to a collection or scalar relationship + get placed into the same session as that of the parent, and + also applies to items which have been removed from this + relationship but are still part of unflushed history. 
- ``merge`` - cascade the "merge()" operation + * ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge` + operation - ``expunge`` - cascade the "expunge()" operation + * ``expunge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expunge` + operation - ``delete`` - cascade the "delete()" operation + * ``delete`` - cascade the :meth:`~sqlalchemy.orm.session.Session.delete` + operation - ``delete-orphan`` - if an item of the child's type with no - parent is detected, mark it for deletion. Note that this - option prevents a pending item of the child's class from being - persisted without a parent present. + * ``delete-orphan`` - if an item of the child's type with no + parent is detected, mark it for deletion. Note that this + option prevents a pending item of the child's class from being + persisted without a parent present. - ``refresh-expire`` - cascade the expire() and refresh() - operations + * ``refresh-expire`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expire` + and :meth:`~sqlalchemy.orm.session.Session.refresh` operations - ``all`` - shorthand for "save-update,merge, refresh-expire, - expunge, delete" + * ``all`` - shorthand for "save-update,merge, refresh-expire, + expunge, delete" :param collection_class: a class or callable that returns a new list-holding object. 
will -- cgit v1.2.1 From 4291e0b316438a81121de1170fe902dc375a4969 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 13:48:13 -0400 Subject: clean up sqlite version detection stuff --- lib/sqlalchemy/dialects/sqlite/base.py | 10 ++++++++++ lib/sqlalchemy/dialects/sqlite/pysqlite.py | 9 ++------- 2 files changed, 12 insertions(+), 7 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/sqlite/base.py b/lib/sqlalchemy/dialects/sqlite/base.py index 0d9827322..ca0a39136 100644 --- a/lib/sqlalchemy/dialects/sqlite/base.py +++ b/lib/sqlalchemy/dialects/sqlite/base.py @@ -331,6 +331,9 @@ class SQLiteDialect(default.DefaultDialect): colspecs = colspecs isolation_level = None + supports_cast = True + supports_default_values = True + def __init__(self, isolation_level=None, native_datetime=False, **kwargs): default.DefaultDialect.__init__(self, **kwargs) if isolation_level and isolation_level not in ('SERIALIZABLE', @@ -345,6 +348,13 @@ class SQLiteDialect(default.DefaultDialect): # conversions (and perhaps datetime/time as well on some # hypothetical driver ?) 
self.native_datetime = native_datetime + + if self.dbapi is not None: + self.supports_default_values = \ + self.dbapi.sqlite_version_info >= (3, 3, 8) + self.supports_cast = \ + self.dbapi.sqlite_version_info >= (3, 2, 3) + def on_connect(self): if self.isolation_level is not None: diff --git a/lib/sqlalchemy/dialects/sqlite/pysqlite.py b/lib/sqlalchemy/dialects/sqlite/pysqlite.py index b48abbb7d..575cb37f2 100644 --- a/lib/sqlalchemy/dialects/sqlite/pysqlite.py +++ b/lib/sqlalchemy/dialects/sqlite/pysqlite.py @@ -187,20 +187,15 @@ class SQLiteDialect_pysqlite(SQLiteDialect): def __init__(self, **kwargs): SQLiteDialect.__init__(self, **kwargs) - def vers(num): - return tuple([int(x) for x in num.split('.')]) + if self.dbapi is not None: sqlite_ver = self.dbapi.version_info - if sqlite_ver < (2, 1, '3'): + if sqlite_ver < (2, 1, 3): util.warn( ("The installed version of pysqlite2 (%s) is out-dated " "and will cause errors in some cases. Version 2.1.3 " "or greater is recommended.") % '.'.join([str(subver) for subver in sqlite_ver])) - if self.dbapi.sqlite_version_info < (3, 3, 8): - self.supports_default_values = False - self.supports_cast = (self.dbapi is None or vers(self.dbapi.sqlite_version) >= vers("3.2.3")) - @classmethod def dbapi(cls): -- cgit v1.2.1 From 15159a844d11b4a081bea4d7f6e3e3cf7dec937d Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 15:16:00 -0400 Subject: - Using @classdecorator and similar on mixins to define __tablename__, __table_args__, etc. now works if the method references attributes on the ultimate subclass. 
[ticket:1749] --- lib/sqlalchemy/ext/declarative.py | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/ext/declarative.py b/lib/sqlalchemy/ext/declarative.py index 435d38161..ef1d3e68c 100644 --- a/lib/sqlalchemy/ext/declarative.py +++ b/lib/sqlalchemy/ext/declarative.py @@ -531,31 +531,32 @@ def instrument_declarative(cls, registry, metadata): def _as_declarative(cls, classname, dict_): - # doing it this way enables these attributes to be descriptors, - # see below... - get_mapper_args = '__mapper_args__' in dict_ - get_table_args = '__table_args__' in dict_ - # dict_ will be a dictproxy, which we can't write to, and we need to! dict_ = dict(dict_) column_copies = dict() - + unmapped_mixins = False for base in cls.__bases__: names = dir(base) if not _is_mapped_class(base): + unmapped_mixins = True for name in names: obj = getattr(base,name, None) if isinstance(obj, Column): dict_[name]=column_copies[obj]=obj.copy() - get_mapper_args = get_mapper_args or getattr(base,'__mapper_args__',None) - get_table_args = get_table_args or getattr(base,'__table_args__',None) - tablename = getattr(base,'__tablename__',None) - if tablename: - # subtle: if tablename is a descriptor here, we actually - # put the wrong value in, but it serves as a marker to get - # the right value value... - dict_['__tablename__']=tablename + + # doing it this way enables these attributes to be descriptors + get_mapper_args = '__mapper_args__' in dict_ + get_table_args = '__table_args__' in dict_ + if unmapped_mixins: + get_mapper_args = get_mapper_args or getattr(cls,'__mapper_args__',None) + get_table_args = get_table_args or getattr(cls,'__table_args__',None) + tablename = getattr(cls,'__tablename__',None) + if tablename: + # subtle: if tablename is a descriptor here, we actually + # put the wrong value in, but it serves as a marker to get + # the right value value... 
+ dict_['__tablename__']=tablename # now that we know whether or not to get these, get them from the class # if we should, enabling them to be decorators -- cgit v1.2.1 From d56d420e809588d8dea8f36fd4ae3a8b4204be54 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 14:47:53 -0600 Subject: mssql+mxodbc should use executedirect for all selects and execute for insert/update/delete. To support this, an is_crud property has been added to the DefaultExecutionContext. The behavior is forcable either way per execution using execution_options(native_odbc_parameters=True|False). Some tests have been added to demonstrate usage. (patch by zzzeek committed by bradallen) --- lib/sqlalchemy/connectors/mxodbc.py | 30 ++++++++++++++++++++---------- lib/sqlalchemy/engine/default.py | 5 ++++- 2 files changed, 24 insertions(+), 11 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index 4476ffd78..f50bff7da 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -97,10 +97,10 @@ class MxODBCConnector(Connector): """ opts = url.translate_connect_args(username='user') opts.update(url.query) - args = opts['host'], - kwargs = {'user':opts['user'], - 'password': opts['password']} - return args, kwargs + args = opts.pop('host') + opts.pop('port', None) + opts.pop('database', None) + return (args,), opts def is_disconnect(self, e): # eGenix recommends checking connection.closed here, @@ -126,10 +126,20 @@ class MxODBCConnector(Connector): return tuple(version) def do_execute(self, cursor, statement, parameters, context=None): - # temporary workaround until a more comprehensive solution can - # be found for controlling when to use executedirect - try: - cursor.execute(statement, parameters) - except (InterfaceError, ProgrammingError), e: - warnings.warn("cursor.execute failed; falling back to executedirect") + if context: + native_odbc_execute = 
context.execution_options.\ + get('native_odbc_execute', 'auto') + if native_odbc_execute is True: + # user specified native_odbc_execute=True + cursor.execute(statement, parameters) + elif native_odbc_execute is False: + # user specified native_odbc_execute=False + cursor.executedirect(statement, parameters) + elif context.is_crud: + # statement is UPDATE, DELETE, INSERT + cursor.execute(statement, parameters) + else: + # all other statements + cursor.executedirect(statement, parameters) + else: cursor.executedirect(statement, parameters) diff --git a/lib/sqlalchemy/engine/default.py b/lib/sqlalchemy/engine/default.py index 720edf66c..6fb0a14a5 100644 --- a/lib/sqlalchemy/engine/default.py +++ b/lib/sqlalchemy/engine/default.py @@ -381,7 +381,10 @@ class DefaultExecutionContext(base.ExecutionContext): self.execution_options = self.execution_options.union(connection._execution_options) self.cursor = self.create_cursor() - + @util.memoized_property + def is_crud(self): + return self.isinsert or self.isupdate or self.isdelete + @util.memoized_property def should_autocommit(self): autocommit = self.execution_options.get('autocommit', -- cgit v1.2.1 From b7a2d7de4854c26ea7773b1002852d4245cfcc10 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Fri, 26 Mar 2010 23:14:16 -0400 Subject: - relationships and columns with foreign keys aren't allowed on declarative mixins, sorry. 
[ticket:1751] --- lib/sqlalchemy/ext/declarative.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/ext/declarative.py b/lib/sqlalchemy/ext/declarative.py index ef1d3e68c..1f4658b60 100644 --- a/lib/sqlalchemy/ext/declarative.py +++ b/lib/sqlalchemy/ext/declarative.py @@ -507,7 +507,7 @@ Mapped instances then make usage of from sqlalchemy.schema import Table, Column, MetaData from sqlalchemy.orm import synonym as _orm_synonym, mapper, comparable_property, class_mapper from sqlalchemy.orm.interfaces import MapperProperty -from sqlalchemy.orm.properties import PropertyLoader, ColumnProperty +from sqlalchemy.orm.properties import RelationshipProperty, ColumnProperty from sqlalchemy.orm.util import _is_mapped_class from sqlalchemy import util, exceptions from sqlalchemy.sql import util as sql_util @@ -543,7 +543,16 @@ def _as_declarative(cls, classname, dict_): for name in names: obj = getattr(base,name, None) if isinstance(obj, Column): + if obj.foreign_keys: + raise exceptions.InvalidRequestError( + "Columns with foreign keys to other columns " + "are not allowed on declarative mixins at this time." 
+ ) dict_[name]=column_copies[obj]=obj.copy() + elif isinstance(obj, RelationshipProperty): + raise exceptions.InvalidRequestError( + "relationships are not allowed on " + "declarative mixins at this time.") # doing it this way enables these attributes to be descriptors get_mapper_args = '__mapper_args__' in dict_ @@ -778,7 +787,7 @@ def _deferred_relationship(cls, prop): prop.parent, arg, n.args[0], cls)) return return_cls - if isinstance(prop, PropertyLoader): + if isinstance(prop, RelationshipProperty): for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin', 'secondary', '_foreign_keys', 'remote_side'): v = getattr(prop, attr) -- cgit v1.2.1 From 3cd2c4661f1522353be983a309dc947c2a2a28bb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Mar 2010 15:20:03 -0400 Subject: - 0.6beta3 version - remove redundant orderinglist docs, use just module docs - add warning for primary keys/unique colummns, [ticket:1669] --- lib/sqlalchemy/__init__.py | 2 +- lib/sqlalchemy/ext/orderinglist.py | 153 +++++++++++++++++++++---------------- 2 files changed, 88 insertions(+), 67 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/__init__.py b/lib/sqlalchemy/__init__.py index 13e843801..376b13e64 100644 --- a/lib/sqlalchemy/__init__.py +++ b/lib/sqlalchemy/__init__.py @@ -114,6 +114,6 @@ from sqlalchemy.engine import create_engine, engine_from_config __all__ = sorted(name for name, obj in locals().items() if not (name.startswith('_') or inspect.ismodule(obj))) -__version__ = '0.6beta2' +__version__ = '0.6beta3' del inspect, sys diff --git a/lib/sqlalchemy/ext/orderinglist.py b/lib/sqlalchemy/ext/orderinglist.py index db0bd2a4e..0d2c3ae5d 100644 --- a/lib/sqlalchemy/ext/orderinglist.py +++ b/lib/sqlalchemy/ext/orderinglist.py @@ -1,67 +1,92 @@ """A custom list that manages index/position information for its children. 
-``orderinglist`` is a custom list collection implementation for mapped -relationships that keeps an arbitrary "position" attribute on contained objects in -sync with each object's position in the Python list. - -The collection acts just like a normal Python ``list``, with the added -behavior that as you manipulate the list (via ``insert``, ``pop``, assignment, -deletion, what have you), each of the objects it contains is updated as needed -to reflect its position. This is very useful for managing ordered relationships -which have a user-defined, serialized order:: - - >>> from sqlalchemy import MetaData, Table, Column, Integer, String, ForeignKey - >>> from sqlalchemy.orm import mapper, relationship - >>> from sqlalchemy.ext.orderinglist import ordering_list - -A simple model of users their "top 10" things:: - - >>> metadata = MetaData() - >>> users = Table('users', metadata, - ... Column('id', Integer, primary_key=True)) - >>> blurbs = Table('user_top_ten_list', metadata, - ... Column('id', Integer, primary_key=True), - ... Column('user_id', Integer, ForeignKey('users.id')), - ... Column('position', Integer), - ... Column('blurb', String(80))) - >>> class User(object): - ... pass - ... - >>> class Blurb(object): - ... def __init__(self, blurb): - ... self.blurb = blurb - ... - >>> mapper(User, users, properties={ - ... 'topten': relationship(Blurb, collection_class=ordering_list('position'), - ... 
order_by=[blurbs.c.position])}) - - >>> mapper(Blurb, blurbs) - - -Acts just like a regular list:: - - >>> u = User() - >>> u.topten.append(Blurb('Number one!')) - >>> u.topten.append(Blurb('Number two!')) - -But the ``.position`` attibute is set automatically behind the scenes:: - - >>> assert [blurb.position for blurb in u.topten] == [0, 1] - -The objects will be renumbered automaticaly after any list-changing operation, -for example an ``insert()``:: - - >>> u.topten.insert(1, Blurb('I am the new Number Two.')) - >>> assert [blurb.position for blurb in u.topten] == [0, 1, 2] - >>> assert u.topten[1].blurb == 'I am the new Number Two.' - >>> assert u.topten[1].position == 1 - -Numbering and serialization are both highly configurable. See the docstrings -in this module and the main SQLAlchemy documentation for more information and -examples. - -The :class:`~sqlalchemy.ext.orderinglist.ordering_list` factory function is the -ORM-compatible constructor for `OrderingList` instances. +:author: Jason Kirtland + +``orderinglist`` is a helper for mutable ordered relationships. It will intercept +list operations performed on a relationship collection and automatically +synchronize changes in list position with an attribute on the related objects. +(See :ref:`advdatamapping_entitycollections` for more information on the general pattern.) + +Example: Two tables that store slides in a presentation. Each slide +has a number of bullet points, displayed in order by the 'position' +column on the bullets table. These bullets can be inserted and re-ordered +by your end users, and you need to update the 'position' column of all +affected rows when changes are made. + +.. 
sourcecode:: python+sql + + slides_table = Table('Slides', metadata, + Column('id', Integer, primary_key=True), + Column('name', String)) + + bullets_table = Table('Bullets', metadata, + Column('id', Integer, primary_key=True), + Column('slide_id', Integer, ForeignKey('Slides.id')), + Column('position', Integer), + Column('text', String)) + + class Slide(object): + pass + class Bullet(object): + pass + + mapper(Slide, slides_table, properties={ + 'bullets': relationship(Bullet, order_by=[bullets_table.c.position]) + }) + mapper(Bullet, bullets_table) + +The standard relationship mapping will produce a list-like attribute on each Slide +containing all related Bullets, but coping with changes in ordering is totally +your responsibility. If you insert a Bullet into that list, there is no +magic- it won't have a position attribute unless you assign it it one, and +you'll need to manually renumber all the subsequent Bullets in the list to +accommodate the insert. + +An ``orderinglist`` can automate this and manage the 'position' attribute on all +related bullets for you. + +.. sourcecode:: python+sql + + mapper(Slide, slides_table, properties={ + 'bullets': relationship(Bullet, + collection_class=ordering_list('position'), + order_by=[bullets_table.c.position]) + }) + mapper(Bullet, bullets_table) + + s = Slide() + s.bullets.append(Bullet()) + s.bullets.append(Bullet()) + s.bullets[1].position + >>> 1 + s.bullets.insert(1, Bullet()) + s.bullets[2].position + >>> 2 + +Use the ``ordering_list`` function to set up the ``collection_class`` on relationships +(as in the mapper example above). This implementation depends on the list +starting in the proper order, so be SURE to put an order_by on your relationship. + +.. warning:: ``ordering_list`` only provides limited functionality when a primary + key column or unique column is the target of the sort. 
Since changing the order of + entries often means that two rows must trade values, this is not possible when + the value is constrained by a primary key or unique constraint, since one of the rows + would temporarily have to point to a third available value so that the other row + could take its old value. ``ordering_list`` doesn't do any of this for you, + nor does SQLAlchemy itself. + +``ordering_list`` takes the name of the related object's ordering attribute as +an argument. By default, the zero-based integer index of the object's +position in the ``ordering_list`` is synchronized with the ordering attribute: +index 0 will get position 0, index 1 position 1, etc. To start numbering at 1 +or some other integer, provide ``count_from=1``. + +Ordering values are not limited to incrementing integers. Almost any scheme +can implemented by supplying a custom ``ordering_func`` that maps a Python list +index to any value you require. + + + """ from sqlalchemy.orm.collections import collection @@ -288,7 +313,3 @@ class OrderingList(list): func.__doc__ = getattr(list, func_name).__doc__ del func_name, func -if __name__ == '__main__': - import doctest - doctest.testmod(optionflags=doctest.ELLIPSIS) - -- cgit v1.2.1 From 36047e9bb28501477b1403059087cccc120be2b6 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Mar 2010 17:18:53 -0400 Subject: - Added with_hint() method to Query() construct. This calls directly down to select().with_hint() and also accepts entities as well as tables and aliases. See with_hint() in the SQL section below. [ticket:921] - Added with_hint() method to select() construct. Specify a table/alias, hint text, and optional dialect name, and "hints" will be rendered in the appropriate place in the statement. Works for Oracle, Sybase, MySQL. 
[ticket:921] --- lib/sqlalchemy/dialects/mysql/base.py | 11 ++++-- lib/sqlalchemy/dialects/oracle/base.py | 23 +++++++++--- lib/sqlalchemy/dialects/sybase/base.py | 3 ++ lib/sqlalchemy/orm/query.py | 25 ++++++++++++- lib/sqlalchemy/sql/compiler.py | 67 ++++++++++++++++++++++++++++------ lib/sqlalchemy/sql/expression.py | 30 ++++++++++++++- 6 files changed, 135 insertions(+), 24 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 873dfd16c..f9bb48235 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -1154,7 +1154,10 @@ class MySQLCompiler(compiler.SQLCompiler): def visit_match_op(self, binary, **kw): return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (self.process(binary.left), self.process(binary.right)) - + + def get_from_hint_text(self, table, text): + return text + def visit_typeclause(self, typeclause): type_ = typeclause.type.dialect_impl(self.dialect) if isinstance(type_, sqltypes.Integer): @@ -1204,11 +1207,11 @@ class MySQLCompiler(compiler.SQLCompiler): # support can be added, preferably after dialects are # refactored to be version-sensitive. 
return ''.join( - (self.process(join.left, asfrom=True), + (self.process(join.left, asfrom=True, **kwargs), (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "), - self.process(join.right, asfrom=True), + self.process(join.right, asfrom=True, **kwargs), " ON ", - self.process(join.onclause))) + self.process(join.onclause, **kwargs))) def for_update_clause(self, select): if select.for_update == 'read': diff --git a/lib/sqlalchemy/dialects/oracle/base.py b/lib/sqlalchemy/dialects/oracle/base.py index 332fa805d..475730988 100644 --- a/lib/sqlalchemy/dialects/oracle/base.py +++ b/lib/sqlalchemy/dialects/oracle/base.py @@ -342,6 +342,11 @@ class OracleCompiler(compiler.SQLCompiler): def visit_match_op(self, binary, **kw): return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right)) + def get_select_hint_text(self, byfroms): + return " ".join( + "/*+ %s */" % text for table, text in byfroms.items() + ) + def function_argspec(self, fn, **kw): if len(fn.clauses) > 0: return compiler.SQLCompiler.function_argspec(self, fn, **kw) @@ -360,7 +365,9 @@ class OracleCompiler(compiler.SQLCompiler): if self.dialect.use_ansi: return compiler.SQLCompiler.visit_join(self, join, **kwargs) else: - return self.process(join.left, asfrom=True) + ", " + self.process(join.right, asfrom=True) + kwargs['asfrom'] = True + return self.process(join.left, **kwargs) + \ + ", " + self.process(join.right, **kwargs) def _get_nonansi_join_whereclause(self, froms): clauses = [] @@ -392,14 +399,18 @@ class OracleCompiler(compiler.SQLCompiler): def visit_sequence(self, seq): return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval" - def visit_alias(self, alias, asfrom=False, **kwargs): + def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): """Oracle doesn't like ``FROM table AS alias``. 
Is the AS standard SQL??""" - - if asfrom: + + if asfrom or ashint: alias_name = isinstance(alias.name, expression._generated_label) and \ self._truncated_identifier("alias", alias.name) or alias.name - - return self.process(alias.original, asfrom=asfrom, **kwargs) + " " + self.preparer.format_alias(alias, alias_name) + + if ashint: + return alias_name + elif asfrom: + return self.process(alias.original, asfrom=asfrom, **kwargs) + \ + " " + self.preparer.format_alias(alias, alias_name) else: return self.process(alias.original, **kwargs) diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index aaec7a504..79e32b968 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -277,6 +277,9 @@ class SybaseSQLCompiler(compiler.SQLCompiler): s += "START AT %s " % (select._offset+1,) return s + def get_from_hint_text(self, table, text): + return text + def limit_clause(self, select): # Limit in sybase is after the select keyword return "" diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py index 5b9169c2e..e98ad8937 100644 --- a/lib/sqlalchemy/orm/query.py +++ b/lib/sqlalchemy/orm/query.py @@ -84,6 +84,7 @@ class Query(object): _params = util.frozendict() _attributes = util.frozendict() _with_options = () + _with_hints = () def __init__(self, entities, session=None): self.session = session @@ -718,6 +719,21 @@ class Query(object): for opt in opts: opt.process_query(self) + @_generative() + def with_hint(self, selectable, text, dialect_name=None): + """Add an indexing hint for the given entity or selectable to + this :class:`Query`. + + Functionality is passed straight through to + :meth:`~sqlalchemy.sql.expression.Select.with_hint`, + with the addition that ``selectable`` can be a + :class:`Table`, :class:`Alias`, or ORM entity / mapped class + /etc. 
+ """ + mapper, selectable, is_aliased_class = _entity_info(selectable) + + self._with_hints += ((selectable, text, dialect_name),) + @_generative() def execution_options(self, **kwargs): """ Set non-SQL options which take effect during execution. @@ -2053,7 +2069,10 @@ class Query(object): order_by=context.order_by, **self._select_args ) - + + for hint in self._with_hints: + inner = inner.with_hint(*hint) + if self._correlate: inner = inner.correlate(*self._correlate) @@ -2108,6 +2127,10 @@ class Query(object): order_by=context.order_by, **self._select_args ) + + for hint in self._with_hints: + statement = statement.with_hint(*hint) + if self._execution_options: statement = statement.execution_options(**self._execution_options) diff --git a/lib/sqlalchemy/sql/compiler.py b/lib/sqlalchemy/sql/compiler.py index 75b3f79f0..78c65771b 100644 --- a/lib/sqlalchemy/sql/compiler.py +++ b/lib/sqlalchemy/sql/compiler.py @@ -628,13 +628,22 @@ class SQLCompiler(engine.Compiled): else: return self.bindtemplate % {'name':name} - def visit_alias(self, alias, asfrom=False, **kwargs): - if asfrom: + def visit_alias(self, alias, asfrom=False, ashint=False, fromhints=None, **kwargs): + if asfrom or ashint: alias_name = isinstance(alias.name, sql._generated_label) and \ self._truncated_identifier("alias", alias.name) or alias.name - - return self.process(alias.original, asfrom=True, **kwargs) + " AS " + \ + if ashint: + return self.preparer.format_alias(alias, alias_name) + elif asfrom: + ret = self.process(alias.original, asfrom=True, **kwargs) + " AS " + \ self.preparer.format_alias(alias, alias_name) + + if fromhints and alias in fromhints: + hinttext = self.get_from_hint_text(alias, fromhints[alias]) + if hinttext: + ret += " " + hinttext + + return ret else: return self.process(alias.original, **kwargs) @@ -661,8 +670,15 @@ class SQLCompiler(engine.Compiled): else: return column + def get_select_hint_text(self, byfroms): + return None + + def get_from_hint_text(self, table, 
text): + return None + def visit_select(self, select, asfrom=False, parens=True, - iswrapper=False, compound_index=1, **kwargs): + iswrapper=False, fromhints=None, + compound_index=1, **kwargs): entry = self.stack and self.stack[-1] or {} @@ -697,6 +713,18 @@ class SQLCompiler(engine.Compiled): ] text = "SELECT " # we're off to a good start ! + + if select._hints: + byfrom = dict([ + (from_, hinttext % {'name':self.process(from_, ashint=True)}) + for (from_, dialect), hinttext in + select._hints.iteritems() + if dialect in ('*', self.dialect.name) + ]) + hint_text = self.get_select_hint_text(byfrom) + if hint_text: + text += hint_text + " " + if select._prefixes: text += " ".join(self.process(x, **kwargs) for x in select._prefixes) + " " text += self.get_select_precolumns(select) @@ -704,7 +732,16 @@ class SQLCompiler(engine.Compiled): if froms: text += " \nFROM " - text += ', '.join(self.process(f, asfrom=True, **kwargs) for f in froms) + + if select._hints: + text += ', '.join([self.process(f, + asfrom=True, fromhints=byfrom, + **kwargs) + for f in froms]) + else: + text += ', '.join([self.process(f, + asfrom=True, **kwargs) + for f in froms]) else: text += self.default_from() @@ -767,20 +804,26 @@ class SQLCompiler(engine.Compiled): text += " OFFSET " + str(select._offset) return text - def visit_table(self, table, asfrom=False, **kwargs): - if asfrom: + def visit_table(self, table, asfrom=False, ashint=False, fromhints=None, **kwargs): + if asfrom or ashint: if getattr(table, "schema", None): - return self.preparer.quote_schema(table.schema, table.quote_schema) + \ + ret = self.preparer.quote_schema(table.schema, table.quote_schema) + \ "." 
+ self.preparer.quote(table.name, table.quote) else: - return self.preparer.quote(table.name, table.quote) + ret = self.preparer.quote(table.name, table.quote) + if fromhints and table in fromhints: + hinttext = self.get_from_hint_text(table, fromhints[table]) + if hinttext: + ret += " " + hinttext + return ret else: return "" def visit_join(self, join, asfrom=False, **kwargs): - return (self.process(join.left, asfrom=True) + \ + return (self.process(join.left, asfrom=True, **kwargs) + \ (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + \ - self.process(join.right, asfrom=True) + " ON " + self.process(join.onclause)) + self.process(join.right, asfrom=True, **kwargs) + " ON " + \ + self.process(join.onclause, **kwargs)) def visit_sequence(self, seq): return None diff --git a/lib/sqlalchemy/sql/expression.py b/lib/sqlalchemy/sql/expression.py index 1e02ba96a..3aaa06fd6 100644 --- a/lib/sqlalchemy/sql/expression.py +++ b/lib/sqlalchemy/sql/expression.py @@ -3557,6 +3557,7 @@ class Select(_SelectBaseMixin, FromClause): __visit_name__ = 'select' _prefixes = () + _hints = util.frozendict() def __init__(self, columns, @@ -3659,7 +3660,34 @@ class Select(_SelectBaseMixin, FromClause): """Return the displayed list of FromClause elements.""" return self._get_display_froms() - + + @_generative + def with_hint(self, selectable, text, dialect_name=None): + """Add an indexing hint for the given selectable to this :class:`Select`. + + The text of the hint is written specific to a specific backend, and + typically uses Python string substitution syntax to render the name + of the table or alias, such as for Oracle:: + + select([mytable]).with_hint(mytable, "+ index(%(name)s ix_mytable)") + + Would render SQL as:: + + select /*+ index(mytable ix_mytable) */ ... from mytable + + The ``dialect_name`` option will limit the rendering of a particular hint + to a particular backend. 
Such as, to add hints for both Oracle and + Sybase simultaneously:: + + select([mytable]).\ + with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\ + with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') + + """ + if not dialect_name: + dialect_name = '*' + self._hints = self._hints.union({(selectable, dialect_name):text}) + @property def type(self): raise exc.InvalidRequestError("Select objects don't have a type. " -- cgit v1.2.1 From 7535e19feca1c25a13e463fb0a5db6db17bda096 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Mar 2010 17:27:28 -0400 Subject: - No longer guessing that TINYINT(1) should be BOOLEAN when reflecting - TINYINT(1) is returned. Use Boolean/ BOOLEAN in table definition to get boolean conversion behavior. [ticket:1752] --- lib/sqlalchemy/dialects/mysql/base.py | 7 ------- 1 file changed, 7 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index f9bb48235..6650b8388 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ b/lib/sqlalchemy/dialects/mysql/base.py @@ -2207,13 +2207,6 @@ class MySQLTableDefinitionParser(object): name, type_, args, notnull = \ spec['name'], spec['coltype'], spec['arg'], spec['notnull'] - # Convention says that TINYINT(1) columns == BOOLEAN - if type_ == 'tinyint' and args == '1': - type_ = 'boolean' - args = None - spec['unsigned'] = None - spec['zerofill'] = None - try: col_type = self.dialect.ischema_names[type_] except KeyError: -- cgit v1.2.1 From 40e4ba2478423d2514af578ae019453a83f78ca1 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Mar 2010 18:46:33 -0400 Subject: formatting --- lib/sqlalchemy/orm/__init__.py | 52 +++++++++++++++++++++--------------------- 1 file changed, 26 insertions(+), 26 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/__init__.py b/lib/sqlalchemy/orm/__init__.py index 2b577db87..206c8d0c2 100644 --- a/lib/sqlalchemy/orm/__init__.py +++ 
b/lib/sqlalchemy/orm/__init__.py @@ -230,33 +230,33 @@ def relationship(argument, secondary=None, **kwargs): Available cascades are: - * ``save-update`` - cascade the :meth:`~sqlalchemy.orm.session.Session.add` - operation. This cascade applies both to future and - past calls to :meth:`~sqlalchemy.orm.session.Session.add`, - meaning new items added to a collection or scalar relationship - get placed into the same session as that of the parent, and - also applies to items which have been removed from this - relationship but are still part of unflushed history. + * ``save-update`` - cascade the :meth:`~sqlalchemy.orm.session.Session.add` + operation. This cascade applies both to future and + past calls to :meth:`~sqlalchemy.orm.session.Session.add`, + meaning new items added to a collection or scalar relationship + get placed into the same session as that of the parent, and + also applies to items which have been removed from this + relationship but are still part of unflushed history. - * ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge` - operation + * ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge` + operation - * ``expunge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expunge` - operation + * ``expunge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expunge` + operation - * ``delete`` - cascade the :meth:`~sqlalchemy.orm.session.Session.delete` - operation + * ``delete`` - cascade the :meth:`~sqlalchemy.orm.session.Session.delete` + operation - * ``delete-orphan`` - if an item of the child's type with no - parent is detected, mark it for deletion. Note that this - option prevents a pending item of the child's class from being - persisted without a parent present. + * ``delete-orphan`` - if an item of the child's type with no + parent is detected, mark it for deletion. Note that this + option prevents a pending item of the child's class from being + persisted without a parent present. 
- * ``refresh-expire`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expire` - and :meth:`~sqlalchemy.orm.session.Session.refresh` operations + * ``refresh-expire`` - cascade the :meth:`~sqlalchemy.orm.session.Session.expire` + and :meth:`~sqlalchemy.orm.session.Session.refresh` operations - * ``all`` - shorthand for "save-update,merge, refresh-expire, - expunge, delete" + * ``all`` - shorthand for "save-update,merge, refresh-expire, + expunge, delete" :param collection_class: a class or callable that returns a new list-holding object. will @@ -306,8 +306,8 @@ def relationship(argument, secondary=None, **kwargs): which is already higher up in the chain. This option applies both to joined- and subquery- eager loaders. - :param lazy=('select'|'joined'|'subquery'|'noload'|'dynamic'): - specifies how the related items should be loaded. Values include: + :param lazy=('select'|'joined'|'subquery'|'noload'|'dynamic'): specifies + how the related items should be loaded. Values include: * 'select' - items should be loaded lazily when the property is first accessed. @@ -320,9 +320,9 @@ def relationship(argument, secondary=None, **kwargs): which issues a JOIN to a subquery of the original statement. - * 'noload' - no loading should occur at any time. This is to support - "write-only" attributes, or attributes which are - populated in some manner specific to the application. + * 'noload' - no loading should occur at any time. This is to + support "write-only" attributes, or attributes which are + populated in some manner specific to the application. 
* 'dynamic' - the attribute will return a pre-configured :class:`~sqlalchemy.orm.query.Query` object for all read -- cgit v1.2.1 From 0ee5899743dc5b56fabcd9ab1198b2bb969ac5ff Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sat, 27 Mar 2010 19:13:24 -0400 Subject: this is a rewrite from the old sybase module with almost nothing remaining from the original, setting primary copyright --- lib/sqlalchemy/dialects/sybase/base.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/dialects/sybase/base.py b/lib/sqlalchemy/dialects/sybase/base.py index 79e32b968..6719b422b 100644 --- a/lib/sqlalchemy/dialects/sybase/base.py +++ b/lib/sqlalchemy/dialects/sybase/base.py @@ -1,6 +1,9 @@ -# sybase.py -# Copyright (C) 2007 Fisch Asset Management AG http://www.fam.ch -# Coding: Alexander Houben alexander.houben@thor-solutions.ch +# sybase/base.py +# Copyright (C) 2010 Michael Bayer mike_mp@zzzcomputing.com +# get_select_precolumns(), limit_clause() implementation +# copyright (C) 2007 Fisch Asset Management +# AG http://www.fam.ch, with coding by Alexander Houben +# alexander.houben@thor-solutions.ch # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -313,8 +316,6 @@ class SybaseDDLCompiler(compiler.DDLCompiler): "columns in order to generate DDL") seq_col = column.table._autoincrement_column - - # install a IDENTITY Sequence if we have an implicit IDENTITY column if seq_col is column: sequence = isinstance(column.default, sa_schema.Sequence) and column.default -- cgit v1.2.1 From 8f11ca9a6a5fdbb020ad35e2477ac77811485fbb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 28 Mar 2010 11:20:22 -0400 Subject: documentation updates --- lib/sqlalchemy/connectors/mxodbc.py | 1 + lib/sqlalchemy/dialects/mssql/mxodbc.py | 36 ++++++++++- lib/sqlalchemy/dialects/mssql/pymssql.py | 8 ++- lib/sqlalchemy/dialects/mssql/pyodbc.py | 3 + 
lib/sqlalchemy/dialects/mysql/__init__.py | 4 +- lib/sqlalchemy/dialects/mysql/base.py | 79 ++++++++++++------------- lib/sqlalchemy/dialects/mysql/mysqlconnector.py | 11 +++- lib/sqlalchemy/dialects/mysql/mysqldb.py | 28 ++++++++- lib/sqlalchemy/dialects/mysql/oursql.py | 11 ++++ lib/sqlalchemy/dialects/mysql/pyodbc.py | 19 ++++++ lib/sqlalchemy/dialects/mysql/zxjdbc.py | 7 +++ 11 files changed, 158 insertions(+), 49 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/connectors/mxodbc.py b/lib/sqlalchemy/connectors/mxodbc.py index f50bff7da..816474d43 100644 --- a/lib/sqlalchemy/connectors/mxodbc.py +++ b/lib/sqlalchemy/connectors/mxodbc.py @@ -9,6 +9,7 @@ and 2008, using the SQL Server Native driver. However, it is possible for this to be used on other database platforms. For more info on mxODBC, see http://www.egenix.com/ + """ import sys diff --git a/lib/sqlalchemy/dialects/mssql/mxodbc.py b/lib/sqlalchemy/dialects/mssql/mxodbc.py index 7148a3628..efe763659 100644 --- a/lib/sqlalchemy/dialects/mssql/mxodbc.py +++ b/lib/sqlalchemy/dialects/mssql/mxodbc.py @@ -1,9 +1,41 @@ """ -MSSQL dialect tweaked to work with mxODBC, mainly by making use -of the MSSQLStrictCompiler. +Support for MS-SQL via mxODBC. + +mxODBC is available at: + + http://www.egenix.com/ This was tested with mxODBC 3.1.2 and the SQL Server Native Client connected to MSSQL 2005 and 2008 Express Editions. + +Connecting +~~~~~~~~~~ + +Connection is via DSN:: + + mssql+mxodbc://:@ + +Execution Modes +~~~~~~~~~~~~~~~ + +mxODBC features two styles of statement execution, using the ``cursor.execute()`` +and ``cursor.executedirect()`` methods (the second being an extension to the +DBAPI specification). The former makes use of the native +parameter binding services of the ODBC driver, while the latter uses string escaping. +The primary advantage to native parameter binding is that the same statement, when +executed many times, is only prepared once. 
Whereas the primary advantage to the +latter is that the rules for bind parameter placement are relaxed. MS-SQL has very +strict rules for native binds, including that they cannot be placed within the argument +lists of function calls, anywhere outside the FROM, or even within subqueries within the +FROM clause - making the usage of bind parameters within SELECT statements impossible for +all but the most simplistic statements. For this reason, the mxODBC dialect uses the +"native" mode by default only for INSERT, UPDATE, and DELETE statements, and uses the +escaped string mode for all other statements. This behavior can be controlled completely +via :meth:`~sqlalchemy.sql.expression.Executable.execution_options` +using the ``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a value of +``True`` will unconditionally use native bind parameters and a value of ``False`` will +unconditionally use string-escaped parameters. + """ import re diff --git a/lib/sqlalchemy/dialects/mssql/pymssql.py b/lib/sqlalchemy/dialects/mssql/pymssql.py index 36cb5f370..ca1c4a142 100644 --- a/lib/sqlalchemy/dialects/mssql/pymssql.py +++ b/lib/sqlalchemy/dialects/mssql/pymssql.py @@ -7,7 +7,10 @@ pymssql is available at: http://pymssql.sourceforge.net/ -Connect string:: +Connecting +^^^^^^^^^^ + +Sample connect string:: mssql+pymssql://:@ @@ -16,6 +19,9 @@ strings as Python unicode objects. This can potentially improve performance in some scenarios as decoding of strings is handled natively. 
+Limitations +^^^^^^^^^^^ + pymssql inherits a lot of limitations from FreeTDS, including: * no support for multibyte schema identifiers diff --git a/lib/sqlalchemy/dialects/mssql/pyodbc.py b/lib/sqlalchemy/dialects/mssql/pyodbc.py index eb4bf5cff..c74be0e53 100644 --- a/lib/sqlalchemy/dialects/mssql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mssql/pyodbc.py @@ -5,6 +5,9 @@ pyodbc is available at: http://pypi.python.org/pypi/pyodbc/ +Connecting +^^^^^^^^^^ + Examples of pyodbc connection string URLs: * ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``. diff --git a/lib/sqlalchemy/dialects/mysql/__init__.py b/lib/sqlalchemy/dialects/mysql/__init__.py index e4ecccdfc..f37a0c766 100644 --- a/lib/sqlalchemy/dialects/mysql/__init__.py +++ b/lib/sqlalchemy/dialects/mysql/__init__.py @@ -6,12 +6,12 @@ base.dialect = mysqldb.dialect from sqlalchemy.dialects.mysql.base import \ BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, DOUBLE, ENUM, DECIMAL,\ FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NVARCHAR, NUMERIC, SET, SMALLINT, TEXT, TIME, TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT,\ + NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, TINYBLOB, TINYINT, TINYTEXT,\ VARBINARY, VARCHAR, YEAR, dialect __all__ = ( 'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE', 'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', -'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', +'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP', 'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect' ) diff --git a/lib/sqlalchemy/dialects/mysql/base.py b/lib/sqlalchemy/dialects/mysql/base.py index 6650b8388..6a0761476 100644 --- a/lib/sqlalchemy/dialects/mysql/base.py +++ 
b/lib/sqlalchemy/dialects/mysql/base.py @@ -1,37 +1,13 @@ # -*- fill-column: 78 -*- -# mysql.py +# mysql/base.py # Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010 Michael Bayer mike_mp@zzzcomputing.com +# and Jason Kirtland. # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """Support for the MySQL database. -Overview --------- - -For normal SQLAlchemy usage, importing this module is unnecessary. It will be -loaded on-demand when a MySQL connection is needed. The generic column types -like :class:`~sqlalchemy.String` and :class:`~sqlalchemy.Integer` will -automatically be adapted to the optimal matching MySQL column type. - -But if you would like to use one of the MySQL-specific or enhanced column -types when creating tables with your :class:`~sqlalchemy.Table` definitions, -then you will need to import them from this module:: - - from sqlalchemy.dialect.mysql import base as mysql - - Table('mytable', metadata, - Column('id', Integer, primary_key=True), - Column('ittybittyblob', mysql.TINYBLOB), - Column('biggy', mysql.BIGINT(unsigned=True))) - -All standard MySQL column types are supported. The OpenGIS types are -available for use via table reflection but have no special support or mapping -to Python classes. If you're using these types and have opinions about how -OpenGIS can be smartly integrated into SQLAlchemy please join the mailing -list! - Supported Versions and Features ------------------------------- @@ -44,10 +20,7 @@ in the suite 100%. No heroic measures are taken to work around major missing SQL features- if your server version does not support sub-selects, for example, they won't work in SQLAlchemy either. -Currently, the only DB-API driver supported is `MySQL-Python` (also referred to -as `MySQLdb`). Either 1.2.1 or 1.2.2 are recommended. The alpha, beta and -gamma releases of 1.2.1 and 1.2.2 should be avoided. Support for Jython and -IronPython is planned. 
+Most available DBAPI drivers are supported; see below. ===================================== =============== Feature Minimum Version @@ -64,6 +37,37 @@ Nested Transactions 5.0.3 See the official MySQL documentation for detailed information about features supported in any given server release. +Connecting +---------- + +See the API documentation on individual drivers for details on connecting. + +Data Types +---------- + +All of MySQL's standard types are supported. These can also be specified within +table metadata, for the purpose of issuing CREATE TABLE statements +which include MySQL-specific extensions. The types are available +from the module, as in:: + + from sqlalchemy.dialects import mysql + + Table('mytable', metadata, + Column('id', Integer, primary_key=True), + Column('ittybittyblob', mysql.TINYBLOB), + Column('biggy', mysql.BIGINT(unsigned=True))) + +See the API documentation on specific column types for further details. + +Connection Timeouts +------------------- + +MySQL features an automatic connection close behavior, for connections that have +been idle for eight hours or more. To circumvent having this issue, use the +``pool_recycle`` option which controls the maximum age of any connection:: + + engine = create_engine('mysql+mysqldb://...', pool_recycle=3600) + Storage Engines --------------- @@ -159,20 +163,13 @@ And of course any valid MySQL statement can be executed as a string as well. Some limited direct support for MySQL extensions to SQL is currently available. - * SELECT pragma:: - - select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']) - - * UPDATE with LIMIT:: +* SELECT pragma:: - update(..., mysql_limit=10) + select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']) -Boolean Types -------------- +* UPDATE with LIMIT:: -MySQL's BOOL type is a synonym for SMALLINT, so is actually a numeric value, -and additionally MySQL doesn't support CHECK constraints. 
Therefore SQLA's -Boolean type cannot fully constrain values to just "True" and "False" the way it does for most other backends. + update(..., mysql_limit=10) Troubleshooting --------------- diff --git a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py index 981e1e204..2da18e50f 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqlconnector.py +++ b/lib/sqlalchemy/dialects/mysql/mysqlconnector.py @@ -1,6 +1,15 @@ """Support for the MySQL database via the MySQL Connector/Python adapter. -# TODO: add docs/notes here regarding MySQL Connector/Python +MySQL Connector/Python is available at: + + https://launchpad.net/myconnpy + +Connecting +----------- + +Connect string format:: + + mysql+mysqlconnector://:@[:]/ """ diff --git a/lib/sqlalchemy/dialects/mysql/mysqldb.py b/lib/sqlalchemy/dialects/mysql/mysqldb.py index 9d34939a1..6e6bb0ecc 100644 --- a/lib/sqlalchemy/dialects/mysql/mysqldb.py +++ b/lib/sqlalchemy/dialects/mysql/mysqldb.py @@ -1,5 +1,18 @@ """Support for the MySQL database via the MySQL-python adapter. +MySQL-Python is available at: + + http://sourceforge.net/projects/mysql-python + +At least version 1.2.1 or 1.2.2 should be used. + +Connecting +----------- + +Connect string format:: + + mysql+mysqldb://:@[:]/ + Character Sets -------------- @@ -14,10 +27,21 @@ enabling ``use_unicode`` in the driver by default. 
For regular encoded strings, also pass ``use_unicode=0`` in the connection arguments:: # set client encoding to utf8; all strings come back as unicode - create_engine('mysql:///mydb?charset=utf8') + create_engine('mysql+mysqldb:///mydb?charset=utf8') # set client encoding to utf8; all strings come back as utf8 str - create_engine('mysql:///mydb?charset=utf8&use_unicode=0') + create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0') + +Known Issues +------------- + +MySQL-python at least as of version 1.2.2 has a serious memory leak related +to unicode conversion, a feature which is disabled via ``use_unicode=0``. +The recommended connection form with SQLAlchemy is:: + + engine = create_engine('mysql://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600) + + """ import re diff --git a/lib/sqlalchemy/dialects/mysql/oursql.py b/lib/sqlalchemy/dialects/mysql/oursql.py index 9e38993f2..ebc726482 100644 --- a/lib/sqlalchemy/dialects/mysql/oursql.py +++ b/lib/sqlalchemy/dialects/mysql/oursql.py @@ -1,5 +1,16 @@ """Support for the MySQL database via the oursql adapter. +OurSQL is available at: + + http://packages.python.org/oursql/ + +Connecting +----------- + +Connect string format:: + + mysql+oursql://:@[:]/ + Character Sets -------------- diff --git a/lib/sqlalchemy/dialects/mysql/pyodbc.py b/lib/sqlalchemy/dialects/mysql/pyodbc.py index 5add45b21..1f73c6ef1 100644 --- a/lib/sqlalchemy/dialects/mysql/pyodbc.py +++ b/lib/sqlalchemy/dialects/mysql/pyodbc.py @@ -1,5 +1,24 @@ """Support for the MySQL database via the pyodbc adapter. +pyodbc is available at: + + http://pypi.python.org/pypi/pyodbc/ + +Connecting +---------- + +Connect string:: + + mysql+pyodbc://:@ + +Limitations +----------- + +The mysql-pyodbc dialect is subject to unresolved character encoding issues +which exist within the current ODBC drivers available. +(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage +of OurSQL, MySQLdb, or MySQL-connector/Python. 
+ """ from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext diff --git a/lib/sqlalchemy/dialects/mysql/zxjdbc.py b/lib/sqlalchemy/dialects/mysql/zxjdbc.py index f4cf0013c..06d3e6616 100644 --- a/lib/sqlalchemy/dialects/mysql/zxjdbc.py +++ b/lib/sqlalchemy/dialects/mysql/zxjdbc.py @@ -6,6 +6,13 @@ JDBC Driver The official MySQL JDBC driver is at http://dev.mysql.com/downloads/connector/j/. +Connecting +---------- + +Connect string format: + + mysql+zxjdbc://:@[:]/ + Character Sets -------------- -- cgit v1.2.1 From 51fd3447373611af1b9f66a7f5f0c7c4abe94bbb Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 28 Mar 2010 13:12:38 -0400 Subject: - The sqlalchemy.orm.shard module now becomes an extension, sqlalchemy.ext.horizontal_shard. The old import works with a deprecation warning. --- lib/sqlalchemy/engine/__init__.py | 9 +-- lib/sqlalchemy/engine/base.py | 3 + lib/sqlalchemy/ext/horizontal_shard.py | 125 +++++++++++++++++++++++++++++++++ lib/sqlalchemy/orm/shard.py | 112 ++--------------------------- 4 files changed, 138 insertions(+), 111 deletions(-) create mode 100644 lib/sqlalchemy/ext/horizontal_shard.py (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/engine/__init__.py b/lib/sqlalchemy/engine/__init__.py index 0dbb2404f..9b3dbedd8 100644 --- a/lib/sqlalchemy/engine/__init__.py +++ b/lib/sqlalchemy/engine/__init__.py @@ -107,10 +107,11 @@ def create_engine(*args, **kwargs): arguments sent as options to the dialect and resulting Engine. The URL is a string in the form - ``dialect://user:password@host/dbname[?key=value..]``, where - ``dialect`` is a name such as ``mysql``, ``oracle``, ``postgresql``, - etc. Alternatively, the URL can be an instance of - :class:`~sqlalchemy.engine.url.URL`. 
+ ``dialect+driver://user:password@host/dbname[?key=value..]``, where + ``dialect`` is a database name such as ``mysql``, ``oracle``, + ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as + ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, + the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. `**kwargs` takes a wide variety of options which are routed towards their appropriate components. Arguments may be diff --git a/lib/sqlalchemy/engine/base.py b/lib/sqlalchemy/engine/base.py index 5490169c6..dc42ed957 100644 --- a/lib/sqlalchemy/engine/base.py +++ b/lib/sqlalchemy/engine/base.py @@ -1420,6 +1420,9 @@ class Engine(Connectable, log.Identified): """ Connects a :class:`~sqlalchemy.pool.Pool` and :class:`~sqlalchemy.engine.base.Dialect` together to provide a source of database connectivity and behavior. + + An :class:`Engine` object is instantiated publicly using the :func:`~sqlalchemy.create_engine` + function. """ diff --git a/lib/sqlalchemy/ext/horizontal_shard.py b/lib/sqlalchemy/ext/horizontal_shard.py new file mode 100644 index 000000000..78e3f5953 --- /dev/null +++ b/lib/sqlalchemy/ext/horizontal_shard.py @@ -0,0 +1,125 @@ +# horizontal_shard.py +# Copyright (C) the SQLAlchemy authors and contributors +# +# This module is part of SQLAlchemy and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +"""Horizontal sharding support. + +Defines a rudimental 'horizontal sharding' system which allows a Session to +distribute queries and persistence operations across multiple databases. + +For a usage example, see the :ref:`examples_sharding` example included in +the source distribution. 
+ +""" + +import sqlalchemy.exceptions as sa_exc +from sqlalchemy import util +from sqlalchemy.orm.session import Session +from sqlalchemy.orm.query import Query + +__all__ = ['ShardedSession', 'ShardedQuery'] + + +class ShardedSession(Session): + def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, **kwargs): + """Construct a ShardedSession. + + :param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a + SQL clause, returns a shard ID. This id may be based off of the + attributes present within the object, or on some round-robin + scheme. If the scheme is based on a selection, it should set + whatever state on the instance to mark it in the future as + participating in that shard. + + :param id_chooser: A callable, passed a query and a tuple of identity values, which + should return a list of shard ids where the ID might reside. The + databases will be queried in the order of this listing. + + :param query_chooser: For a given Query, returns the list of shard_ids where the query + should be issued. Results from all shards returned will be combined + together into a single listing. + + :param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.base.Engine` + objects. 
+ + """ + super(ShardedSession, self).__init__(**kwargs) + self.shard_chooser = shard_chooser + self.id_chooser = id_chooser + self.query_chooser = query_chooser + self.__binds = {} + self._mapper_flush_opts = {'connection_callable':self.connection} + self._query_cls = ShardedQuery + if shards is not None: + for k in shards: + self.bind_shard(k, shards[k]) + + def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): + if shard_id is None: + shard_id = self.shard_chooser(mapper, instance) + + if self.transaction is not None: + return self.transaction.connection(mapper, shard_id=shard_id) + else: + return self.get_bind(mapper, + shard_id=shard_id, + instance=instance).contextual_connect(**kwargs) + + def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw): + if shard_id is None: + shard_id = self.shard_chooser(mapper, instance, clause=clause) + return self.__binds[shard_id] + + def bind_shard(self, shard_id, bind): + self.__binds[shard_id] = bind + +class ShardedQuery(Query): + def __init__(self, *args, **kwargs): + super(ShardedQuery, self).__init__(*args, **kwargs) + self.id_chooser = self.session.id_chooser + self.query_chooser = self.session.query_chooser + self._shard_id = None + + def set_shard(self, shard_id): + """return a new query, limited to a single shard ID. + + all subsequent operations with the returned query will + be against the single shard regardless of other state. 
+ """ + + q = self._clone() + q._shard_id = shard_id + return q + + def _execute_and_instances(self, context): + if self._shard_id is not None: + result = self.session.connection( + mapper=self._mapper_zero(), + shard_id=self._shard_id).execute(context.statement, self._params) + return self.instances(result, context) + else: + partial = [] + for shard_id in self.query_chooser(self): + result = self.session.connection( + mapper=self._mapper_zero(), + shard_id=shard_id).execute(context.statement, self._params) + partial = partial + list(self.instances(result, context)) + + # if some kind of in memory 'sorting' + # were done, this is where it would happen + return iter(partial) + + def get(self, ident, **kwargs): + if self._shard_id is not None: + return super(ShardedQuery, self).get(ident) + else: + ident = util.to_list(ident) + for shard_id in self.id_chooser(self, ident): + o = self.set_shard(shard_id).get(ident, **kwargs) + if o is not None: + return o + else: + return None + diff --git a/lib/sqlalchemy/orm/shard.py b/lib/sqlalchemy/orm/shard.py index b6026bbc3..9cb26db79 100644 --- a/lib/sqlalchemy/orm/shard.py +++ b/lib/sqlalchemy/orm/shard.py @@ -4,114 +4,12 @@ # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -"""Horizontal sharding support. - -Defines a rudimental 'horizontal sharding' system which allows a Session to -distribute queries and persistence operations across multiple databases. - -For a usage example, see the file ``examples/sharding/attribute_shard.py`` -included in the source distrbution. - -""" - -import sqlalchemy.exceptions as sa_exc from sqlalchemy import util -from sqlalchemy.orm.session import Session -from sqlalchemy.orm.query import Query - -__all__ = ['ShardedSession', 'ShardedQuery'] - - -class ShardedSession(Session): - def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, **kwargs): - """Construct a ShardedSession. 
- - shard_chooser - A callable which, passed a Mapper, a mapped instance, and possibly a - SQL clause, returns a shard ID. This id may be based off of the - attributes present within the object, or on some round-robin - scheme. If the scheme is based on a selection, it should set - whatever state on the instance to mark it in the future as - participating in that shard. - - id_chooser - A callable, passed a query and a tuple of identity values, which - should return a list of shard ids where the ID might reside. The - databases will be queried in the order of this listing. - - query_chooser - For a given Query, returns the list of shard_ids where the query - should be issued. Results from all shards returned will be combined - together into a single listing. - - """ - super(ShardedSession, self).__init__(**kwargs) - self.shard_chooser = shard_chooser - self.id_chooser = id_chooser - self.query_chooser = query_chooser - self.__binds = {} - self._mapper_flush_opts = {'connection_callable':self.connection} - self._query_cls = ShardedQuery - if shards is not None: - for k in shards: - self.bind_shard(k, shards[k]) - - def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance) - - if self.transaction is not None: - return self.transaction.connection(mapper, shard_id=shard_id) - else: - return self.get_bind(mapper, shard_id=shard_id, instance=instance).contextual_connect(**kwargs) - - def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance, clause=clause) - return self.__binds[shard_id] - def bind_shard(self, shard_id, bind): - self.__binds[shard_id] = bind +util.warn_deprecated( + "Horizontal sharding is now importable via " + "'import sqlalchemy.ext.horizontal_shard" +) -class ShardedQuery(Query): - def __init__(self, *args, **kwargs): - super(ShardedQuery, self).__init__(*args, **kwargs) - 
self.id_chooser = self.session.id_chooser - self.query_chooser = self.session.query_chooser - self._shard_id = None - - def set_shard(self, shard_id): - """return a new query, limited to a single shard ID. - - all subsequent operations with the returned query will - be against the single shard regardless of other state. - """ - - q = self._clone() - q._shard_id = shard_id - return q - - def _execute_and_instances(self, context): - if self._shard_id is not None: - result = self.session.connection(mapper=self._mapper_zero(), shard_id=self._shard_id).execute(context.statement, self._params) - return self.instances(result, context) - else: - partial = [] - for shard_id in self.query_chooser(self): - result = self.session.connection(mapper=self._mapper_zero(), shard_id=shard_id).execute(context.statement, self._params) - partial = partial + list(self.instances(result, context)) - # if some kind of in memory 'sorting' were done, this is where it would happen - return iter(partial) +from sqlalchemy.ext.horizontal_shard import * - def get(self, ident, **kwargs): - if self._shard_id is not None: - return super(ShardedQuery, self).get(ident) - else: - ident = util.to_list(ident) - for shard_id in self.id_chooser(self, ident): - o = self.set_shard(shard_id).get(ident, **kwargs) - if o is not None: - return o - else: - return None - -- cgit v1.2.1 From a6418268cbc09d1e0f96fd7a84bce322d3cb971f Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Sun, 28 Mar 2010 16:41:10 -0400 Subject: - A collection lazy load will switch off default eagerloading on the reverse many-to-one side, since that loading is by definition unnecessary. 
[ticket:1495] --- lib/sqlalchemy/orm/properties.py | 169 ++++++++++++++++++++++++++++++--------- lib/sqlalchemy/orm/strategies.py | 9 +++ 2 files changed, 139 insertions(+), 39 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index 80d101b78..a8295e2cd 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -407,8 +407,12 @@ class RelationshipProperty(StrategizedProperty): else: self.cascade = CascadeOptions("save-update, merge") - if self.passive_deletes == 'all' and ("delete" in self.cascade or "delete-orphan" in self.cascade): - raise sa_exc.ArgumentError("Can't set passive_deletes='all' in conjunction with 'delete' or 'delete-orphan' cascade") + if self.passive_deletes == 'all' and \ + ("delete" in self.cascade or + "delete-orphan" in self.cascade): + raise sa_exc.ArgumentError( + "Can't set passive_deletes='all' in conjunction " + "with 'delete' or 'delete-orphan' cascade") self.order_by = order_by @@ -416,7 +420,9 @@ class RelationshipProperty(StrategizedProperty): if self.back_populates: if backref: - raise sa_exc.ArgumentError("backref and back_populates keyword arguments are mutually exclusive") + raise sa_exc.ArgumentError( + "backref and back_populates keyword arguments " + "are mutually exclusive") self.backref = None else: self.backref = backref @@ -463,7 +469,10 @@ class RelationshipProperty(StrategizedProperty): return op(self, *other, **kwargs) def of_type(self, cls): - return RelationshipProperty.Comparator(self.property, self.mapper, cls, adapter=self.adapter) + return RelationshipProperty.Comparator( + self.property, + self.mapper, + cls, adapter=self.adapter) def in_(self, other): raise NotImplementedError("in_() not yet supported for relationships. 
For a " @@ -476,11 +485,21 @@ class RelationshipProperty(StrategizedProperty): if self.property.direction in [ONETOMANY, MANYTOMANY]: return ~self._criterion_exists() else: - return _orm_annotate(self.property._optimized_compare(None, adapt_source=self.adapter)) + return _orm_annotate( + self.property._optimized_compare( + None, + adapt_source=self.adapter) + ) elif self.property.uselist: - raise sa_exc.InvalidRequestError("Can't compare a collection to an object or collection; use contains() to test for membership.") + raise sa_exc.InvalidRequestError( + "Can't compare a collection to an object or " + "collection; use contains() to test for membership.") else: - return _orm_annotate(self.property._optimized_compare(other, adapt_source=self.adapter)) + return _orm_annotate( + self.property._optimized_compare( + other, + adapt_source=self.adapter) + ) def _criterion_exists(self, criterion=None, **kwargs): if getattr(self, '_of_type', None): @@ -504,7 +523,10 @@ class RelationshipProperty(StrategizedProperty): source_selectable = None pj, sj, source, dest, secondary, target_adapter = \ - self.property._create_joins(dest_polymorphic=True, dest_selectable=to_selectable, source_selectable=source_selectable) + self.property._create_joins( + dest_polymorphic=True, + dest_selectable=to_selectable, + source_selectable=source_selectable) for k in kwargs: crit = self.property.mapper.class_manager[k] == kwargs[k] @@ -513,9 +535,9 @@ class RelationshipProperty(StrategizedProperty): else: criterion = criterion & crit - # annotate the *local* side of the join condition, in the case of pj + sj this - # is the full primaryjoin, in the case of just pj its the local side of - # the primaryjoin. + # annotate the *local* side of the join condition, in the case + # of pj + sj this is the full primaryjoin, in the case of just + # pj its the local side of the primaryjoin. 
if sj is not None: j = _orm_annotate(pj) & sj else: @@ -525,8 +547,10 @@ class RelationshipProperty(StrategizedProperty): # limit this adapter to annotated only? criterion = target_adapter.traverse(criterion) - # only have the "joined left side" of what we return be subject to Query adaption. The right - # side of it is used for an exists() subquery and should not correlate or otherwise reach out + # only have the "joined left side" of what we + # return be subject to Query adaption. The right + # side of it is used for an exists() subquery and + # should not correlate or otherwise reach out # to anything in the enclosing query. if criterion is not None: criterion = criterion._annotate({'_halt_adapt': True}) @@ -537,18 +561,25 @@ class RelationshipProperty(StrategizedProperty): def any(self, criterion=None, **kwargs): if not self.property.uselist: - raise sa_exc.InvalidRequestError("'any()' not implemented for scalar attributes. Use has().") + raise sa_exc.InvalidRequestError( + "'any()' not implemented for scalar " + "attributes. Use has()." + ) return self._criterion_exists(criterion, **kwargs) def has(self, criterion=None, **kwargs): if self.property.uselist: - raise sa_exc.InvalidRequestError("'has()' not implemented for collections. Use any().") + raise sa_exc.InvalidRequestError( + "'has()' not implemented for collections. " + "Use any().") return self._criterion_exists(criterion, **kwargs) def contains(self, other, **kwargs): if not self.property.uselist: - raise sa_exc.InvalidRequestError("'contains' not implemented for scalar attributes. Use ==") + raise sa_exc.InvalidRequestError( + "'contains' not implemented for scalar " + "attributes. 
Use ==") clause = self.property._optimized_compare(other, adapt_source=self.adapter) if self.property.secondaryjoin is not None: @@ -559,7 +590,6 @@ class RelationshipProperty(StrategizedProperty): def __negated_contains_or_equals(self, other): if self.property.direction == MANYTOONE: state = attributes.instance_state(other) - strategy = self.property._get_strategy(strategies.LazyLoader) def state_bindparam(state, col): o = state.obj() # strong ref @@ -571,14 +601,20 @@ class RelationshipProperty(StrategizedProperty): else: return col - if strategy.use_get: + if self.property._use_get: return sql.and_(*[ sql.or_( adapt(x) != state_bindparam(state, y), adapt(x) == None) for (x, y) in self.property.local_remote_pairs]) - criterion = sql.and_(*[x==y for (x, y) in zip(self.property.mapper.primary_key, self.property.mapper.primary_key_from_instance(other))]) + criterion = sql.and_(*[x==y for (x, y) in + zip( + self.property.mapper.primary_key, + self.property.\ + mapper.\ + primary_key_from_instance(other)) + ]) return ~self._criterion_exists(criterion) def __ne__(self, other): @@ -588,7 +624,9 @@ class RelationshipProperty(StrategizedProperty): else: return self._criterion_exists() elif self.property.uselist: - raise sa_exc.InvalidRequestError("Can't compare a collection to an object or collection; use contains() to test for membership.") + raise sa_exc.InvalidRequestError( + "Can't compare a collection to an object or " + "collection; use contains() to test for membership.") else: return self.__negated_contains_or_equals(other) @@ -625,7 +663,13 @@ class RelationshipProperty(StrategizedProperty): def __str__(self): return str(self.parent.class_.__name__) + "." 
+ self.key - def merge(self, session, source_state, source_dict, dest_state, dest_dict, load, _recursive): + def merge(self, + session, + source_state, + source_dict, + dest_state, + dest_dict, + load, _recursive): if load: # TODO: no test coverage for recursive check for r in self._reverse_property: @@ -866,7 +910,10 @@ class RelationshipProperty(StrategizedProperty): ] if not eq_pairs: - if not self.viewonly and criterion_as_pairs(self.primaryjoin, consider_as_foreign_keys=self._foreign_keys, any_operator=True): + if not self.viewonly and criterion_as_pairs( + self.primaryjoin, + consider_as_foreign_keys=self._foreign_keys, + any_operator=True): raise sa_exc.ArgumentError("Could not locate any equated, locally " "mapped column pairs for primaryjoin condition '%s' on relationship %s. " "For more relaxed rules on join conditions, the relationship may be " @@ -887,11 +934,24 @@ class RelationshipProperty(StrategizedProperty): self.synchronize_pairs = eq_pairs if self.secondaryjoin is not None: - sq_pairs = criterion_as_pairs(self.secondaryjoin, consider_as_foreign_keys=self._foreign_keys, any_operator=self.viewonly) - sq_pairs = [(l, r) for l, r in sq_pairs if (self._col_is_part_of_mappings(l) and self._col_is_part_of_mappings(r)) or r in self._foreign_keys] + sq_pairs = criterion_as_pairs( + self.secondaryjoin, + consider_as_foreign_keys=self._foreign_keys, + any_operator=self.viewonly) + + sq_pairs = [ + (l, r) + for l, r in sq_pairs + if (self._col_is_part_of_mappings(l) and + self._col_is_part_of_mappings(r)) or + r in self._foreign_keys + ] if not sq_pairs: - if not self.viewonly and criterion_as_pairs(self.secondaryjoin, consider_as_foreign_keys=self._foreign_keys, any_operator=True): + if not self.viewonly and criterion_as_pairs( + self.secondaryjoin, + consider_as_foreign_keys=self._foreign_keys, + any_operator=True): raise sa_exc.ArgumentError("Could not locate any equated, locally mapped " "column pairs for secondaryjoin condition '%s' on relationship %s. 
" "For more relaxed rules on join conditions, the " @@ -1000,17 +1060,29 @@ class RelationshipProperty(StrategizedProperty): if self.secondaryjoin is not None: eq_pairs += self.secondary_synchronize_pairs else: - eq_pairs = criterion_as_pairs(self.primaryjoin, consider_as_foreign_keys=self._foreign_keys, any_operator=True) + eq_pairs = criterion_as_pairs( + self.primaryjoin, + consider_as_foreign_keys=self._foreign_keys, + any_operator=True) if self.secondaryjoin is not None: - eq_pairs += criterion_as_pairs(self.secondaryjoin, consider_as_foreign_keys=self._foreign_keys, any_operator=True) - eq_pairs = [(l, r) for l, r in eq_pairs if self._col_is_part_of_mappings(l) and self._col_is_part_of_mappings(r)] + eq_pairs += criterion_as_pairs( + self.secondaryjoin, + consider_as_foreign_keys=self._foreign_keys, + any_operator=True) + + eq_pairs = [ + (l, r) for l, r in eq_pairs + if self._col_is_part_of_mappings(l) and + self._col_is_part_of_mappings(r) + ] if self.direction is MANYTOONE: self.local_remote_pairs = [(r, l) for l, r in eq_pairs] else: self.local_remote_pairs = eq_pairs elif self.remote_side: - raise sa_exc.ArgumentError("remote_side argument is redundant against more detailed _local_remote_side argument.") + raise sa_exc.ArgumentError("remote_side argument is redundant " + "against more detailed _local_remote_side argument.") for l, r in self.local_remote_pairs: @@ -1024,16 +1096,20 @@ class RelationshipProperty(StrategizedProperty): "Specify remote_side argument to indicate which column lazy " "join condition should bind." 
% (r, self.mapper)) - self.local_side, self.remote_side = [util.ordered_column_set(x) for x in zip(*list(self.local_remote_pairs))] + self.local_side, self.remote_side = [ + util.ordered_column_set(x) for x in + zip(*list(self.local_remote_pairs))] def _assert_is_primary(self): if not self.is_primary() and \ - not mapper.class_mapper(self.parent.class_, compile=False)._get_property(self.key, raiseerr=False): + not mapper.class_mapper(self.parent.class_, compile=False).\ + _get_property(self.key, raiseerr=False): raise sa_exc.ArgumentError("Attempting to assign a new relationship '%s' to " "a non-primary mapper on class '%s'. New relationships can only be " "added to the primary mapper, i.e. the very first " - "mapper created for class '%s' " % (self.key, self.parent.class_.__name__, self.parent.class_.__name__)) + "mapper created for class '%s' " % + (self.key, self.parent.class_.__name__, self.parent.class_.__name__)) def _generate_backref(self): if not self.is_primary(): @@ -1089,17 +1165,27 @@ class RelationshipProperty(StrategizedProperty): def _post_init(self): self.logger.info("%s setup primary join %s", self, self.primaryjoin) self.logger.info("%s setup secondary join %s", self, self.secondaryjoin) - self.logger.info("%s synchronize pairs [%s]", self, ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs)) - self.logger.info("%s secondary synchronize pairs [%s]", self, ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or []))) - self.logger.info("%s local/remote pairs [%s]", self, ",".join("(%s / %s)" % (l, r) for l, r in self.local_remote_pairs)) + self.logger.info("%s synchronize pairs [%s]", self, + ",".join("(%s => %s)" % (l, r) for l, r in self.synchronize_pairs)) + self.logger.info("%s secondary synchronize pairs [%s]", self, + ",".join(("(%s => %s)" % (l, r) for l, r in self.secondary_synchronize_pairs or []))) + self.logger.info("%s local/remote pairs [%s]", self, + ",".join("(%s / %s)" % (l, r) for l, r in 
self.local_remote_pairs)) self.logger.info("%s relationship direction %s", self, self.direction) if self.uselist is None: self.uselist = self.direction is not MANYTOONE - + if not self.viewonly: self._dependency_processor = dependency.create_dependency_processor(self) - + + @util.memoized_property + def _use_get(self): + """memoize the 'use_get' attribute of this RelationshipLoader's lazyloader.""" + + strategy = self._get_strategy(strategies.LazyLoader) + return strategy.use_get + def _refers_to_parent_table(self): for c, f in self.synchronize_pairs: if c.table is f.table: @@ -1110,7 +1196,9 @@ class RelationshipProperty(StrategizedProperty): def _is_self_referential(self): return self.mapper.common_parent(self.parent) - def _create_joins(self, source_polymorphic=False, source_selectable=None, dest_polymorphic=False, dest_selectable=None, of_type=None): + def _create_joins(self, source_polymorphic=False, + source_selectable=None, dest_polymorphic=False, + dest_selectable=None, of_type=None): if source_selectable is None: if source_polymorphic and self.parent.with_polymorphic: source_selectable = self.parent._with_polymorphic_selectable @@ -1153,7 +1241,10 @@ class RelationshipProperty(StrategizedProperty): secondary = secondary.alias() primary_aliasizer = ClauseAdapter(secondary) if dest_selectable is not None: - secondary_aliasizer = ClauseAdapter(dest_selectable, equivalents=self.mapper._equivalent_columns).chain(primary_aliasizer) + secondary_aliasizer = \ + ClauseAdapter(dest_selectable, + equivalents=self.mapper._equivalent_columns).\ + chain(primary_aliasizer) else: secondary_aliasizer = primary_aliasizer diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py index 25c2f83a5..93b1170f4 100644 --- a/lib/sqlalchemy/orm/strategies.py +++ b/lib/sqlalchemy/orm/strategies.py @@ -611,8 +611,17 @@ class LoadLazyAttribute(object): if prop.order_by: q = q.order_by(*util.to_list(prop.order_by)) + for rev in prop._reverse_property: + # reverse 
props that are MANYTOONE are loading *this* + # object from get(), so don't need to eager out to those. + if rev.direction is interfaces.MANYTOONE and \ + rev._use_get and \ + not isinstance(rev.strategy, LazyLoader): + q = q.options(EagerLazyOption(rev.key, lazy='select')) + if state.load_options: q = q._conditional_options(*state.load_options) + q = q.filter(strategy.lazy_clause(state)) result = q.all() -- cgit v1.2.1 From 75e14f855ee64a01bb79e66f8a868911f6c9e583 Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Mon, 29 Mar 2010 17:56:02 -0400 Subject: - Session.refresh() now does an equivalent expire() on the given instance first, so that the "refresh-expire" cascade is propagated. Previously, refresh() was not affected in any way by the presence of "refresh-expire" cascade. This is a change in behavior versus that of 0.6beta2, where the "lockmode" flag passed to refresh() would cause a version check to occur. Since the instance is first expired, refresh() always upgrades the object to the most recent version. - The 'refresh-expire' cascade, when reaching a pending object, will expunge the object if the cascade also includes "delete-orphan", or will simply detach it otherwise. 
[ticket:1754] --- lib/sqlalchemy/orm/properties.py | 11 +++++++++-- lib/sqlalchemy/orm/session.py | 27 +++++++++++++++++++++------ lib/sqlalchemy/test/requires.py | 12 ++++++++++++ 3 files changed, 42 insertions(+), 8 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py index a8295e2cd..2a5e92c1a 100644 --- a/lib/sqlalchemy/orm/properties.py +++ b/lib/sqlalchemy/orm/properties.py @@ -742,6 +742,8 @@ class RelationshipProperty(StrategizedProperty): else: instances = state.value_as_iterable(self.key, passive=passive) + skip_pending = type_ == 'refresh-expire' and 'delete-orphan' not in self.cascade + if instances: for c in instances: if c is not None and \ @@ -757,12 +759,17 @@ class RelationshipProperty(StrategizedProperty): str(self.parent.class_), str(c.__class__) )) + instance_state = attributes.instance_state(c) + + if skip_pending and not instance_state.key: + continue + visited_instances.add(c) # cascade using the mapper local to this # object, so that its individual properties are located - instance_mapper = object_mapper(c) - yield (c, instance_mapper, attributes.instance_state(c)) + instance_mapper = instance_state.manager.mapper + yield (c, instance_mapper, instance_state) def _add_reverse_property(self, key): other = self.mapper._get_property(key) diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py index 0a3fbe79e..0810175bf 100644 --- a/lib/sqlalchemy/orm/session.py +++ b/lib/sqlalchemy/orm/session.py @@ -883,7 +883,7 @@ class Session(object): state.commit_all(dict_, self.identity_map) def refresh(self, instance, attribute_names=None, lockmode=None): - """Refresh the attributes on the given instance. + """Expire and refresh the attributes on the given instance. A query will be issued to the database and all attributes will be refreshed with their current database value. 
@@ -907,7 +907,9 @@ class Session(object): state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) - self._validate_persistent(state) + + self._expire_state(state, attribute_names) + if self.query(_object_mapper(instance))._get( state.key, refresh_state=state, lockmode=lockmode, @@ -939,18 +941,31 @@ class Session(object): state = attributes.instance_state(instance) except exc.NO_STATE: raise exc.UnmappedInstanceError(instance) + self._expire_state(state, attribute_names) + + def _expire_state(self, state, attribute_names): self._validate_persistent(state) if attribute_names: _expire_state(state, state.dict, - attribute_names=attribute_names, instance_dict=self.identity_map) + attribute_names=attribute_names, + instance_dict=self.identity_map) else: # pre-fetch the full cascade since the expire is going to # remove associations cascaded = list(_cascade_state_iterator('refresh-expire', state)) - _expire_state(state, state.dict, None, instance_dict=self.identity_map) + self._conditional_expire(state) for (state, m, o) in cascaded: - _expire_state(state, state.dict, None, instance_dict=self.identity_map) - + self._conditional_expire(state) + + def _conditional_expire(self, state): + """Expire a state if persistent, else expunge if pending""" + + if state.key: + _expire_state(state, state.dict, None, instance_dict=self.identity_map) + elif state in self._new: + self._new.pop(state) + state.detach() + def prune(self): """Remove unreferenced instances cached in the identity map. 
diff --git a/lib/sqlalchemy/test/requires.py b/lib/sqlalchemy/test/requires.py index 73b212095..bf911c2c2 100644 --- a/lib/sqlalchemy/test/requires.py +++ b/lib/sqlalchemy/test/requires.py @@ -149,6 +149,18 @@ def sequences(fn): no_support('sybase', 'no SEQUENCE support'), ) +def update_nowait(fn): + """Target database must support SELECT...FOR UPDATE NOWAIT""" + return _chain_decorators_on( + fn, + no_support('access', 'no FOR UPDATE NOWAIT support'), + no_support('firebird', 'no FOR UPDATE NOWAIT support'), + no_support('mssql', 'no FOR UPDATE NOWAIT support'), + no_support('mysql', 'no FOR UPDATE NOWAIT support'), + no_support('sqlite', 'no FOR UPDATE NOWAIT support'), + no_support('sybase', 'no FOR UPDATE NOWAIT support'), + ) + def subqueries(fn): """Target database must support subqueries.""" return _chain_decorators_on( -- cgit v1.2.1 From 43b9f0d116580474ac56c532a1427a4cdeb3748b Mon Sep 17 00:00:00 2001 From: Mike Bayer Date: Tue, 30 Mar 2010 10:39:36 -0400 Subject: - the compiler extension now allows @compiles decorators on base classes that extend to child classes, @compiles decorators on child classes that aren't broken by a @compiles decorator on the base class. 
--- lib/sqlalchemy/ext/compiler.py | 2 +- lib/sqlalchemy/sql/visitors.py | 21 +++++++++++---------- 2 files changed, 12 insertions(+), 11 deletions(-) (limited to 'lib/sqlalchemy') diff --git a/lib/sqlalchemy/ext/compiler.py b/lib/sqlalchemy/ext/compiler.py index 3226b0efd..dde49e232 100644 --- a/lib/sqlalchemy/ext/compiler.py +++ b/lib/sqlalchemy/ext/compiler.py @@ -165,7 +165,7 @@ A big part of using the compiler extension is subclassing SQLAlchemy expression def compiles(class_, *specs): def decorate(fn): - existing = getattr(class_, '_compiler_dispatcher', None) + existing = class_.__dict__.get('_compiler_dispatcher', None) if not existing: existing = _dispatcher() diff --git a/lib/sqlalchemy/sql/visitors.py b/lib/sqlalchemy/sql/visitors.py index 4a54375f8..799486c02 100644 --- a/lib/sqlalchemy/sql/visitors.py +++ b/lib/sqlalchemy/sql/visitors.py @@ -40,16 +40,17 @@ class VisitableType(type): # set up an optimized visit dispatch function # for use by the compiler - visit_name = cls.__visit_name__ - if isinstance(visit_name, str): - getter = operator.attrgetter("visit_%s" % visit_name) - def _compiler_dispatch(self, visitor, **kw): - return getter(visitor)(self, **kw) - else: - def _compiler_dispatch(self, visitor, **kw): - return getattr(visitor, 'visit_%s' % self.__visit_name__)(self, **kw) - - cls._compiler_dispatch = _compiler_dispatch + if '__visit_name__' in cls.__dict__: + visit_name = cls.__visit_name__ + if isinstance(visit_name, str): + getter = operator.attrgetter("visit_%s" % visit_name) + def _compiler_dispatch(self, visitor, **kw): + return getter(visitor)(self, **kw) + else: + def _compiler_dispatch(self, visitor, **kw): + return getattr(visitor, 'visit_%s' % self.__visit_name__)(self, **kw) + + cls._compiler_dispatch = _compiler_dispatch super(VisitableType, cls).__init__(clsname, bases, clsdict) -- cgit v1.2.1