Diffstat (limited to 'lib/sqlalchemy/mapping')
-rw-r--r--  lib/sqlalchemy/mapping/__init__.py    175
-rw-r--r--  lib/sqlalchemy/mapping/mapper.py      997
-rw-r--r--  lib/sqlalchemy/mapping/objectstore.py 358
-rw-r--r--  lib/sqlalchemy/mapping/properties.py  980
-rw-r--r--  lib/sqlalchemy/mapping/query.py       263
-rw-r--r--  lib/sqlalchemy/mapping/sync.py        129
-rw-r--r--  lib/sqlalchemy/mapping/topological.py 349
-rw-r--r--  lib/sqlalchemy/mapping/unitofwork.py  857
-rw-r--r--  lib/sqlalchemy/mapping/util.py         31
9 files changed, 0 insertions, 4139 deletions
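
For context, the deleted package implemented SQLAlchemy's original mapper and unit-of-work API; its public entry points are visible in the __init__.py diff below. As a hypothetical sketch of how that API was typically driven (the User/Address classes and the two Table objects are invented for illustration, while relation(), mapper() and objectstore are the real exports shown below):

    import sqlalchemy.mapping as mapping

    class User(object): pass
    class Address(object): pass

    # users_table and addresses_table are assumed to be existing Table objects
    mapping.mapper(Address, addresses_table)
    mapping.mapper(User, users_table, properties={
        # per _relation_loader() below: lazy=True (the default) builds a LazyLoader,
        # lazy=False an EagerLoader, lazy=None a plain PropertyLoader
        'addresses': mapping.relation(Address, lazy=False),
    })

    u = User()                    # the mapper-wrapped __init__ registers u as "new"
    u.addresses.append(Address())
    mapping.objectstore.commit()  # flushes the thread-local unit of work
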
diff --git a/lib/sqlalchemy/mapping/__init__.py b/lib/sqlalchemy/mapping/__init__.py
deleted file mode 100644
index d21b02aa5..000000000
--- a/lib/sqlalchemy/mapping/__init__.py
+++ /dev/null
@@ -1,175 +0,0 @@
-# mapper/__init__.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-the mapper package provides object-relational functionality, building upon the schema and sql
-packages and tying operations to class properties and constructors.
-"""
-import sqlalchemy.sql as sql
-import sqlalchemy.schema as schema
-import sqlalchemy.engine as engine
-import sqlalchemy.util as util
-import objectstore
-from exceptions import *
-import types as types
-from mapper import *
-from properties import *
-import mapper as mapperlib
-
-__all__ = ['relation', 'backref', 'eagerload', 'lazyload', 'noload', 'deferred', 'defer', 'undefer',
- 'mapper', 'clear_mappers', 'objectstore', 'sql', 'extension', 'class_mapper', 'object_mapper', 'MapperExtension',
- 'assign_mapper', 'cascade_mappers'
- ]
-
-def relation(*args, **kwargs):
- """provides a relationship of a primary Mapper to a secondary Mapper, which corresponds
- to a parent-child or associative table relationship."""
- if len(args) > 1 and isinstance(args[0], type):
- raise ArgumentError("relation(class, table, **kwargs) is deprecated. Please use relation(class, **kwargs) or relation(mapper, **kwargs).")
- return _relation_loader(*args, **kwargs)
-
-def _relation_loader(mapper, secondary=None, primaryjoin=None, secondaryjoin=None, lazy=True, **kwargs):
- if lazy:
- return LazyLoader(mapper, secondary, primaryjoin, secondaryjoin, **kwargs)
- elif lazy is None:
- return PropertyLoader(mapper, secondary, primaryjoin, secondaryjoin, **kwargs)
- else:
- return EagerLoader(mapper, secondary, primaryjoin, secondaryjoin, **kwargs)
-
-def backref(name, **kwargs):
- return BackRef(name, **kwargs)
-
-def deferred(*columns, **kwargs):
-    """returns a DeferredColumnProperty, which indicates this object attribute should only be loaded
- from its corresponding table column when first accessed."""
- return DeferredColumnProperty(*columns, **kwargs)
-
-def mapper(class_, table=None, *args, **params):
- """returns a new or already cached Mapper object."""
- if table is None:
- return class_mapper(class_)
-
- return Mapper(class_, table, *args, **params)
-
-def clear_mappers():
- """removes all mappers that have been created thus far. when new mappers are
- created, they will be assigned to their classes as their primary mapper."""
- mapper_registry.clear()
-
-def clear_mapper(m):
- """removes the given mapper from the storage of mappers. when a new mapper is
-    created for the previous mapper's class, it will be used as that class's
- new primary mapper."""
- del mapper_registry[m.hash_key]
-
-def extension(ext):
- """returns a MapperOption that will add the given MapperExtension to the
- mapper returned by mapper.options()."""
- return ExtensionOption(ext)
-def eagerload(name, **kwargs):
- """returns a MapperOption that will convert the property of the given name
- into an eager load. Used with mapper.options()"""
- return EagerLazyOption(name, toeager=True, **kwargs)
-
-def lazyload(name, **kwargs):
- """returns a MapperOption that will convert the property of the given name
- into a lazy load. Used with mapper.options()"""
- return EagerLazyOption(name, toeager=False, **kwargs)
-
-def noload(name, **kwargs):
- """returns a MapperOption that will convert the property of the given name
- into a non-load. Used with mapper.options()"""
- return EagerLazyOption(name, toeager=None, **kwargs)
-
-def defer(name, **kwargs):
- """returns a MapperOption that will convert the column property of the given
- name into a deferred load. Used with mapper.options()"""
- return DeferredOption(name, defer=True)
-def undefer(name, **kwargs):
- """returns a MapperOption that will convert the column property of the given
- name into a non-deferred (regular column) load. Used with mapper.options."""
- return DeferredOption(name, defer=False)
-
-
-
-def assign_mapper(class_, *args, **params):
- params.setdefault("is_primary", True)
- if not isinstance(getattr(class_, '__init__'), types.MethodType):
- def __init__(self, **kwargs):
- for key, value in kwargs.items():
- setattr(self, key, value)
- class_.__init__ = __init__
- m = mapper(class_, *args, **params)
- class_.mapper = m
- class_.get = m.get
- class_.select = m.select
- class_.select_by = m.select_by
- class_.selectone = m.selectone
- class_.get_by = m.get_by
- def commit(self):
- objectstore.commit(self)
- def delete(self):
- objectstore.delete(self)
- def expire(self):
- objectstore.expire(self)
- def refresh(self):
- objectstore.refresh(self)
- def expunge(self):
- objectstore.expunge(self)
- class_.commit = commit
- class_.delete = delete
- class_.expire = expire
- class_.refresh = refresh
- class_.expunge = expunge
-
-def cascade_mappers(*classes_or_mappers):
- """given a list of classes and/or mappers, identifies the foreign key relationships
- between the given mappers or corresponding class mappers, and creates relation()
- objects representing those relationships, including a backreference. Attempts to find
- the "secondary" table in a many-to-many relationship as well. The names of the relations
- will be a lowercase version of the related class. In the case of one-to-many or many-to-many,
- the name will be "pluralized", which currently is based on the English language (i.e. an 's' or
- 'es' added to it)."""
- table_to_mapper = {}
- for item in classes_or_mappers:
- if isinstance(item, Mapper):
- m = item
- else:
- klass = item
- m = class_mapper(klass)
- table_to_mapper[m.table] = m
- def pluralize(name):
- # oh crap, do we need locale stuff now
- if name[-1] == 's':
- return name + "es"
- else:
- return name + "s"
- for table,mapper in table_to_mapper.iteritems():
- for fk in table.foreign_keys:
- if fk.column.table is table:
- continue
- secondary = None
- try:
- m2 = table_to_mapper[fk.column.table]
- except KeyError:
- if len(fk.column.table.primary_key):
- continue
- for sfk in fk.column.table.foreign_keys:
- if sfk.column.table is table:
- continue
- m2 = table_to_mapper.get(sfk.column.table)
- secondary = fk.column.table
- if m2 is None:
- continue
- if secondary:
- propname = pluralize(m2.class_.__name__.lower())
- propname2 = pluralize(mapper.class_.__name__.lower())
- else:
- propname = m2.class_.__name__.lower()
- propname2 = pluralize(mapper.class_.__name__.lower())
- mapper.add_property(propname, relation(m2, secondary=secondary, backref=propname2))
-
-
\ No newline at end of file
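
A usage note on assign_mapper() from the file above: it installed a keyword-accepting __init__ when the class did not define one, and attached the mapper's query methods plus objectstore shortcuts directly onto the mapped class. A hedged sketch, again with an invented User class and users_table:

    class User(object): pass

    mapping.assign_mapper(User, users_table)

    ed = User(name='ed')                 # generated __init__ applies **kwargs via setattr
    ed.commit()                          # shorthand for objectstore.commit(ed)
    matches = User.select_by(name='ed')  # query methods are proxied from the mapper
    ed.delete()                          # marks the instance for deletion at the next commit
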
diff --git a/lib/sqlalchemy/mapping/mapper.py b/lib/sqlalchemy/mapping/mapper.py
deleted file mode 100644
index 7977cae6a..000000000
--- a/lib/sqlalchemy/mapping/mapper.py
+++ /dev/null
@@ -1,997 +0,0 @@
-# mapper/mapper.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-import sqlalchemy.sql as sql
-import sqlalchemy.schema as schema
-import sqlalchemy.util as util
-import util as mapperutil
-import sync
-from sqlalchemy.exceptions import *
-import query
-import objectstore
-import sys
-import weakref
-
-# a dictionary mapping classes to their primary mappers
-mapper_registry = weakref.WeakKeyDictionary()
-
-# a list of MapperExtensions that will be installed in all mappers by default
-global_extensions = []
-
-# a constant returned by _getattrbycolumn to indicate
-# this mapper is not handling an attribute for a particular
-# column
-NO_ATTRIBUTE = object()
-
-# returned by a MapperExtension method to indicate a "do nothing" response
-EXT_PASS = object()
-
-class Mapper(object):
- """Persists object instances to and from schema.Table objects via the sql package.
- Instances of this class should be constructed through this package's mapper() or
- relation() function."""
- def __init__(self,
- class_,
- table,
- primarytable = None,
- properties = None,
- primary_key = None,
- is_primary = False,
- inherits = None,
- inherit_condition = None,
- extension = None,
- order_by = False,
- allow_column_override = False,
- entity_name = None,
- always_refresh = False,
- version_id_col = None,
- construct_new = False,
- **kwargs):
-
- if primarytable is not None:
- sys.stderr.write("'primarytable' argument to mapper is deprecated\n")
-
- ext = MapperExtension()
-
- for ext_class in global_extensions:
- ext = ext_class().chain(ext)
-
- if extension is not None:
- for ext_obj in util.to_list(extension):
- ext = ext_obj.chain(ext)
-
- self.extension = ext
-
- self.class_ = class_
- self.entity_name = entity_name
- self.class_key = ClassKey(class_, entity_name)
- self.is_primary = is_primary
- self.order_by = order_by
- self._options = {}
- self.always_refresh = always_refresh
- self.version_id_col = version_id_col
- self.construct_new = construct_new
-
- if not issubclass(class_, object):
- raise ArgumentError("Class '%s' is not a new-style class" % class_.__name__)
-
- if isinstance(table, sql.Select):
-            # some db's, notably postgres, dont want to select from a select
- # without an alias. also if we make our own alias internally, then
- # the configured properties on the mapper are not matched against the alias
- # we make, theres workarounds but it starts to get really crazy (its crazy enough
- # the SQL that gets generated) so just require an alias
- raise ArgumentError("Mapping against a Select object requires that it has a name. Use an alias to give it a name, i.e. s = select(...).alias('myselect')")
- else:
- self.table = table
-
- if inherits is not None:
- if self.class_.__mro__[1] != inherits.class_:
- raise ArgumentError("Class '%s' does not inherit from '%s'" % (self.class_.__name__, inherits.class_.__name__))
- self.primarytable = inherits.primarytable
- # inherit_condition is optional.
- if not table is inherits.noninherited_table:
- if inherit_condition is None:
- # figure out inherit condition from our table to the immediate table
- # of the inherited mapper, not its full table which could pull in other
- # stuff we dont want (allows test/inheritance.InheritTest4 to pass)
- inherit_condition = sql.join(inherits.noninherited_table, table).onclause
- self.table = sql.join(inherits.table, table, inherit_condition)
- #print "inherit condition", str(self.table.onclause)
-
- # generate sync rules. similarly to creating the on clause, specify a
-                # stricter set of tables to create "sync rules" by, based on the immediate
- # inherited table, rather than all inherited tables
- self._synchronizer = sync.ClauseSynchronizer(self, self, sync.ONETOMANY)
- self._synchronizer.compile(self.table.onclause, util.HashSet([inherits.noninherited_table]), mapperutil.TableFinder(table))
- # the old rule
- #self._synchronizer.compile(self.table.onclause, inherits.tables, TableFinder(table))
- else:
- self._synchronizer = None
- self.inherits = inherits
- self.noninherited_table = table
- if self.order_by is False:
- self.order_by = inherits.order_by
- else:
- self.primarytable = self.table
- self.noninherited_table = self.table
- self._synchronizer = None
- self.inherits = None
-
- # locate all tables contained within the "table" passed in, which
- # may be a join or other construct
- self.tables = mapperutil.TableFinder(self.table)
-
- # determine primary key columns, either passed in, or get them from our set of tables
- self.pks_by_table = {}
- if primary_key is not None:
- for k in primary_key:
- self.pks_by_table.setdefault(k.table, util.HashSet(ordered=True)).append(k)
- if k.table != self.table:
- # associate pk cols from subtables to the "main" table
- self.pks_by_table.setdefault(self.table, util.HashSet(ordered=True)).append(k)
- else:
- for t in self.tables + [self.table]:
- try:
- l = self.pks_by_table[t]
- except KeyError:
- l = self.pks_by_table.setdefault(t, util.HashSet(ordered=True))
- if not len(t.primary_key):
- raise ArgumentError("Table " + t.name + " has no primary key columns. Specify primary_key argument to mapper.")
- for k in t.primary_key:
- l.append(k)
-
- # make table columns addressable via the mapper
- self.columns = util.OrderedProperties()
- self.c = self.columns
-
- # object attribute names mapped to MapperProperty objects
- self.props = {}
-
- # table columns mapped to lists of MapperProperty objects
- # using a list allows a single column to be defined as
- # populating multiple object attributes
- self.columntoproperty = {}
-
- # load custom properties
- if properties is not None:
- for key, prop in properties.iteritems():
- if sql.is_column(prop):
- try:
- prop = self.table._get_col_by_original(prop)
- except KeyError:
- raise ArgumentError("Column '%s' is not represented in mapper's table" % prop._label)
- self.columns[key] = prop
- prop = ColumnProperty(prop)
- elif isinstance(prop, list) and sql.is_column(prop[0]):
- try:
- prop = [self.table._get_col_by_original(p) for p in prop]
- except KeyError, e:
- raise ArgumentError("Column '%s' is not represented in mapper's table" % e.args[0])
- self.columns[key] = prop[0]
- prop = ColumnProperty(*prop)
- self.props[key] = prop
- if isinstance(prop, ColumnProperty):
- for col in prop.columns:
- proplist = self.columntoproperty.setdefault(col.original, [])
- proplist.append(prop)
-
- # load properties from the main table object,
- # not overriding those set up in the 'properties' argument
- for column in self.table.columns:
- if not self.columns.has_key(column.key):
- self.columns[column.key] = column
-
- if self.columntoproperty.has_key(column.original):
- continue
-
- prop = self.props.get(column.key, None)
- if prop is None:
- prop = ColumnProperty(column)
- self.props[column.key] = prop
- elif isinstance(prop, ColumnProperty):
- # the order which columns are appended to a ColumnProperty is significant, as the
- # column at index 0 determines which result column is used to populate the object
- # attribute, in the case of mapping against a join with column names repeated
- # (and particularly in an inheritance relationship)
- prop.columns.insert(0, column)
- #prop.columns.append(column)
- else:
- if not allow_column_override:
-                    raise ArgumentError("Column '%s' not being added due to conflicting property '%s'. Specify 'allow_column_override=True' to mapper() to ignore this condition." % (column.key, repr(prop)))
- else:
- continue
-
- # its a ColumnProperty - match the ultimate table columns
- # back to the property
- proplist = self.columntoproperty.setdefault(column.original, [])
- proplist.append(prop)
-
- if not mapper_registry.has_key(self.class_key) or self.is_primary or (inherits is not None and inherits._is_primary_mapper()):
- objectstore.global_attributes.reset_class_managed(self.class_)
- self._init_class()
-
- if inherits is not None:
- for key, prop in inherits.props.iteritems():
- if not self.props.has_key(key):
- self.props[key] = prop.copy()
- self.props[key].parent = self
- # self.props[key].key = None # force re-init
- l = [(key, prop) for key, prop in self.props.iteritems()]
- for key, prop in l:
- if getattr(prop, 'key', None) is None:
- prop.init(key, self)
-
- # this prints a summary of the object attributes and how they
- # will be mapped to table columns
- #print "mapper %s, columntoproperty:" % (self.class_.__name__)
- #for key, value in self.columntoproperty.iteritems():
- # print key.table.name, key.key, [(v.key, v) for v in value]
-
- def _get_query(self):
- try:
- if self._query.mapper is not self:
- self._query = query.Query(self)
- return self._query
- except AttributeError:
- self._query = query.Query(self)
- return self._query
- query = property(_get_query, doc=\
- """returns an instance of sqlalchemy.mapping.query.Query, which implements all the query-constructing
- methods such as get(), select(), select_by(), etc. The default Query object uses the global thread-local
- Session from the objectstore package. To get a Query object for a specific Session, call the
- using(session) method.""")
-
- def get(self, *ident, **kwargs):
- """calls get() on this mapper's default Query object."""
- return self.query.get(*ident, **kwargs)
-
- def _get(self, key, ident=None, reload=False):
- return self.query._get(key, ident=ident, reload=reload)
-
- def get_by(self, *args, **params):
- """calls get_by() on this mapper's default Query object."""
- return self.query.get_by(*args, **params)
-
- def select_by(self, *args, **params):
- """calls select_by() on this mapper's default Query object."""
- return self.query.select_by(*args, **params)
-
- def selectfirst_by(self, *args, **params):
- """calls selectfirst_by() on this mapper's default Query object."""
- return self.query.selectfirst_by(*args, **params)
-
- def selectone_by(self, *args, **params):
- """calls selectone_by() on this mapper's default Query object."""
- return self.query.selectone_by(*args, **params)
-
- def count_by(self, *args, **params):
- """calls count_by() on this mapper's default Query object."""
- return self.query.count_by(*args, **params)
-
- def selectfirst(self, *args, **params):
- """calls selectfirst() on this mapper's default Query object."""
- return self.query.selectfirst(*args, **params)
-
- def selectone(self, *args, **params):
- """calls selectone() on this mapper's default Query object."""
- return self.query.selectone(*args, **params)
-
- def select(self, arg=None, **kwargs):
- """calls select() on this mapper's default Query object."""
- return self.query.select(arg=arg, **kwargs)
-
- def select_whereclause(self, whereclause=None, params=None, **kwargs):
- """calls select_whereclause() on this mapper's default Query object."""
- return self.query.select_whereclause(whereclause=whereclause, params=params, **kwargs)
-
- def count(self, whereclause=None, params=None, **kwargs):
- """calls count() on this mapper's default Query object."""
- return self.query.count(whereclause=whereclause, params=params, **kwargs)
-
- def select_statement(self, statement, **params):
- """calls select_statement() on this mapper's default Query object."""
- return self.query.select_statement(statement, **params)
-
- def select_text(self, text, **params):
- return self.query.select_text(text, **params)
-
- def add_property(self, key, prop):
- """adds an additional property to this mapper. this is the same as if it were
- specified within the 'properties' argument to the constructor. if the named
- property already exists, this will replace it. Useful for
- circular relationships, or overriding the parameters of auto-generated properties
- such as backreferences."""
- if sql.is_column(prop):
- self.columns[key] = prop
- prop = ColumnProperty(prop)
- self.props[key] = prop
- if isinstance(prop, ColumnProperty):
- for col in prop.columns:
- proplist = self.columntoproperty.setdefault(col.original, [])
- proplist.append(prop)
- prop.init(key, self)
-
- def __str__(self):
- return "Mapper|" + self.class_.__name__ + "|" + (self.entity_name is not None and "/%s" % self.entity_name or "") + self.primarytable.name
-
- def _is_primary_mapper(self):
- """returns True if this mapper is the primary mapper for its class key (class + entity_name)"""
- return mapper_registry.get(self.class_key, None) is self
-
- def _primary_mapper(self):
- """returns the primary mapper corresponding to this mapper's class key (class + entity_name)"""
- return mapper_registry[self.class_key]
-
- def is_assigned(self, instance):
- """returns True if this mapper is the primary mapper for the given instance. this is dependent
- not only on class assignment but the optional "entity_name" parameter as well."""
- return instance.__class__ is self.class_ and getattr(instance, '_entity_name', None) == self.entity_name
-
- def _init_class(self):
-        """sets up our class's overridden __init__ method, registers this mapper as the
-        primary mapper for the class, and assigns our columns as the class's 'c' property.
-        if the class already had a mapper, the old __init__ method is kept the same."""
- oldinit = self.class_.__init__
- def init(self, *args, **kwargs):
- self._entity_name = kwargs.pop('_sa_entity_name', None)
-
- # this gets the AttributeManager to do some pre-initialization,
- # in order to save on KeyErrors later on
- objectstore.global_attributes.init_attr(self)
-
- nohist = kwargs.pop('_mapper_nohistory', False)
- session = kwargs.pop('_sa_session', objectstore.get_session())
- if not nohist:
- # register new with the correct session, before the object's
- # constructor is called, since further assignments within the
- # constructor would otherwise bind it to whatever get_session() is.
- session.register_new(self)
- if oldinit is not None:
- oldinit(self, *args, **kwargs)
-        # override oldinit, ensuring that its not already one of our
- # own modified inits
- if oldinit is None or not hasattr(oldinit, '_sa_mapper_init'):
- init._sa_mapper_init = True
- self.class_.__init__ = init
- mapper_registry[self.class_key] = self
- if self.entity_name is None:
- self.class_.c = self.c
-
- def has_eager(self):
- """returns True if one of the properties attached to this Mapper is eager loading"""
- return getattr(self, '_has_eager', False)
-
- def set_property(self, key, prop):
- self.props[key] = prop
- prop.init(key, self)
-
- def instances(self, cursor, *mappers, **kwargs):
- """given a cursor (ResultProxy) from an SQLEngine, returns a list of object instances
- corresponding to the rows in the cursor."""
- limit = kwargs.get('limit', None)
- offset = kwargs.get('offset', None)
- session = kwargs.get('session', None)
- if session is None:
- session = objectstore.get_session()
- populate_existing = kwargs.get('populate_existing', False)
-
- result = util.HistoryArraySet()
- if mappers:
- otherresults = []
- for m in mappers:
- otherresults.append(util.HistoryArraySet())
-
- imap = {}
- while True:
- row = cursor.fetchone()
- if row is None:
- break
- self._instance(session, row, imap, result, populate_existing=populate_existing)
- i = 0
- for m in mappers:
- m._instance(session, row, imap, otherresults[i])
- i+=1
-
- # store new stuff in the identity map
- for value in imap.values():
- session.register_clean(value)
-
- if mappers:
- result = [result] + otherresults
- return result
-
- def identity_key(self, *primary_key):
- """returns the instance key for the given identity value. this is a global tracking object used by the objectstore, and is usually available off a mapped object as instance._instance_key."""
- return objectstore.get_id_key(tuple(primary_key), self.class_, self.entity_name)
-
- def instance_key(self, instance):
- """returns the instance key for the given instance. this is a global tracking object used by the objectstore, and is usually available off a mapped object as instance._instance_key."""
- return self.identity_key(*self.identity(instance))
-
- def identity(self, instance):
- """returns the identity (list of primary key values) for the given instance. The list of values can be fed directly into the get() method as mapper.get(*key)."""
- return [self._getattrbycolumn(instance, column) for column in self.pks_by_table[self.table]]
-
- def compile(self, whereclause = None, **options):
- """works like select, except returns the SQL statement object without
- compiling or executing it"""
- return self.query._compile(whereclause, **options)
-
- def copy(self, **kwargs):
- mapper = Mapper.__new__(Mapper)
- mapper.__dict__.update(self.__dict__)
- mapper.__dict__.update(kwargs)
- mapper.props = self.props.copy()
- return mapper
-
- def using(self, session):
- """returns a new Query object with the given Session."""
- if objectstore.get_session() is session:
- return self.query
- else:
- return query.Query(self, session=session)
-
- def options(self, *options, **kwargs):
- """uses this mapper as a prototype for a new mapper with different behavior.
- *options is a list of options directives, which include eagerload(), lazyload(), and noload()"""
-
- optkey = repr([hash_key(o) for o in options])
- try:
- return self._options[optkey]
- except KeyError:
- mapper = self.copy(**kwargs)
- for option in options:
- option.process(mapper)
- self._options[optkey] = mapper
- return mapper
-
- def _get_criterion(self, key, value):
- """used by select_by to match a key/value pair against
- local properties, column names, or a matching property in this mapper's
- list of relations."""
- if self.props.has_key(key):
- return self.props[key].columns[0] == value
- elif self.table.c.has_key(key):
- return self.table.c[key] == value
- else:
- for prop in self.props.values():
- c = prop.get_criterion(key, value)
- if c is not None:
- return c
- else:
- return None
-
- def __getattr__(self, key):
- if (key.startswith('select_by_') or key.startswith('get_by_')):
- return getattr(self.query, key)
- else:
- raise AttributeError(key)
-
- def _getpropbycolumn(self, column, raiseerror=True):
- try:
- prop = self.columntoproperty[column.original]
- except KeyError:
- try:
- prop = self.props[column.key]
- if not raiseerror:
- return None
- raise InvalidRequestError("Column '%s.%s' is not available, due to conflicting property '%s':%s" % (column.table.name, column.name, column.key, repr(prop)))
- except KeyError:
- if not raiseerror:
- return None
- raise InvalidRequestError("No column %s.%s is configured on mapper %s..." % (column.table.name, column.name, str(self)))
- return prop[0]
-
- def _getattrbycolumn(self, obj, column, raiseerror=True):
- prop = self._getpropbycolumn(column, raiseerror)
- if prop is None:
- return NO_ATTRIBUTE
- return prop.getattr(obj)
-
- def _setattrbycolumn(self, obj, column, value):
- self.columntoproperty[column.original][0].setattr(obj, value)
-
- def save_obj(self, objects, uow, postupdate=False):
- """called by a UnitOfWork object to save objects, which involves either an INSERT or
- an UPDATE statement for each table used by this mapper, for each element of the
- list."""
-
- for table in self.tables:
- #print "SAVE_OBJ table ", table.name
- # looping through our set of tables, which are all "real" tables, as opposed
- # to our main table which might be a select statement or something non-writeable
-
- # the loop structure is tables on the outer loop, objects on the inner loop.
- # this allows us to bundle inserts/updates on the same table together...although currently
- # they are separate execs via execute(), not executemany()
-
- if not self._has_pks(table):
- # if we dont have a full set of primary keys for this table, we cant really
- # do any CRUD with it, so skip. this occurs if we are mapping against a query
- # that joins on other tables so its not really an error condition.
- continue
-
- # two lists to store parameters for each table/object pair located
- insert = []
- update = []
-
- # we have our own idea of the primary key columns
- # for this table, in the case that the user
- # specified custom primary key cols.
- for obj in objects:
- #print "SAVE_OBJ we are Mapper(" + str(id(self)) + ") obj: " + obj.__class__.__name__ + repr(id(obj))
- params = {}
-
- # 'postupdate' means a PropertyLoader is telling us, "yes I know you
- # already inserted/updated this row but I need you to UPDATE one more
- # time"
- isinsert = not postupdate and not hasattr(obj, "_instance_key")
- if isinsert:
- self.extension.before_insert(self, obj)
- else:
- self.extension.before_update(self, obj)
- hasdata = False
- for col in table.columns:
- if col is self.version_id_col:
- if not isinsert:
- params[col._label] = self._getattrbycolumn(obj, col)
- params[col.key] = params[col._label] + 1
- else:
- params[col.key] = 1
- elif self.pks_by_table[table].contains(col):
- # column is a primary key ?
- if not isinsert:
- # doing an UPDATE? put primary key values as "WHERE" parameters
- # matching the bindparam we are creating below, i.e. "<tablename>_<colname>"
- params[col._label] = self._getattrbycolumn(obj, col)
- else:
- # doing an INSERT, primary key col ?
- # if the primary key values are not populated,
- # leave them out of the INSERT altogether, since PostGres doesn't want
- # them to be present for SERIAL to take effect. A SQLEngine that uses
- # explicit sequences will put them back in if they are needed
- value = self._getattrbycolumn(obj, col)
- if value is not None:
- params[col.key] = value
- else:
- # column is not a primary key ?
- if not isinsert:
- # doing an UPDATE ? get the history for the attribute, with "passive"
- # so as not to trigger any deferred loads. if there is a new
- # value, add it to the bind parameters
- prop = self._getpropbycolumn(col, False)
- if prop is None:
- continue
- history = prop.get_history(obj, passive=True)
- if history:
- a = history.added_items()
- if len(a):
- params[col.key] = a[0]
- hasdata = True
- else:
- # doing an INSERT, non primary key col ?
- # add the attribute's value to the
- # bind parameters, unless its None and the column has a
- # default. if its None and theres no default, we still might
-                            # not want to put it in the col list but SQLite doesnt seem to like that
- # if theres no columns at all
- value = self._getattrbycolumn(obj, col, False)
- if value is NO_ATTRIBUTE:
- continue
- if col.default is None or value is not None:
- params[col.key] = value
-
- if not isinsert:
- if hasdata:
- # if none of the attributes changed, dont even
- # add the row to be updated.
- update.append((obj, params))
- else:
- insert.append((obj, params))
- if len(update):
- clause = sql.and_()
- for col in self.pks_by_table[table]:
- clause.clauses.append(col == sql.bindparam(col._label, type=col.type))
- if self.version_id_col is not None:
- clause.clauses.append(self.version_id_col == sql.bindparam(self.version_id_col._label, type=col.type))
- statement = table.update(clause)
- rows = 0
- for rec in update:
- (obj, params) = rec
- c = statement.execute(params)
- self._postfetch(table, obj, c, c.last_updated_params())
- self.extension.after_update(self, obj)
- rows += c.cursor.rowcount
- if c.supports_sane_rowcount() and rows != len(update):
- raise CommitError("ConcurrencyError - updated rowcount %d does not match number of objects updated %d" % (rows, len(update)))
- if len(insert):
- statement = table.insert()
- for rec in insert:
- (obj, params) = rec
- c = statement.execute(**params)
- primary_key = c.last_inserted_ids()
- if primary_key is not None:
- i = 0
- for col in self.pks_by_table[table]:
- #print "col: " + table.name + "." + col.key + " val: " + repr(self._getattrbycolumn(obj, col))
- if self._getattrbycolumn(obj, col) is None:
- self._setattrbycolumn(obj, col, primary_key[i])
- i+=1
- self._postfetch(table, obj, c, c.last_inserted_params())
- if self._synchronizer is not None:
- self._synchronizer.execute(obj, obj)
- self.extension.after_insert(self, obj)
-
- def _postfetch(self, table, obj, resultproxy, params):
- """after an INSERT or UPDATE, asks the returned result if PassiveDefaults fired off on the database side
- which need to be post-fetched, *or* if pre-exec defaults like ColumnDefaults were fired off
- and should be populated into the instance. this is only for non-primary key columns."""
- if resultproxy.lastrow_has_defaults():
- clause = sql.and_()
- for p in self.pks_by_table[table]:
- clause.clauses.append(p == self._getattrbycolumn(obj, p))
- row = table.select(clause).execute().fetchone()
- for c in table.c:
- if self._getattrbycolumn(obj, c, False) is None:
- self._setattrbycolumn(obj, c, row[c])
- else:
- for c in table.c:
- if c.primary_key or not params.has_key(c.name):
- continue
- v = self._getattrbycolumn(obj, c, False)
- if v is NO_ATTRIBUTE:
- continue
- elif v != params.get_original(c.name):
- self._setattrbycolumn(obj, c, params.get_original(c.name))
-
- def delete_obj(self, objects, uow):
- """called by a UnitOfWork object to delete objects, which involves a
- DELETE statement for each table used by this mapper, for each object in the list."""
- for table in util.reversed(self.tables):
- if not self._has_pks(table):
- continue
- delete = []
- for obj in objects:
- params = {}
- if not hasattr(obj, "_instance_key"):
- continue
- else:
- delete.append(params)
- for col in self.pks_by_table[table]:
- params[col.key] = self._getattrbycolumn(obj, col)
- if self.version_id_col is not None:
- params[self.version_id_col.key] = self._getattrbycolumn(obj, self.version_id_col)
- self.extension.before_delete(self, obj)
- if len(delete):
- clause = sql.and_()
- for col in self.pks_by_table[table]:
- clause.clauses.append(col == sql.bindparam(col.key, type=col.type))
- if self.version_id_col is not None:
- clause.clauses.append(self.version_id_col == sql.bindparam(self.version_id_col.key, type=self.version_id_col.type))
- statement = table.delete(clause)
- c = statement.execute(*delete)
- if c.supports_sane_rowcount() and c.rowcount != len(delete):
-                    raise CommitError("ConcurrencyError - deleted rowcount %d does not match number of objects deleted %d" % (c.rowcount, len(delete)))
-
- def _has_pks(self, table):
- try:
- for k in self.pks_by_table[table]:
- if not self.columntoproperty.has_key(k.original):
- return False
- else:
- return True
- except KeyError:
- return False
-
- def register_dependencies(self, uowcommit, *args, **kwargs):
- """called by an instance of objectstore.UOWTransaction to register
- which mappers are dependent on which, as well as DependencyProcessor
- objects which will process lists of objects in between saves and deletes."""
- for prop in self.props.values():
- prop.register_dependencies(uowcommit, *args, **kwargs)
- if self.inherits is not None:
- uowcommit.register_dependency(self.inherits, self)
-
- def register_deleted(self, obj, uow):
- for prop in self.props.values():
- prop.register_deleted(obj, uow)
-
-
- def _identity_key(self, row):
- return objectstore.get_row_key(row, self.class_, self.pks_by_table[self.table], self.entity_name)
-
- def _instance(self, session, row, imap, result = None, populate_existing = False):
- """pulls an object instance from the given row and appends it to the given result
- list. if the instance already exists in the given identity map, its not added. in
- either case, executes all the property loaders on the instance to also process extra
- information in the row."""
- # look in main identity map. if its there, we dont do anything to it,
- # including modifying any of its related items lists, as its already
- # been exposed to being modified by the application.
-
- if session is None:
- session = objectstore.get_session()
-
- populate_existing = populate_existing or self.always_refresh
- identitykey = self._identity_key(row)
- if session.has_key(identitykey):
- instance = session._get(identitykey)
-
- isnew = False
- if populate_existing or session.is_expired(instance, unexpire=True):
- if not imap.has_key(identitykey):
- imap[identitykey] = instance
- for prop in self.props.values():
- prop.execute(session, instance, row, identitykey, imap, True)
- if self.extension.append_result(self, row, imap, result, instance, isnew, populate_existing=populate_existing) is EXT_PASS:
- if result is not None:
- result.append_nohistory(instance)
- return instance
-
- # look in result-local identitymap for it.
- exists = imap.has_key(identitykey)
- if not exists:
- # check if primary key cols in the result are None - this indicates
- # an instance of the object is not present in the row
- for col in self.pks_by_table[self.table]:
- if row[col] is None:
- return None
- # plugin point
- instance = self.extension.create_instance(self, row, imap, self.class_)
- if instance is EXT_PASS:
- instance = self._create_instance(session)
- imap[identitykey] = instance
- isnew = True
- else:
- instance = imap[identitykey]
- isnew = False
-
- # plugin point
-
- # call further mapper properties on the row, to pull further
- # instances from the row and possibly populate this item.
- if self.extension.populate_instance(self, session, instance, row, identitykey, imap, isnew) is EXT_PASS:
- self.populate_instance(session, instance, row, identitykey, imap, isnew)
- if self.extension.append_result(self, row, imap, result, instance, isnew, populate_existing=populate_existing) is EXT_PASS:
- if result is not None:
- result.append_nohistory(instance)
- return instance
-
- def _create_instance(self, session):
- if not self.construct_new:
- return self.class_(_mapper_nohistory=True, _sa_entity_name=self.entity_name, _sa_session=session)
-
- obj = self.class_.__new__(self.class_)
- obj._entity_name = self.entity_name
-
- # this gets the AttributeManager to do some pre-initialization,
- # in order to save on KeyErrors later on
- objectstore.global_attributes.init_attr(obj)
-
- session._bind_to(obj)
-
- return obj
-
- def translate_row(self, tomapper, row):
- """attempts to take a row and translate its values to a row that can
- be understood by another mapper. breaks the column references down to their
- bare keynames to accomplish this. So far this works for the various polymorphic
- examples."""
- newrow = util.DictDecorator(row)
- for c in self.table.c:
- newrow[c.name] = row[c]
- for c in tomapper.table.c:
- newrow[c] = newrow[c.name]
- return newrow
-
- def populate_instance(self, session, instance, row, identitykey, imap, isnew, frommapper=None):
- if frommapper is not None:
- row = frommapper.translate_row(self, row)
-
- for prop in self.props.values():
- prop.execute(session, instance, row, identitykey, imap, isnew)
-
-class MapperProperty(object):
- """an element attached to a Mapper that describes and assists in the loading and saving
- of an attribute on an object instance."""
- def execute(self, session, instance, row, identitykey, imap, isnew):
- """called when the mapper receives a row. instance is the parent instance
- corresponding to the row. """
- raise NotImplementedError()
- def copy(self):
- raise NotImplementedError()
- def get_criterion(self, key, value):
- """Returns a WHERE clause suitable for this MapperProperty corresponding to the
- given key/value pair, where the key is a column or object property name, and value
- is a value to be matched. This is only picked up by PropertyLoaders.
-
- this is called by a mappers select_by method to formulate a set of key/value pairs into
- a WHERE criterion that spans multiple tables if needed."""
- return None
- def hash_key(self):
- """describes this property and its instantiated arguments in such a way
-        as to uniquely identify the concept this MapperProperty represents, within
- a process."""
- raise NotImplementedError()
- def setup(self, key, statement, **options):
- """called when a statement is being constructed. """
- return self
- def init(self, key, parent):
- """called when the MapperProperty is first attached to a new parent Mapper."""
- self.key = key
- self.parent = parent
- self.do_init(key, parent)
- def do_init(self, key, parent):
- """template method for subclasses"""
- pass
- def register_deleted(self, object, uow):
- """called when the instance is being deleted"""
- pass
- def register_dependencies(self, *args, **kwargs):
- pass
- def is_primary(self):
- """a return value of True indicates we are the primary MapperProperty for this loader's
- attribute on our mapper's class. It means we can set the object's attribute behavior
- at the class level. otherwise we have to set attribute behavior on a per-instance level."""
- return self.parent._is_primary_mapper()
-
-class MapperOption(object):
- """describes a modification to a Mapper in the context of making a copy
- of it. This is used to assist in the prototype pattern used by mapper.options()."""
- def process(self, mapper):
- raise NotImplementedError()
- def hash_key(self):
- return repr(self)
-
-class ExtensionOption(MapperOption):
- """adds a new MapperExtension to a mapper's chain of extensions"""
- def __init__(self, ext):
- self.ext = ext
- def process(self, mapper):
- self.ext.next = mapper.extension
- mapper.extension = self.ext
-
-class MapperExtension(object):
- def __init__(self):
- self.next = None
- def chain(self, ext):
- self.next = ext
- return self
- def select_by(self, query, *args, **kwargs):
- """overrides the select_by method of the Query object"""
- if self.next is None:
- return EXT_PASS
- else:
- return self.next.select_by(query, *args, **kwargs)
- def select(self, query, *args, **kwargs):
- """overrides the select method of the Query object"""
- if self.next is None:
- return EXT_PASS
- else:
- return self.next.select(query, *args, **kwargs)
- def create_instance(self, mapper, row, imap, class_):
- """called when a new object instance is about to be created from a row.
- the method can choose to create the instance itself, or it can return
-        EXT_PASS to indicate normal object creation should take place.
-
- mapper - the mapper doing the operation
-
- row - the result row from the database
-
- imap - a dictionary that is storing the running set of objects collected from the
- current result set
-
- class_ - the class we are mapping.
- """
- if self.next is None:
- return EXT_PASS
- else:
- return self.next.create_instance(mapper, row, imap, class_)
- def append_result(self, mapper, row, imap, result, instance, isnew, populate_existing=False):
- """called when an object instance is being appended to a result list.
-
-        If this method returns EXT_PASS, the mapper appends the instance to the result
-        list itself; any other return value indicates the append was handled by this method.
-
- mapper - the mapper doing the operation
-
- row - the result row from the database
-
- imap - a dictionary that is storing the running set of objects collected from the
- current result set
-
- result - an instance of util.HistoryArraySet(), which may be an attribute on an
- object if this is a related object load (lazy or eager). use result.append_nohistory(value)
- to append objects to this list.
-
- instance - the object instance to be appended to the result
-
- isnew - indicates if this is the first time we have seen this object instance in the current result
- set. if you are selecting from a join, such as an eager load, you might see the same object instance
- many times in the same result set.
-
- populate_existing - usually False, indicates if object instances that were already in the main
- identity map, i.e. were loaded by a previous select(), get their attributes overwritten
- """
- if self.next is None:
- return EXT_PASS
- else:
- return self.next.append_result(mapper, row, imap, result, instance, isnew, populate_existing)
- def populate_instance(self, mapper, session, instance, row, identitykey, imap, isnew):
- """called right before the mapper, after creating an instance from a row, passes the row
- to its MapperProperty objects which are responsible for populating the object's attributes.
-        If this method returns EXT_PASS, the mapper populates the instance's attributes
-        itself; any other return value indicates the population was handled by this method.
-
- Essentially, this method is used to have a different mapper populate the object:
-
- def populate_instance(self, mapper, session, instance, row, identitykey, imap, isnew):
- othermapper.populate_instance(session, instance, row, identitykey, imap, isnew, frommapper=mapper)
- return True
- """
- if self.next is None:
- return EXT_PASS
- else:
- return self.next.populate_instance(mapper, session, instance, row, identitykey, imap, isnew)
- def before_insert(self, mapper, instance):
- """called before an object instance is INSERTed into its table.
-
- this is a good place to set up primary key values and such that arent handled otherwise."""
- if self.next is not None:
- self.next.before_insert(mapper, instance)
- def before_update(self, mapper, instance):
-        """called before an object instance is UPDATED"""
- if self.next is not None:
- self.next.before_update(mapper, instance)
- def after_update(self, mapper, instance):
-        """called after an object instance is UPDATED"""
- if self.next is not None:
- self.next.after_update(mapper, instance)
- def after_insert(self, mapper, instance):
- """called after an object instance has been INSERTed"""
- if self.next is not None:
- self.next.after_insert(mapper, instance)
- def before_delete(self, mapper, instance):
-        """called before an object instance is DELETEd"""
- if self.next is not None:
- self.next.before_delete(mapper, instance)
-
-class ClassKey(object):
- """keys a class and an entity name to a mapper, via the mapper_registry"""
- def __init__(self, class_, entity_name):
- self.class_ = class_
- self.entity_name = entity_name
- def __hash__(self):
- return hash((self.class_, self.entity_name))
- def __eq__(self, other):
- return self.class_ is other.class_ and self.entity_name == other.entity_name
-
-def hash_key(obj):
- if obj is None:
- return 'None'
- elif isinstance(obj, list):
- return repr([hash_key(o) for o in obj])
- elif hasattr(obj, 'hash_key'):
- return obj.hash_key()
- else:
- return repr(obj)
-
-def object_mapper(object):
- """given an object, returns the primary Mapper associated with the object
- or the object's class."""
- try:
- return mapper_registry[ClassKey(object.__class__, getattr(object, '_entity_name', None))]
- except KeyError:
- raise InvalidRequestError("Class '%s' entity name '%s' has no mapper associated with it" % (object.__class__.__name__, getattr(object, '_entity_name', None)))
-
-def class_mapper(class_, entity_name=None):
-    """given a class and optional entity_name, returns the primary Mapper associated with them."""
- try:
- return mapper_registry[ClassKey(class_, entity_name)]
- except (KeyError, AttributeError):
- raise InvalidRequestError("Class '%s' entity name '%s' has no mapper associated with it" % (class_.__name__, entity_name))
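
One note on the MapperExtension hooks defined above: they form a chain of responsibility, where each method either handles the call or delegates to self.next, and the EXT_PASS sentinel tells the calling mapper to fall through to its default behavior. A minimal hypothetical sketch of a custom extension under that protocol (SomeClass, some_table and the audit print are invented):

    class AuditExtension(MapperExtension):
        def before_insert(self, mapper, instance):
            print "about to INSERT", instance
            # delegate to the base class so the rest of the chain still runs
            MapperExtension.before_insert(self, mapper, instance)
        def create_instance(self, mapper, row, imap, class_):
            # returning EXT_PASS means "construct the instance normally"
            return EXT_PASS

    m = mapper(SomeClass, some_table, extension=AuditExtension())
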
diff --git a/lib/sqlalchemy/mapping/objectstore.py b/lib/sqlalchemy/mapping/objectstore.py
deleted file mode 100644
index 91f947085..000000000
--- a/lib/sqlalchemy/mapping/objectstore.py
+++ /dev/null
@@ -1,358 +0,0 @@
-# objectstore.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""provides the Session object and a function-oriented convenience interface. This is the
-"front-end" to the Unit of Work system in unitofwork.py. Issues of "scope" are dealt with here,
-primarily through an important function "get_session()", which is where mappers and units of work go to get a handle on the current thread-local context. """
-
-from sqlalchemy import util
-from sqlalchemy.exceptions import *
-import unitofwork
-import weakref
-import sqlalchemy
-
-class Session(object):
- """Maintains a UnitOfWork instance, including transaction state."""
-
- def __init__(self, hash_key=None, new_imap=True, import_session=None):
- """Initialize the objectstore with a UnitOfWork registry. If called
- with no arguments, creates a single UnitOfWork for all operations.
-
-        hash_key - the hash_key used to identify objects against this session, which
-        defaults to the id of the Session instance.
-        new_imap - if False, share the identity map of the current thread-local session.
-        import_session - an existing Session whose identity map will be shared.
- """
- if import_session is not None:
- self.uow = unitofwork.UnitOfWork(identity_map=import_session.uow.identity_map)
- elif new_imap is False:
- self.uow = unitofwork.UnitOfWork(identity_map=objectstore.get_session().uow.identity_map)
- else:
- self.uow = unitofwork.UnitOfWork()
-
- self.binds = {}
- if hash_key is None:
- self.hash_key = id(self)
- else:
- self.hash_key = hash_key
- _sessions[self.hash_key] = self
-
- def bind_table(self, table, bindto):
- self.binds[table] = bindto
-
- def get_id_key(ident, class_, entity_name=None):
- """returns an identity-map key for use in storing/retrieving an item from the identity
- map, given a tuple of the object's primary key values.
-
- ident - a tuple of primary key values corresponding to the object to be stored. these
- values should be in the same order as the primary keys of the table
-
- class_ - a reference to the object's class
-
- entity_name - optional string name to further qualify the class
- """
- return (class_, tuple(ident), entity_name)
- get_id_key = staticmethod(get_id_key)
-
- def get_row_key(row, class_, primary_key, entity_name=None):
- """returns an identity-map key for use in storing/retrieving an item from the identity
- map, given a result set row.
-
-        row - a sqlalchemy.dbengine.RowProxy instance or other mapping of result-set
- column names to their values within a row.
-
- class_ - a reference to the object's class
-
- primary_key - a list of column objects that will target the primary key values
- in the given row.
-
- entity_name - optional string name to further qualify the class
- """
- return (class_, tuple([row[column] for column in primary_key]), entity_name)
- get_row_key = staticmethod(get_row_key)
-
- def engines(self, mapper):
- return [t.engine for t in mapper.tables]
-
- def flush(self, *obj):
- self.uow.flush(self, *obj)
-
- def refresh(self, *obj):
-        """reloads the attributes for the given objects from the database, and clears
- any changes made."""
- for o in obj:
- self.uow.refresh(o)
-
- def expire(self, *obj):
- """invalidates the data in the given objects and sets them to refresh themselves
- the next time they are requested."""
- for o in obj:
- self.uow.expire(o)
-
- def expunge(self, *obj):
- for o in obj:
- self.uow.expunge(o)
-
- def register_clean(self, obj):
- self._bind_to(obj)
- self.uow.register_clean(obj)
-
- def register_new(self, obj):
- self._bind_to(obj)
- self.uow.register_new(obj)
-
- def _bind_to(self, obj):
- """given an object, binds it to this session. changes on the object will affect
- the currently scoped UnitOfWork maintained by this session."""
- obj._sa_session_id = self.hash_key
-
- def __getattr__(self, key):
- """proxy other methods to our underlying UnitOfWork"""
- return getattr(self.uow, key)
-
- def clear(self):
- self.uow = unitofwork.UnitOfWork()
-
- def delete(self, *obj):
- """registers the given objects as to be deleted upon the next commit"""
- for o in obj:
- self.uow.register_deleted(o)
-
- def import_instance(self, instance):
- """places the given instance in the current thread's unit of work context,
- either in the current IdentityMap or marked as "new". Returns either the object
- or the current corresponding version in the Identity Map.
-
- this method should be used for any object instance that is coming from a serialized
- storage, from another thread (assuming the regular threaded unit of work model), or any
- case where the instance was loaded/created corresponding to a different base unitofwork
- than the current one."""
- if instance is None:
- return None
- key = getattr(instance, '_instance_key', None)
- mapper = object_mapper(instance)
- u = self.uow
- if key is not None:
- if u.identity_map.has_key(key):
- return u.identity_map[key]
- else:
- instance._instance_key = key
- u.identity_map[key] = instance
- self._bind_to(instance)
- else:
- u.register_new(instance)
- return instance
-
-class LegacySession(Session):
- def __init__(self, nest_on=None, hash_key=None, **kwargs):
- super(LegacySession, self).__init__(**kwargs)
- self.parent_uow = None
- self.begin_count = 0
- self.nest_on = util.to_list(nest_on)
- self.__pushed_count = 0
- def was_pushed(self):
- if self.nest_on is None:
- return
- self.__pushed_count += 1
- if self.__pushed_count == 1:
- for n in self.nest_on:
- n.push_session()
- def was_popped(self):
- if self.nest_on is None or self.__pushed_count == 0:
- return
- self.__pushed_count -= 1
- if self.__pushed_count == 0:
- for n in self.nest_on:
- n.pop_session()
- class SessionTrans(object):
- """returned by Session.begin(), denotes a transactionalized UnitOfWork instance.
- call commit() on this to commit the transaction."""
- def __init__(self, parent, uow, isactive):
- self.__parent = parent
- self.__isactive = isactive
- self.__uow = uow
- isactive = property(lambda s:s.__isactive, doc="True if this SessionTrans is the 'active' transaction marker, else its a no-op.")
- parent = property(lambda s:s.__parent, doc="returns the parent Session of this SessionTrans object.")
- uow = property(lambda s:s.__uow, doc="returns the parent UnitOfWork corresponding to this transaction.")
- def begin(self):
- """calls begin() on the underlying Session object, returning a new no-op SessionTrans object."""
- if self.parent.uow is not self.uow:
- raise InvalidRequestError("This SessionTrans is no longer valid")
- return self.parent.begin()
- def commit(self):
- """commits the transaction noted by this SessionTrans object."""
- self.__parent._trans_commit(self)
- self.__isactive = False
- def rollback(self):
- """rolls back the current UnitOfWork transaction, in the case that begin()
- has been called. The changes logged since the begin() call are discarded."""
- self.__parent._trans_rollback(self)
- self.__isactive = False
- def begin(self):
-        """begins a new UnitOfWork transaction and returns a transaction-holding
- object. commit() or rollback() should be called on the returned object.
- commit() on the Session will do nothing while a transaction is pending, and further
- calls to begin() will return no-op transactional objects."""
- if self.parent_uow is not None:
- return Session.SessionTrans(self, self.uow, False)
- self.parent_uow = self.uow
- self.uow = unitofwork.UnitOfWork(identity_map = self.uow.identity_map)
- return Session.SessionTrans(self, self.uow, True)
- def commit(self, *objects):
-        """commits the current UnitOfWork transaction. called with
-        no arguments, this is only used for "implicit" transactions
-        when there was no begin(). if individual objects are submitted,
-        then only those objects are committed, and the begin/commit
-        cycle is not affected."""
- # if an object list is given, commit just those but dont
- # change begin/commit status
- if len(objects):
- self._commit_uow(*objects)
- self.uow.flush(self, *objects)
- return
- if self.parent_uow is None:
- self._commit_uow()
- def _trans_commit(self, trans):
- if trans.uow is self.uow and trans.isactive:
- try:
- self._commit_uow()
- finally:
- self.uow = self.parent_uow
- self.parent_uow = None
- def _trans_rollback(self, trans):
- if trans.uow is self.uow:
- self.uow = self.parent_uow
- self.parent_uow = None
- def _commit_uow(self, *obj):
- self.was_pushed()
- try:
- self.uow.flush(self, *obj)
- finally:
- self.was_popped()
-
-Session = LegacySession
-
-def get_id_key(ident, class_, entity_name=None):
- return Session.get_id_key(ident, class_, entity_name)
-
-def get_row_key(row, class_, primary_key, entity_name=None):
- return Session.get_row_key(row, class_, primary_key, entity_name)
-
-def begin():
- """deprecated. use s = Session(new_imap=False)."""
- return get_session().begin()
-
-def commit(*obj):
- """deprecated; use flush(*obj)"""
- get_session().flush(*obj)
-
-def flush(*obj):
- """flushes the current UnitOfWork transaction. if a transaction was begun
- via begin(), flushes only those objects that were created, modified, or deleted
- since that begin statement. otherwise flushes all objects that have been
- changed.
-
-    if individual objects are submitted, then only those objects are flushed, and the
- begin/commit cycle is not affected."""
- get_session().flush(*obj)
-
-def clear():
- """removes all current UnitOfWorks and IdentityMaps for this thread and
- establishes a new one. It is probably a good idea to discard all
- current mapped object instances, as they are no longer in the Identity Map."""
- get_session().clear()
-
-def refresh(*obj):
- """reloads the state of this object from the database, and cancels any in-memory
- changes."""
- get_session().refresh(*obj)
-
-def expire(*obj):
- """invalidates the data in the given objects and sets them to refresh themselves
- the next time they are requested."""
- get_session().expire(*obj)
-
-def expunge(*obj):
- get_session().expunge(*obj)
-
-def delete(*obj):
- """registers the given objects as to be deleted upon the next commit"""
- s = get_session().delete(*obj)
-
-def has_key(key):
- """returns True if the current thread-local IdentityMap contains the given instance key"""
- return get_session().has_key(key)
-
-def has_instance(instance):
- """returns True if the current thread-local IdentityMap contains the given instance"""
- return get_session().has_instance(instance)
-
-def is_dirty(obj):
- """returns True if the given object is in the current UnitOfWork's new or dirty list,
- or if it's a modified list attribute on an object."""
- return get_session().is_dirty(obj)
-
-def instance_key(instance):
- """returns the IdentityMap key for the given instance"""
- return get_session().instance_key(instance)
-
-def import_instance(instance):
- return get_session().import_instance(instance)
-
-def mapper(*args, **params):
- return sqlalchemy.mapping.mapper(*args, **params)
-
-def object_mapper(obj):
- return sqlalchemy.mapping.object_mapper(obj)
-
-def class_mapper(class_):
- return sqlalchemy.mapping.class_mapper(class_)
-
-global_attributes = unitofwork.global_attributes
-
-session_registry = util.ScopedRegistry(Session) # Default session registry
-_sessions = weakref.WeakValueDictionary() # all referenced sessions (including user-created)
-
-def get_session(obj=None):
- # object-specific session ?
- if obj is not None:
- # does it have a hash key ?
- hashkey = getattr(obj, '_sa_session_id', None)
- if hashkey is not None:
- # ok, return that
- try:
- return _sessions[hashkey]
- except KeyError:
- raise InvalidRequestError("Session '%s' referenced by object '%s' no longer exists" % (hashkey, repr(obj)))
-
- return session_registry()
-
-unitofwork.get_session = get_session
-uow = get_session # deprecated
-
-def push_session(sess):
- old = get_session()
- if getattr(sess, '_previous', None) is not None:
- raise InvalidRequestError("Given Session is already pushed onto some thread's stack")
- sess._previous = old
- session_registry.set(sess)
- sess.was_pushed()
-
-def pop_session():
- sess = get_session()
- old = sess._previous
- sess._previous = None
- session_registry.set(old)
- sess.was_popped()
- return old
-
-def using_session(sess, func):
- push_session(sess)
- try:
- return func()
- finally:
- pop_session()
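-# an illustrative sketch (not in the original source; the Session and query
-# names are assumed): using_session pushes the given Session for the duration
-# of the callable and pops it afterwards, even if the callable raises.
-#
-# result = using_session(Session(), lambda: query.select_by(user_name='fred'))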
-
diff --git a/lib/sqlalchemy/mapping/properties.py b/lib/sqlalchemy/mapping/properties.py
deleted file mode 100644
index b7ff9fbb2..000000000
--- a/lib/sqlalchemy/mapping/properties.py
+++ /dev/null
@@ -1,980 +0,0 @@
-# properties.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""defines a set of MapperProperty objects, including basic column properties as
-well as relationships. also defines some MapperOptions that can be used with the
-properties."""
-
-from mapper import *
-import sqlalchemy.sql as sql
-import sqlalchemy.schema as schema
-import sqlalchemy.engine as engine
-import sqlalchemy.util as util
-import sqlalchemy.attributes as attributes
-import sync
-import mapper
-import objectstore
-from sqlalchemy.exceptions import *
-import random
-
-class ColumnProperty(MapperProperty):
- """describes an object attribute that corresponds to a table column."""
- def __init__(self, *columns):
- """the list of columns describes a single object property. if there
- are multiple tables joined together for the mapper, this list represents
- the equivalent column as it appears across each table."""
- self.columns = list(columns)
- def getattr(self, object):
- return getattr(object, self.key, None)
- def setattr(self, object, value):
- setattr(object, self.key, value)
- def get_history(self, obj, passive=False):
- return objectstore.global_attributes.get_history(obj, self.key, passive=passive)
- def copy(self):
- return ColumnProperty(*self.columns)
- def setup(self, key, statement, eagertable=None, **options):
- for c in self.columns:
- if eagertable is not None:
- statement.append_column(eagertable._get_col_by_original(c))
- else:
- statement.append_column(c)
- def do_init(self, key, parent):
- self.key = key
- # establish a SmartProperty property manager on the object for this key
- if parent._is_primary_mapper():
- #print "regiser col on class %s key %s" % (parent.class_.__name__, key)
- objectstore.uow().register_attribute(parent.class_, key, uselist = False)
- def execute(self, session, instance, row, identitykey, imap, isnew):
- if isnew:
- #print "POPULATING OBJ", instance.__class__.__name__, "COL", self.columns[0]._label, "WITH DATA", row[self.columns[0]], "ROW IS A", row.__class__.__name__, "COL ID", id(self.columns[0])
- instance.__dict__[self.key] = row[self.columns[0]]
- def __repr__(self):
- return "ColumnProperty(%s)" % repr([str(c) for c in self.columns])
-
-class DeferredColumnProperty(ColumnProperty):
- """describes an object attribute that corresponds to a table column, which also
- will "lazy load" its value from the table. this is per-column lazy loading."""
- def __init__(self, *columns, **kwargs):
- self.group = kwargs.get('group', None)
- ColumnProperty.__init__(self, *columns)
- def copy(self):
- return DeferredColumnProperty(group=self.group, *self.columns)
- def do_init(self, key, parent):
- self.key = key
- self.parent = parent
- # establish a SmartProperty property manager on the object for this key,
- # containing a callable to load in the attribute
- if self.is_primary():
- objectstore.uow().register_attribute(parent.class_, key, uselist=False, callable_=lambda i:self.setup_loader(i))
- def setup_loader(self, instance):
- def lazyload():
- clause = sql.and_()
- try:
- pk = self.parent.pks_by_table[self.columns[0].table]
- except KeyError:
- pk = self.columns[0].table.primary_key
- for primary_key in pk:
- attr = self.parent._getattrbycolumn(instance, primary_key)
- if not attr:
- return None
- clause.clauses.append(primary_key == attr)
-
- if self.group is not None:
- groupcols = [p for p in self.parent.props.values() if isinstance(p, DeferredColumnProperty) and p.group==self.group]
- row = sql.select([g.columns[0] for g in groupcols], clause, use_labels=True).execute().fetchone()
- for prop in groupcols:
- if prop is self:
- continue
- instance.__dict__[prop.key] = row[prop.columns[0]]
- objectstore.global_attributes.create_history(instance, prop.key, uselist=False)
- return row[self.columns[0]]
- else:
- return sql.select([self.columns[0]], clause, use_labels=True).scalar()
- return lazyload
- def setup(self, key, statement, **options):
- pass
- def execute(self, session, instance, row, identitykey, imap, isnew):
- if isnew:
- if not self.is_primary():
- objectstore.global_attributes.create_history(instance, self.key, False, callable_=self.setup_loader(instance))
- else:
- objectstore.global_attributes.reset_history(instance, self.key)
-
-mapper.ColumnProperty = ColumnProperty
-
-class PropertyLoader(MapperProperty):
- """describes an object property that holds a single item or list of items that correspond
- to a related database table."""
- ONETOMANY = 0
- MANYTOONE = 1
- MANYTOMANY = 2
-
- def __init__(self, argument, secondary, primaryjoin, secondaryjoin, foreignkey=None, uselist=None, private=False, association=None, use_alias=None, selectalias=None, order_by=False, attributeext=None, backref=None, is_backref=False, post_update=False):
- self.uselist = uselist
- self.argument = argument
- self.secondary = secondary
- self.primaryjoin = primaryjoin
- self.secondaryjoin = secondaryjoin
- self.post_update = post_update
- self.direction = None
-
- # would like to have foreignkey be a list.
- # however, have to figure out how to do
- # <column> in <list>, since Column overrides the == operator or something
- # and it doesn't work
- self.foreignkey = foreignkey #util.to_set(foreignkey)
- if foreignkey:
- self.foreigntable = foreignkey.table
- else:
- self.foreigntable = None
-
- self.private = private
- self.association = association
- if selectalias is not None:
- print "'selectalias' argument to relation() is deprecated. eager loads automatically alias-ize tables now."
- if use_alias is not None:
- print "'use_alias' argument to relation() is deprecated. eager loads automatically alias-ize tables now."
- self.order_by = order_by
- self.attributeext=attributeext
- if isinstance(backref, str):
- self.backref = BackRef(backref)
- else:
- self.backref = backref
- self.is_backref = is_backref
-
- def copy(self):
- x = self.__class__.__new__(self.__class__)
- x.__dict__.update(self.__dict__)
- return x
-
- def do_init_subclass(self, key, parent):
- """template method for subclasses of PropertyLoader"""
- pass
-
- def do_init(self, key, parent):
- import sqlalchemy.mapping
- if isinstance(self.argument, type):
- self.mapper = sqlalchemy.mapping.class_mapper(self.argument)
- else:
- self.mapper = self.argument
-
- if self.association is not None:
- if isinstance(self.association, type):
- self.association = sqlalchemy.mapping.class_mapper(self.association)
-
- self.target = self.mapper.table
- self.key = key
- self.parent = parent
-
- if self.secondaryjoin is not None and self.secondary is None:
- raise ArgumentError("Property '" + self.key + "' specified with secondary join condition but no secondary argument")
- # if join conditions were not specified, figure them out based on foreign keys
- if self.secondary is not None:
- if self.secondaryjoin is None:
- self.secondaryjoin = sql.join(self.mapper.noninherited_table, self.secondary).onclause
- if self.primaryjoin is None:
- self.primaryjoin = sql.join(parent.noninherited_table, self.secondary).onclause
- else:
- if self.primaryjoin is None:
- self.primaryjoin = sql.join(parent.noninherited_table, self.target).onclause
- # if the foreign key wasn't specified and there's no association table, try to figure
- # out who is dependent on who. we don't need all the foreign keys represented in the join,
- # just one of them.
- if self.foreignkey is None and self.secondaryjoin is None:
- # else we usually will have a one-to-many where the secondary depends on the primary
- # but it's possible that it's reversed
- self._find_dependent()
-
- # if we are re-initializing, as in a copy made for an inheriting
- # mapper, don't re-evaluate the direction.
- if self.direction is None:
- self.direction = self._get_direction()
-
- if self.uselist is None and self.direction == PropertyLoader.MANYTOONE:
- self.uselist = False
-
- if self.uselist is None:
- self.uselist = True
-
- self._compile_synchronizers()
-
- # primary property handler, set up class attributes
- if self.is_primary():
- # if a backref name is defined, set up an extension to populate
- # attributes in the other direction
- if self.backref is not None:
- self.attributeext = self.backref.get_extension()
-
- # set our class attribute
- self._set_class_attribute(parent.class_, key)
-
- if self.backref is not None:
- self.backref.compile(self)
- elif not objectstore.global_attributes.is_class_managed(parent.class_, key):
- raise ArgumentError("Non-primary property created for attribute '%s' on class '%s', but that attribute is not managed! Insure that the primary mapper for this class defines this property" % (key, parent.class_.__name__))
-
- self.do_init_subclass(key, parent)
-
- def _set_class_attribute(self, class_, key):
- """sets attribute behavior on our target class."""
- objectstore.uow().register_attribute(class_, key, uselist = self.uselist, deleteremoved = self.private, extension=self.attributeext)
-
- def _get_direction(self):
- """determines our 'direction', i.e. do we represent one to many, many to many, etc."""
- #print self.key, repr(self.parent.table.name), repr(self.parent.primarytable.name), repr(self.foreignkey.table.name), repr(self.target), repr(self.foreigntable.name)
-
- if self.secondaryjoin is not None:
- return PropertyLoader.MANYTOMANY
- elif self.parent.table is self.target:
- if self.foreignkey.primary_key:
- return PropertyLoader.MANYTOONE
- else:
- return PropertyLoader.ONETOMANY
- elif self.foreigntable == self.mapper.noninherited_table:
- return PropertyLoader.ONETOMANY
- elif self.foreigntable == self.parent.noninherited_table:
- return PropertyLoader.MANYTOONE
- else:
- raise ArgumentError("Cant determine relation direction")
-
- def _find_dependent(self):
- """searches through the primary join condition to determine which side
- has the primary key and which has the foreign key - from this we return
- the "foreign key" for this property which helps determine one-to-many/many-to-one."""
-
- # set as a reference to allow assignment from inside a first-class function
- dependent = [None]
- def foo(binary):
- if binary.operator != '=':
- return
- if isinstance(binary.left, schema.Column) and binary.left.primary_key:
- if dependent[0] is binary.left.table:
- raise ArgumentError("bidirectional dependency not supported...specify foreignkey")
- dependent[0] = binary.right.table
- self.foreignkey = binary.right
- elif isinstance(binary.right, schema.Column) and binary.right.primary_key:
- if dependent[0] is binary.right.table:
- raise ArgumentError("bidirectional dependency not supported...specify foreignkey")
- dependent[0] = binary.left.table
- self.foreignkey = binary.left
- visitor = BinaryVisitor(foo)
- self.primaryjoin.accept_visitor(visitor)
- if dependent[0] is None:
- raise ArgumentError("cant determine primary foreign key in the join relationship....specify foreignkey=<column> or foreignkey=[<columns>]")
- else:
- self.foreigntable = dependent[0]
-
-
- def get_criterion(self, key, value):
- """given a key/value pair, determines if this PropertyLoader's mapper contains a key of the
- given name in its property list, or if this PropertyLoader's association mapper, if any,
- contains a key of the given name in its property list, and returns a WHERE clause against
- the given value if found.
-
- this is called by a mapper's select_by method to formulate a set of key/value pairs into
- a WHERE criterion that spans multiple tables if needed."""
- # TODO: optimization: change mapper to accept a WHERE clause with separate bind parameters
- # then cache the generated WHERE clauses here, since the creation + the copy_container
- # is an extra expense
- if self.mapper.props.has_key(key):
- if self.secondaryjoin is not None:
- c = (self.mapper.props[key].columns[0]==value) & self.primaryjoin & self.secondaryjoin
- else:
- c = (self.mapper.props[key].columns[0]==value) & self.primaryjoin
- return c.copy_container()
- elif self.mapper.table.c.has_key(key):
- if self.secondaryjoin is not None:
- c = (self.mapper.table.c[key]==value) & self.primaryjoin & self.secondaryjoin
- else:
- c = (self.mapper.table.c[key]==value) & self.primaryjoin
- return c.copy_container()
- elif self.association is not None:
- c = self.mapper._get_criterion(key, value) & self.primaryjoin
- return c.copy_container()
- return None
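- # e.g. (hypothetical names): given an 'addresses' relation on a User mapper,
- # usermapper.select_by(street='123 Green Street') finds 'street' on the
- # related mapper and uses this criterion to span the join.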
-
- def register_deleted(self, obj, uow):
- if not self.private:
- return
-
- if self.uselist:
- childlist = uow.attributes.get_history(obj, self.key, passive = False)
- else:
- childlist = uow.attributes.get_history(obj, self.key)
- for child in childlist.deleted_items() + childlist.unchanged_items():
- if child is not None:
- uow.register_deleted(child)
-
- class MapperStub(object):
- """poses as a Mapper representing the association table in a many-to-many
- join, when performing a commit().
-
- The Task objects in the objectstore module treat it just like
- any other Mapper, but in fact it only serves as a "dependency" placeholder
- for the many-to-many update task."""
- def __init__(self, mapper):
- self.mapper = mapper
- def save_obj(self, *args, **kwargs):
- pass
- def delete_obj(self, *args, **kwargs):
- pass
- def _primary_mapper(self):
- return self
-
- def register_dependencies(self, uowcommit):
- """tells a UOWTransaction what mappers are dependent on which, with regards
- to the two or three mappers handled by this PropertyLoader.
-
- Also registers itself as a "processor" for one of its mappers, which
- will be executed after that mapper's objects have been saved or before
- they've been deleted. The process operation manages attributes and dependent
- operations upon the objects of one of the involved mappers."""
- if self.association is not None:
- # association object. our mapper should be dependent on both
- # the parent mapper and the association object mapper.
- # this is where we put the "stub" as a marker, so we get
- # association/parent->stub->self, then we process the child
- # elements after the 'stub' save, which is before our own
- # mapper's save.
- stub = PropertyLoader.MapperStub(self.association)
- uowcommit.register_dependency(self.parent, stub)
- uowcommit.register_dependency(self.association, stub)
- uowcommit.register_dependency(stub, self.mapper)
- uowcommit.register_processor(stub, self, self.parent, False)
- uowcommit.register_processor(stub, self, self.parent, True)
-
- elif self.direction == PropertyLoader.MANYTOMANY:
- # many-to-many. create a "Stub" mapper to represent the
- # "middle table" in the relationship. This stub mapper doesnt save
- # or delete any objects, but just marks a dependency on the two
- # related mappers. its dependency processor then populates the
- # association table.
-
- if self.is_backref:
- # if we are the "backref" half of a two-way backref
- # relationship, let the other mapper handle inserting the rows
- return
- stub = PropertyLoader.MapperStub(self.mapper)
- uowcommit.register_dependency(self.parent, stub)
- uowcommit.register_dependency(self.mapper, stub)
- uowcommit.register_processor(stub, self, self.parent, False)
- uowcommit.register_processor(stub, self, self.parent, True)
- elif self.direction == PropertyLoader.ONETOMANY:
- if self.post_update:
- stub = PropertyLoader.MapperStub(self.mapper)
- uowcommit.register_dependency(self.mapper, stub)
- uowcommit.register_dependency(self.parent, stub)
- uowcommit.register_processor(stub, self, self.parent, False)
- uowcommit.register_processor(stub, self, self.parent, True)
- else:
- uowcommit.register_dependency(self.parent, self.mapper)
- uowcommit.register_processor(self.parent, self, self.parent, False)
- uowcommit.register_processor(self.parent, self, self.parent, True)
- elif self.direction == PropertyLoader.MANYTOONE:
- if self.post_update:
- stub = PropertyLoader.MapperStub(self.mapper)
- uowcommit.register_dependency(self.mapper, stub)
- uowcommit.register_dependency(self.parent, stub)
- uowcommit.register_processor(stub, self, self.parent, False)
- uowcommit.register_processor(stub, self, self.parent, True)
- else:
- uowcommit.register_dependency(self.mapper, self.parent)
- uowcommit.register_processor(self.mapper, self, self.parent, False)
- uowcommit.register_processor(self.mapper, self, self.parent, True)
- else:
- raise AssertionError(" no foreign key ?")
-
- def get_object_dependencies(self, obj, uowcommit, passive = True):
- return uowcommit.uow.attributes.get_history(obj, self.key, passive = passive)
-
- def whose_dependent_on_who(self, obj1, obj2):
- """given an object pair assuming obj2 is a child of obj1, returns a tuple
- with the dependent object second, or None if they are equal.
- used by objectstore's object-level topological sort (i.e. cyclical
- table dependency)."""
- if obj1 is obj2:
- return None
- elif self.direction == PropertyLoader.ONETOMANY:
- return (obj1, obj2)
- else:
- return (obj2, obj1)
-
- def process_dependencies(self, task, deplist, uowcommit, delete = False):
- """this method is called during a commit operation to synchronize data between a parent and child object.
- it also can establish child or parent objects within the unit of work as "to be saved" or "deleted"
- in some cases."""
- #print self.mapper.table.name + " " + self.key + " " + repr(len(deplist)) + " process_dep isdelete " + repr(delete) + " direction " + repr(self.direction)
-
- def getlist(obj, passive=True):
- l = self.get_object_dependencies(obj, uowcommit, passive)
- uowcommit.register_saved_history(l)
- return l
-
- # plugin point
-
- if self.direction == PropertyLoader.MANYTOMANY:
- secondary_delete = []
- secondary_insert = []
- if delete:
- for obj in deplist:
- childlist = getlist(obj, False)
- for child in childlist.deleted_items() + childlist.unchanged_items():
- associationrow = {}
- self._synchronize(obj, child, associationrow, False)
- secondary_delete.append(associationrow)
- else:
- for obj in deplist:
- childlist = getlist(obj)
- if childlist is None: continue
- for child in childlist.added_items():
- associationrow = {}
- self._synchronize(obj, child, associationrow, False)
- secondary_insert.append(associationrow)
- for child in childlist.deleted_items():
- associationrow = {}
- self._synchronize(obj, child, associationrow, False)
- secondary_delete.append(associationrow)
- if len(secondary_delete):
- # TODO: precompile the delete/insert queries and store them as instance variables
- # on the PropertyLoader
- statement = self.secondary.delete(sql.and_(*[c == sql.bindparam(c.key) for c in self.secondary.c]))
- statement.execute(*secondary_delete)
- if len(secondary_insert):
- statement = self.secondary.insert()
- statement.execute(*secondary_insert)
- elif self.direction == PropertyLoader.MANYTOONE and delete:
- if self.private:
- for obj in deplist:
- childlist = getlist(obj, False)
- for child in childlist.deleted_items() + childlist.unchanged_items():
- if child is None:
- continue
- # if private child object, and is in the uow's "deleted" list,
- # insure its in the list of items to be deleted
- if child in uowcommit.uow.deleted:
- uowcommit.register_object(child, isdelete=True)
- elif self.post_update:
- # post_update means we have to update our row to not reference the child object
- # before we can DELETE the row
- for obj in deplist:
- self._synchronize(obj, None, None, True)
- uowcommit.register_object(obj, postupdate=True)
- elif self.direction == PropertyLoader.ONETOMANY and delete:
- # head object is being deleted, and we manage its list of child objects
- # the child objects have to have their foreign key to the parent set to NULL
- if self.private and not self.post_update:
- for obj in deplist:
- childlist = getlist(obj, False)
- for child in childlist.deleted_items() + childlist.unchanged_items():
- if child is None:
- continue
- # if private child object, and is in the uow's "deleted" list,
- # insure its in the list of items to be deleted
- if child in uowcommit.uow.deleted:
- uowcommit.register_object(child, isdelete=True)
- else:
- for obj in deplist:
- childlist = getlist(obj, False)
- for child in childlist.deleted_items() + childlist.unchanged_items():
- if child is not None:
- self._synchronize(obj, child, None, True)
- uowcommit.register_object(child, postupdate=self.post_update)
- elif self.association is not None:
- # manage association objects.
- for obj in deplist:
- childlist = getlist(obj, passive=True)
- if childlist is None: continue
-
- #print "DIRECTION", self.direction
- d = {}
- for child in childlist:
- self._synchronize(obj, child, None, False)
- key = self.mapper.instance_key(child)
- #print "SYNCHRONIZED", child, "INSTANCE KEY", key
- d[key] = child
- uowcommit.unregister_object(child)
-
- for child in childlist.added_items():
- uowcommit.register_object(child)
- key = self.mapper.instance_key(child)
- #print "ADDED, INSTANCE KEY", key
- d[key] = child
-
- for child in childlist.unchanged_items():
- key = self.mapper.instance_key(child)
- o = d[key]
- o._instance_key = key
-
- for child in childlist.deleted_items():
- key = self.mapper.instance_key(child)
- #print "DELETED, INSTANCE KEY", key
- if d.has_key(key):
- o = d[key]
- o._instance_key = key
- uowcommit.unregister_object(child)
- else:
- #print "DELETE ASSOC OBJ", repr(child)
- uowcommit.register_object(child, isdelete=True)
- else:
- for obj in deplist:
- childlist = getlist(obj, passive=True)
- if childlist is not None:
- for child in childlist.added_items():
- self._synchronize(obj, child, None, False)
- if self.direction == PropertyLoader.ONETOMANY and child is not None:
- uowcommit.register_object(child, postupdate=self.post_update)
- if self.direction == PropertyLoader.MANYTOONE:
- uowcommit.register_object(obj, postupdate=self.post_update)
- if self.direction != PropertyLoader.MANYTOONE:
- for child in childlist.deleted_items():
- if not self.private:
- self._synchronize(obj, child, None, True)
- uowcommit.register_object(child, isdelete=self.private)
-
- def execute(self, session, instance, row, identitykey, imap, isnew):
- if self.is_primary():
- return
- #print "PLAIN PROPLOADER EXEC NON-PRIAMRY", repr(id(self)), repr(self.mapper.class_), self.key
- objectstore.global_attributes.create_history(instance, self.key, self.uselist)
-
- def _compile_synchronizers(self):
- """assembles a list of 'synchronization rules', which are instructions on how to populate
- the objects on each side of a relationship. This is done when a PropertyLoader is
- first initialized.
-
- The list of rules is used within commits by the _synchronize() method when dependent
- objects are processed."""
-
-
- parent_tables = util.HashSet(self.parent.tables + [self.parent.primarytable])
- target_tables = util.HashSet(self.mapper.tables + [self.mapper.primarytable])
-
- self.syncrules = sync.ClauseSynchronizer(self.parent, self.mapper, self.direction)
- if self.direction == PropertyLoader.MANYTOMANY:
- #print "COMPILING p/c", self.parent, self.mapper
- self.syncrules.compile(self.primaryjoin, parent_tables, [self.secondary], False)
- self.syncrules.compile(self.secondaryjoin, target_tables, [self.secondary], True)
- else:
- self.syncrules.compile(self.primaryjoin, parent_tables, target_tables)
-
- def _synchronize(self, obj, child, associationrow, clearkeys):
- """called during a commit to execute the full list of syncrules on the
- given object/child/optional association row"""
- if self.direction == PropertyLoader.ONETOMANY:
- source = obj
- dest = child
- elif self.direction == PropertyLoader.MANYTOONE:
- source = child
- dest = obj
- elif self.direction == PropertyLoader.MANYTOMANY:
- dest = associationrow
- source = None
-
- if dest is None:
- return
-
- self.syncrules.execute(source, dest, obj, child, clearkeys)
-
-class LazyLoader(PropertyLoader):
- def do_init_subclass(self, key, parent):
- (self.lazywhere, self.lazybinds, self.lazyreverse) = create_lazy_clause(self.parent.noninherited_table, self.primaryjoin, self.secondaryjoin, self.foreignkey)
- # determine if our "lazywhere" clause is the same as the mapper's
- # get() clause. then we can just use mapper.get()
- self.use_get = not self.uselist and self.mapper.query._get_clause.compare(self.lazywhere)
-
- def _set_class_attribute(self, class_, key):
- # establish a class-level lazy loader on our class
- #print "SETCLASSATTR LAZY", repr(class_), key
- objectstore.global_attributes.register_attribute(class_, key, uselist = self.uselist, deleteremoved = self.private, callable_=lambda i: self.setup_loader(i), extension=self.attributeext)
-
- def setup_loader(self, instance):
- if not self.parent.is_assigned(instance):
- return object_mapper(instance).props[self.key].setup_loader(instance)
- def lazyload():
- params = {}
- allparams = True
- session = objectstore.get_session(instance)
- #print "setting up loader, lazywhere", str(self.lazywhere)
- for col, bind in self.lazybinds.iteritems():
- params[bind.key] = self.parent._getattrbycolumn(instance, col)
- if params[bind.key] is None:
- allparams = False
- break
- if allparams:
- # if we have a simple straight-primary key load, use mapper.get()
- # to possibly save a DB round trip
- if self.use_get:
- ident = []
- for primary_key in self.mapper.pks_by_table[self.mapper.table]:
- bind = self.lazyreverse[primary_key]
- ident.append(params[bind.key])
- return self.mapper.using(session).get(*ident)
- elif self.order_by is not False:
- order_by = self.order_by
- elif self.secondary is not None and self.secondary.default_order_by() is not None:
- order_by = self.secondary.default_order_by()
- else:
- order_by = False
- result = self.mapper.using(session).select_whereclause(self.lazywhere, order_by=order_by, params=params)
- else:
- result = []
- if self.uselist:
- return result
- else:
- if len(result):
- return result[0]
- else:
- return None
- return lazyload
-
- def execute(self, session, instance, row, identitykey, imap, isnew):
- if isnew:
- # new object instance being loaded from a result row
- if not self.is_primary():
- #print "EXEC NON-PRIAMRY", repr(self.mapper.class_), self.key
- # we are not the primary manager for this attribute on this class - set up a per-instance lazyloader,
- # which will override the class-level behavior
- objectstore.global_attributes.create_history(instance, self.key, self.uselist, callable_=self.setup_loader(instance))
- else:
- #print "EXEC PRIMARY", repr(self.mapper.class_), self.key
- # we are the primary manager for this attribute on this class - reset its per-instance attribute state,
- # so that the class-level lazy loader is executed when next referenced on this instance.
- # this usually is not needed unless the constructor of the object referenced the attribute before we got
- # to load data into it.
- objectstore.global_attributes.reset_history(instance, self.key)
-
-def create_lazy_clause(table, primaryjoin, secondaryjoin, foreignkey):
- binds = {}
- reverselookup = {}
-
- def bind_label():
- return "lazy_" + hex(random.randint(0, 65535))[2:]
-
- def visit_binary(binary):
- circular = isinstance(binary.left, schema.Column) and isinstance(binary.right, schema.Column) and binary.left.table is binary.right.table
- if isinstance(binary.left, schema.Column) and isinstance(binary.right, schema.Column) and ((not circular and binary.left.table is table) or (circular and binary.right is foreignkey)):
- col = binary.left
- binary.left = binds.setdefault(binary.left,
- sql.BindParamClause(bind_label(), None, shortname = binary.left.name))
- reverselookup[binary.right] = binds[col]
- #binary.swap()
-
- if isinstance(binary.right, schema.Column) and isinstance(binary.left, schema.Column) and ((not circular and binary.right.table is table) or (circular and binary.left is foreignkey)):
- col = binary.right
- binary.right = binds.setdefault(binary.right,
- sql.BindParamClause(bind_label(), None, shortname = binary.right.name))
- reverselookup[binary.left] = binds[col]
-
- lazywhere = primaryjoin.copy_container()
- li = BinaryVisitor(visit_binary)
- lazywhere.accept_visitor(li)
- #print "PRIMARYJOIN", str(lazywhere), [b.key for b in binds.values()]
- if secondaryjoin is not None:
- lazywhere = sql.and_(lazywhere, secondaryjoin)
- return (lazywhere, binds, reverselookup)
-
-
-class EagerLoader(PropertyLoader):
- """loads related objects inline with a parent query."""
- def do_init_subclass(self, key, parent, recursion_stack=None):
- parent._has_eager = True
-
- self.eagertarget = self.target.alias()
-# print "ALIAS", str(self.eagertarget.select()) #selectable.__class__.__name__
- if self.secondary:
- self.eagersecondary = self.secondary.alias()
- self.aliasizer = Aliasizer(self.target, self.secondary, aliases={
- self.target:self.eagertarget,
- self.secondary:self.eagersecondary
- })
- #print "TARGET", self.target
- self.eagersecondaryjoin = self.secondaryjoin.copy_container()
- self.eagersecondaryjoin.accept_visitor(self.aliasizer)
- self.eagerprimary = self.primaryjoin.copy_container()
- self.eagerprimary.accept_visitor(self.aliasizer)
- #print "JOINS:", str(self.eagerprimary), "|", str(self.eagersecondaryjoin)
- else:
- self.aliasizer = Aliasizer(self.target, aliases={self.target:self.eagertarget})
- self.eagerprimary = self.primaryjoin.copy_container()
- self.eagerprimary.accept_visitor(self.aliasizer)
-
- if self.order_by:
- self.eager_order_by = self._aliasize_orderby(self.order_by)
- else:
- self.eager_order_by = None
-
-
- def _create_eager_chain(self, in_chain=False, recursion_stack=None):
- if not in_chain and getattr(self, '_eager_chained', False):
- return
-
- if recursion_stack is None:
- recursion_stack = {}
-
- eagerprops = []
- # create a new "eager chain", starting from this eager loader and descending downwards
- # through all sub-eagerloaders. this will copy all those eagerloaders and have them set up
- # aliases distinct to this eager chain. if a recursive relationship to any of the tables is detected,
- # the chain will terminate by copying that eager loader into a lazy loader.
- for key, prop in self.mapper.props.iteritems():
- if isinstance(prop, EagerLoader):
- eagerprops.append(prop)
- if len(eagerprops):
- recursion_stack[self.parent.table] = True
- self.mapper = self.mapper.copy()
- try:
- for prop in eagerprops:
- if recursion_stack.has_key(prop.target):
- # recursion - set the relationship as a LazyLoader
- EagerLazyOption(None, False).create_prop(self.mapper, prop.key)
- continue
- p = prop.copy()
- self.mapper.props[prop.key] = p
-# print "we are:", id(self), self.target.name, (self.secondary and self.secondary.name or "None"), self.parent.table.name
-# print "prop is",id(prop), prop.target.name, (prop.secondary and prop.secondary.name or "None"), prop.parent.table.name
- p.do_init_subclass(prop.key, prop.parent, recursion_stack)
- p._create_eager_chain(in_chain=True, recursion_stack=recursion_stack)
- p.eagerprimary = p.eagerprimary.copy_container()
-# aliasizer = Aliasizer(p.parent.table, aliases={p.parent.table:self.eagertarget})
- p.eagerprimary.accept_visitor(self.aliasizer)
- #print "new eagertqarget", p.eagertarget.name, (p.secondary and p.secondary.name or "none"), p.parent.table.name
- finally:
- del recursion_stack[self.parent.table]
-
- self._row_decorator = self._create_decorator_row()
-
- self._eager_chained = True
-
- def _aliasize_orderby(self, orderby, copy=True):
- if copy:
- orderby = [o.copy_container() for o in util.to_list(orderby)]
- else:
- orderby = util.to_list(orderby)
- for i in range(0, len(orderby)):
- if isinstance(orderby[i], schema.Column):
- orderby[i] = self.eagertarget._get_col_by_original(orderby[i])
- else:
- orderby[i].accept_visitor(self.aliasizer)
- return orderby
-
- def setup(self, key, statement, eagertable=None, **options):
- """add a left outer join to the statement thats being constructed"""
-
- # initialize the eager chains late in the game
- self._create_eager_chain()
-
- if hasattr(statement, '_outerjoin'):
- towrap = statement._outerjoin
- else:
- towrap = self.parent.table
-
- # print "hello, towrap", str(towrap)
- if self.secondaryjoin is not None:
- statement._outerjoin = sql.outerjoin(towrap, self.eagersecondary, self.eagerprimary).outerjoin(self.eagertarget, self.eagersecondaryjoin)
- if self.order_by is False and self.secondary.default_order_by() is not None:
- statement.order_by(*self.eagersecondary.default_order_by())
- else:
- statement._outerjoin = towrap.outerjoin(self.eagertarget, self.eagerprimary)
- if self.order_by is False and self.eagertarget.default_order_by() is not None:
- statement.order_by(*self.eagertarget.default_order_by())
-
- if self.eager_order_by:
- statement.order_by(*util.to_list(self.eager_order_by))
- elif getattr(statement, 'order_by_clause', None):
- self._aliasize_orderby(statement.order_by_clause, False)
-
- statement.append_from(statement._outerjoin)
- for key, value in self.mapper.props.iteritems():
- value.setup(key, statement, eagertable=self.eagertarget)
-
-
- def execute(self, session, instance, row, identitykey, imap, isnew):
- """receive a row. tell our mapper to look for a new object instance in the row, and attach
- it to a list on the parent instance."""
-
- if isnew:
- # new row loaded from the database. initialize a blank container on the instance.
- # this will override any per-class lazyloading type of stuff.
- h = objectstore.global_attributes.create_history(instance, self.key, self.uselist)
-
- if not self.uselist:
- if isnew:
- h.setattr_clean(self._instance(session, row, imap))
- else:
- # call _instance on the row, even though the object has been created,
- # so that we further descend into properties
- self._instance(session, row, imap)
-
- return
- elif isnew:
- result_list = h
- else:
- result_list = getattr(instance, self.key)
-
- self._instance(session, row, imap, result_list)
-
- def _create_decorator_row(self):
- class DecoratorDict(object):
- def __init__(self, row):
- self.row = row
- def __getitem__(self, key):
- if colmap.has_key(key):
- key = colmap[key]
- return self.row[key]
- def keys(self):
- return colmap.keys()
- colmap = {}
- for c in self.eagertarget.c:
- parent = self.target._get_col_by_original(c.original)
- colmap[parent] = c
- colmap[parent._label] = c
- colmap[parent.name] = c
- return DecoratorDict
-
- def _instance(self, session, row, imap, result_list=None):
- """gets an instance from a row, via this EagerLoader's mapper."""
- # since the EagerLoader makes an Alias of its mapper's table,
- # we translate the actual result columns back to what they
- # would normally be into a "virtual row" which is passed to the child mapper.
- # that way the mapper doesn't have to know about the modified column name
- # (neither do any MapperExtensions). The row is keyed off the Column object
- # (which is what mappers use) as well as its "label" (which might be what
- # user-defined code is using)
- row = self._row_decorator(row)
- return self.mapper._instance(session, row, imap, result_list)
-
-class GenericOption(MapperOption):
- """a mapper option that can handle dotted property names,
- descending down through the relations of a mapper until it
- reaches the target."""
- def __init__(self, key):
- self.key = key
- def process(self, mapper):
- self.process_by_key(mapper, self.key)
- def process_by_key(self, mapper, key):
- tokens = key.split('.', 1)
- if len(tokens) > 1:
- oldprop = mapper.props[tokens[0]]
- newprop = oldprop.copy()
- newprop.argument = self.process_by_key(oldprop.mapper.copy(), tokens[1])
- mapper.set_property(tokens[0], newprop)
- else:
- self.create_prop(mapper, tokens[0])
- return mapper
-
- def create_prop(self, mapper, key):
- # overridden by subclasses such as EagerLazyOption and DeferredOption
- raise NotImplementedError()
-
-class BackRef(object):
- """stores the name of a backreference property as well as options to
- be used on the resulting PropertyLoader."""
- def __init__(self, key, **kwargs):
- self.key = key
- self.kwargs = kwargs
- def compile(self, prop):
- """called by the owning PropertyLoader to set up a backreference on the
- PropertyLoader's mapper."""
- # try to set a LazyLoader on our mapper referencing the parent mapper
- if not prop.mapper.props.has_key(self.key):
- if prop.secondaryjoin is not None:
- # if setting up a backref to a many-to-many, reverse the order
- # of the "primary" and "secondary" joins
- pj = prop.secondaryjoin
- sj = prop.primaryjoin
- else:
- pj = prop.primaryjoin
- sj = None
- lazy = self.kwargs.pop('lazy', True)
- if lazy:
- cls = LazyLoader
- else:
- cls = EagerLoader
- relation = cls(prop.parent, prop.secondary, pj, sj, backref=prop.key, is_backref=True, **self.kwargs)
- prop.mapper.add_property(self.key, relation)
- else:
- # else set one of us as the "backreference"
- if not prop.mapper.props[self.key].is_backref:
- prop.is_backref = True
- def get_extension(self):
- """returns an attribute extension to use with this backreference."""
- return attributes.GenericBackrefExtension(self.key)
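- # an illustrative sketch (hypothetical names): the backref() function from
- # this package constructs a BackRef, letting the reverse property carry its
- # own options:
- #
- # mapper(User, users, properties={
- # 'addresses': relation(Address, backref=backref('user', lazy=True))
- # })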
-
-class EagerLazyOption(GenericOption):
- """an option that switches a PropertyLoader to be an EagerLoader or LazyLoader"""
- def __init__(self, key, toeager = True, **kwargs):
- self.key = key
- self.toeager = toeager
- self.kwargs = kwargs
-
- def hash_key(self):
- return "EagerLazyOption(%s, %s)" % (repr(self.key), repr(self.toeager))
-
- def create_prop(self, mapper, key):
- if self.toeager:
- class_ = EagerLoader
- elif self.toeager is None:
- class_ = PropertyLoader
- else:
- class_ = LazyLoader
-
- oldprop = mapper.props[key]
- newprop = class_.__new__(class_)
- newprop.__dict__.update(oldprop.__dict__)
- newprop.do_init_subclass(key, mapper)
- mapper.set_property(key, newprop)
-
-class DeferredOption(GenericOption):
- def __init__(self, key, defer=False, **kwargs):
- self.key = key
- self.defer = defer
- self.kwargs = kwargs
- def hash_key(self):
- return "DeferredOption(%s,%s)" % (self.key, self.defer)
- def create_prop(self, mapper, key):
- oldprop = mapper.props[key]
- if self.defer:
- prop = DeferredColumnProperty(*oldprop.columns, **self.kwargs)
- else:
- prop = ColumnProperty(*oldprop.columns, **self.kwargs)
- mapper.set_property(key, prop)
-
-class Aliasizer(sql.ClauseVisitor):
- """converts a table instance within an expression to be an alias of that table."""
- def __init__(self, *tables, **kwargs):
- self.tables = {}
- self.aliases = kwargs.get('aliases', {})
- for t in tables:
- self.tables[t] = t
- if not self.aliases.has_key(t):
- self.aliases[t] = sql.alias(t)
- if isinstance(t, sql.Join):
- for t2 in t.columns:
- self.tables[t2.table] = t2
- self.aliases[t2.table] = self.aliases[t]
- self.binary = None
- def get_alias(self, table):
- return self.aliases[table]
- def visit_compound(self, compound):
- self.visit_clauselist(compound)
- def visit_clauselist(self, clist):
- for i in range(0, len(clist.clauses)):
- if isinstance(clist.clauses[i], schema.Column) and self.tables.has_key(clist.clauses[i].table):
- orig = clist.clauses[i]
- clist.clauses[i] = self.get_alias(clist.clauses[i].table)._get_col_by_original(clist.clauses[i])
- if clist.clauses[i] is None:
- raise "cant get orig for " + str(orig) + " against table " + orig.table.name + " " + self.get_alias(orig.table).name
- def visit_binary(self, binary):
- if isinstance(binary.left, schema.Column) and self.tables.has_key(binary.left.table):
- binary.left = self.get_alias(binary.left.table)._get_col_by_original(binary.left)
- if isinstance(binary.right, schema.Column) and self.tables.has_key(binary.right.table):
- binary.right = self.get_alias(binary.right.table)._get_col_by_original(binary.right)
-
-class BinaryVisitor(sql.ClauseVisitor):
- def __init__(self, func):
- self.func = func
- def visit_binary(self, binary):
- self.func(binary)
diff --git a/lib/sqlalchemy/mapping/query.py b/lib/sqlalchemy/mapping/query.py
deleted file mode 100644
index 283e8c189..000000000
--- a/lib/sqlalchemy/mapping/query.py
+++ /dev/null
@@ -1,263 +0,0 @@
-# mapper/query.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-import objectstore
-import sqlalchemy.sql as sql
-import sqlalchemy.util as util
-import mapper
-from sqlalchemy.exceptions import *
-
-class Query(object):
- """encapsulates the object-fetching operations provided by Mappers."""
- def __init__(self, mapper, **kwargs):
- self.mapper = mapper
- self.always_refresh = kwargs.pop('always_refresh', self.mapper.always_refresh)
- self.order_by = kwargs.pop('order_by', self.mapper.order_by)
- self.extension = kwargs.pop('extension', self.mapper.extension)
- self._session = kwargs.pop('session', None)
- if not hasattr(mapper, '_get_clause'):
- _get_clause = sql.and_()
- for primary_key in self.mapper.pks_by_table[self.table]:
- _get_clause.clauses.append(primary_key == sql.bindparam("pk_"+primary_key.key))
- self.mapper._get_clause = _get_clause
- self._get_clause = self.mapper._get_clause
- def _get_session(self):
- if self._session is None:
- return objectstore.get_session()
- else:
- return self._session
- table = property(lambda s:s.mapper.table)
- props = property(lambda s:s.mapper.props)
- session = property(_get_session)
-
- def get(self, *ident, **kwargs):
- """returns an instance of the object based on the given identifier, or None
- if not found. The *ident argument is a
- list of primary key columns in the order of the table def's primary key columns."""
- key = self.mapper.identity_key(*ident)
- #print "key: " + repr(key) + " ident: " + repr(ident)
- return self._get(key, ident, **kwargs)
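- # e.g. (illustrative): the identifier is the primary key value(s), in the
- # order of the table's primary key columns:
- #
- # u = query.get(5) # single-column primary key
- # o = query.get(12, 'A') # composite primary key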
-
- def get_by(self, *args, **params):
- """returns a single object instance based on the given key/value criterion.
- this is either the first value in the result list, or None if the list is
- empty.
-
- the keys are mapped to property or column names mapped by this mapper's Table, and the values
- are coerced into a WHERE clause separated by AND operators. If the local property/column
- names don't contain the key, a search will be performed against this mapper's immediate
- list of relations as well, forming the appropriate join conditions if a matching property
- is located.
-
- e.g. u = usermapper.get_by(user_name = 'fred')
- """
- x = self.select_whereclause(self._by_clause(*args, **params), limit=1)
- if x:
- return x[0]
- else:
- return None
-
- def select_by(self, *args, **params):
- """returns an array of object instances based on the given clauses and key/value criterion.
-
- *args is a list of zero or more ClauseElements which will be connected by AND operators.
- **params is a set of zero or more key/value parameters which are converted into ClauseElements.
- the keys are mapped to property or column names mapped by this mapper's Table, and the values
- are coerced into a WHERE clause separated by AND operators. If the local property/column
- names don't contain the key, a search will be performed against this mapper's immediate
- list of relations as well, forming the appropriate join conditions if a matching property
- is located.
-
- e.g. result = usermapper.select_by(user_name = 'fred')
- """
- ret = self.extension.select_by(self, *args, **params)
- if ret is not mapper.EXT_PASS:
- return ret
- return self.select_whereclause(self._by_clause(*args, **params))
-
- def selectfirst_by(self, *args, **params):
- """works like select_by(), but only returns the first result by itself, or None if no
- objects returned. Synonymous with get_by()"""
- return self.get_by(*args, **params)
-
- def selectone_by(self, *args, **params):
- """works like selectfirst_by(), but throws an error if not exactly one result was returned."""
- ret = self.select_whereclause(self._by_clause(*args, **params), limit=2)
- if len(ret) == 1:
- return ret[0]
- raise InvalidRequestError('Expected exactly one row for selectone_by, got %d' % len(ret))
-
- def count_by(self, *args, **params):
- """returns the count of instances based on the given clauses and key/value criterion.
- The criterion is constructed in the same way as the select_by() method."""
- return self.count(self._by_clause(*args, **params))
-
- def selectfirst(self, *args, **params):
- """works like select(), but only returns the first result by itself, or None if no
- objects returned."""
- params['limit'] = 1
- ret = self.select_whereclause(*args, **params)
- if ret:
- return ret[0]
- else:
- return None
-
- def selectone(self, *args, **params):
- """works like selectfirst(), but throws an error if not exactly one result was returned."""
- ret = list(self.select(*args, **params)[0:2])
- if len(ret) == 1:
- return ret[0]
- raise InvalidRequestError('Expected exactly one row for selectone, got %d' % len(ret))
-
- def select(self, arg=None, **kwargs):
- """selects instances of the object from the database.
-
- arg can be any ClauseElement, which will form the criterion with which to
- load the objects.
-
- For more advanced usage, arg can also be a Select statement object, which
- will be executed and its resulting rowset used to build new object instances.
- in this case, the developer must ensure that an adequate set of columns exists in the
- rowset with which to build new object instances."""
-
- ret = self.extension.select(self, arg=arg, **kwargs)
- if ret is not mapper.EXT_PASS:
- return ret
- elif arg is not None and isinstance(arg, sql.Selectable):
- return self.select_statement(arg, **kwargs)
- else:
- return self.select_whereclause(whereclause=arg, **kwargs)
-
- def select_whereclause(self, whereclause=None, params=None, **kwargs):
- statement = self._compile(whereclause, **kwargs)
- return self._select_statement(statement, params=params)
-
- def count(self, whereclause=None, params=None, **kwargs):
- s = self.table.count(whereclause)
- if params is not None:
- return s.scalar(**params)
- else:
- return s.scalar()
-
- def select_statement(self, statement, **params):
- return self._select_statement(statement, params=params)
-
- def select_text(self, text, **params):
- t = sql.text(text, engine=self.mapper.primarytable.engine)
- return self.instances(t.execute(**params))
-
- def __getattr__(self, key):
- if (key.startswith('select_by_')):
- key = key[10:]
- def foo(arg):
- return self.select_by(**{key:arg})
- return foo
- elif (key.startswith('get_by_')):
- key = key[7:]
- def foo(arg):
- return self.get_by(**{key:arg})
- return foo
- else:
- raise AttributeError(key)
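- # e.g. (illustrative attribute names): the generated methods forward a
- # single positional argument to the corresponding keyword:
- #
- # query.get_by_user_name('fred') # same as query.get_by(user_name='fred')
- # query.select_by_email('ed@x.com') # same as query.select_by(email='ed@x.com')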
-
- def instances(self, *args, **kwargs):
- return self.mapper.instances(session=self.session, *args, **kwargs)
-
- def _by_clause(self, *args, **params):
- clause = None
- for arg in args:
- if clause is None:
- clause = arg
- else:
- clause &= arg
- for key, value in params.iteritems():
- if value is False:
- continue
- c = self.mapper._get_criterion(key, value)
- if c is None:
- raise InvalidRequestError("Cant find criterion for property '"+ key + "'")
- if clause is None:
- clause = c
- else:
- clause &= c
- return clause
-
- def _get(self, key, ident=None, reload=False):
- if not reload and not self.always_refresh:
- try:
- return self.session._get(key)
- except KeyError:
- pass
-
- if ident is None:
- ident = key[1]
- i = 0
- params = {}
- for primary_key in self.mapper.pks_by_table[self.table]:
- params["pk_"+primary_key.key] = ident[i]
- i += 1
- try:
- statement = self._compile(self._get_clause)
- return self._select_statement(statement, params=params, populate_existing=reload)[0]
- except IndexError:
- return None
-
- def _select_statement(self, statement, params=None, **kwargs):
- statement.use_labels = True
- if params is None:
- params = {}
- return self.instances(statement.execute(**params), **kwargs)
-
- def _should_nest(self, **kwargs):
- """returns True if the given statement options indicate that we should "nest" the
- generated query as a subquery inside of a larger eager-loading query. this is used
- with keywords like distinct, limit and offset and the mapper defines eager loads."""
- return (
- self.mapper.has_eager()
- and (kwargs.has_key('limit') or kwargs.has_key('offset') or kwargs.get('distinct', False))
- )
-
- def _compile(self, whereclause = None, **kwargs):
- order_by = kwargs.pop('order_by', False)
- from_obj = kwargs.pop('from_obj', [])
- if order_by is False:
- order_by = self.order_by
- if order_by is False:
- if self.table.default_order_by() is not None:
- order_by = self.table.default_order_by()
-
- if self._should_nest(**kwargs):
- from_obj.append(self.table)
- s2 = sql.select(self.table.primary_key, whereclause, use_labels=True, from_obj=from_obj, **kwargs)
-# raise "ok first thing", str(s2)
- if not kwargs.get('distinct', False) and order_by:
- s2.order_by(*util.to_list(order_by))
- s3 = s2.alias('rowcount')
- crit = []
- for i in range(0, len(self.table.primary_key)):
- crit.append(s3.primary_key[i] == self.table.primary_key[i])
- statement = sql.select([], sql.and_(*crit), from_obj=[self.table], use_labels=True)
- # raise "OK statement", str(statement)
- if order_by:
- statement.order_by(*util.to_list(order_by))
- else:
- from_obj.append(self.table)
- statement = sql.select([], whereclause, from_obj=from_obj, use_labels=True, **kwargs)
- if order_by:
- statement.order_by(*util.to_list(order_by))
- # for a DISTINCT query, you need the columns explicitly specified in order
- # to use it in "order_by". insure they are in the column criterion (particularly oid).
- # TODO: this should be done at the SQL level not the mapper level
- if kwargs.get('distinct', False) and order_by:
- statement.append_column(*util.to_list(order_by))
- # plugin point
-
- # give all the attached properties a chance to modify the query
- for key, value in self.mapper.props.iteritems():
- value.setup(key, statement, **kwargs)
- return statement
-
diff --git a/lib/sqlalchemy/mapping/sync.py b/lib/sqlalchemy/mapping/sync.py
deleted file mode 100644
index cfce9b6b6..000000000
--- a/lib/sqlalchemy/mapping/sync.py
+++ /dev/null
@@ -1,129 +0,0 @@
-# mapper/sync.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""contains the ClauseSynchronizer class, which is used to map attributes between two objects
-in a manner corresponding to a SQL clause that compares column values."""
-
-import sqlalchemy.sql as sql
-import sqlalchemy.schema as schema
-from sqlalchemy.exceptions import *
-
-ONETOMANY = 0
-MANYTOONE = 1
-MANYTOMANY = 2
-
-class ClauseSynchronizer(object):
- """Given a SQL clause, usually a series of one or more binary
- expressions between columns, and a set of 'source' and 'destination' mappers, compiles a set of SyncRules
- corresponding to that information. The ClauseSynchronizer can then be executed given a set of parent/child
- objects or destination dictionary, which will iterate through each of its SyncRules and execute them.
- Each SyncRule will copy the value of a single attribute from the parent
- to the child, corresponding to the pair of columns in a particular binary expression, using the source and
- destination mappers to map those two columns to object attributes within parent and child."""
- def __init__(self, parent_mapper, child_mapper, direction):
- self.parent_mapper = parent_mapper
- self.child_mapper = child_mapper
- self.direction = direction
- self.syncrules = []
-
- def compile(self, sqlclause, source_tables, target_tables, issecondary=None):
- def check_for_table(binary, list1, list2):
- #print "check for table", str(binary), [str(c) for c in l]
- if binary.left.table in list1 and binary.right.table in list2:
- return (binary.left, binary.right)
- elif binary.right.table in list1 and binary.left.table in list2:
- return (binary.right, binary.left)
- else:
- return (None, None)
-
- def compile_binary(binary):
- """assembles a SyncRule given a single binary condition"""
- if binary.operator != '=' or not isinstance(binary.left, schema.Column) or not isinstance(binary.right, schema.Column):
- return
-
- if binary.left.table == binary.right.table:
- # self-cyclical relation
- if binary.left.primary_key:
- source = binary.left
- dest = binary.right
- elif binary.right.primary_key:
- source = binary.right
- dest = binary.left
- else:
- raise ArgumentError("Cant determine direction for relationship %s = %s" % (binary.left.fullname, binary.right.fullname))
- if self.direction == ONETOMANY:
- self.syncrules.append(SyncRule(self.parent_mapper, source, dest, dest_mapper=self.child_mapper))
- elif self.direction == MANYTOONE:
- self.syncrules.append(SyncRule(self.child_mapper, source, dest, dest_mapper=self.parent_mapper))
- else:
- raise AssertionError("assert failed")
- else:
- (pt, tt) = check_for_table(binary, source_tables, target_tables)
- #print "OK", binary, [t.name for t in source_tables], [t.name for t in target_tables]
- if pt and tt:
- if self.direction == ONETOMANY:
- self.syncrules.append(SyncRule(self.parent_mapper, pt, tt, dest_mapper=self.child_mapper))
- elif self.direction == MANYTOONE:
- self.syncrules.append(SyncRule(self.child_mapper, tt, pt, dest_mapper=self.parent_mapper))
- else:
- if not issecondary:
- self.syncrules.append(SyncRule(self.parent_mapper, pt, tt, dest_mapper=self.child_mapper, issecondary=issecondary))
- else:
- self.syncrules.append(SyncRule(self.child_mapper, pt, tt, dest_mapper=self.parent_mapper, issecondary=issecondary))
-
- rules_added = len(self.syncrules)
- processor = BinaryVisitor(compile_binary)
- sqlclause.accept_visitor(processor)
- if len(self.syncrules) == rules_added:
- raise ArgumentError("No syncrules generated for join criterion " + str(sqlclause))
-
- def execute(self, source, dest, obj=None, child=None, clearkeys=None):
- for rule in self.syncrules:
- rule.execute(source, dest, obj, child, clearkeys)
-
-class SyncRule(object):
- """An instruction indicating how to populate the objects on each side of a relationship.
- i.e. if table1 column A is joined against
- table2 column B, and we are a one-to-many from table1 to table2, a syncrule would say
- 'take the A attribute from object1 and assign it to the B attribute on object2'.
-
- A rule contains the source mapper, the source column, destination column,
- destination mapper in the case of a one/many relationship, and
- the integer direction of this mapper relative to the association in the case
- of a many to many relationship.
- """
- def __init__(self, source_mapper, source_column, dest_column, dest_mapper=None, issecondary=None):
- self.source_mapper = source_mapper
- self.source_column = source_column
- self.issecondary = issecondary
- self.dest_mapper = dest_mapper
- self.dest_column = dest_column
- #print "SyncRule", source_mapper, source_column, dest_column, dest_mapper
-
- def execute(self, source, dest, obj, child, clearkeys):
- if source is None:
- if self.issecondary is False:
- source = obj
- elif self.issecondary is True:
- source = child
- if clearkeys or source is None:
- value = None
- else:
- value = self.source_mapper._getattrbycolumn(source, self.source_column)
- if isinstance(dest, dict):
- dest[self.dest_column.key] = value
- else:
- #print "SYNC VALUE", value, "TO", dest, self.source_column, self.dest_column
- self.dest_mapper._setattrbycolumn(dest, self.dest_column, value)
-
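-# a minimal sketch (illustrative only; the attribute names are hypothetical and not
-# part of the original API) of what a single SyncRule amounts to at execution time:
-# for a one-to-many relation built from "table1.c.id == table2.c.parent_id", the
-# parent's primary key value is copied onto the child's foreign key attribute.
-def _syncrule_sketch(parent, child, clearkeys=False):
-    if clearkeys:
-        child.parent_id = None
-    else:
-        child.parent_id = parent.id
-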
-class BinaryVisitor(sql.ClauseVisitor):
- def __init__(self, func):
- self.func = func
- def visit_binary(self, binary):
- self.func(binary)
-
diff --git a/lib/sqlalchemy/mapping/topological.py b/lib/sqlalchemy/mapping/topological.py
deleted file mode 100644
index 495eec8ce..000000000
--- a/lib/sqlalchemy/mapping/topological.py
+++ /dev/null
@@ -1,349 +0,0 @@
-# topological.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""topological sorting algorithms. the key to the unit of work is to assemble a list
-of dependencies amongst all the different mappers that have been defined for classes.
-Related tables with foreign key constraints have a definite insert order, deletion order,
-objects need dependent properties from parent objects set up before they can be saved, etc.
-These are all encoded as dependencies, in the form "mapper X is dependent on mapper Y",
-meaning mapper Y's objects must be saved before those of mapper X, and mapper X's objects
-must be deleted before those of mapper Y.
-
-The topological sort is an algorithm that receives this list of dependencies as a "partial
-ordering", that is a list of pairs which might say, "X is dependent on Y", "Q is dependent
-on Z", but does not necessarily tell you anything about Q being dependent on X. Therefore,
-it's not a straight sort where every element can be compared to another; only some of the
-elements have any sorting preference, and then only towards just some of the other elements.
-For a particular partial ordering, there can be many possible sorts that satisfy the
-conditions.
-
-An intrinsic "gotcha" to this algorithm is that since there are many possible outcomes
-to sorting a partial ordering, the algorithm can return any number of different results for the
-same input; just running it on a different machine architecture, or just random differences
-in the ordering of dictionaries, can change the result that is returned. While this result
-is guaranteed to be true to the incoming partial ordering, if the partial ordering itself
-does not properly represent the dependencies, code that works fine will suddenly break, then
-work again, then break, etc. Most of the bugs I've chased down while developing the "unit of work"
-have been of this nature - very tricky to reproduce and track down, particularly before I
-realized this characteristic of the algorithm.
-"""
-import string, StringIO
-from sets import *
-import sqlalchemy.util as util
-from sqlalchemy.exceptions import *
-
-class QueueDependencySorter(object):
- """this is a topological sort from wikipedia. its very stable. it creates a straight-line
- list of elements, then a second pass groups non-dependent actions together to build
- more of a tree structure with siblings."""
- class Node:
- """represents a node in a tree. stores an 'item' which represents the
- dependent thing we are talking about. if node 'a' is an ancestor node of
- node 'b', it means 'a's item is *not* dependent on that of 'b'."""
- def __init__(self, item):
- self.item = item
- self.edges = {}
- self.dependencies = {}
- self.children = []
- self.cycles = None
- def __str__(self):
- return self.safestr()
- def safestr(self, indent=0):
- return (' ' * indent) + "%s (idself=%s)" % (str(self.item), repr(id(self))) + repr(self.cycles) + "\n" + string.join([n.safestr(indent + 1) for n in self.children], '')
- def describe(self):
- return "%s (idself=%s)" % (str(self.item), repr(id(self)))
- def __repr__(self):
- return self.describe()
- def is_dependent(self, child):
- if self.cycles is not None:
- for c in self.cycles:
- if c.dependencies.has_key(child):
- return True
- if child.cycles is not None:
- for c in child.cycles:
- if self.dependencies.has_key(c):
- return True
- return self.dependencies.has_key(child)
-
- def __init__(self, tuples, allitems):
- self.tuples = tuples
- self.allitems = allitems
-
- def _dump_edges(self, edges):
- s = StringIO.StringIO()
- for key, value in edges.iteritems():
- for c in value.keys():
- s.write("%s->%s\n" % (repr(key), repr(c)))
- return s.getvalue()
-
- def sort(self, allow_self_cycles=True, allow_all_cycles=False):
- (tuples, allitems) = (self.tuples, self.allitems)
-
- #print "\n---------------------------------\n"
- #print repr([t for t in tuples])
- #print repr([a for a in allitems])
- #print "\n---------------------------------\n"
-
- nodes = {}
- edges = {}
- for item in allitems + [t[0] for t in tuples] + [t[1] for t in tuples]:
- if not nodes.has_key(item):
- node = QueueDependencySorter.Node(item)
- nodes[item] = node
- edges[node] = {}
-
- for t in tuples:
- if t[0] is t[1]:
- if allow_self_cycles:
- n = nodes[t[0]]
- n.cycles = Set([n])
- continue
- else:
- raise CommitError("Self-referential dependency detected " + repr(t))
- childnode = nodes[t[1]]
- parentnode = nodes[t[0]]
- self._add_edge(edges, (parentnode, childnode))
-
- queue = []
- for n in nodes.values():
- if len(n.edges) == 0:
- queue.append(n)
- cycles = {}
- output = []
- while len(edges) > 0:
- #print self._dump_edges(edges)
- if len(queue) == 0:
- # edges remain but no edgeless nodes to remove; this indicates
- # a cycle
- if allow_all_cycles:
- cycle = self._find_cycle(edges)
- lead = cycle[0][0]
- lead.cycles = Set()
- for edge in cycle:
- n = self._remove_edge(edges, edge)
- lead.cycles.add(edge[0])
- lead.cycles.add(edge[1])
- if n is not None:
- queue.append(n)
- if n is not lead:
- n._cyclical = True
- # loop through cycle
- # remove edges from the edge dictionary
- # install the cycled nodes in the "cycle" list of one of the nodes
- continue
- else:
- # long cycles not allowed
- raise CommitError("Circular dependency detected " + repr(edges) + repr(queue))
- node = queue.pop()
- if not hasattr(node, '_cyclical'):
- output.append(node)
- nodeedges = edges.pop(node, None)
- if nodeedges is None:
- continue
- for childnode in nodeedges.keys():
- del childnode.edges[node]
- if len(childnode.edges) == 0:
- queue.append(childnode)
-
- return self._create_batched_tree(output)
-
-
- def _create_batched_tree(self, nodes):
- """given a list of nodes from a topological sort, organizes the nodes into a tree structure,
- with as many non-dependent nodes set as siblings to each other as possible."""
- def sort(index=None, l=None):
- if index is None:
- index = 0
-
- if index >= len(nodes):
- return None
-
- node = nodes[index]
- l2 = []
- sort(index + 1, l2)
- for n in l2:
- if l is None or search_dep(node, n):
- node.children.append(n)
- else:
- l.append(n)
- if l is not None:
- l.append(node)
- return node
-
- def search_dep(parent, child):
- if child is None:
- return False
- elif parent.is_dependent(child):
- return True
- else:
- for c in child.children:
- if search_dep(parent, c):
- return True
- return False
- return sort()
-
-
- def _add_edge(self, edges, edge):
- (parentnode, childnode) = edge
- edges[parentnode][childnode] = True
- parentnode.dependencies[childnode] = True
- childnode.edges[parentnode] = True
-
- def _remove_edge(self, edges, edge):
- (parentnode, childnode) = edge
- del edges[parentnode][childnode]
- del childnode.edges[parentnode]
- del parentnode.dependencies[childnode]
- if len(childnode.edges) == 0:
- return childnode
-
- def _find_cycle(self, edges):
- """given a structure of edges, locates a cycle in the strucure and returns
- as a list of tuples representing edges involved in the cycle."""
- seen = Set()
- cycled_edges = []
- def traverse(d, parent=None):
- for key in d.keys():
- if not edges.has_key(key):
- continue
- if key in seen:
- if parent is not None:
- cycled_edges.append((parent, key))
- return key
- seen.add(key)
- x = traverse(edges[key], parent=key)
- if x is None:
- seen.remove(key)
- else:
- if parent is not None:
- cycled_edges.append((parent, key))
- return x
- else:
- return None
- s = traverse(edges)
- if s is None:
- return None
- else:
- return cycled_edges
-
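-# a usage sketch (hypothetical items, not from the original test suite): sort two
-# dependency pairs with QueueDependencySorter and print the resulting Node tree.
-def _queue_sorter_sketch():
-    sorter = QueueDependencySorter([('y', 'x'), ('z', 'q')], ['x', 'y', 'z', 'q'])
-    head = sorter.sort()
-    # Node.__str__ renders the node and its children as an indented tree
-    print str(head)
-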
-class TreeDependencySorter(object):
- """
- this is my first topological sorting algorithm. it's crazy, but matched my thinking
- at the time. it also creates the kind of structure I want. but, I am not 100% sure
- it works in all cases, since I always did really poorly in linear algebra. anyway,
- I got the other one above to produce a tree structure too, so we should be OK.
- """
- class Node:
- """represents a node in a tree. stores an 'item' which represents the
- dependent thing we are talking about. if node 'a' is an ancestor node of
- node 'b', it means 'a's item is *not* dependent on that of 'b'."""
- def __init__(self, item):
- #print "new node on " + str(item)
- self.item = item
- self.children = HashSet()
- self.parent = None
- def append(self, node):
- """appends the given node as a child on this node. removes the node from
- its preexisting parent."""
- if node.parent is not None:
- del node.parent.children[node]
- self.children.append(node)
- node.parent = self
- def is_descendant_of(self, node):
- """returns true if this node is a descendant of the given node"""
- n = self
- while n is not None:
- if n is node:
- return True
- else:
- n = n.parent
- return False
- def get_root(self):
- """returns the highest ancestor node of this node, i.e. which has no parent"""
- n = self
- while n.parent is not None:
- n = n.parent
- return n
- def get_sibling_ancestor(self, node):
- """returns the node which is:
- - an ancestor of this node
- - is a sibling of the given node
- - not an ancestor of the given node
-
- - else returns this node's root node."""
- n = self
- while n.parent is not None and n.parent is not node.parent and not node.is_descendant_of(n.parent):
- n = n.parent
- return n
- def __str__(self):
- return self.safestr({})
- def safestr(self, hash, indent = 0):
- if hash.has_key(self):
- return (' ' * indent) + "RECURSIVE:%s(%s, %s)" % (str(self.item), repr(id(self)), self.parent and repr(id(self.parent)) or 'None')
- hash[self] = True
- return (' ' * indent) + "%s (idself=%s, idparent=%s)" % (str(self.item), repr(id(self)), self.parent and repr(id(self.parent)) or "None") + "\n" + string.join([n.safestr(hash, indent + 1) for n in self.children], '')
- def describe(self):
- return "%s (idself=%s)" % (str(self.item), repr(id(self)))
-
- def __init__(self, tuples, allitems):
- self.tuples = tuples
- self.allitems = allitems
-
- def sort(self):
- (tuples, allitems) = (self.tuples, self.allitems)
-
- nodes = {}
- # make nodes for all the items and store in the hash
- for item in allitems + [t[0] for t in tuples] + [t[1] for t in tuples]:
- if not nodes.has_key(item):
- nodes[item] = TreeDependencySorter.Node(item)
-
- # loop through tuples
- for tup in tuples:
- (parent, child) = (tup[0], tup[1])
- # get parent node
- parentnode = nodes[parent]
-
- # if parent is child, mark "circular" attribute on the node
- if parent is child:
- parentnode.circular = True
- # and just continue
- continue
-
- # get child node
- childnode = nodes[child]
-
- if parentnode.parent is childnode:
- # check for "a switch"
- t = parentnode.item
- parentnode.item = childnode.item
- childnode.item = t
- nodes[parentnode.item] = parentnode
- nodes[childnode.item] = childnode
- elif parentnode.is_descendant_of(childnode):
- # check for a line that's backwards with nodes in between; this is a
- # circular dependency (although confirmation on this would be helpful)
- raise CommitError("Circular dependency detected")
- elif not childnode.is_descendant_of(parentnode):
- # if the relationship doesn't exist, connect the nodes together
- root = childnode.get_sibling_ancestor(parentnode)
- parentnode.append(root)
-
-
- # now we have a collection of subtrees which represent dependencies.
- # go through the collection's root nodes and wire them together into one tree
- head = None
- for node in nodes.values():
- if node.parent is None:
- if head is not None:
- head.append(node)
- else:
- head = node
- #print str(head)
- return head
- \ No newline at end of file
diff --git a/lib/sqlalchemy/mapping/unitofwork.py b/lib/sqlalchemy/mapping/unitofwork.py
deleted file mode 100644
index 873bed548..000000000
--- a/lib/sqlalchemy/mapping/unitofwork.py
+++ /dev/null
@@ -1,857 +0,0 @@
-# unitofwork.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""the internals for the Unit Of Work system. includes hooks into the attributes package
-enabling the routing of change events to Unit Of Work objects, as well as the flush() mechanism
-which creates a dependency structure that executes change operations.
-
-a Unit of Work is essentially a system of maintaining a graph of in-memory objects and their
-modified state. Objects are maintained as unique against their primary key identity using
-an "identity map" pattern. The Unit of Work then maintains lists of objects that are new,
-dirty, or deleted and provides the capability to flush all those changes at once.
-"""
-
-from sqlalchemy import attributes
-from sqlalchemy import util
-import sqlalchemy
-from sqlalchemy.exceptions import *
-import StringIO
-import weakref
-import topological
-from sets import *
-
-# a global indicating if all flush() operations should have their plan
-# printed to standard output. also can be affected by creating an engine
-# with the "echo_uow=True" keyword argument.
-LOG = False
-
-class UOWProperty(attributes.SmartProperty):
- """overrides SmartProperty to provide ORM-specific accessors"""
- def __init__(self, class_, *args, **kwargs):
- super(UOWProperty, self).__init__(*args, **kwargs)
- self.class_ = class_
- property = property(lambda s:class_mapper(s.class_).props[s.key], doc="returns the MapperProperty object associated with this property")
-
-class UOWListElement(attributes.ListElement):
- """overrides ListElement to provide unit-of-work "dirty" hooks when list attributes are modified,
- plus specialzed append() method."""
- def __init__(self, obj, key, data=None, deleteremoved=False, **kwargs):
- attributes.ListElement.__init__(self, obj, key, data=data, **kwargs)
- self.deleteremoved = deleteremoved
- def list_value_changed(self, obj, key, item, listval, isdelete):
- sess = get_session(obj)
- if not isdelete and sess.deleted.contains(item):
- #raise InvalidRequestError("re-inserting a deleted value into a list")
- del sess.deleted[item]
- sess.modified_lists.append(self)
- if self.deleteremoved and isdelete:
- sess.register_deleted(item)
- def append(self, item, _mapper_nohistory = False):
- if _mapper_nohistory:
- self.append_nohistory(item)
- else:
- attributes.ListElement.append(self, item)
-
-class UOWAttributeManager(attributes.AttributeManager):
- """overrides AttributeManager to provide unit-of-work "dirty" hooks when scalar attribues are modified, plus factory methods for UOWProperrty/UOWListElement."""
- def __init__(self):
- attributes.AttributeManager.__init__(self)
-
- def value_changed(self, obj, key, value):
- if hasattr(obj, '_instance_key'):
- get_session(obj).register_dirty(obj)
- else:
- get_session(obj).register_new(obj)
-
- def create_prop(self, class_, key, uselist, callable_, **kwargs):
- return UOWProperty(class_, self, key, uselist, callable_, **kwargs)
-
- def create_list(self, obj, key, list_, **kwargs):
- return UOWListElement(obj, key, list_, **kwargs)
-
-class UnitOfWork(object):
- """main UOW object which stores lists of dirty/new/deleted objects, as well as 'modified_lists' for list attributes. provides top-level "flush" functionality as well as the transaction boundaries with the SQLEngine(s) involved in a write operation."""
- def __init__(self, identity_map=None):
- if identity_map is not None:
- self.identity_map = identity_map
- else:
- self.identity_map = weakref.WeakValueDictionary()
-
- self.attributes = global_attributes
- self.new = util.HashSet(ordered = True)
- self.dirty = util.HashSet()
- self.modified_lists = util.HashSet()
- self.deleted = util.HashSet()
-
- def get(self, class_, *id):
- """given a class and a list of primary key values in their table-order, locates the mapper
- for this class and calls get with the given primary key values."""
- return object_mapper(class_).get(*id)
-
- def _get(self, key):
- return self.identity_map[key]
-
- def _put(self, key, obj):
- self.identity_map[key] = obj
-
- def refresh(self, obj):
- self.rollback_object(obj)
- object_mapper(obj)._get(obj._instance_key, reload=True)
-
- def expire(self, obj):
- self.rollback_object(obj)
- def exp():
- object_mapper(obj)._get(obj._instance_key, reload=True)
- global_attributes.trigger_history(obj, exp)
-
- def is_expired(self, obj, unexpire=False):
- ret = global_attributes.has_trigger(obj)
- if ret and unexpire:
- global_attributes.untrigger_history(obj)
- return ret
-
- def has_key(self, key):
- """returns True if the given key is present in this UnitOfWork's identity map."""
- return self.identity_map.has_key(key)
-
- def expunge(self, obj):
- """removes this object completely from the UnitOfWork, including the identity map,
- and the "new", "dirty" and "deleted" lists."""
- self._remove_deleted(obj)
-
- def _remove_deleted(self, obj):
- if hasattr(obj, "_instance_key"):
- del self.identity_map[obj._instance_key]
- try:
- del self.deleted[obj]
- except KeyError:
- pass
- try:
- del self.dirty[obj]
- except KeyError:
- pass
- try:
- del self.new[obj]
- except KeyError:
- pass
- #self.attributes.commit(obj)
- self.attributes.remove(obj)
-
- def _validate_obj(self, obj):
- """validates that dirty/delete/flush operations can occur upon the given object, by checking
- if it has an instance key and that the instance key is present in the identity map."""
- if hasattr(obj, '_instance_key') and not self.identity_map.has_key(obj._instance_key):
- raise InvalidRequestError("Detected a mapped object not present in the current thread's Identity Map: '%s'. Use objectstore.import_instance() to place deserialized instances or instances from other threads" % repr(obj._instance_key))
-
- def update(self, obj):
- """called to add an object to this UnitOfWork as though it were loaded from the DB,
- but is actually coming from somewhere else, like a web session or similar."""
- self._put(obj._instance_key, obj)
- self.register_dirty(obj)
-
- def register_attribute(self, class_, key, uselist, **kwargs):
- self.attributes.register_attribute(class_, key, uselist, **kwargs)
-
- def register_callable(self, obj, key, func, uselist, **kwargs):
- self.attributes.set_callable(obj, key, func, uselist, **kwargs)
-
- def register_clean(self, obj):
- try:
- del self.dirty[obj]
- except KeyError:
- pass
- try:
- del self.new[obj]
- except KeyError:
- pass
- if not hasattr(obj, '_instance_key'):
- mapper = object_mapper(obj)
- obj._instance_key = mapper.instance_key(obj)
- self._put(obj._instance_key, obj)
- self.attributes.commit(obj)
-
- def register_new(self, obj):
- if not self.new.contains(obj):
- self.new.append(obj)
-
- def register_dirty(self, obj):
- if not self.dirty.contains(obj):
- self._validate_obj(obj)
- self.dirty.append(obj)
-
- def is_dirty(self, obj):
- if not self.dirty.contains(obj):
- return False
- else:
- return True
-
- def register_deleted(self, obj):
- if not self.deleted.contains(obj):
- self._validate_obj(obj)
- self.deleted.append(obj)
- mapper = object_mapper(obj)
- # TODO: should the cascading delete dependency thing
- # happen within PropertyLoader.process_dependencies ?
- mapper.register_deleted(obj, self)
-
- def unregister_deleted(self, obj):
- try:
- self.deleted.remove(obj)
- except KeyError:
- pass
-
- def flush(self, session, *objects):
- flush_context = UOWTransaction(self, session)
-
- if len(objects):
- objset = util.HashSet(iter=objects)
- else:
- objset = None
-
- for obj in [n for n in self.new] + [d for d in self.dirty]:
- if objset is not None and not objset.contains(obj):
- continue
- if self.deleted.contains(obj):
- continue
- flush_context.register_object(obj)
- for item in self.modified_lists:
- obj = item.obj
- if objset is not None and not objset.contains(obj):
- continue
- if self.deleted.contains(obj):
- continue
- flush_context.register_object(obj, listonly = True)
- flush_context.register_saved_history(item)
-
-# for o in item.added_items() + item.deleted_items():
-# if self.deleted.contains(o):
-# continue
-# flush_context.register_object(o, listonly=True)
-
- for obj in self.deleted:
- if objset is not None and not objset.contains(obj):
- continue
- flush_context.register_object(obj, isdelete=True)
-
- engines = util.HashSet()
- for mapper in flush_context.mappers:
- for e in session.engines(mapper):
- engines.append(e)
-
- echo_commit = False
- for e in engines:
- echo_commit = echo_commit or e.echo_uow
- e.begin()
- try:
- flush_context.execute(echo=echo_commit)
- except:
- for e in engines:
- e.rollback()
- raise
- for e in engines:
- e.commit()
-
- flush_context.post_exec()
-
-
- def rollback_object(self, obj):
- """'rolls back' the attributes that have been changed on an object instance."""
- self.attributes.rollback(obj)
- try:
- del self.dirty[obj]
- except KeyError:
- pass
- try:
- del self.deleted[obj]
- except KeyError:
- pass
-
-class UOWTransaction(object):
- """handles the details of organizing and executing transaction tasks
- during a UnitOfWork object's flush() operation."""
- def __init__(self, uow, session):
- self.uow = uow
- self.session = session
- # unique list of all the mappers we come across
- self.mappers = util.HashSet()
- self.dependencies = {}
- self.tasks = {}
- self.saved_histories = util.HashSet()
- self.__modified = False
-
- def register_object(self, obj, isdelete = False, listonly = False, postupdate=False, **kwargs):
- """adds an object to this UOWTransaction to be updated in the database.
-
- 'isdelete' indicates whether the object is to be deleted or saved (update/inserted).
-
- 'listonly' indicates that only this object's dependency relationships should be
- refreshed/updated to reflect a recent save/upcoming delete operation, but not a full
- save/delete operation on the object itself, unless an additional save/delete
- registration is entered for the object."""
- #print "REGISTER", repr(obj), repr(getattr(obj, '_instance_key', None)), str(isdelete), str(listonly)
- # things can get really confusing if there are duplicate instances floating around,
- # so make sure everything is OK
- self.uow._validate_obj(obj)
-
- mapper = object_mapper(obj)
- self.mappers.append(mapper)
- task = self.get_task_by_mapper(mapper)
-
- if postupdate:
- mod = task.append_postupdate(obj)
- self.__modified = self.__modified or mod
- return
-
- # for a cyclical task, things need to be sorted out already,
- # so this object should have already been added to the appropriate sub-task
- # can put an assertion here to make sure....
- if task.circular:
- return
-
- mod = task.append(obj, listonly, isdelete=isdelete, **kwargs)
- self.__modified = self.__modified or mod
-
- def unregister_object(self, obj):
- mapper = object_mapper(obj)
- task = self.get_task_by_mapper(mapper)
- task.delete(obj)
- self.__modified = True
-
- def get_task_by_mapper(self, mapper):
- """every individual mapper involved in the transaction has a single
- corresponding UOWTask object, which stores all the operations involved
- with that mapper as well as operations dependent on those operations.
- this method returns or creates the single per-transaction instance of
- UOWTask that exists for that mapper."""
- try:
- return self.tasks[mapper]
- except KeyError:
- return UOWTask(self, mapper)
-
- def register_dependency(self, mapper, dependency):
- """called by mapper.PropertyLoader to register the objects handled by
- one mapper being dependent on the objects handled by another."""
- # correct for primary mapper (the mapper officially associated with the class)
- self.dependencies[(mapper._primary_mapper(), dependency._primary_mapper())] = True
- self.__modified = True
-
- def register_processor(self, mapper, processor, mapperfrom, isdeletefrom):
- """called by mapper.PropertyLoader to register itself as a "processor", which
- will be associated with a particular UOWTask, and be given a list of "dependent"
- objects corresponding to another UOWTask to be processed, either after that secondary
- task saves its objects or before it deletes its objects."""
- # when the task from "mapper" executes, take the objects from the task corresponding
- # to "mapperfrom"'s list of save/delete objects, and send them to "processor"
- # for dependency processing
- #print "registerprocessor", str(mapper), repr(processor.key), str(mapperfrom), repr(isdeletefrom)
-
- # correct for primary mapper (the mapper officially associated with the class)
- mapper = mapper._primary_mapper()
- mapperfrom = mapperfrom._primary_mapper()
- task = self.get_task_by_mapper(mapper)
- targettask = self.get_task_by_mapper(mapperfrom)
- task.dependencies.append(UOWDependencyProcessor(processor, targettask, isdeletefrom))
- self.__modified = True
-
- def register_saved_history(self, listobj):
- self.saved_histories.append(listobj)
-
- def execute(self, echo=False):
- for task in self.tasks.values():
- task.mapper.register_dependencies(self)
-
- head = self._sort_dependencies()
- self.__modified = False
- if LOG or echo:
- if head is None:
- print "Task dump: None"
- else:
- print "Task dump:\n" + head.dump()
- if head is not None:
- head.execute(self)
- if LOG or echo:
- if self.__modified and head is not None:
- print "\nAfter Execute:\n" + head.dump()
- else:
- print "\nExecute complete (no post-exec changes)\n"
-
- def post_exec(self):
- """after an execute/flush is completed, all of the objects and lists that have
- been flushed are updated in the parent UnitOfWork object to mark them as clean."""
-
- for task in self.tasks.values():
- for elem in task.objects.values():
- if elem.isdelete:
- self.uow._remove_deleted(elem.obj)
- else:
- self.uow.register_clean(elem.obj)
-
- for obj in self.saved_histories:
- try:
- obj.commit()
- del self.uow.modified_lists[obj]
- except KeyError:
- pass
-
- # this assertion only applies to a full flush(), not a
- # partial one
- #if len(self.uow.new) > 0 or len(self.uow.dirty) >0 or len(self.uow.modified_lists) > 0:
- # raise "assertion failed"
-
- def _sort_dependencies(self):
- """creates a hierarchical tree of dependent tasks. the root node is returned.
- when the root node is executed, it also executes its child tasks recursively."""
- def sort_hier(node):
- if node is None:
- return None
- task = self.get_task_by_mapper(node.item)
- if node.cycles is not None:
- tasks = []
- for n in node.cycles:
- tasks.append(self.get_task_by_mapper(n.item))
- task.circular = task._sort_circular_dependencies(self, tasks)
- for child in node.children:
- t = sort_hier(child)
- if t is not None:
- task.childtasks.append(t)
- return task
-
- mappers = util.HashSet()
- for task in self.tasks.values():
- mappers.append(task.mapper)
- head = DependencySorter(self.dependencies, mappers).sort(allow_all_cycles=True)
- #print str(head)
- task = sort_hier(head)
- return task
-
-
-class UOWTaskElement(object):
- """an element within a UOWTask. corresponds to a single object instance
- to be saved, deleted, or just part of the transaction as a placeholder for
- further dependencies (i.e. 'listonly').
- in the case of self-referential mappers, may also store a "childtask", which is a
- UOWTask containing objects dependent on this element's object instance."""
- def __init__(self, obj):
- self.obj = obj
- self.listonly = True
- self.childtasks = []
- self.isdelete = False
- self.mapper = None
- def __repr__(self):
- return "UOWTaskElement/%d: %s/%d %s" % (id(self), self.obj.__class__.__name__, id(self.obj), (self.listonly and 'listonly' or (self.isdelete and 'delete' or 'save')) )
-
-class UOWDependencyProcessor(object):
- """in between the saving and deleting of objects, process "dependent" data, such as filling in
- a foreign key on a child item from a new primary key, or deleting association rows before a
- delete."""
- def __init__(self, processor, targettask, isdeletefrom):
- self.processor = processor
- self.targettask = targettask
- self.isdeletefrom = isdeletefrom
-
- def execute(self, trans, delete):
- if not delete:
- self.processor.process_dependencies(self.targettask, [elem.obj for elem in self.targettask.tosave_elements() if elem.obj is not None], trans, delete = delete)
- else:
- self.processor.process_dependencies(self.targettask, [elem.obj for elem in self.targettask.todelete_elements() if elem.obj is not None], trans, delete = delete)
-
- def get_object_dependencies(self, obj, trans, passive):
- return self.processor.get_object_dependencies(obj, trans, passive=passive)
-
- def whose_dependent_on_who(self, obj, o):
- return self.processor.whose_dependent_on_who(obj, o)
-
- def branch(self, task):
- return UOWDependencyProcessor(self.processor, task, self.isdeletefrom)
-
-class UOWTask(object):
- def __init__(self, uowtransaction, mapper):
- if uowtransaction is not None:
- uowtransaction.tasks[mapper] = self
- self.uowtransaction = uowtransaction
- self.mapper = mapper
- self.objects = util.OrderedDict()
- self.dependencies = []
- self.cyclical_dependencies = []
- self.circular = None
- self.postcircular = None
- self.childtasks = []
-# print "NEW TASK", repr(self)
-
- def is_empty(self):
- return len(self.objects) == 0 and len(self.dependencies) == 0 and len(self.childtasks) == 0
-
- def append(self, obj, listonly = False, childtask = None, isdelete = False):
- """appends an object to this task, to be either saved or deleted depending on the
- 'isdelete' attribute of this UOWTask. 'listonly' indicates that the object should
- only be processed as a dependency and not actually saved/deleted. if the object
- already exists with a 'listonly' flag of False, it is kept as is. 'childtask' is used
- internally when creating a hierarchical list of self-referential tasks, to assign
- dependent operations at the per-object level instead of the per-task level
- (a flag-merging sketch follows this method). """
- try:
- rec = self.objects[obj]
- retval = False
- except KeyError:
- rec = UOWTaskElement(obj)
- self.objects[obj] = rec
- retval = True
- if not listonly:
- rec.listonly = False
- if childtask:
- rec.childtasks.append(childtask)
- if isdelete:
- rec.isdelete = True
- return retval
-
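-    # flag-merging sketch (illustrative): repeated registrations of the same object
-    # only strengthen its element --
-    #     task.append(obj, listonly=True)    # placeholder for dependencies only
-    #     task.append(obj, listonly=False)   # now a real save; listonly stays False
-    #     task.append(obj, isdelete=True)    # and now treated as a delete instead
-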
- def append_postupdate(self, obj):
- # postupdates are UPDATED immediately (for now)
- self.mapper.save_obj([obj], self.uowtransaction, postupdate=True)
- return True
-
- def delete(self, obj):
- try:
- del self.objects[obj]
- except KeyError:
- pass
-
- def execute(self, trans):
- """executes this UOWTask. saves objects to be saved, processes all dependencies
- that have been registered, and deletes objects to be deleted. """
- if self.circular is not None:
- self.circular.execute(trans)
- return
-
- self.mapper.save_obj(self.tosave_objects(), trans)
- for dep in self.cyclical_save_dependencies():
- dep.execute(trans, delete=False)
- for element in self.tosave_elements():
- for task in element.childtasks:
- task.execute(trans)
- for dep in self.save_dependencies():
- dep.execute(trans, delete=False)
- for dep in self.delete_dependencies():
- dep.execute(trans, delete=True)
- for dep in self.cyclical_delete_dependencies():
- dep.execute(trans, delete=True)
- for child in self.childtasks:
- child.execute(trans)
- for element in self.todelete_elements():
- for task in element.childtasks:
- task.execute(trans)
- self.mapper.delete_obj(self.todelete_objects(), trans)
-
- def tosave_elements(self):
- return [rec for rec in self.objects.values() if not rec.isdelete]
- def todelete_elements(self):
- return [rec for rec in self.objects.values() if rec.isdelete]
- def tosave_objects(self):
- return [rec.obj for rec in self.objects.values() if rec.obj is not None and not rec.listonly and rec.isdelete is False]
- def todelete_objects(self):
- return [rec.obj for rec in self.objects.values() if rec.obj is not None and not rec.listonly and rec.isdelete is True]
- def save_dependencies(self):
- return [dep for dep in self.dependencies if not dep.isdeletefrom]
- def cyclical_save_dependencies(self):
- return [dep for dep in self.cyclical_dependencies if not dep.isdeletefrom]
- def delete_dependencies(self):
- return [dep for dep in self.dependencies if dep.isdeletefrom]
- def cyclical_delete_dependencies(self):
- return [dep for dep in self.cyclical_dependencies if dep.isdeletefrom]
-
- def _sort_circular_dependencies(self, trans, cycles):
- """for a single task, creates a hierarchical tree of "subtasks" which associate
- specific dependency actions with individual objects. This is used for a
- "cyclical" task, or a task where elements
- of its object list contain dependencies on each other.
-
- this is not the normal case; this logic only kicks in when something like
- a hierarchical tree is being represented."""
-
- allobjects = []
- for task in cycles:
- allobjects += task.objects.keys()
- tuples = []
-
- objecttotask = {}
-
- cycles = Set(cycles)
-
- # dependency processors that aren't part of the cyclical thing
- # get put here
- extradeplist = []
-
- def get_object_task(parent, obj):
- try:
- return objecttotask[obj]
- except KeyError:
- t = UOWTask(None, parent.mapper)
- t.parent = parent
- objecttotask[obj] = t
- return t
-
- dependencies = {}
- def get_dependency_task(obj, depprocessor):
- try:
- dp = dependencies[obj]
- except KeyError:
- dp = {}
- dependencies[obj] = dp
- try:
- l = dp[depprocessor]
- except KeyError:
- l = UOWTask(None, depprocessor.targettask.mapper)
- dp[depprocessor] = l
- return l
-
- # work out a list of all the "dependency processors" that
- # represent objects that have to be dependency sorted at the
- # per-object level. all other dependency processors go into
- # "extradeplist".
- deps_by_targettask = {}
- for task in cycles:
- for dep in task.dependencies:
- if dep.targettask not in cycles or trans.get_task_by_mapper(dep.processor.mapper) not in cycles:
- extradeplist.append(dep)
- l = deps_by_targettask.setdefault(dep.targettask, [])
- l.append(dep)
-
- for task in cycles:
- for taskelement in task.objects.values():
- obj = taskelement.obj
- #print "OBJ", repr(obj), "TASK", repr(task)
-
- # create a placeholder UOWTask that may be built into the final
- # task tree
- get_object_task(task, obj)
- for dep in deps_by_targettask.get(task, []):
- (processor, targettask, isdelete) = (dep.processor, dep.targettask, dep.isdeletefrom)
- if taskelement.isdelete is not dep.isdeletefrom:
- continue
- #print "GETING LIST OFF PROC", processor.key, "OBJ", repr(obj)
-
- # traverse through the modified child items of each object. normally this
- # is done via PropertyLoader in properties.py, but we need all the info
- # up front here to do the object-level topological sort.
-
- # list of dependent objects from this object
- childlist = dep.get_object_dependencies(obj, trans, passive = True)
- # the task corresponding to the processor's objects
- childtask = trans.get_task_by_mapper(processor.mapper)
- # is this dependency involved in one of the cycles ?
- cyclicaldep = dep.targettask in cycles and trans.get_task_by_mapper(dep.processor.mapper) in cycles
- if isdelete:
- childlist = childlist.unchanged_items() + childlist.deleted_items()
- else:
- childlist = childlist.added_items()
-
- for o in childlist:
- if o is None:
- # this can be None due to the many-to-one dependency processor added
- # for deleted items, line 385 properties.py
- continue
- if not o in childtask.objects:
- # item needs to be saved since it's added, or attached to a deleted object
- childtask.append(o, isdelete=isdelete and dep.processor.private)
- if cyclicaldep:
- # cyclical, so create a placeholder UOWTask that may be built into the
- # final task tree
- t = get_object_task(childtask, o)
- if not cyclicaldep:
- # not cyclical, so we are done with this
- continue
- # cyclical, so create an ordered pair for the dependency sort
- whosdep = dep.whose_dependent_on_who(obj, o)
- if whosdep is not None:
- tuples.append(whosdep)
- # then locate a UOWDependencyProcessor to add the object onto, which
- # will handle the modifications between saves/deletes
- if whosdep[0] is obj:
- get_dependency_task(whosdep[0], dep).append(whosdep[0], isdelete=isdelete)
- else:
- get_dependency_task(whosdep[0], dep).append(whosdep[1], isdelete=isdelete)
- else:
- get_dependency_task(obj, dep).append(obj, isdelete=isdelete)
-
- head = DependencySorter(tuples, allobjects).sort()
- if head is None:
- return None
-
- #print str(head)
-
- def make_task_tree(node, parenttask):
- """takes a dependency-sorted tree of objects and creates a tree of UOWTasks"""
- t = objecttotask[node.item]
- can_add_to_parent = t.mapper is parenttask.mapper
- if can_add_to_parent:
- parenttask.append(node.item, t.parent.objects[node.item].listonly, isdelete=t.parent.objects[node.item].isdelete, childtask=t)
- else:
- t.append(node.item, t.parent.objects[node.item].listonly, isdelete=t.parent.objects[node.item].isdelete)
- parenttask.append(None, listonly=False, isdelete=t.parent.objects[node.item].isdelete, childtask=t)
- if dependencies.has_key(node.item):
- for depprocessor, deptask in dependencies[node.item].iteritems():
- if can_add_to_parent:
- parenttask.cyclical_dependencies.append(depprocessor.branch(deptask))
- else:
- t.cyclical_dependencies.append(depprocessor.branch(deptask))
- for n in node.children:
- make_task_tree(n, t)
- return t
-
- # this is the new "circular" UOWTask which will execute in place of "self"
- t = UOWTask(None, self.mapper)
-
- # stick the non-circular dependencies and child tasks onto the new
- # circular UOWTask
- t.dependencies += [d for d in extradeplist]
- t.childtasks = self.childtasks
- make_task_tree(head, t)
- return t
-
- def dump(self):
- buf = StringIO.StringIO()
- self._dump(buf)
- return buf.getvalue()
-
- def _dump(self, buf, indent=0, circularparent=None):
-
- def _indent():
- return " | " * indent
-
- headers = {}
- def header(buf, text):
- """writes a given header just once"""
- try:
- headers[text]
- except KeyError:
- buf.write(_indent() + " |\n")
- buf.write(text)
- headers[text] = True
-
- def _dump_processor(proc):
- if proc.isdeletefrom:
- val = [t for t in proc.targettask.objects.values() if t.isdelete]
- else:
- val = [t for t in proc.targettask.objects.values() if not t.isdelete]
-
- buf.write(_indent() + " |- UOWDependencyProcessor(%d) %s attribute on %s (%s)\n" % (
- id(proc),
- repr(proc.processor.key),
- (proc.isdeletefrom and
- "%s's to be deleted" % _repr_task_class(proc.targettask)
- or "saved %s's" % _repr_task_class(proc.targettask)),
- _repr_task(proc.targettask))
- )
-
- if len(val) == 0:
- buf.write(_indent() + " | |-" + "(no objects)\n")
- for v in val:
- buf.write(_indent() + " | |-" + _repr_task_element(v) + "\n")
-
- def _repr_task_element(te):
- if te.obj is None:
- objid = "(placeholder)"
- else:
- objid = "%s(%d)" % (te.obj.__class__.__name__, id(te.obj))
- return "UOWTaskElement(%d): %s %s%s" % (id(te), objid, (te.listonly and '(listonly)' or (te.isdelete and '(delete' or '(save')),
- (te.mapper is not None and " w/ " + str(te.mapper) + ")" or ")")
- )
-
- def _repr_task(task):
- if task.mapper is not None:
- if task.mapper.__class__.__name__ == 'Mapper':
- name = task.mapper.class_.__name__ + "/" + str(task.mapper.primarytable) + "/" + str(id(task.mapper))
- else:
- name = repr(task.mapper)
- else:
- name = '(none)'
- return ("UOWTask(%d) '%s'" % (id(task), name))
- def _repr_task_class(task):
- if task.mapper is not None and task.mapper.__class__.__name__ == 'Mapper':
- return task.mapper.class_.__name__
- else:
- return '(none)'
-
- def _repr(obj):
- return "%s(%d)" % (obj.__class__.__name__, id(obj))
-
- if self.circular is not None:
- self.circular._dump(buf, indent, self)
- return
-
- i = _indent()
- if len(i):
- i = i[0:-1] + "-"
- if circularparent is not None:
- buf.write(i + " " + _repr_task(circularparent))
- buf.write("->circular->" + _repr_task(self))
- else:
- buf.write(i + " " + _repr_task(self))
-
- buf.write("\n")
- for rec in self.tosave_elements():
- if rec.listonly:
- continue
- header(buf, _indent() + " |- Save elements\n")
- buf.write(_indent() + " |- Save: " + _repr_task_element(rec) + "\n")
- for dep in self.cyclical_save_dependencies():
- header(buf, _indent() + " |- Cyclical Save dependencies\n")
- _dump_processor(dep)
- for element in self.tosave_elements():
- for task in element.childtasks:
- header(buf, _indent() + " |- Save subelements of UOWTaskElement(%s)\n" % id(element))
- task._dump(buf, indent + 1)
- for dep in self.save_dependencies():
- header(buf, _indent() + " |- Save dependencies\n")
- _dump_processor(dep)
- for dep in self.delete_dependencies():
- header(buf, _indent() + " |- Delete dependencies\n")
- _dump_processor(dep)
- for dep in self.cyclical_delete_dependencies():
- header(buf, _indent() + " |- Cyclical Delete dependencies\n")
- _dump_processor(dep)
- for child in self.childtasks:
- header(buf, _indent() + " |- Child tasks\n")
- child._dump(buf, indent + 1)
-# for obj in self.postupdate:
-# header(buf, _indent() + " |- Post Update objects\n")
-# buf.write(_repr(obj) + "\n")
- for element in self.todelete_elements():
- for task in element.childtasks:
- header(buf, _indent() + " |- Delete subelements of UOWTaskElement(%s)\n" % id(element))
- task._dump(buf, indent + 1)
-
- for rec in self.todelete_elements():
- if rec.listonly:
- continue
- header(buf, _indent() + " |- Delete elements\n")
- buf.write(_indent() + " |- Delete: " + _repr_task_element(rec) + "\n")
-
- buf.write(_indent() + " |----\n")
- buf.write(_indent() + "\n")
-
- def __repr__(self):
- if self.mapper is not None:
- if self.mapper.__class__.__name__ == 'Mapper':
- name = self.mapper.class_.__name__ + "/" + self.mapper.primarytable.name
- else:
- name = repr(self.mapper)
- else:
- name = '(none)'
- return ("UOWTask(%d) Mapper: '%s'" % (id(self), name))
-
-class DependencySorter(topological.QueueDependencySorter):
- pass
-
-def mapper(*args, **params):
- return sqlalchemy.mapper(*args, **params)
-
-def object_mapper(obj):
- return sqlalchemy.object_mapper(obj)
-
-def class_mapper(class_):
- return sqlalchemy.class_mapper(class_)
-
-global_attributes = UOWAttributeManager()
-
diff --git a/lib/sqlalchemy/mapping/util.py b/lib/sqlalchemy/mapping/util.py
deleted file mode 100644
index 4d957241b..000000000
--- a/lib/sqlalchemy/mapping/util.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# mapper/util.py
-# Copyright (C) 2005,2006 Michael Bayer mike_mp@zzzcomputing.com
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-
-import sqlalchemy.sql as sql
-
-class TableFinder(sql.ClauseVisitor):
- """given a Clause, locates all the Tables within it into a list."""
- def __init__(self, table, check_columns=False):
- self.tables = []
- self.check_columns = check_columns
- if table is not None:
- table.accept_visitor(self)
- def visit_table(self, table):
- self.tables.append(table)
- def __len__(self):
- return len(self.tables)
- def __getitem__(self, i):
- return self.tables[i]
- def __iter__(self):
- return iter(self.tables)
- def __contains__(self, obj):
- return obj in self.tables
- def __add__(self, obj):
- return self.tables + list(obj)
- def visit_column(self, column):
- if self.check_columns:
- column.table.accept_visitor(self)
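-
-# a usage sketch ('users_table' and 'addresses_table' are hypothetical Table
-# objects, not part of this module): collect every Table referenced by a join
-# criterion.
-def _table_finder_sketch(users_table, addresses_table):
-    clause = users_table.c.id == addresses_table.c.user_id
-    finder = TableFinder(clause, check_columns=True)
-    # check_columns=True means each Column's parent Table is visited as well
-    return [t.name for t in finder]    # roughly ['users', 'addresses']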