Diffstat (limited to 'lib/sqlalchemy/orm')
-rw-r--r--  lib/sqlalchemy/orm/attributes.py          97
-rw-r--r--  lib/sqlalchemy/orm/base.py                19
-rw-r--r--  lib/sqlalchemy/orm/descriptor_props.py     4
-rw-r--r--  lib/sqlalchemy/orm/events.py               3
-rw-r--r--  lib/sqlalchemy/orm/interfaces.py          62
-rw-r--r--  lib/sqlalchemy/orm/loading.py             66
-rw-r--r--  lib/sqlalchemy/orm/mapper.py              42
-rw-r--r--  lib/sqlalchemy/orm/path_registry.py       14
-rw-r--r--  lib/sqlalchemy/orm/persistence.py        385
-rw-r--r--  lib/sqlalchemy/orm/properties.py          21
-rw-r--r--  lib/sqlalchemy/orm/query.py              284
-rw-r--r--  lib/sqlalchemy/orm/relationships.py        3
-rw-r--r--  lib/sqlalchemy/orm/session.py            271
-rw-r--r--  lib/sqlalchemy/orm/strategies.py          24
-rw-r--r--  lib/sqlalchemy/orm/strategy_options.py    15
-rw-r--r--  lib/sqlalchemy/orm/sync.py                17
-rw-r--r--  lib/sqlalchemy/orm/unitofwork.py          10
-rw-r--r--  lib/sqlalchemy/orm/util.py                10
18 files changed, 1017 insertions, 330 deletions
diff --git a/lib/sqlalchemy/orm/attributes.py b/lib/sqlalchemy/orm/attributes.py
index 2b4c3ec75..e9c8c511a 100644
--- a/lib/sqlalchemy/orm/attributes.py
+++ b/lib/sqlalchemy/orm/attributes.py
@@ -345,18 +345,16 @@ class Event(object):
.. versionadded:: 0.9.0
- """
-
- impl = None
- """The :class:`.AttributeImpl` which is the current event initiator.
- """
+ :var impl: The :class:`.AttributeImpl` which is the current event
+ initiator.
- op = None
- """The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or :attr:`.OP_REPLACE`,
- indicating the source operation.
+ :var op: The symbol :attr:`.OP_APPEND`, :attr:`.OP_REMOVE` or
+ :attr:`.OP_REPLACE`, indicating the source operation.
"""
+ __slots__ = 'impl', 'op', 'parent_token'
+
def __init__(self, attribute_impl, op):
self.impl = attribute_impl
self.op = op
@@ -455,6 +453,11 @@ class AttributeImpl(object):
self.expire_missing = expire_missing
+ __slots__ = (
+ 'class_', 'key', 'callable_', 'dispatch', 'trackparent',
+ 'parent_token', 'send_modified_events', 'is_equal', 'expire_missing'
+ )
+
def __str__(self):
return "%s.%s" % (self.class_.__name__, self.key)
@@ -654,6 +657,23 @@ class ScalarAttributeImpl(AttributeImpl):
supports_population = True
collection = False
+ __slots__ = '_replace_token', '_append_token', '_remove_token'
+
+ def __init__(self, *arg, **kw):
+ super(ScalarAttributeImpl, self).__init__(*arg, **kw)
+ self._replace_token = self._append_token = None
+ self._remove_token = None
+
+ def _init_append_token(self):
+ self._replace_token = self._append_token = Event(self, OP_REPLACE)
+ return self._replace_token
+
+ _init_append_or_replace_token = _init_append_token
+
+ def _init_remove_token(self):
+ self._remove_token = Event(self, OP_REMOVE)
+ return self._remove_token
+
def delete(self, state, dict_):
# TODO: catch key errors, convert to attributeerror?
@@ -692,27 +712,18 @@ class ScalarAttributeImpl(AttributeImpl):
state._modified_event(dict_, self, old)
dict_[self.key] = value
- @util.memoized_property
- def _replace_token(self):
- return Event(self, OP_REPLACE)
-
- @util.memoized_property
- def _append_token(self):
- return Event(self, OP_REPLACE)
-
- @util.memoized_property
- def _remove_token(self):
- return Event(self, OP_REMOVE)
-
def fire_replace_event(self, state, dict_, value, previous, initiator):
for fn in self.dispatch.set:
value = fn(
- state, value, previous, initiator or self._replace_token)
+ state, value, previous,
+ initiator or self._replace_token or
+ self._init_append_or_replace_token())
return value
def fire_remove_event(self, state, dict_, value, initiator):
for fn in self.dispatch.remove:
- fn(state, value, initiator or self._remove_token)
+ fn(state, value,
+ initiator or self._remove_token or self._init_remove_token())
@property
def type(self):
@@ -732,9 +743,13 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
supports_population = True
collection = False
+ __slots__ = ()
+
def delete(self, state, dict_):
old = self.get(state, dict_)
- self.fire_remove_event(state, dict_, old, self._remove_token)
+ self.fire_remove_event(
+ state, dict_, old,
+ self._remove_token or self._init_remove_token())
del dict_[self.key]
def get_history(self, state, dict_, passive=PASSIVE_OFF):
@@ -807,7 +822,8 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
- fn(state, value, initiator or self._remove_token)
+ fn(state, value, initiator or
+ self._remove_token or self._init_remove_token())
state._modified_event(dict_, self, value)
@@ -819,7 +835,8 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
for fn in self.dispatch.set:
value = fn(
- state, value, previous, initiator or self._replace_token)
+ state, value, previous, initiator or
+ self._replace_token or self._init_append_or_replace_token())
state._modified_event(dict_, self, previous)
@@ -846,6 +863,8 @@ class CollectionAttributeImpl(AttributeImpl):
supports_population = True
collection = True
+ __slots__ = 'copy', 'collection_factory', '_append_token', '_remove_token'
+
def __init__(self, class_, key, callable_, dispatch,
typecallable=None, trackparent=False, extension=None,
copy_function=None, compare_function=None, **kwargs):
@@ -862,6 +881,8 @@ class CollectionAttributeImpl(AttributeImpl):
copy_function = self.__copy
self.copy = copy_function
self.collection_factory = typecallable
+ self._append_token = None
+ self._remove_token = None
if getattr(self.collection_factory, "_sa_linker", None):
@@ -873,6 +894,14 @@ class CollectionAttributeImpl(AttributeImpl):
def unlink(target, collection, collection_adapter):
collection._sa_linker(None)
+ def _init_append_token(self):
+ self._append_token = Event(self, OP_APPEND)
+ return self._append_token
+
+ def _init_remove_token(self):
+ self._remove_token = Event(self, OP_REMOVE)
+ return self._remove_token
+
def __copy(self, item):
return [y for y in collections.collection_adapter(item)]
@@ -915,17 +944,11 @@ class CollectionAttributeImpl(AttributeImpl):
return [(instance_state(o), o) for o in current]
- @util.memoized_property
- def _append_token(self):
- return Event(self, OP_APPEND)
-
- @util.memoized_property
- def _remove_token(self):
- return Event(self, OP_REMOVE)
-
def fire_append_event(self, state, dict_, value, initiator):
for fn in self.dispatch.append:
- value = fn(state, value, initiator or self._append_token)
+ value = fn(
+ state, value,
+ initiator or self._append_token or self._init_append_token())
state._modified_event(dict_, self, NEVER_SET, True)
@@ -942,7 +965,8 @@ class CollectionAttributeImpl(AttributeImpl):
self.sethasparent(instance_state(value), state, False)
for fn in self.dispatch.remove:
- fn(state, value, initiator or self._remove_token)
+ fn(state, value,
+ initiator or self._remove_token or self._init_remove_token())
state._modified_event(dict_, self, NEVER_SET, True)
@@ -1134,7 +1158,8 @@ def backref_listeners(attribute, key, uselist):
impl.pop(old_state,
old_dict,
state.obj(),
- parent_impl._append_token,
+ parent_impl._append_token or
+ parent_impl._init_append_token(),
passive=PASSIVE_NO_FETCH)
if child is not None:
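
The changes above replace ``@util.memoized_property`` event tokens with plain ``None`` sentinels plus explicit ``_init_*_token()`` methods: a memoized property caches into the instance ``__dict__``, which these classes no longer have once ``__slots__`` is declared. A minimal sketch of the idiom, using hypothetical names:

    class TokenHolder(object):
        # illustration only; stands in for ScalarAttributeImpl above
        __slots__ = ('_token',)

        def __init__(self):
            # plain sentinel instead of a memoized property
            self._token = None

        def _init_token(self):
            self._token = object()  # stands in for Event(self, OP_REPLACE)
            return self._token

        def fire(self, initiator=None):
            # the chained "or" calls _init_token() only on first use
            return initiator or self._token or self._init_token()
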
diff --git a/lib/sqlalchemy/orm/base.py b/lib/sqlalchemy/orm/base.py
index 3390ceec4..7bfafdc2b 100644
--- a/lib/sqlalchemy/orm/base.py
+++ b/lib/sqlalchemy/orm/base.py
@@ -437,6 +437,7 @@ class InspectionAttr(object):
here intact for forwards-compatibility.
"""
+ __slots__ = ()
is_selectable = False
"""Return True if this object is an instance of :class:`.Selectable`."""
@@ -488,6 +489,16 @@ class InspectionAttr(object):
"""
+
+class InspectionAttrInfo(InspectionAttr):
+ """Adds the ``.info`` attribute to :class:`.InspectionAttr`.
+
+ The rationale for :class:`.InspectionAttr` vs. :class:`.InspectionAttrInfo`
+ is that the former is compatible as a mixin for classes that specify
+ ``__slots__``; this is essentially an implementation artifact.
+
+ """
+
@util.memoized_property
def info(self):
"""Info dictionary associated with the object, allowing user-defined
@@ -501,9 +512,10 @@ class InspectionAttr(object):
.. versionadded:: 0.8 Added support for .info to all
:class:`.MapperProperty` subclasses.
- .. versionchanged:: 1.0.0 :attr:`.InspectionAttr.info` moved
- from :class:`.MapperProperty` so that it can apply to a wider
- variety of ORM and extension constructs.
+ .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
+ available on extension types via the
+ :attr:`.InspectionAttrInfo.info` attribute, so that it can apply
+ to a wider variety of ORM and extension constructs.
.. seealso::
@@ -520,3 +532,4 @@ class _MappedAttribute(object):
attributes.
"""
+ __slots__ = ()
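
The ``InspectionAttr`` / ``InspectionAttrInfo`` split exists for the same reason: ``util.memoized_property`` stores its computed value in the instance ``__dict__``, so it cannot serve classes that declare ``__slots__``. A stripped-down demonstration of the conflict, stock Python only:

    class memoized(object):
        # minimal stand-in for util.memoized_property
        def __init__(self, fget):
            self.fget = fget
            self.__name__ = fget.__name__

        def __get__(self, obj, cls):
            if obj is None:
                return self
            # caching requires a per-instance __dict__ ...
            obj.__dict__[self.__name__] = value = self.fget(obj)
            return value

    class WithInfo(object):  # like InspectionAttrInfo: has a __dict__
        @memoized
        def info(self):
            return {}

    WithInfo().info  # works; on a slotted class the same descriptor
                     # would raise AttributeError, hence the split
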
diff --git a/lib/sqlalchemy/orm/descriptor_props.py b/lib/sqlalchemy/orm/descriptor_props.py
index 19ff71f73..e68ff1bea 100644
--- a/lib/sqlalchemy/orm/descriptor_props.py
+++ b/lib/sqlalchemy/orm/descriptor_props.py
@@ -143,6 +143,7 @@ class CompositeProperty(DescriptorProperty):
class. **Deprecated.** Please see :class:`.AttributeEvents`.
"""
+ super(CompositeProperty, self).__init__()
self.attrs = attrs
self.composite_class = class_
@@ -471,6 +472,7 @@ class ConcreteInheritedProperty(DescriptorProperty):
return comparator_callable
def __init__(self):
+ super(ConcreteInheritedProperty, self).__init__()
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add "
@@ -555,6 +557,7 @@ class SynonymProperty(DescriptorProperty):
more complicated attribute-wrapping schemes than synonyms.
"""
+ super(SynonymProperty, self).__init__()
self.name = name
self.map_column = map_column
@@ -684,6 +687,7 @@ class ComparableProperty(DescriptorProperty):
.. versionadded:: 1.0.0
"""
+ super(ComparableProperty, self).__init__()
self.descriptor = descriptor
self.comparator_factory = comparator_factory
self.doc = doc or (descriptor and descriptor.__doc__) or None
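
These added ``super().__init__()`` calls follow from ``MapperProperty`` gaining a real constructor (see interfaces.py below) that sets ``_configure_started`` / ``_configure_finished`` as per-instance state; under ``__slots__`` there are no class-level defaults to fall back on, so a subclass that skips the chained call leaves those slots unassigned. A sketch of the failure mode avoided, with hypothetical classes:

    class BaseProp(object):
        __slots__ = ('_configure_started',)

        def __init__(self):
            self._configure_started = False

    class SubProp(BaseProp):
        __slots__ = ()

        def __init__(self):
            # without this call, reading self._configure_started raises
            # AttributeError: the slot exists but was never assigned
            super(SubProp, self).__init__()

    assert SubProp()._configure_started is False
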
diff --git a/lib/sqlalchemy/orm/events.py b/lib/sqlalchemy/orm/events.py
index 9ea0dd834..4d888a350 100644
--- a/lib/sqlalchemy/orm/events.py
+++ b/lib/sqlalchemy/orm/events.py
@@ -1479,8 +1479,9 @@ class AttributeEvents(event.Events):
@staticmethod
def _set_dispatch(cls, dispatch_cls):
- event.Events._set_dispatch(cls, dispatch_cls)
+ dispatch = event.Events._set_dispatch(cls, dispatch_cls)
dispatch_cls._active_history = False
+ return dispatch
@classmethod
def _accept_with(cls, target):
diff --git a/lib/sqlalchemy/orm/interfaces.py b/lib/sqlalchemy/orm/interfaces.py
index ad2452c1b..299ccaaaf 100644
--- a/lib/sqlalchemy/orm/interfaces.py
+++ b/lib/sqlalchemy/orm/interfaces.py
@@ -24,7 +24,8 @@ from .. import util
from ..sql import operators
from .base import (ONETOMANY, MANYTOONE, MANYTOMANY,
EXT_CONTINUE, EXT_STOP, NOT_EXTENSION)
-from .base import InspectionAttr, _MappedAttribute
+from .base import (InspectionAttr, InspectionAttrInfo,
+    _MappedAttribute)
import collections
# imported later
@@ -48,11 +49,8 @@ __all__ = (
)
-class MapperProperty(_MappedAttribute, InspectionAttr):
- """Manage the relationship of a ``Mapper`` to a single class
- attribute, as well as that attribute as it appears on individual
- instances of the class, including attribute instrumentation,
- attribute access, loading behavior, and dependency calculations.
+class MapperProperty(_MappedAttribute, InspectionAttr, util.MemoizedSlots):
+ """Represent a particular class attribute mapped by :class:`.Mapper`.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
@@ -63,6 +61,11 @@ class MapperProperty(_MappedAttribute, InspectionAttr):
"""
+ __slots__ = (
+ '_configure_started', '_configure_finished', 'parent', 'key',
+ 'info'
+ )
+
cascade = frozenset()
"""The set of 'cascade' attribute names.
@@ -78,6 +81,32 @@ class MapperProperty(_MappedAttribute, InspectionAttr):
"""
+ def _memoized_attr_info(self):
+ """Info dictionary associated with the object, allowing user-defined
+ data to be associated with this :class:`.InspectionAttr`.
+
+ The dictionary is generated when first accessed. Alternatively,
+ it can be specified as a constructor argument to the
+ :func:`.column_property`, :func:`.relationship`, or :func:`.composite`
+ functions.
+
+ .. versionadded:: 0.8 Added support for .info to all
+ :class:`.MapperProperty` subclasses.
+
+ .. versionchanged:: 1.0.0 :attr:`.MapperProperty.info` is also
+ available on extension types via the
+ :attr:`.InspectionAttrInfo.info` attribute, so that it can apply
+ to a wider variety of ORM and extension constructs.
+
+ .. seealso::
+
+ :attr:`.QueryableAttribute.info`
+
+ :attr:`.SchemaItem.info`
+
+ """
+ return {}
+
def setup(self, context, entity, path, adapter, **kwargs):
"""Called by Query for the purposes of constructing a SQL statement.
@@ -139,8 +168,9 @@ class MapperProperty(_MappedAttribute, InspectionAttr):
"""
- _configure_started = False
- _configure_finished = False
+ def __init__(self):
+ self._configure_started = False
+ self._configure_finished = False
def init(self):
"""Called after all mappers are created to assemble
@@ -303,6 +333,8 @@ class PropComparator(operators.ColumnOperators):
"""
+ __slots__ = 'prop', 'property', '_parentmapper', '_adapt_to_entity'
+
def __init__(self, prop, parentmapper, adapt_to_entity=None):
self.prop = self.property = prop
self._parentmapper = parentmapper
@@ -331,7 +363,7 @@ class PropComparator(operators.ColumnOperators):
else:
return self._adapt_to_entity._adapt_element
- @util.memoized_property
+ @property
def info(self):
return self.property.info
@@ -420,6 +452,8 @@ class StrategizedProperty(MapperProperty):
"""
+ __slots__ = '_strategies', 'strategy'
+
strategy_wildcard_key = None
def _get_context_loader(self, context, path):
@@ -483,14 +517,14 @@ class StrategizedProperty(MapperProperty):
not mapper.class_manager._attr_has_impl(self.key):
self.strategy.init_class_attribute(mapper)
- _strategies = collections.defaultdict(dict)
+ _all_strategies = collections.defaultdict(dict)
@classmethod
def strategy_for(cls, **kw):
def decorate(dec_cls):
dec_cls._strategy_keys = []
key = tuple(sorted(kw.items()))
- cls._strategies[cls][key] = dec_cls
+ cls._all_strategies[cls][key] = dec_cls
dec_cls._strategy_keys.append(key)
return dec_cls
return decorate
@@ -498,8 +532,8 @@ class StrategizedProperty(MapperProperty):
@classmethod
def _strategy_lookup(cls, *key):
for prop_cls in cls.__mro__:
- if prop_cls in cls._strategies:
- strategies = cls._strategies[prop_cls]
+ if prop_cls in cls._all_strategies:
+ strategies = cls._all_strategies[prop_cls]
try:
return strategies[key]
except KeyError:
@@ -558,6 +592,8 @@ class LoaderStrategy(object):
"""
+ __slots__ = 'parent_property', 'is_class_level', 'parent', 'key'
+
def __init__(self, parent):
self.parent_property = parent
self.is_class_level = False
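
``util.MemoizedSlots``, mixed into ``MapperProperty`` above, provides ``__slots__``-compatible memoization by naming convention: the first access to an unassigned slot falls through to ``__getattr__``, which invokes ``_memoized_attr_<name>`` (or ``_memoized_method_<name>``) and stores the result into the slot. A rough approximation of the mechanism, not the actual implementation:

    class MemoizedSlotsSketch(object):
        __slots__ = ()

        def _fallback_getattr(self, key):
            raise AttributeError(key)

        def __getattr__(self, key):
            # guard keeps the lookup names themselves from recursing
            if key.startswith('_memoized'):
                raise AttributeError(key)
            elif hasattr(self, '_memoized_attr_%s' % key):
                value = getattr(self, '_memoized_attr_%s' % key)()
                setattr(self, key, value)  # fill the slot; later reads skip this
                return value
            else:
                return self._fallback_getattr(key)

    class Prop(MemoizedSlotsSketch):
        __slots__ = ('info',)

        def _memoized_attr_info(self):
            return {}  # built once, on first access

    p = Prop()
    p.info['x'] = 1
    assert p.info == {'x': 1}  # same dict on every later access
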
diff --git a/lib/sqlalchemy/orm/loading.py b/lib/sqlalchemy/orm/loading.py
index 380afcdc7..fdc787545 100644
--- a/lib/sqlalchemy/orm/loading.py
+++ b/lib/sqlalchemy/orm/loading.py
@@ -42,41 +42,45 @@ def instances(query, cursor, context):
def filter_fn(row):
return tuple(fn(x) for x, fn in zip(row, filter_fns))
- (process, labels) = \
- list(zip(*[
- query_entity.row_processor(query,
- context, cursor)
- for query_entity in query._entities
- ]))
-
- if not single_entity:
- keyed_tuple = util.lightweight_named_tuple('result', labels)
-
- while True:
- context.partials = {}
-
- if query._yield_per:
- fetch = cursor.fetchmany(query._yield_per)
- if not fetch:
- break
- else:
- fetch = cursor.fetchall()
+ try:
+ (process, labels) = \
+ list(zip(*[
+ query_entity.row_processor(query,
+ context, cursor)
+ for query_entity in query._entities
+ ]))
+
+ if not single_entity:
+ keyed_tuple = util.lightweight_named_tuple('result', labels)
+
+ while True:
+ context.partials = {}
+
+ if query._yield_per:
+ fetch = cursor.fetchmany(query._yield_per)
+ if not fetch:
+ break
+ else:
+ fetch = cursor.fetchall()
- if single_entity:
- proc = process[0]
- rows = [proc(row) for row in fetch]
- else:
- rows = [keyed_tuple([proc(row) for proc in process])
- for row in fetch]
+ if single_entity:
+ proc = process[0]
+ rows = [proc(row) for row in fetch]
+ else:
+ rows = [keyed_tuple([proc(row) for proc in process])
+ for row in fetch]
- if filtered:
- rows = util.unique_list(rows, filter_fn)
+ if filtered:
+ rows = util.unique_list(rows, filter_fn)
- for row in rows:
- yield row
+ for row in rows:
+ yield row
- if not query._yield_per:
- break
+ if not query._yield_per:
+ break
+ except Exception as err:
+ cursor.close()
+ util.raise_from_cause(err)
@util.dependencies("sqlalchemy.orm.query")
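
The restructured ``instances()`` guarantees that the DBAPI cursor is closed if a row processor raises mid-iteration, re-raising with the original traceback via ``util.raise_from_cause()``. The shape of the pattern, reduced to plain Python:

    def rows_from(cursor, batch_size=100):
        # generator mirroring the try/except added to instances()
        try:
            while True:
                batch = cursor.fetchmany(batch_size)
                if not batch:
                    break
                for row in batch:
                    yield row
        except Exception:
            # close before propagating; SQLAlchemy additionally preserves
            # the original exception context via util.raise_from_cause()
            cursor.close()
            raise
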
diff --git a/lib/sqlalchemy/orm/mapper.py b/lib/sqlalchemy/orm/mapper.py
index 863dab5cb..eb5abbd4f 100644
--- a/lib/sqlalchemy/orm/mapper.py
+++ b/lib/sqlalchemy/orm/mapper.py
@@ -974,6 +974,15 @@ class Mapper(InspectionAttr):
self._all_tables = self.inherits._all_tables
if self.polymorphic_identity is not None:
+ if self.polymorphic_identity in self.polymorphic_map:
+ util.warn(
+ "Reassigning polymorphic association for identity %r "
+ "from %r to %r: Check for duplicate use of %r as "
+ "value for polymorphic_identity." %
+ (self.polymorphic_identity,
+ self.polymorphic_map[self.polymorphic_identity],
+ self, self.polymorphic_identity)
+ )
self.polymorphic_map[self.polymorphic_identity] = self
else:
@@ -1248,7 +1257,7 @@ class Mapper(InspectionAttr):
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
- if self._columntoproperty[col] not in self._primary_key_props and
+ if self._columntoproperty[col] not in self._identity_key_props and
(not hasattr(col, 'table') or
col.table not in self._cols_by_table))
@@ -2373,16 +2382,31 @@ class Mapper(InspectionAttr):
manager[prop.key].
impl.get(state, dict_,
attributes.PASSIVE_RETURN_NEVER_SET)
- for prop in self._primary_key_props
+ for prop in self._identity_key_props
]
@_memoized_configured_property
- def _primary_key_props(self):
- # TODO: this should really be called "identity key props",
- # as it does not necessarily include primary key columns within
- # individual tables
+ def _identity_key_props(self):
return [self._columntoproperty[col] for col in self.primary_key]
+ @_memoized_configured_property
+ def _all_pk_props(self):
+ collection = set()
+ for table in self.tables:
+ collection.update(self._pks_by_table[table])
+ return collection
+
+ @_memoized_configured_property
+ def _should_undefer_in_wildcard(self):
+ cols = set(self.primary_key)
+ if self.polymorphic_on is not None:
+ cols.add(self.polymorphic_on)
+ return cols
+
+ @_memoized_configured_property
+ def _primary_key_propkeys(self):
+ return set([prop.key for prop in self._all_pk_props])
+
def _get_state_attr_by_column(
self, state, dict_, column,
passive=attributes.PASSIVE_RETURN_NEVER_SET):
@@ -2635,7 +2659,7 @@ def configure_mappers():
if not Mapper._new_mappers:
return
- Mapper.dispatch(Mapper).before_configured()
+ Mapper.dispatch._for_class(Mapper).before_configured()
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
@@ -2667,7 +2691,7 @@ def configure_mappers():
_already_compiling = False
finally:
_CONFIGURE_MUTEX.release()
- Mapper.dispatch(Mapper).after_configured()
+ Mapper.dispatch._for_class(Mapper).after_configured()
def reconstructor(fn):
@@ -2779,6 +2803,8 @@ def _event_on_init(state, args, kwargs):
class _ColumnMapping(dict):
"""Error reporting helper for mapper._columntoproperty."""
+ __slots__ = 'mapper',
+
def __init__(self, mapper):
self.mapper = mapper
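
The new ``polymorphic_map`` check in mapper.py warns when two mappers claim the same discriminator value, a misconfiguration that previously went unreported and silently rerouted polymorphic loads to whichever mapper registered last. A hedged illustration with hypothetical declarative classes:

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        type = Column(String(20))
        __mapper_args__ = {'polymorphic_on': type,
                           'polymorphic_identity': 'employee'}

    class Manager(Employee):
        # duplicate value: configuring this mapper now warns
        # "Reassigning polymorphic association for identity 'employee'..."
        __mapper_args__ = {'polymorphic_identity': 'employee'}
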
diff --git a/lib/sqlalchemy/orm/path_registry.py b/lib/sqlalchemy/orm/path_registry.py
index d4dbf29a0..ec80c70cc 100644
--- a/lib/sqlalchemy/orm/path_registry.py
+++ b/lib/sqlalchemy/orm/path_registry.py
@@ -52,6 +52,9 @@ class PathRegistry(object):
"""
+ is_token = False
+ is_root = False
+
def __eq__(self, other):
return other is not None and \
self.path == other.path
@@ -153,6 +156,8 @@ class RootRegistry(PathRegistry):
"""
path = ()
has_entity = False
+ is_aliased_class = False
+ is_root = True
def __getitem__(self, entity):
return entity._path_registry
@@ -168,6 +173,15 @@ class TokenRegistry(PathRegistry):
has_entity = False
+ is_token = True
+
+ def generate_for_superclasses(self):
+ if not self.parent.is_aliased_class and not self.parent.is_root:
+ for ent in self.parent.mapper.iterate_to_root():
+ yield TokenRegistry(self.parent.parent[ent], self.token)
+ else:
+ yield self
+
def __getitem__(self, entity):
raise NotImplementedError()
diff --git a/lib/sqlalchemy/orm/persistence.py b/lib/sqlalchemy/orm/persistence.py
index 6b8d5af14..c3b2d7bcb 100644
--- a/lib/sqlalchemy/orm/persistence.py
+++ b/lib/sqlalchemy/orm/persistence.py
@@ -15,7 +15,7 @@ in unitofwork.py.
"""
import operator
-from itertools import groupby
+from itertools import groupby, chain
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import state_str, _attr_as_key, _entity_descriptor
@@ -23,7 +23,105 @@ from ..sql import expression
from . import loading
-def save_obj(base_mapper, states, uowtransaction, single=False):
+def _bulk_insert(
+ mapper, mappings, session_transaction, isstates, return_defaults):
+ base_mapper = mapper.base_mapper
+
+ cached_connections = _cached_connection_dict(base_mapper)
+
+ if session_transaction.session.connection_callable:
+ raise NotImplementedError(
+ "connection_callable / per-instance sharding "
+ "not supported in bulk_insert()")
+
+ if isstates:
+ if return_defaults:
+ states = [(state, state.dict) for state in mappings]
+ mappings = [dict_ for (state, dict_) in states]
+ else:
+ mappings = [state.dict for state in mappings]
+ else:
+ mappings = list(mappings)
+
+ connection = session_transaction.connection(base_mapper)
+ for table, super_mapper in base_mapper._sorted_tables.items():
+ if not mapper.isa(super_mapper):
+ continue
+
+ records = (
+ (None, state_dict, params, mapper,
+ connection, value_params, has_all_pks, has_all_defaults)
+ for
+ state, state_dict, params, mp,
+ conn, value_params, has_all_pks,
+ has_all_defaults in _collect_insert_commands(table, (
+ (None, mapping, mapper, connection)
+ for mapping in mappings),
+ bulk=True, return_defaults=return_defaults
+ )
+ )
+ _emit_insert_statements(base_mapper, None,
+ cached_connections,
+ super_mapper, table, records,
+ bookkeeping=return_defaults)
+
+ if return_defaults and isstates:
+ identity_cls = mapper._identity_class
+ identity_props = [p.key for p in mapper._identity_key_props]
+ for state, dict_ in states:
+ state.key = (
+ identity_cls,
+ tuple([dict_[key] for key in identity_props])
+ )
+
+
+def _bulk_update(mapper, mappings, session_transaction,
+ isstates, update_changed_only):
+ base_mapper = mapper.base_mapper
+
+ cached_connections = _cached_connection_dict(base_mapper)
+
+ def _changed_dict(mapper, state):
+ return dict(
+ (k, v)
+ for k, v in state.dict.items() if k in state.committed_state or k
+ in mapper._primary_key_propkeys
+ )
+
+ if isstates:
+ if update_changed_only:
+ mappings = [_changed_dict(mapper, state) for state in mappings]
+ else:
+ mappings = [state.dict for state in mappings]
+ else:
+ mappings = list(mappings)
+
+ if session_transaction.session.connection_callable:
+ raise NotImplementedError(
+ "connection_callable / per-instance sharding "
+ "not supported in bulk_update()")
+
+ connection = session_transaction.connection(base_mapper)
+
+ for table, super_mapper in base_mapper._sorted_tables.items():
+ if not mapper.isa(super_mapper):
+ continue
+
+ records = _collect_update_commands(None, table, (
+ (None, mapping, mapper, connection,
+ (mapping[mapper._version_id_prop.key]
+ if mapper._version_id_prop else None))
+ for mapping in mappings
+ ), bulk=True)
+
+ _emit_update_statements(base_mapper, None,
+ cached_connections,
+ super_mapper, table, records,
+ bookkeeping=False)
+
+
+def save_obj(
+ base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
@@ -76,17 +174,16 @@ def save_obj(base_mapper, states, uowtransaction, single=False):
_finalize_insert_update_commands(
base_mapper, uowtransaction,
- (
- (state, state_dict, mapper, connection, False)
- for state, state_dict, mapper, connection in states_to_insert
- )
- )
- _finalize_insert_update_commands(
- base_mapper, uowtransaction,
- (
- (state, state_dict, mapper, connection, True)
- for state, state_dict, mapper, connection,
- update_version_id in states_to_update
+ chain(
+ (
+ (state, state_dict, mapper, connection, False)
+ for state, state_dict, mapper, connection in states_to_insert
+ ),
+ (
+ (state, state_dict, mapper, connection, True)
+ for state, state_dict, mapper, connection,
+ update_version_id in states_to_update
+ )
)
)
@@ -261,7 +358,9 @@ def _organize_states_for_delete(base_mapper, states, uowtransaction):
state, dict_, mapper, connection, update_version_id)
-def _collect_insert_commands(table, states_to_insert):
+def _collect_insert_commands(
+ table, states_to_insert,
+ bulk=False, return_defaults=False):
"""Identify sets of values to use in INSERT statements for a
list of states.
@@ -280,22 +379,26 @@ def _collect_insert_commands(table, states_to_insert):
col = propkey_to_col[propkey]
if value is None:
continue
- elif isinstance(value, sql.ClauseElement):
+ elif not bulk and isinstance(value, sql.ClauseElement):
value_params[col.key] = value
else:
params[col.key] = value
- for colkey in mapper._insert_cols_as_none[table].\
- difference(params).difference(value_params):
- params[colkey] = None
+ if not bulk:
+ for colkey in mapper._insert_cols_as_none[table].\
+ difference(params).difference(value_params):
+ params[colkey] = None
- has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
+ if not bulk or return_defaults:
+ has_all_pks = mapper._pk_keys_by_table[table].issubset(params)
- if mapper.base_mapper.eager_defaults:
- has_all_defaults = mapper._server_default_cols[table].\
- issubset(params)
+ if mapper.base_mapper.eager_defaults:
+ has_all_defaults = mapper._server_default_cols[table].\
+ issubset(params)
+ else:
+ has_all_defaults = True
else:
- has_all_defaults = True
+ has_all_defaults = has_all_pks = True
if mapper.version_id_generator is not False \
and mapper.version_id_col is not None and \
@@ -309,7 +412,9 @@ def _collect_insert_commands(table, states_to_insert):
has_all_defaults)
-def _collect_update_commands(uowtransaction, table, states_to_update):
+def _collect_update_commands(
+ uowtransaction, table, states_to_update,
+ bulk=False):
"""Identify sets of values to use in UPDATE statements for a
list of states.
@@ -329,23 +434,32 @@ def _collect_update_commands(uowtransaction, table, states_to_update):
pks = mapper._pks_by_table[table]
- params = {}
value_params = {}
propkey_to_col = mapper._propkey_to_col[table]
- for propkey in set(propkey_to_col).intersection(state.committed_state):
- value = state_dict[propkey]
- col = propkey_to_col[propkey]
-
- if not state.manager[propkey].impl.is_equal(
- value, state.committed_state[propkey]):
- if isinstance(value, sql.ClauseElement):
- value_params[col] = value
- else:
- params[col.key] = value
+ if bulk:
+ params = dict(
+ (propkey_to_col[propkey].key, state_dict[propkey])
+ for propkey in
+ set(propkey_to_col).intersection(state_dict)
+ )
+ else:
+ params = {}
+ for propkey in set(propkey_to_col).intersection(
+ state.committed_state):
+ value = state_dict[propkey]
+ col = propkey_to_col[propkey]
+
+ if not state.manager[propkey].impl.is_equal(
+ value, state.committed_state[propkey]):
+ if isinstance(value, sql.ClauseElement):
+ value_params[col] = value
+ else:
+ params[col.key] = value
- if update_version_id is not None:
+ if update_version_id is not None and \
+ mapper.version_id_col in mapper._cols_by_table[table]:
col = mapper.version_id_col
params[col._label] = update_version_id
@@ -357,28 +471,37 @@ def _collect_update_commands(uowtransaction, table, states_to_update):
if not (params or value_params):
continue
- pk_params = {}
- for col in pks:
- propkey = mapper._columntoproperty[col].key
- history = state.manager[propkey].impl.get_history(
- state, state_dict, attributes.PASSIVE_OFF)
-
- if history.added:
- if not history.deleted or \
- ("pk_cascaded", state, col) in \
- uowtransaction.attributes:
- pk_params[col._label] = history.added[0]
- params.pop(col.key, None)
+ if bulk:
+ pk_params = dict(
+ (propkey_to_col[propkey]._label, state_dict.get(propkey))
+ for propkey in
+ set(propkey_to_col).
+ intersection(mapper._pk_keys_by_table[table])
+ )
+ else:
+ pk_params = {}
+ for col in pks:
+ propkey = mapper._columntoproperty[col].key
+
+ history = state.manager[propkey].impl.get_history(
+ state, state_dict, attributes.PASSIVE_OFF)
+
+ if history.added:
+ if not history.deleted or \
+ ("pk_cascaded", state, col) in \
+ uowtransaction.attributes:
+ pk_params[col._label] = history.added[0]
+ params.pop(col.key, None)
+ else:
+ # else, use the old value to locate the row
+ pk_params[col._label] = history.deleted[0]
+ params[col.key] = history.added[0]
else:
- # else, use the old value to locate the row
- pk_params[col._label] = history.deleted[0]
- params[col.key] = history.added[0]
- else:
- pk_params[col._label] = history.unchanged[0]
- if pk_params[col._label] is None:
- raise orm_exc.FlushError(
- "Can't update table %s using NULL for primary "
- "key value on column %s" % (table, col))
+ pk_params[col._label] = history.unchanged[0]
+ if pk_params[col._label] is None:
+ raise orm_exc.FlushError(
+ "Can't update table %s using NULL for primary "
+ "key value on column %s" % (table, col))
if params or value_params:
params.update(pk_params)
@@ -446,18 +569,19 @@ def _collect_delete_commands(base_mapper, uowtransaction, table,
"key value on column %s" % (table, col))
if update_version_id is not None and \
- table.c.contains_column(mapper.version_id_col):
+ mapper.version_id_col in mapper._cols_by_table[table]:
params[mapper.version_id_col.key] = update_version_id
yield params, connection
def _emit_update_statements(base_mapper, uowtransaction,
- cached_connections, mapper, table, update):
+ cached_connections, mapper, table, update,
+ bookkeeping=True):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
- table.c.contains_column(mapper.version_id_col)
+ mapper.version_id_col in mapper._cols_by_table[table]
def update_stmt():
clause = sql.and_()
@@ -486,32 +610,42 @@ def _emit_update_statements(base_mapper, uowtransaction,
records in groupby(
update,
lambda rec: (
- rec[4],
- tuple(sorted(rec[2])),
- bool(rec[5]))):
+ rec[4], # connection
+ set(rec[2]), # set of parameter keys
+ bool(rec[5]))): # whether or not we have "value" parameters
rows = 0
records = list(records)
+
+ # TODO: would be super-nice to not have to determine this boolean
+ # inside the loop here, in the 99.9999% of the time there's only
+ # one connection in use
+ assert_singlerow = connection.dialect.supports_sane_rowcount
+ assert_multirow = assert_singlerow and \
+ connection.dialect.supports_sane_multi_rowcount
+ allow_multirow = not needs_version_id or assert_multirow
+
if hasvalue:
for state, state_dict, params, mapper, \
connection, value_params in records:
c = connection.execute(
statement.values(value_params),
params)
- _postfetch(
- mapper,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- c.context.compiled_parameters[0],
- value_params)
+ if bookkeeping:
+ _postfetch(
+ mapper,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ c.context.compiled_parameters[0],
+ value_params)
rows += c.rowcount
+ check_rowcount = True
else:
- if needs_version_id and \
- not connection.dialect.supports_sane_multi_rowcount and \
- connection.dialect.supports_sane_rowcount:
+ if not allow_multirow:
+ check_rowcount = assert_singlerow
for state, state_dict, params, mapper, \
connection, value_params in records:
c = cached_connections[connection].\
@@ -528,6 +662,12 @@ def _emit_update_statements(base_mapper, uowtransaction,
rows += c.rowcount
else:
multiparams = [rec[2] for rec in records]
+
+ check_rowcount = assert_multirow or (
+ assert_singlerow and
+ len(multiparams) == 1
+ )
+
c = cached_connections[connection].\
execute(statement, multiparams)
@@ -544,7 +684,7 @@ def _emit_update_statements(base_mapper, uowtransaction,
c.context.compiled_parameters[0],
value_params)
- if connection.dialect.supports_sane_rowcount:
+ if check_rowcount:
if rows != len(records):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
@@ -558,20 +698,23 @@ def _emit_update_statements(base_mapper, uowtransaction,
def _emit_insert_statements(base_mapper, uowtransaction,
- cached_connections, mapper, table, insert):
+ cached_connections, mapper, table, insert,
+ bookkeeping=True):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
- records in groupby(insert,
- lambda rec: (rec[4],
- tuple(sorted(rec[2].keys())),
- bool(rec[5]),
- rec[6], rec[7])
- ):
- if \
+ records in groupby(
+ insert,
+ lambda rec: (
+ rec[4], # connection
+ set(rec[2]), # parameter keys
+ bool(rec[5]), # whether we have "value" parameters
+ rec[6],
+ rec[7])):
+ if not bookkeeping or \
(
has_all_defaults
or not base_mapper.eager_defaults
@@ -584,19 +727,20 @@ def _emit_insert_statements(base_mapper, uowtransaction,
c = cached_connections[connection].\
execute(statement, multiparams)
- for (state, state_dict, params, mapper_rec,
- conn, value_params, has_all_pks, has_all_defaults), \
- last_inserted_params in \
- zip(records, c.context.compiled_parameters):
- _postfetch(
- mapper_rec,
- uowtransaction,
- table,
- state,
- state_dict,
- c,
- last_inserted_params,
- value_params)
+ if bookkeeping:
+ for (state, state_dict, params, mapper_rec,
+ conn, value_params, has_all_pks, has_all_defaults), \
+ last_inserted_params in \
+ zip(records, c.context.compiled_parameters):
+ _postfetch(
+ mapper_rec,
+ uowtransaction,
+ table,
+ state,
+ state_dict,
+ c,
+ last_inserted_params,
+ value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
@@ -657,7 +801,10 @@ def _emit_post_update_statements(base_mapper, uowtransaction,
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
- update, lambda rec: (rec[1], sorted(rec[0]))
+ update, lambda rec: (
+ rec[1], # connection
+ set(rec[0]) # parameter keys
+ )
):
connection = key[0]
multiparams = [params for params, conn in grouper]
@@ -671,7 +818,7 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
- table.c.contains_column(mapper.version_id_col)
+ mapper.version_id_col in mapper._cols_by_table[table]
def delete_stmt():
clause = sql.and_()
@@ -693,12 +840,9 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
statement = base_mapper._memo(('delete', table), delete_stmt)
for connection, recs in groupby(
delete,
- lambda rec: rec[1]
+ lambda rec: rec[1] # connection
):
- del_objects = [
- params
- for params, connection in recs
- ]
+ del_objects = [params for params, connection in recs]
connection = cached_connections[connection]
@@ -775,9 +919,8 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
toload_now.extend(state._unloaded_non_object)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
- prop = mapper._columntoproperty[mapper.version_id_col]
- if prop.key in state.unloaded:
- toload_now.extend([prop.key])
+ if mapper._version_id_prop.key in state.unloaded:
+ toload_now.extend([mapper._version_id_prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
@@ -794,7 +937,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction, states):
def _postfetch(mapper, uowtransaction, table,
- state, dict_, result, params, value_params):
+ state, dict_, result, params, value_params, bulk=False):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
@@ -803,7 +946,8 @@ def _postfetch(mapper, uowtransaction, table,
postfetch_cols = result.context.compiled.postfetch
returning_cols = result.context.compiled.returning
- if mapper.version_id_col is not None:
+ if mapper.version_id_col is not None and \
+ mapper.version_id_col in mapper._cols_by_table[table]:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
@@ -829,10 +973,13 @@ def _postfetch(mapper, uowtransaction, table,
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
- sync.populate(state, m, state, m,
- equated_pairs,
- uowtransaction,
- mapper.passive_updates)
+ if state is None:
+ sync.bulk_populate_inherit_keys(dict_, m, equated_pairs)
+ else:
+ sync.populate(state, m, state, m,
+ equated_pairs,
+ uowtransaction,
+ mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
@@ -883,6 +1030,7 @@ class BulkUD(object):
def __init__(self, query):
self.query = query.enable_eagerloads(False)
+ self.mapper = self.query._bind_mapper()
@property
def session(self):
@@ -977,6 +1125,7 @@ class BulkFetch(BulkUD):
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
+ mapper=self.mapper,
params=query._params).fetchall()
@@ -987,7 +1136,6 @@ class BulkUpdate(BulkUD):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
- self.mapper = self.query._mapper_zero_or_none()
@classmethod
def factory(cls, query, synchronize_session, values):
@@ -1033,7 +1181,8 @@ class BulkUpdate(BulkUD):
self.context.whereclause, values)
self.result = self.query.session.execute(
- update_stmt, params=self.query._params)
+ update_stmt, params=self.query._params,
+ mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
@@ -1060,8 +1209,10 @@ class BulkDelete(BulkUD):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
- self.result = self.query.session.execute(delete_stmt,
- params=self.query._params)
+ self.result = self.query.session.execute(
+ delete_stmt,
+ params=self.query._params,
+ mapper=self.mapper)
self.rowcount = self.result.rowcount
def _do_post(self):
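
``_bulk_insert()`` and ``_bulk_update()`` above are the engines behind the ``Session.bulk_save_objects()``, ``bulk_insert_mappings()`` and ``bulk_update_mappings()`` methods added later in this diff: caller-supplied dictionaries are fed straight into ``_collect_insert_commands()`` / ``_collect_update_commands()`` with ``bulk=True``, skipping attribute history so that parameter sets can be grouped into executemany batches. A hedged usage sketch against a hypothetical mapped ``User`` class:

    session.bulk_insert_mappings(User, [
        {'id': 1, 'name': 'u1'},
        {'id': 2, 'name': 'u2'},
    ])  # one executemany INSERT; no identity map, no per-object events

    session.bulk_update_mappings(User, [
        {'id': 1, 'name': 'u1 renamed'},
    ])  # UPDATE using the primary key columns in the WHERE clause

    session.commit()
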
diff --git a/lib/sqlalchemy/orm/properties.py b/lib/sqlalchemy/orm/properties.py
index 62ea93fb3..d51b6920d 100644
--- a/lib/sqlalchemy/orm/properties.py
+++ b/lib/sqlalchemy/orm/properties.py
@@ -34,6 +34,13 @@ class ColumnProperty(StrategizedProperty):
strategy_wildcard_key = 'column'
+ __slots__ = (
+ '_orig_columns', 'columns', 'group', 'deferred',
+ 'instrument', 'comparator_factory', 'descriptor', 'extension',
+ 'active_history', 'expire_on_flush', 'info', 'doc',
+ 'strategy_class', '_creation_order', '_is_polymorphic_discriminator',
+ '_mapped_by_synonym')
+
def __init__(self, *columns, **kwargs):
"""Provide a column-level property for use with a Mapper.
@@ -109,6 +116,7 @@ class ColumnProperty(StrategizedProperty):
**Deprecated.** Please see :class:`.AttributeEvents`.
"""
+ super(ColumnProperty, self).__init__()
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [expression._labeled(_orm_full_deannotate(c))
for c in columns]
@@ -206,7 +214,7 @@ class ColumnProperty(StrategizedProperty):
elif dest_state.has_identity and self.key not in dest_dict:
dest_state._expire_attributes(dest_dict, [self.key])
- class Comparator(PropComparator):
+ class Comparator(util.MemoizedSlots, PropComparator):
"""Produce boolean, comparison, and other operators for
:class:`.ColumnProperty` attributes.
@@ -224,8 +232,10 @@ class ColumnProperty(StrategizedProperty):
:attr:`.TypeEngine.comparator_factory`
"""
- @util.memoized_instancemethod
- def __clause_element__(self):
+
+ __slots__ = '__clause_element__', 'info'
+
+ def _memoized_method___clause_element__(self):
if self.adapter:
return self.adapter(self.prop.columns[0])
else:
@@ -233,15 +243,14 @@ class ColumnProperty(StrategizedProperty):
"parententity": self._parentmapper,
"parentmapper": self._parentmapper})
- @util.memoized_property
- def info(self):
+ def _memoized_attr_info(self):
ce = self.__clause_element__()
try:
return ce.info
except AttributeError:
return self.prop.info
- def __getattr__(self, key):
+ def _fallback_getattr(self, key):
"""proxy attribute access down to the mapped column.
this allows user-defined comparison methods to be accessed.
diff --git a/lib/sqlalchemy/orm/query.py b/lib/sqlalchemy/orm/query.py
index 790686288..60a637952 100644
--- a/lib/sqlalchemy/orm/query.py
+++ b/lib/sqlalchemy/orm/query.py
@@ -75,6 +75,7 @@ class Query(object):
_having = None
_distinct = False
_prefixes = None
+ _suffixes = None
_offset = None
_limit = None
_for_update_arg = None
@@ -159,7 +160,6 @@ class Query(object):
for from_obj in obj:
info = inspect(from_obj)
-
if hasattr(info, 'mapper') and \
(info.is_mapper or info.is_aliased_class):
self._select_from_entity = from_obj
@@ -285,8 +285,9 @@ class Query(object):
return self._entities[0]
def _mapper_zero(self):
- return self._select_from_entity or \
- self._entity_zero().entity_zero
+ return self._select_from_entity \
+ if self._select_from_entity is not None \
+ else self._entity_zero().entity_zero
@property
def _mapper_entities(self):
@@ -300,11 +301,14 @@ class Query(object):
self._mapper_zero()
)
- def _mapper_zero_or_none(self):
- if self._primary_entity:
- return self._primary_entity.mapper
- else:
- return None
+ def _bind_mapper(self):
+ ezero = self._mapper_zero()
+ if ezero is not None:
+ insp = inspect(ezero)
+ if hasattr(insp, 'mapper'):
+ return insp.mapper
+
+ return None
def _only_mapper_zero(self, rationale=None):
if len(self._entities) > 1:
@@ -810,7 +814,7 @@ class Query(object):
foreign-key-to-primary-key criterion, will also use an
operation equivalent to :meth:`~.Query.get` in order to retrieve
the target value from the local identity map
- before querying the database. See :doc:`/orm/loading`
+ before querying the database. See :doc:`/orm/loading_relationships`
for further details on relationship loading.
:param ident: A scalar or tuple value representing
@@ -987,6 +991,7 @@ class Query(object):
statement.correlate(None)
q = self._from_selectable(fromclause)
q._enable_single_crit = False
+ q._select_from_entity = self._mapper_zero()
if entities:
q._set_entities(entities)
return q
@@ -1003,7 +1008,7 @@ class Query(object):
'_limit', '_offset',
'_joinpath', '_joinpoint',
'_distinct', '_having',
- '_prefixes',
+ '_prefixes', '_suffixes'
):
self.__dict__.pop(attr, None)
self._set_select_from([fromclause], True)
@@ -1099,7 +1104,7 @@ class Query(object):
Most supplied options regard changing how column- and
relationship-mapped attributes are loaded. See the sections
- :ref:`deferred` and :doc:`/orm/loading` for reference
+ :ref:`deferred` and :doc:`/orm/loading_relationships` for reference
documentation.
"""
@@ -2359,12 +2364,38 @@ class Query(object):
.. versionadded:: 0.7.7
+ .. seealso::
+
+ :meth:`.HasPrefixes.prefix_with`
+
"""
if self._prefixes:
self._prefixes += prefixes
else:
self._prefixes = prefixes
+ @_generative()
+ def suffix_with(self, *suffixes):
+ """Apply the suffix to the query and return the newly resulting
+ ``Query``.
+
+ :param \*suffixes: optional suffixes, typically strings,
+ not using any commas.
+
+ .. versionadded:: 1.0.0
+
+ .. seealso::
+
+ :meth:`.Query.prefix_with`
+
+ :meth:`.HasSuffixes.suffix_with`
+
+ """
+ if self._suffixes:
+ self._suffixes += suffixes
+ else:
+ self._suffixes = suffixes
+
def all(self):
"""Return the results represented by this ``Query`` as a list.
@@ -2499,7 +2530,7 @@ class Query(object):
def _execute_and_instances(self, querycontext):
conn = self._connection_from_session(
- mapper=self._mapper_zero_or_none(),
+ mapper=self._bind_mapper(),
clause=querycontext.statement,
close_with_result=True)
@@ -2601,6 +2632,7 @@ class Query(object):
'offset': self._offset,
'distinct': self._distinct,
'prefixes': self._prefixes,
+ 'suffixes': self._suffixes,
'group_by': self._group_by or None,
'having': self._having
}
@@ -2697,6 +2729,18 @@ class Query(object):
Deletes rows matched by this query from the database.
+ E.g.::
+
+ sess.query(User).filter(User.age == 25).\\
+ delete(synchronize_session=False)
+
+ sess.query(User).filter(User.age == 25).\\
+ delete(synchronize_session='evaluate')
+
+ .. warning:: The :meth:`.Query.delete` method is a "bulk" operation,
+ which bypasses ORM unit-of-work automation in favor of greater
+ performance. **Please read all caveats and warnings below.**
+
:param synchronize_session: chooses the strategy for the removal of
matched objects from the session. Valid values are:
@@ -2715,8 +2759,7 @@ class Query(object):
``'evaluate'`` - Evaluate the query's criteria in Python straight
on the objects in the session. If evaluation of the criteria isn't
- implemented, an error is raised. In that case you probably
- want to use the 'fetch' strategy as a fallback.
+ implemented, an error is raised.
The expression evaluator currently doesn't account for differing
string collations between the database and Python.
@@ -2724,29 +2767,42 @@ class Query(object):
:return: the count of rows matched as returned by the database's
"row count" feature.
- This method has several key caveats:
-
- * The method does **not** offer in-Python cascading of relationships
- - it is assumed that ON DELETE CASCADE/SET NULL/etc. is configured
- for any foreign key references which require it, otherwise the
- database may emit an integrity violation if foreign key references
- are being enforced.
-
- After the DELETE, dependent objects in the :class:`.Session` which
- were impacted by an ON DELETE may not contain the current
- state, or may have been deleted. This issue is resolved once the
- :class:`.Session` is expired,
- which normally occurs upon :meth:`.Session.commit` or can be forced
- by using :meth:`.Session.expire_all`. Accessing an expired object
- whose row has been deleted will invoke a SELECT to locate the
- row; when the row is not found, an
- :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
-
- * The :meth:`.MapperEvents.before_delete` and
- :meth:`.MapperEvents.after_delete`
- events are **not** invoked from this method. Instead, the
- :meth:`.SessionEvents.after_bulk_delete` method is provided to act
- upon a mass DELETE of entity rows.
+ .. warning:: **Additional Caveats for bulk query deletes**
+
+ * The method does **not** offer in-Python cascading of
+ relationships - it is assumed that ON DELETE CASCADE/SET
+ NULL/etc. is configured for any foreign key references
+ which require it, otherwise the database may emit an
+ integrity violation if foreign key references are being
+ enforced.
+
+ After the DELETE, dependent objects in the
+ :class:`.Session` which were impacted by an ON DELETE
+ may not contain the current state, or may have been
+ deleted. This issue is resolved once the
+ :class:`.Session` is expired, which normally occurs upon
+ :meth:`.Session.commit` or can be forced by using
+ :meth:`.Session.expire_all`. Accessing an expired
+ object whose row has been deleted will invoke a SELECT
+ to locate the row; when the row is not found, an
+ :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is
+ raised.
+
+ * The ``'fetch'`` strategy results in an additional
+ SELECT statement emitted and will significantly reduce
+ performance.
+
+ * The ``'evaluate'`` strategy performs a scan of
+ all matching objects within the :class:`.Session`; if the
+ contents of the :class:`.Session` are expired, such as
+              via a preceding :meth:`.Session.commit` call, **this will
+ result in SELECT queries emitted for every matching object**.
+
+ * The :meth:`.MapperEvents.before_delete` and
+ :meth:`.MapperEvents.after_delete`
+ events **are not invoked** from this method. Instead, the
+ :meth:`.SessionEvents.after_bulk_delete` method is provided to
+ act upon a mass DELETE of entity rows.
.. seealso::
@@ -2769,17 +2825,21 @@ class Query(object):
E.g.::
- sess.query(User).filter(User.age == 25).\
- update({User.age: User.age - 10}, synchronize_session='fetch')
-
+ sess.query(User).filter(User.age == 25).\\
+ update({User.age: User.age - 10}, synchronize_session=False)
- sess.query(User).filter(User.age == 25).\
+ sess.query(User).filter(User.age == 25).\\
update({"age": User.age - 10}, synchronize_session='evaluate')
+ .. warning:: The :meth:`.Query.update` method is a "bulk" operation,
+ which bypasses ORM unit-of-work automation in favor of greater
+ performance. **Please read all caveats and warnings below.**
+
+
:param values: a dictionary with attribute names, or alternatively
- mapped attributes or SQL expressions, as keys, and literal
- values or sql expressions as values.
+ mapped attributes or SQL expressions, as keys, and literal
+ values or sql expressions as values.
.. versionchanged:: 1.0.0 - string names in the values dictionary
are now resolved against the mapped entity; previously, these
@@ -2787,7 +2847,7 @@ class Query(object):
translation.
:param synchronize_session: chooses the strategy to update the
- attributes on objects in the session. Valid values are:
+ attributes on objects in the session. Valid values are:
``False`` - don't synchronize the session. This option is the most
efficient and is reliable once the session is expired, which
@@ -2808,43 +2868,56 @@ class Query(object):
string collations between the database and Python.
:return: the count of rows matched as returned by the database's
- "row count" feature.
-
- This method has several key caveats:
-
- * The method does **not** offer in-Python cascading of relationships
- - it is assumed that ON UPDATE CASCADE is configured for any foreign
- key references which require it, otherwise the database may emit an
- integrity violation if foreign key references are being enforced.
-
- After the UPDATE, dependent objects in the :class:`.Session` which
- were impacted by an ON UPDATE CASCADE may not contain the current
- state; this issue is resolved once the :class:`.Session` is expired,
- which normally occurs upon :meth:`.Session.commit` or can be forced
- by using :meth:`.Session.expire_all`.
-
- * The method supports multiple table updates, as
- detailed in :ref:`multi_table_updates`, and this behavior does
- extend to support updates of joined-inheritance and other multiple
- table mappings. However, the **join condition of an inheritance
- mapper is currently not automatically rendered**.
- Care must be taken in any multiple-table update to explicitly
- include the joining condition between those tables, even in mappings
- where this is normally automatic.
- E.g. if a class ``Engineer`` subclasses ``Employee``, an UPDATE of
- the ``Engineer`` local table using criteria against the ``Employee``
- local table might look like::
-
- session.query(Engineer).\\
- filter(Engineer.id == Employee.id).\\
- filter(Employee.name == 'dilbert').\\
- update({"engineer_type": "programmer"})
-
- * The :meth:`.MapperEvents.before_update` and
- :meth:`.MapperEvents.after_update`
- events are **not** invoked from this method. Instead, the
- :meth:`.SessionEvents.after_bulk_update` method is provided to act
- upon a mass UPDATE of entity rows.
+ "row count" feature.
+
+ .. warning:: **Additional Caveats for bulk query updates**
+
+ * The method does **not** offer in-Python cascading of
+ relationships - it is assumed that ON UPDATE CASCADE is
+ configured for any foreign key references which require
+ it, otherwise the database may emit an integrity
+ violation if foreign key references are being enforced.
+
+ After the UPDATE, dependent objects in the
+ :class:`.Session` which were impacted by an ON UPDATE
+ CASCADE may not contain the current state; this issue is
+ resolved once the :class:`.Session` is expired, which
+ normally occurs upon :meth:`.Session.commit` or can be
+ forced by using :meth:`.Session.expire_all`.
+
+ * The ``'fetch'`` strategy results in an additional
+ SELECT statement emitted and will significantly reduce
+ performance.
+
+ * The ``'evaluate'`` strategy performs a scan of
+ all matching objects within the :class:`.Session`; if the
+ contents of the :class:`.Session` are expired, such as
+              via a preceding :meth:`.Session.commit` call, **this will
+ result in SELECT queries emitted for every matching object**.
+
+ * The method supports multiple table updates, as detailed
+ in :ref:`multi_table_updates`, and this behavior does
+ extend to support updates of joined-inheritance and
+ other multiple table mappings. However, the **join
+ condition of an inheritance mapper is not
+ automatically rendered**. Care must be taken in any
+ multiple-table update to explicitly include the joining
+ condition between those tables, even in mappings where
+ this is normally automatic. E.g. if a class ``Engineer``
+ subclasses ``Employee``, an UPDATE of the ``Engineer``
+ local table using criteria against the ``Employee``
+ local table might look like::
+
+ session.query(Engineer).\\
+ filter(Engineer.id == Employee.id).\\
+ filter(Employee.name == 'dilbert').\\
+ update({"engineer_type": "programmer"})
+
+ * The :meth:`.MapperEvents.before_update` and
+ :meth:`.MapperEvents.after_update`
+ events **are not invoked from this method**. Instead, the
+ :meth:`.SessionEvents.after_bulk_update` method is provided to
+ act upon a mass UPDATE of entity rows.
.. seealso::
@@ -3473,26 +3546,26 @@ class _ColumnEntity(_QueryEntity):
)):
self._label_name = column.key
column = column._query_clause_element()
- else:
- self._label_name = getattr(column, 'key', None)
-
- if not isinstance(column, expression.ColumnElement) and \
- hasattr(column, '_select_iterable'):
- for c in column._select_iterable:
- if c is column:
- break
- _ColumnEntity(query, c, namespace=column)
- else:
+ if isinstance(column, Bundle):
+ _BundleEntity(query, column)
return
- elif isinstance(column, Bundle):
- _BundleEntity(query, column)
- return
+ elif not isinstance(column, sql.ColumnElement):
+ if hasattr(column, '_select_iterable'):
+ # break out an object like Table into
+ # individual columns
+ for c in column._select_iterable:
+ if c is column:
+ break
+ _ColumnEntity(query, c, namespace=column)
+ else:
+ return
- if not isinstance(column, sql.ColumnElement):
raise sa_exc.InvalidRequestError(
"SQL expression, column, or mapped entity "
"expected - got '%r'" % (column, )
)
+ else:
+ self._label_name = getattr(column, 'key', None)
self.type = type_ = column.type
if type_.hashable:
@@ -3523,15 +3596,26 @@ class _ColumnEntity(_QueryEntity):
# leaking out their entities into the main select construct
self.actual_froms = actual_froms = set(column._from_objects)
- self.entities = util.OrderedSet(
+ all_elements = [
+ elem for elem in visitors.iterate(column, {})
+ if 'parententity' in elem._annotations
+ ]
+
+ self.entities = util.unique_list(
+ elem._annotations['parententity']
+ for elem in all_elements
+ if 'parententity' in elem._annotations
+ )
+
+ self._from_entities = set(
elem._annotations['parententity']
- for elem in visitors.iterate(column, {})
+ for elem in all_elements
if 'parententity' in elem._annotations
and actual_froms.intersection(elem._from_objects)
)
if self.entities:
- self.entity_zero = list(self.entities)[0]
+ self.entity_zero = self.entities[0]
elif self.namespace is not None:
self.entity_zero = self.namespace
else:
@@ -3557,7 +3641,9 @@ class _ColumnEntity(_QueryEntity):
def setup_entity(self, ext_info, aliased_adapter):
if 'selectable' not in self.__dict__:
self.selectable = ext_info.selectable
- self.froms.add(ext_info.selectable)
+
+ if self.actual_froms.intersection(ext_info.selectable._from_objects):
+ self.froms.add(ext_info.selectable)
def corresponds_to(self, entity):
# TODO: just returning False here,
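
``Query.suffix_with()`` mirrors the existing ``prefix_with()``: fragments accumulate on ``_suffixes`` and are passed through to the rendered SELECT (note the new ``'suffixes'`` entry in the statement keyword dictionary above). A hedged example; the fragment is emitted verbatim and is therefore dialect-specific:

    # e.g. a MySQL-style locking suffix, rendered after the SELECT;
    # "User" is a hypothetical mapped class
    q = session.query(User.id).suffix_with("LOCK IN SHARE MODE")
    print(q)  # SELECT ... FROM users LOCK IN SHARE MODE
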
diff --git a/lib/sqlalchemy/orm/relationships.py b/lib/sqlalchemy/orm/relationships.py
index 86f1b3f82..df2250a4c 100644
--- a/lib/sqlalchemy/orm/relationships.py
+++ b/lib/sqlalchemy/orm/relationships.py
@@ -528,7 +528,7 @@ class RelationshipProperty(StrategizedProperty):
.. seealso::
- :doc:`/orm/loading` - Full documentation on relationship loader
+ :doc:`/orm/loading_relationships` - Full documentation on relationship loader
configuration.
:ref:`dynamic_relationship` - detail on the ``dynamic`` option.
@@ -775,6 +775,7 @@ class RelationshipProperty(StrategizedProperty):
"""
+ super(RelationshipProperty, self).__init__()
self.uselist = uselist
self.argument = argument
diff --git a/lib/sqlalchemy/orm/session.py b/lib/sqlalchemy/orm/session.py
index f23983cbc..0e272dc95 100644
--- a/lib/sqlalchemy/orm/session.py
+++ b/lib/sqlalchemy/orm/session.py
@@ -20,6 +20,8 @@ from .base import (
_class_to_mapper, _state_mapper, object_state,
_none_set, state_str, instance_str
)
+import itertools
+from . import persistence
from .unitofwork import UOWTransaction
from . import state as statelib
import sys
@@ -433,11 +435,13 @@ class SessionTransaction(object):
self.session.dispatch.after_rollback(self.session)
- def close(self):
+ def close(self, invalidate=False):
self.session.transaction = self._parent
if self._parent is None:
for connection, transaction, autoclose in \
set(self._connections.values()):
+ if invalidate:
+ connection.invalidate()
if autoclose:
connection.close()
else:
@@ -482,7 +486,8 @@ class Session(_SessionClassMethods):
'__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested',
'close', 'commit', 'connection', 'delete', 'execute', 'expire',
'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind',
- 'is_modified',
+ 'is_modified', 'bulk_save_objects', 'bulk_insert_mappings',
+ 'bulk_update_mappings',
'merge', 'query', 'refresh', 'rollback',
'scalar')
@@ -591,8 +596,8 @@ class Session(_SessionClassMethods):
.. versionadded:: 0.9.0
:param query_cls: Class which should be used to create new Query
- objects, as returned by the :meth:`~.Session.query` method.
- Defaults to :class:`.Query`.
+ objects, as returned by the :meth:`~.Session.query` method.
+ Defaults to :class:`.Query`.
:param twophase: When ``True``, all transactions will be started as
a "two phase" transaction, i.e. using the "two phase" semantics
@@ -997,10 +1002,46 @@ class Session(_SessionClassMethods):
not use any connection resources until they are first needed.
"""
+ self._close_impl(invalidate=False)
+
+ def invalidate(self):
+ """Close this Session, using connection invalidation.
+
+ This is a variant of :meth:`.Session.close` that will additionally
+ ensure that the :meth:`.Connection.invalidate` method will be called
+ on all :class:`.Connection` objects. This can be called when
+ the database is known to be in a state where the connections are
+ no longer safe to be used.
+
+ E.g.::
+
+ try:
+ sess = Session()
+ sess.add(User())
+ sess.commit()
+ except gevent.Timeout:
+ sess.invalidate()
+ raise
+ except:
+ sess.rollback()
+ raise
+
+ This clears all items and ends any transaction in progress.
+
+ If this session were created with ``autocommit=False``, a new
+ transaction is immediately begun. Note that this new transaction does
+ not use any connection resources until they are first needed.
+
+ .. versionadded:: 0.9.9
+
+ """
+ self._close_impl(invalidate=True)
+
+ def _close_impl(self, invalidate):
self.expunge_all()
if self.transaction is not None:
for transaction in self.transaction._iterate_parents():
- transaction.close()
+ transaction.close(invalidate)
def expunge_all(self):
"""Remove all object instances from this ``Session``.
@@ -2044,6 +2085,226 @@ class Session(_SessionClassMethods):
with util.safe_reraise():
transaction.rollback(_capture_exception=True)
+ def bulk_save_objects(
+ self, objects, return_defaults=False, update_changed_only=True):
+ """Perform a bulk save of the given list of objects.
+
+ The bulk save feature allows mapped objects to be used as the
+ source of simple INSERT and UPDATE operations which can be more easily
+ grouped together into higher performing "executemany"
+ operations; the extraction of data from the objects is also performed
+ using a lower-latency process that ignores whether or not attributes
+ have actually been modified in the case of UPDATEs, and also ignores
+ SQL expressions.
+
+ The objects as given are not added to the session and no additional
+ state is established on them, unless the ``return_defaults`` flag
+ is also set, in which case primary key attributes and server-side
+ default values will be populated.
+
+ .. versionadded:: 1.0.0
+
+ .. warning::
+
+ The bulk save feature allows for a lower-latency INSERT/UPDATE
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ INSERT/UPDATES of records.
+
+ **Please read the list of caveats at** :ref:`bulk_operations`
+ **before using this method, and fully test and confirm the
+ functionality of all code developed using these systems.**
+
+ :param objects: a list of mapped object instances. The mapped
+ objects are persisted as is, and are **not** associated with the
+ :class:`.Session` afterwards.
+
+ For each object, whether the object is sent as an INSERT or an
+ UPDATE is dependent on the same rules used by the :class:`.Session`
+ in traditional operation; if the object has the
+ :attr:`.InstanceState.key`
+ attribute set, then the object is assumed to be "detached" and
+ will result in an UPDATE. Otherwise, an INSERT is used.
+
+          In the case of an UPDATE, statements are grouped based on which
+          attributes have changed, and thus which attributes will be the
+          subject of each SET clause.  If ``update_changed_only`` is False,
+          then all attributes present within each object are applied to the
+          UPDATE statement, which may help in allowing the statements to be
+          grouped together into a larger executemany(), and will also reduce
+          the overhead of checking history on attributes.
+
+ :param return_defaults: when True, rows that are missing values which
+ generate defaults, namely integer primary key defaults and sequences,
+ will be inserted **one at a time**, so that the primary key value
+ is available. In particular this will allow joined-inheritance
+ and other multi-table mappings to insert correctly without the need
+ to provide primary key values ahead of time; however,
+ :paramref:`.Session.bulk_save_objects.return_defaults` **greatly
+ reduces the performance gains** of the method overall.
+
+ :param update_changed_only: when True, UPDATE statements are rendered
+ based on those attributes in each state that have logged changes.
+ When False, all attributes present are rendered into the SET clause
+ with the exception of primary key attributes.
+
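+        E.g., a minimal sketch, assuming a mapped ``User`` class (the
+        mapping itself is hypothetical)::
+
+            sess = Session()
+            sess.bulk_save_objects([
+                User(name='u1'),
+                User(name='u2'),
+                User(name='u3')
+            ])
+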
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_insert_mappings`
+
+ :meth:`.Session.bulk_update_mappings`
+
+ """
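+        # itertools.groupby yields contiguous runs of states that share
+        # the same mapper and the same INSERT-vs-UPDATE disposition; each
+        # run is flushed as a single bulk operation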
+ for (mapper, isupdate), states in itertools.groupby(
+ (attributes.instance_state(obj) for obj in objects),
+ lambda state: (state.mapper, state.key is not None)
+ ):
+ self._bulk_save_mappings(
+ mapper, states, isupdate, True,
+ return_defaults, update_changed_only)
+
+ def bulk_insert_mappings(self, mapper, mappings, return_defaults=False):
+ """Perform a bulk insert of the given list of mapping dictionaries.
+
+ The bulk insert feature allows plain Python dictionaries to be used as
+ the source of simple INSERT operations which can be more easily
+ grouped together into higher performing "executemany"
+        operations.  Because plain dictionaries are used, no "history" or
+        session state management features are in effect, reducing latency
+        when inserting large numbers of simple rows.
+
+ The values within the dictionaries as given are typically passed
+        without modification into Core :class:`.Insert` constructs, after
+ organizing the values within them across the tables to which
+ the given mapper is mapped.
+
+ .. versionadded:: 1.0.0
+
+ .. warning::
+
+ The bulk insert feature allows for a lower-latency INSERT
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ INSERT of records.
+
+ **Please read the list of caveats at** :ref:`bulk_operations`
+ **before using this method, and fully test and confirm the
+ functionality of all code developed using these systems.**
+
+ :param mapper: a mapped class, or the actual :class:`.Mapper` object,
+ representing the single kind of object represented within the mapping
+ list.
+
+ :param mappings: a list of dictionaries, each one containing the state
+ of the mapped row to be inserted, in terms of the attribute names
+ on the mapped class. If the mapping refers to multiple tables,
+ such as a joined-inheritance mapping, each dictionary must contain
+ all keys to be populated into all tables.
+
+        :param return_defaults: when True, rows that are missing values which
+         generate defaults, namely integer primary key defaults and sequences,
+         will be inserted **one at a time**, so that the primary key value
+         is available.  In particular this will allow joined-inheritance
+         and other multi-table mappings to insert correctly without the need
+         to provide primary key values ahead of time; however,
+         :paramref:`.Session.bulk_insert_mappings.return_defaults`
+         **greatly reduces the performance gains** of the method overall.
+         If the rows to be inserted only refer to a single table, then
+         there is no reason to set this flag, as the returned default
+         information is not used.
+
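+        E.g., a minimal sketch, assuming a mapped ``User`` class (the
+        mapping itself is hypothetical)::
+
+            sess = Session()
+            sess.bulk_insert_mappings(User, [
+                dict(name='u1'),
+                dict(name='u2'),
+                dict(name='u3')
+            ])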
+
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_save_objects`
+
+ :meth:`.Session.bulk_update_mappings`
+
+ """
+ self._bulk_save_mappings(
+ mapper, mappings, False, False, return_defaults, False)
+
+ def bulk_update_mappings(self, mapper, mappings):
+ """Perform a bulk update of the given list of mapping dictionaries.
+
+ The bulk update feature allows plain Python dictionaries to be used as
+ the source of simple UPDATE operations which can be more easily
+ grouped together into higher performing "executemany"
+        operations.  Because plain dictionaries are used, no "history" or
+        session state management features are in effect, reducing latency
+        when updating large numbers of simple rows.
+
+ .. versionadded:: 1.0.0
+
+ .. warning::
+
+ The bulk update feature allows for a lower-latency UPDATE
+ of rows at the expense of most other unit-of-work features.
+ Features such as object management, relationship handling,
+ and SQL clause support are **silently omitted** in favor of raw
+ UPDATES of records.
+
+ **Please read the list of caveats at** :ref:`bulk_operations`
+ **before using this method, and fully test and confirm the
+ functionality of all code developed using these systems.**
+
+ :param mapper: a mapped class, or the actual :class:`.Mapper` object,
+ representing the single kind of object represented within the mapping
+ list.
+
+ :param mappings: a list of dictionaries, each one containing the state
+ of the mapped row to be updated, in terms of the attribute names
+ on the mapped class. If the mapping refers to multiple tables,
+ such as a joined-inheritance mapping, each dictionary may contain
+ keys corresponding to all tables. All those keys which are present
+ and are not part of the primary key are applied to the SET clause
+ of the UPDATE statement; the primary key values, which are required,
+ are applied to the WHERE clause.
+
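+        E.g., a minimal sketch, assuming a mapped ``User`` class with an
+        integer primary key ``id`` (the mapping itself is hypothetical)::
+
+            sess = Session()
+            sess.bulk_update_mappings(User, [
+                dict(id=1, name='u1_new'),
+                dict(id=2, name='u2_new')
+            ])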
+
+ .. seealso::
+
+ :ref:`bulk_operations`
+
+ :meth:`.Session.bulk_insert_mappings`
+
+ :meth:`.Session.bulk_save_objects`
+
+ """
+ self._bulk_save_mappings(mapper, mappings, True, False, False, False)
+
+ def _bulk_save_mappings(
+ self, mapper, mappings, isupdate, isstates,
+ return_defaults, update_changed_only):
+ mapper = _class_to_mapper(mapper)
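+        # mark the session as flushing, so that autoflush does not
+        # re-enter while the bulk statements are being emitted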
+ self._flushing = True
+
+ transaction = self.begin(
+ subtransactions=True)
+ try:
+ if isupdate:
+ persistence._bulk_update(
+ mapper, mappings, transaction,
+ isstates, update_changed_only)
+ else:
+ persistence._bulk_insert(
+ mapper, mappings, transaction, isstates, return_defaults)
+ transaction.commit()
+
+ except:
+ with util.safe_reraise():
+ transaction.rollback(_capture_exception=True)
+ finally:
+ self._flushing = False
+
def is_modified(self, instance, include_collections=True,
passive=True):
"""Return ``True`` if the given instance has locally
diff --git a/lib/sqlalchemy/orm/strategies.py b/lib/sqlalchemy/orm/strategies.py
index d95f17f64..8a4c8e731 100644
--- a/lib/sqlalchemy/orm/strategies.py
+++ b/lib/sqlalchemy/orm/strategies.py
@@ -105,6 +105,8 @@ class UninstrumentedColumnLoader(LoaderStrategy):
if the argument is against the with_polymorphic selectable.
"""
+ __slots__ = 'columns',
+
def __init__(self, parent):
super(UninstrumentedColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
@@ -128,6 +130,8 @@ class UninstrumentedColumnLoader(LoaderStrategy):
class ColumnLoader(LoaderStrategy):
"""Provide loading behavior for a :class:`.ColumnProperty`."""
+ __slots__ = 'columns', 'is_composite'
+
def __init__(self, parent):
super(ColumnLoader, self).__init__(parent)
self.columns = self.parent_property.columns
@@ -176,6 +180,8 @@ class ColumnLoader(LoaderStrategy):
class DeferredColumnLoader(LoaderStrategy):
"""Provide loading behavior for a deferred :class:`.ColumnProperty`."""
+ __slots__ = 'columns', 'group'
+
def __init__(self, parent):
super(DeferredColumnLoader, self).__init__(parent)
if hasattr(self.parent_property, 'composite_class'):
@@ -225,7 +231,8 @@ class DeferredColumnLoader(LoaderStrategy):
(
loadopt and
'undefer_pks' in loadopt.local_opts and
- set(self.columns).intersection(self.parent.primary_key)
+ set(self.columns).intersection(
+ self.parent._should_undefer_in_wildcard)
)
or
(
@@ -300,6 +307,8 @@ class LoadDeferredColumns(object):
class AbstractRelationshipLoader(LoaderStrategy):
"""LoaderStratgies which deal with related objects."""
+ __slots__ = 'mapper', 'target', 'uselist'
+
def __init__(self, parent):
super(AbstractRelationshipLoader, self).__init__(parent)
self.mapper = self.parent_property.mapper
@@ -316,6 +325,8 @@ class NoLoader(AbstractRelationshipLoader):
"""
+ __slots__ = ()
+
def init_class_attribute(self, mapper):
self.is_class_level = True
@@ -343,6 +354,10 @@ class LazyLoader(AbstractRelationshipLoader):
"""
+ __slots__ = (
+ '_lazywhere', '_rev_lazywhere', 'use_get', '_bind_to_col',
+ '_equated_columns', '_rev_bind_to_col', '_rev_equated_columns')
+
def __init__(self, parent):
super(LazyLoader, self).__init__(parent)
join_condition = self.parent_property._join_condition
@@ -661,6 +676,8 @@ class LoadLazyAttribute(object):
@properties.RelationshipProperty.strategy_for(lazy="immediate")
class ImmediateLoader(AbstractRelationshipLoader):
+ __slots__ = ()
+
def init_class_attribute(self, mapper):
self.parent_property.\
_get_strategy_by_cls(LazyLoader).\
@@ -684,6 +701,8 @@ class ImmediateLoader(AbstractRelationshipLoader):
@log.class_logger
@properties.RelationshipProperty.strategy_for(lazy="subquery")
class SubqueryLoader(AbstractRelationshipLoader):
+ __slots__ = 'join_depth',
+
def __init__(self, parent):
super(SubqueryLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
@@ -1069,6 +1088,9 @@ class JoinedLoader(AbstractRelationshipLoader):
using joined eager loading.
"""
+
+ __slots__ = 'join_depth',
+
def __init__(self, parent):
super(JoinedLoader, self).__init__(parent)
self.join_depth = self.parent_property.join_depth
diff --git a/lib/sqlalchemy/orm/strategy_options.py b/lib/sqlalchemy/orm/strategy_options.py
index 276da2ae0..90e4e9661 100644
--- a/lib/sqlalchemy/orm/strategy_options.py
+++ b/lib/sqlalchemy/orm/strategy_options.py
@@ -364,6 +364,7 @@ class _UnboundLoad(Load):
return None
token = start_path[0]
+
if isinstance(token, util.string_types):
entity = self._find_entity_basestring(query, token, raiseerr)
elif isinstance(token, PropComparator):
@@ -407,10 +408,18 @@ class _UnboundLoad(Load):
# prioritize "first class" options over those
# that were "links in the chain", e.g. "x" and "y" in
# someload("x.y.z") versus someload("x") / someload("x.y")
- if self._is_chain_link:
- effective_path.setdefault(context, "loader", loader)
+
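+        # when the effective path is a "token" (e.g. wildcard) path,
+        # apply the loader along each of the paths generated for
+        # superclass mappers as well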
+ if effective_path.is_token:
+ for path in effective_path.generate_for_superclasses():
+ if self._is_chain_link:
+ path.setdefault(context, "loader", loader)
+ else:
+ path.set(context, "loader", loader)
else:
- effective_path.set(context, "loader", loader)
+ if self._is_chain_link:
+ effective_path.setdefault(context, "loader", loader)
+ else:
+ effective_path.set(context, "loader", loader)
def _find_entity_prop_comparator(self, query, token, mapper, raiseerr):
if _is_aliased_class(mapper):
diff --git a/lib/sqlalchemy/orm/sync.py b/lib/sqlalchemy/orm/sync.py
index e1ef85c1d..671c7c067 100644
--- a/lib/sqlalchemy/orm/sync.py
+++ b/lib/sqlalchemy/orm/sync.py
@@ -45,6 +45,23 @@ def populate(source, source_mapper, dest, dest_mapper,
uowcommit.attributes[("pk_cascaded", dest, r)] = True
+def bulk_populate_inherit_keys(
+ source_dict, source_mapper, synchronize_pairs):
+ # a simplified version of populate() used by bulk insert mode
+ for l, r in synchronize_pairs:
+ try:
+ prop = source_mapper._columntoproperty[l]
+ value = source_dict[prop.key]
+ except exc.UnmappedColumnError:
+ _raise_col_to_prop(False, source_mapper, l, source_mapper, r)
+
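+        # the value was read via the parent table's column above; store
+        # it back into the same dict under the child table's attribute key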
+ try:
+ prop = source_mapper._columntoproperty[r]
+ source_dict[prop.key] = value
+ except exc.UnmappedColumnError:
+ _raise_col_to_prop(True, source_mapper, l, source_mapper, r)
+
+
def clear(dest, dest_mapper, synchronize_pairs):
for l, r in synchronize_pairs:
if r.primary_key and \
diff --git a/lib/sqlalchemy/orm/unitofwork.py b/lib/sqlalchemy/orm/unitofwork.py
index 71e61827b..05265b13f 100644
--- a/lib/sqlalchemy/orm/unitofwork.py
+++ b/lib/sqlalchemy/orm/unitofwork.py
@@ -16,6 +16,7 @@ organizes them in order of dependency, and executes.
from .. import util, event
from ..util import topological
from . import attributes, persistence, util as orm_util
+import itertools
def track_cascade_events(descriptor, prop):
@@ -379,14 +380,19 @@ class UOWTransaction(object):
execute() method has succeeded and the transaction has been committed.
"""
+ if not self.states:
+ return
+
states = set(self.states)
isdel = set(
s for (s, (isdelete, listonly)) in self.states.items()
if isdelete
)
other = states.difference(isdel)
- self.session._remove_newly_deleted(isdel)
- self.session._register_newly_persistent(other)
+ if isdel:
+ self.session._remove_newly_deleted(isdel)
+ if other:
+ self.session._register_newly_persistent(other)
class IterateMappersMixin(object):
diff --git a/lib/sqlalchemy/orm/util.py b/lib/sqlalchemy/orm/util.py
index 4be8d19ff..ee629b034 100644
--- a/lib/sqlalchemy/orm/util.py
+++ b/lib/sqlalchemy/orm/util.py
@@ -30,6 +30,10 @@ class CascadeOptions(frozenset):
'all', 'none', 'delete-orphan'])
_allowed_cascades = all_cascades
+ __slots__ = (
+ 'save_update', 'delete', 'refresh_expire', 'merge',
+ 'expunge', 'delete_orphan')
+
def __new__(cls, value_list):
if isinstance(value_list, str) or value_list is None:
return cls.from_string(value_list)
@@ -38,10 +42,7 @@ class CascadeOptions(frozenset):
raise sa_exc.ArgumentError(
"Invalid cascade option(s): %s" %
", ".join([repr(x) for x in
- sorted(
- values.difference(cls._allowed_cascades)
- )])
- )
+ sorted(values.difference(cls._allowed_cascades))]))
if "all" in values:
values.update(cls._add_w_all_cascades)
@@ -76,6 +77,7 @@ class CascadeOptions(frozenset):
]
return cls(values)
+
def _validator_events(
desc, key, validator, include_removes, include_backrefs):
"""Runs a validation method on an attribute value to be set or