diff options
| author | Mike Bayer <mike_mp@zzzcomputing.com> | 2018-12-22 19:16:50 -0500 |
|---|---|---|
| committer | Mike Bayer <mike_mp@zzzcomputing.com> | 2018-12-28 08:40:44 -0500 |
| commit | 847d1359421ebb3b4ba653ca1a9d238e62e8e8a8 (patch) | |
| tree | 88f7ab56680d37ae1cdf32d2d8cb9d6b0126da67 /lib/sqlalchemy/util | |
| parent | 07cea66ccb74c68fa505b5fbba91984e0375993d (diff) | |
| download | sqlalchemy-847d1359421ebb3b4ba653ca1a9d238e62e8e8a8.tar.gz | |
Check collection less than two items remaining before firing scalar backref remove
Fixed long-standing issue where duplicate collection members would cause a
backref to delete the association between the member and its parent object
when one of the duplicates was removed, as occurs as a side effect of
swapping two objects in one statement.
Fixes: #1103
Change-Id: Ic12877f7bd5a4eb688091725a78410748e7fdf16
Diffstat (limited to 'lib/sqlalchemy/util')
| -rw-r--r-- | lib/sqlalchemy/util/__init__.py | 3 | ||||
| -rw-r--r-- | lib/sqlalchemy/util/_collections.py | 30 |
2 files changed, 32 insertions, 1 deletion
def has_dupes(sequence, target):
    """Return True if *target* occurs (by identity) more than once in
    *sequence*; False for zero or one occurrence.
    """
    # Identity-based linear scan that short-circuits as soon as a second
    # occurrence is seen.  Compared to an .index()/slice-based approach,
    # this has less per-call overhead and is usually just as fast; only
    # for very large collections (~15000+ items, far bigger than a typical
    # in-memory relationship-bound collection) does it fall behind, and
    # then only by microseconds.
    seen_once = False
    for element in sequence:
        if element is target:
            if seen_once:
                return True
            seen_once = True
    return False
