summaryrefslogtreecommitdiff
path: root/test/sql/test_returning.py
diff options
context:
space:
mode:
authorMike Bayer <mike_mp@zzzcomputing.com>2022-08-07 12:14:19 -0400
committerMike Bayer <mike_mp@zzzcomputing.com>2022-09-24 11:18:01 -0400
commita8029f5a7e3e376ec57f1614ab0294b717d53c05 (patch)
tree84b1a3b3a6d3f4c9d6e8054f9cdfa190344436cb /test/sql/test_returning.py
parent2bcc97da424eef7db9a5d02f81d02344925415ee (diff)
downloadsqlalchemy-a8029f5a7e3e376ec57f1614ab0294b717d53c05.tar.gz
ORM bulk insert via execute
* ORM Insert now includes "bulk" mode that will run essentially the same process as session.bulk_insert_mappings; interprets the given list of values as ORM attributes for key names * ORM UPDATE has a similar feature, without RETURNING support, for session.bulk_update_mappings * Added support for upserts to do RETURNING ORM objects as well * ORM UPDATE/DELETE with list of parameters + WHERE criteria is not implemented; use connection * ORM UPDATE/DELETE defaults to "auto" synchronize_session; use fetch if RETURNING is present, evaluate if not, as "fetch" is much more efficient (no expired object SELECT problem) and less error prone if RETURNING is available UPDATE: however this is inefficient! please continue to use evaluate for simple cases, auto can move to fetch if criteria not evaluable * "Evaluate" criteria will now not preemptively unexpire and SELECT attributes that were individually expired. Instead, if evaluation of the criteria indicates that the necessary attrs were expired, we expire the object completely (delete) or expire the SET attrs unconditionally (update). This keeps the object in the same unloaded state where it will refresh those attrs on the next pass, for this generally unusual case. (originally #5664) * Core change! update/delete rowcount comes from len(rows) if RETURNING was used. SQLite at least otherwise did not support this. adjusted test_rowcount accordingly * ORM DELETE with a list of parameters at all is also not implemented as this would imply "bulk", and there is no bulk_delete_mappings (could be, but we don't have that) * ORM insert().values() with single or multi-values translates key names based on ORM attribute names * ORM returning() implemented for insert, update, delete; explicit returning clauses now interpret rows in an ORM context, with support for qualifying loader options as well * session.bulk_insert_mappings() assigns polymorphic identity if not set. 
* explicit RETURNING + synchronize_session='fetch' is now supported with UPDATE and DELETE. * expanded return_defaults() to work with DELETE also. * added support for composite attributes to be present in the dictionaries used by bulk_insert_mappings and bulk_update_mappings, which is also the new ORM bulk insert/update feature, that will expand the composite values into their individual mapped attributes the way they'd be on a mapped instance. * bulk UPDATE supports "synchronize_session=evaluate", is the default. this does not apply to session.bulk_update_mappings, just the new version * both bulk UPDATE and bulk INSERT, the latter with or without RETURNING, support *heterogeneous* parameter sets. session.bulk_insert/update_mappings did this, so this feature is maintained. now cursor result can be both horizontally and vertically spliced :) This is now a long story with a lot of options, which in itself is a problem to be able to document all of this in some way that makes sense. raising exceptions for use cases we haven't supported is pretty important here too, the tradition of letting unsupported things just not work is likely not a good idea at this point, though there are still many cases that aren't easily avoidable Fixes: #8360 Fixes: #7864 Fixes: #7865 Change-Id: Idf28379f8705e403a3c6a937f6a798a042ef2540
Diffstat (limited to 'test/sql/test_returning.py')
-rw-r--r--test/sql/test_returning.py195
1 files changed, 195 insertions, 0 deletions
diff --git a/test/sql/test_returning.py b/test/sql/test_returning.py
index f8cc32517..c26f825c2 100644
--- a/test/sql/test_returning.py
+++ b/test/sql/test_returning.py
@@ -23,6 +23,7 @@ from sqlalchemy.testing import config
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises_message
from sqlalchemy.testing import fixtures
+from sqlalchemy.testing import is_
from sqlalchemy.testing import mock
from sqlalchemy.testing import provision
from sqlalchemy.testing.schema import Column
@@ -76,6 +77,7 @@ class ReturnCombinationTests(fixtures.TestBase, AssertsCompiledSQL):
stmt = stmt.returning(t.c.x)
stmt = stmt.return_defaults()
+
assert_raises_message(
sa_exc.CompileError,
r"Can't compile statement that includes returning\(\) "
@@ -330,6 +332,7 @@ class InsertReturningTest(fixtures.TablesTest, AssertsExecutionResults):
table = self.tables.returning_tbl
exprs = testing.resolve_lambda(testcase, table=table)
+
result = connection.execute(
table.insert().returning(*exprs),
{"persons": 5, "full": False, "strval": "str1"},
@@ -679,6 +682,30 @@ class InsertReturnDefaultsTest(fixtures.TablesTest):
Column("upddef", Integer, onupdate=IncDefault()),
)
+ Table(
+ "table_no_addtl_defaults",
+ metadata,
+ Column(
+ "id", Integer, primary_key=True, test_needs_autoincrement=True
+ ),
+ Column("data", String(50)),
+ )
+
+ class MyType(TypeDecorator):
+ impl = String(50)
+
+ def process_result_value(self, value, dialect):
+ return f"PROCESSED! {value}"
+
+ Table(
+ "table_datatype_has_result_proc",
+ metadata,
+ Column(
+ "id", Integer, primary_key=True, test_needs_autoincrement=True
+ ),
+ Column("data", MyType()),
+ )
+
def test_chained_insert_pk(self, connection):
t1 = self.tables.t1
result = connection.execute(
@@ -758,6 +785,38 @@ class InsertReturnDefaultsTest(fixtures.TablesTest):
)
eq_(result.inserted_primary_key, (1,))
+ def test_insert_w_defaults_supplemental_cols(self, connection):
+ t1 = self.tables.t1
+ result = connection.execute(
+ t1.insert().return_defaults(supplemental_cols=[t1.c.id]),
+ {"data": "d1"},
+ )
+ eq_(result.all(), [(1, 0, None)])
+
+ def test_insert_w_no_defaults_supplemental_cols(self, connection):
+ t1 = self.tables.table_no_addtl_defaults
+ result = connection.execute(
+ t1.insert().return_defaults(supplemental_cols=[t1.c.id]),
+ {"data": "d1"},
+ )
+ eq_(result.all(), [(1,)])
+
+ def test_insert_w_defaults_supplemental_processor_cols(self, connection):
+ """test that the cursor._rewind() used by supplemental RETURNING
+ clears out result-row processors as we will have already processed
+ the rows.
+
+ """
+
+ t1 = self.tables.table_datatype_has_result_proc
+ result = connection.execute(
+ t1.insert().return_defaults(
+ supplemental_cols=[t1.c.id, t1.c.data]
+ ),
+ {"data": "d1"},
+ )
+ eq_(result.all(), [(1, "PROCESSED! d1")])
+
class UpdatedReturnDefaultsTest(fixtures.TablesTest):
__requires__ = ("update_returning",)
@@ -792,6 +851,7 @@ class UpdatedReturnDefaultsTest(fixtures.TablesTest):
t1 = self.tables.t1
connection.execute(t1.insert().values(upddef=1))
+
result = connection.execute(
t1.update().values(upddef=2).return_defaults(t1.c.data)
)
@@ -800,6 +860,72 @@ class UpdatedReturnDefaultsTest(fixtures.TablesTest):
[None],
)
+ def test_update_values_col_is_excluded(self, connection):
+ """columns that are in values() are not returned"""
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+
+ result = connection.execute(
+ t1.update().values(data="x", upddef=2).return_defaults(t1.c.data)
+ )
+ is_(result.returned_defaults, None)
+
+ result = connection.execute(
+ t1.update()
+ .values(data="x", upddef=2)
+ .return_defaults(t1.c.data, t1.c.id)
+ )
+ eq_(result.returned_defaults, (1,))
+
+ def test_update_supplemental_cols(self, connection):
+ """with supplemental_cols, we can get back arbitrary cols."""
+
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(
+ t1.update()
+ .values(data="x", insdef=3)
+ .return_defaults(supplemental_cols=[t1.c.data, t1.c.insdef])
+ )
+
+ row = result.returned_defaults
+
+ # row has all the cols in it
+ eq_(row, ("x", 3, 1))
+ eq_(row._mapping[t1.c.upddef], 1)
+ eq_(row._mapping[t1.c.insdef], 3)
+
+ # result is rewound
+ # but has both return_defaults + supplemental_cols
+ eq_(result.all(), [("x", 3, 1)])
+
+ def test_update_expl_return_defaults_plus_supplemental_cols(
+ self, connection
+ ):
+ """with supplemental_cols, we can get back arbitrary cols."""
+
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(
+ t1.update()
+ .values(data="x", insdef=3)
+ .return_defaults(
+ t1.c.id, supplemental_cols=[t1.c.data, t1.c.insdef]
+ )
+ )
+
+ row = result.returned_defaults
+
+ # row has all the cols in it
+ eq_(row, (1, "x", 3))
+ eq_(row._mapping[t1.c.id], 1)
+ eq_(row._mapping[t1.c.insdef], 3)
+ assert t1.c.upddef not in row._mapping
+
+ # result is rewound
+ # but has both return_defaults + supplemental_cols
+ eq_(result.all(), [(1, "x", 3)])
+
def test_update_sql_expr(self, connection):
from sqlalchemy import literal
@@ -833,6 +959,75 @@ class UpdatedReturnDefaultsTest(fixtures.TablesTest):
eq_(dict(result.returned_defaults._mapping), {"upddef": 1})
+class DeleteReturnDefaultsTest(fixtures.TablesTest):
+ __requires__ = ("delete_returning",)
+ run_define_tables = "each"
+ __backend__ = True
+
+ define_tables = InsertReturnDefaultsTest.define_tables
+
+ def test_delete(self, connection):
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(t1.delete().return_defaults(t1.c.upddef))
+ eq_(
+ [result.returned_defaults._mapping[k] for k in (t1.c.upddef,)], [1]
+ )
+
+ def test_delete_empty_return_defaults(self, connection):
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=5))
+ result = connection.execute(t1.delete().return_defaults())
+
+ # there's no "delete" default, so we get None. we have to
+ # ask for them in all cases
+ eq_(result.returned_defaults, None)
+
+ def test_delete_non_default(self, connection):
+ """test that a column not marked at all as a
+ default works with this feature."""
+
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(t1.delete().return_defaults(t1.c.data))
+ eq_(
+ [result.returned_defaults._mapping[k] for k in (t1.c.data,)],
+ [None],
+ )
+
+ def test_delete_non_default_plus_default(self, connection):
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(
+ t1.delete().return_defaults(t1.c.data, t1.c.upddef)
+ )
+ eq_(
+ dict(result.returned_defaults._mapping),
+ {"data": None, "upddef": 1},
+ )
+
+ def test_delete_supplemental_cols(self, connection):
+ """with supplemental_cols, we can get back arbitrary cols."""
+
+ t1 = self.tables.t1
+ connection.execute(t1.insert().values(upddef=1))
+ result = connection.execute(
+ t1.delete().return_defaults(
+ t1.c.id, supplemental_cols=[t1.c.data, t1.c.insdef]
+ )
+ )
+
+ row = result.returned_defaults
+
+ # row has all the cols in it
+ eq_(row, (1, None, 0))
+ eq_(row._mapping[t1.c.insdef], 0)
+
+ # result is rewound
+ # but has both return_defaults + supplemental_cols
+ eq_(result.all(), [(1, None, 0)])
+
+
class InsertManyReturnDefaultsTest(fixtures.TablesTest):
__requires__ = ("insert_executemany_returning",)
run_define_tables = "each"