diff options
| author | Mike Bayer <mike_mp@zzzcomputing.com> | 2020-06-23 16:21:04 -0400 |
|---|---|---|
| committer | Mike Bayer <mike_mp@zzzcomputing.com> | 2020-06-25 18:58:34 -0400 |
| commit | f1a3038f480ee1965928cdcd1dc0c47347f270bc (patch) | |
| tree | 8b03334c438631e72f132533db676b3bf25a3f00 /test/dialect/postgresql | |
| parent | 660a340bff8fcefd2826032e75210c0924a2335e (diff) | |
| download | sqlalchemy-f1a3038f480ee1965928cdcd1dc0c47347f270bc.tar.gz | |
Default psycopg2 executemany mode to "values_only"
The psycopg2 dialect now defaults to using the very performant
``execute_values()`` psycopg2 extension for compiled INSERT statements,
and also implements RETURNING support when this extension is used. This
allows INSERT statements that even include an autoincremented SERIAL
or IDENTITY value to run very fast while still being able to return the
newly generated primary key values. The ORM will then integrate this
new feature in a separate change.
Implements RETURNING for insert with executemany
Adds support to return_defaults() mode and inserted_primary_key
to support multiple INSERTed rows, via return_defaults_rows
and inserted_primary_key_rows accessors.
within default execution context, new cached compiler
getters are used to fetch primary keys from rows
inserted_primary_key now returns a plain tuple. this
is not yet a row-like object however this can be
added.
Adds distinct "values_only" and "batch" modes, as
"values" has a lot of benefits but "batch" breaks
cursor.rowcount
psycopg2 minimum version 2.7 so we can remove the
large number of checks for very old versions of
psycopg2
simplify tests to no longer distinguish between
native and non-native json
Fixes: #5401
Change-Id: Ic08fd3423d4c5d16ca50994460c0c234868bd61c
Diffstat (limited to 'test/dialect/postgresql')
| -rw-r--r-- | test/dialect/postgresql/test_compiler.py | 3 | ||||
| -rw-r--r-- | test/dialect/postgresql/test_dialect.py | 219 | ||||
| -rw-r--r-- | test/dialect/postgresql/test_on_conflict.py | 30 | ||||
| -rw-r--r-- | test/dialect/postgresql/test_query.py | 12 | ||||
| -rw-r--r-- | test/dialect/postgresql/test_reflection.py | 4 | ||||
| -rw-r--r-- | test/dialect/postgresql/test_types.py | 192 |
6 files changed, 224 insertions, 236 deletions
diff --git a/test/dialect/postgresql/test_compiler.py b/test/dialect/postgresql/test_compiler.py index 2223b0a76..b4ac69d2d 100644 --- a/test/dialect/postgresql/test_compiler.py +++ b/test/dialect/postgresql/test_compiler.py @@ -39,6 +39,7 @@ from sqlalchemy.sql import operators from sqlalchemy.sql import table from sqlalchemy.sql import util as sql_util from sqlalchemy.testing import engines +from sqlalchemy.testing import eq_ from sqlalchemy.testing import fixtures from sqlalchemy.testing.assertions import assert_raises from sqlalchemy.testing.assertions import assert_raises_message @@ -89,7 +90,7 @@ class SequenceTest(fixtures.TestBase, AssertsCompiledSQL): t.create(engine) with engine.begin() as conn: r = conn.execute(t.insert()) - assert r.inserted_primary_key == [1] + eq_(r.inserted_primary_key, (1,)) class CompileTest(fixtures.TestBase, AssertsCompiledSQL): diff --git a/test/dialect/postgresql/test_dialect.py b/test/dialect/postgresql/test_dialect.py index d381d83fa..1fbe870ba 100644 --- a/test/dialect/postgresql/test_dialect.py +++ b/test/dialect/postgresql/test_dialect.py @@ -1,5 +1,4 @@ # coding: utf-8 -import contextlib import datetime import logging import logging.handlers @@ -32,10 +31,10 @@ from sqlalchemy.dialects.postgresql import psycopg2 as psycopg2_dialect from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_BATCH from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_DEFAULT from sqlalchemy.dialects.postgresql.psycopg2 import EXECUTEMANY_VALUES +from sqlalchemy.engine import cursor as _cursor from sqlalchemy.engine import engine_from_config from sqlalchemy.engine import url from sqlalchemy.testing import engines -from sqlalchemy.testing import expect_deprecated from sqlalchemy.testing import fixtures from sqlalchemy.testing import is_ from sqlalchemy.testing import mock @@ -48,6 +47,11 @@ from sqlalchemy.testing.assertions import eq_regex from sqlalchemy.testing.assertions import ne_ from ...engine import test_execute +if 
True: + from sqlalchemy.dialects.postgresql.psycopg2 import ( + EXECUTEMANY_VALUES_PLUS_BATCH, + ) + class DialectTest(fixtures.TestBase): """python-side dialect tests. """ @@ -170,14 +174,9 @@ class ExecuteManyMode(object): Column("z", Integer, server_default="5"), ) - @contextlib.contextmanager - def expect_deprecated_opts(self): - yield - def setup(self): super(ExecuteManyMode, self).setup() - with self.expect_deprecated_opts(): - self.engine = engines.testing_engine(options=self.options) + self.engine = engines.testing_engine(options=self.options) def teardown(self): self.engine.dispose() @@ -186,14 +185,22 @@ class ExecuteManyMode(object): def test_insert(self): from psycopg2 import extras - if self.engine.dialect.executemany_mode is EXECUTEMANY_BATCH: + values_page_size = self.engine.dialect.executemany_values_page_size + batch_page_size = self.engine.dialect.executemany_batch_page_size + if self.engine.dialect.executemany_mode & EXECUTEMANY_VALUES: + meth = extras.execute_values + stmt = "INSERT INTO data (x, y) VALUES %s" + expected_kwargs = { + "template": "(%(x)s, %(y)s)", + "page_size": values_page_size, + "fetch": False, + } + elif self.engine.dialect.executemany_mode & EXECUTEMANY_BATCH: meth = extras.execute_batch stmt = "INSERT INTO data (x, y) VALUES (%(x)s, %(y)s)" - expected_kwargs = {} + expected_kwargs = {"page_size": batch_page_size} else: - meth = extras.execute_values - stmt = "INSERT INTO data (x, y) VALUES %s" - expected_kwargs = {"template": "(%(x)s, %(y)s)"} + assert False with mock.patch.object( extras, meth.__name__, side_effect=meth @@ -235,15 +242,24 @@ class ExecuteManyMode(object): def test_insert_no_page_size(self): from psycopg2 import extras + values_page_size = self.engine.dialect.executemany_values_page_size + batch_page_size = self.engine.dialect.executemany_batch_page_size + eng = self.engine - if eng.dialect.executemany_mode is EXECUTEMANY_BATCH: + if eng.dialect.executemany_mode & EXECUTEMANY_VALUES: + meth = 
extras.execute_values + stmt = "INSERT INTO data (x, y) VALUES %s" + expected_kwargs = { + "template": "(%(x)s, %(y)s)", + "page_size": values_page_size, + "fetch": False, + } + elif eng.dialect.executemany_mode & EXECUTEMANY_BATCH: meth = extras.execute_batch stmt = "INSERT INTO data (x, y) VALUES (%(x)s, %(y)s)" - expected_kwargs = {} + expected_kwargs = {"page_size": batch_page_size} else: - meth = extras.execute_values - stmt = "INSERT INTO data (x, y) VALUES %s" - expected_kwargs = {"template": "(%(x)s, %(y)s)"} + assert False with mock.patch.object( extras, meth.__name__, side_effect=meth @@ -281,17 +297,22 @@ class ExecuteManyMode(object): opts["executemany_batch_page_size"] = 500 opts["executemany_values_page_size"] = 1000 - with self.expect_deprecated_opts(): - eng = engines.testing_engine(options=opts) + eng = engines.testing_engine(options=opts) - if eng.dialect.executemany_mode is EXECUTEMANY_BATCH: + if eng.dialect.executemany_mode & EXECUTEMANY_VALUES: + meth = extras.execute_values + stmt = "INSERT INTO data (x, y) VALUES %s" + expected_kwargs = { + "fetch": False, + "page_size": 1000, + "template": "(%(x)s, %(y)s)", + } + elif eng.dialect.executemany_mode & EXECUTEMANY_BATCH: meth = extras.execute_batch stmt = "INSERT INTO data (x, y) VALUES (%(x)s, %(y)s)" expected_kwargs = {"page_size": 500} else: - meth = extras.execute_values - stmt = "INSERT INTO data (x, y) VALUES %s" - expected_kwargs = {"page_size": 1000, "template": "(%(x)s, %(y)s)"} + assert False with mock.patch.object( extras, meth.__name__, side_effect=meth @@ -325,10 +346,11 @@ class ExecuteManyMode(object): def test_update_fallback(self): from psycopg2 import extras + batch_page_size = self.engine.dialect.executemany_batch_page_size eng = self.engine meth = extras.execute_batch stmt = "UPDATE data SET y=%(yval)s WHERE data.x = %(xval)s" - expected_kwargs = {} + expected_kwargs = {"page_size": batch_page_size} with mock.patch.object( extras, meth.__name__, side_effect=meth @@ -344,24 
+366,30 @@ class ExecuteManyMode(object): ], ) - eq_( - mock_exec.mock_calls, - [ - mock.call( - mock.ANY, - stmt, - ( - {"xval": "x1", "yval": "y5"}, - {"xval": "x3", "yval": "y6"}, - ), - **expected_kwargs - ) - ], - ) + if eng.dialect.executemany_mode & EXECUTEMANY_BATCH: + eq_( + mock_exec.mock_calls, + [ + mock.call( + mock.ANY, + stmt, + ( + {"xval": "x1", "yval": "y5"}, + {"xval": "x3", "yval": "y6"}, + ), + **expected_kwargs + ) + ], + ) + else: + eq_(mock_exec.mock_calls, []) def test_not_sane_rowcount(self): self.engine.connect().close() - assert not self.engine.dialect.supports_sane_multi_rowcount + if self.engine.dialect.executemany_mode & EXECUTEMANY_BATCH: + assert not self.engine.dialect.supports_sane_multi_rowcount + else: + assert self.engine.dialect.supports_sane_multi_rowcount def test_update(self): with self.engine.connect() as conn: @@ -388,22 +416,64 @@ class ExecuteManyMode(object): ) -class UseBatchModeTest(ExecuteManyMode, fixtures.TablesTest): - options = {"use_batch_mode": True} - - def expect_deprecated_opts(self): - return expect_deprecated( - "The psycopg2 use_batch_mode flag is superseded by " - "executemany_mode='batch'" - ) - - class ExecutemanyBatchModeTest(ExecuteManyMode, fixtures.TablesTest): options = {"executemany_mode": "batch"} class ExecutemanyValuesInsertsTest(ExecuteManyMode, fixtures.TablesTest): - options = {"executemany_mode": "values"} + options = {"executemany_mode": "values_only"} + + def test_insert_returning_values(self): + """the psycopg2 dialect needs to assemble a fully buffered result + with the return value of execute_values(). 
+ + """ + t = self.tables.data + + with self.engine.connect() as conn: + page_size = conn.dialect.executemany_values_page_size or 100 + data = [ + {"x": "x%d" % i, "y": "y%d" % i} + for i in range(1, page_size * 5 + 27) + ] + result = conn.execute(t.insert().returning(t.c.x, t.c.y), data) + + eq_([tup[0] for tup in result.cursor.description], ["x", "y"]) + eq_(result.keys(), ["x", "y"]) + assert t.c.x in result.keys() + assert t.c.id not in result.keys() + assert not result._soft_closed + assert isinstance( + result.cursor_strategy, + _cursor.FullyBufferedCursorFetchStrategy, + ) + assert not result.cursor.closed + assert not result.closed + eq_(result.mappings().all(), data) + + assert result._soft_closed + # assert result.closed + assert result.cursor is None + + def test_insert_returning_defaults(self): + t = self.tables.data + + with self.engine.connect() as conn: + + result = conn.execute(t.insert(), {"x": "x0", "y": "y0"}) + first_pk = result.inserted_primary_key[0] + + page_size = conn.dialect.executemany_values_page_size or 100 + total_rows = page_size * 5 + 27 + data = [ + {"x": "x%d" % i, "y": "y%d" % i} for i in range(1, total_rows) + ] + result = conn.execute(t.insert().returning(t.c.id, t.c.z), data) + + eq_( + result.all(), + [(pk, 5) for pk in range(1 + first_pk, total_rows + first_pk)], + ) def test_insert_w_newlines(self): from psycopg2 import extras @@ -451,6 +521,8 @@ class ExecutemanyValuesInsertsTest(ExecuteManyMode, fixtures.TablesTest): {"id": 3, "y": "y3", "z": 3}, ), template="(%(id)s, (SELECT 5 \nFROM data), %(y)s, %(z)s)", + fetch=False, + page_size=conn.dialect.executemany_values_page_size, ) ], ) @@ -506,21 +578,31 @@ class ExecutemanyValuesInsertsTest(ExecuteManyMode, fixtures.TablesTest): ) eq_(mock_values.mock_calls, []) - eq_( - mock_batch.mock_calls, - [ - mock.call( - mock.ANY, - "INSERT INTO data (id, y, z) VALUES " - "(%(id)s, %(y)s, %(z)s)", - ( - {"id": 1, "y": "y1", "z": 1}, - {"id": 2, "y": "y2", "z": 2}, - {"id": 3, "y": 
"y3", "z": 3}, - ), - ) - ], - ) + + if self.engine.dialect.executemany_mode & EXECUTEMANY_BATCH: + eq_( + mock_batch.mock_calls, + [ + mock.call( + mock.ANY, + "INSERT INTO data (id, y, z) VALUES " + "(%(id)s, %(y)s, %(z)s)", + ( + {"id": 1, "y": "y1", "z": 1}, + {"id": 2, "y": "y2", "z": 2}, + {"id": 3, "y": "y3", "z": 3}, + ), + ) + ], + ) + else: + eq_(mock_batch.mock_calls, []) + + +class ExecutemanyValuesPlusBatchInsertsTest( + ExecuteManyMode, fixtures.TablesTest +): + options = {"executemany_mode": "values_plus_batch"} class ExecutemanyFlagOptionsTest(fixtures.TablesTest): @@ -531,7 +613,8 @@ class ExecutemanyFlagOptionsTest(fixtures.TablesTest): for opt, expected in [ (None, EXECUTEMANY_DEFAULT), ("batch", EXECUTEMANY_BATCH), - ("values", EXECUTEMANY_VALUES), + ("values_only", EXECUTEMANY_VALUES), + ("values_plus_batch", EXECUTEMANY_VALUES_PLUS_BATCH), ]: self.engine = engines.testing_engine( options={"executemany_mode": opt} @@ -774,7 +857,7 @@ $$ LANGUAGE plpgsql; r = connection.execute( t.insert(), user_name="user", user_password="lala" ) - assert r.inserted_primary_key == [1] + eq_(r.inserted_primary_key, (1,)) result = connection.execute(t.select()).fetchall() assert result == [(1, "user", "lala")] connection.execute(text("DROP TABLE speedy_users")) diff --git a/test/dialect/postgresql/test_on_conflict.py b/test/dialect/postgresql/test_on_conflict.py index b7316ca60..f24fb2cfe 100644 --- a/test/dialect/postgresql/test_on_conflict.py +++ b/test/dialect/postgresql/test_on_conflict.py @@ -98,14 +98,14 @@ class OnConflictTest(fixtures.TablesTest): insert(users).on_conflict_do_nothing(), dict(id=1, name="name1"), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) result = conn.execute( insert(users).on_conflict_do_nothing(), dict(id=1, name="name2"), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -120,7 +120,7 
@@ class OnConflictTest(fixtures.TablesTest): insert(users).on_conflict_do_nothing(constraint="uq_login_email"), dict(name="name1", login_email="email1"), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, (1,)) result = connection.execute( @@ -148,7 +148,7 @@ class OnConflictTest(fixtures.TablesTest): ), dict(id=1, name="name1"), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) result = conn.execute( @@ -157,7 +157,7 @@ class OnConflictTest(fixtures.TablesTest): ), dict(id=1, name="name2"), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -177,7 +177,7 @@ class OnConflictTest(fixtures.TablesTest): ) result = conn.execute(i, dict(id=1, name="name1")) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -198,7 +198,7 @@ class OnConflictTest(fixtures.TablesTest): ) result = conn.execute(i, dict(id=1, name="name2")) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -218,7 +218,7 @@ class OnConflictTest(fixtures.TablesTest): set_=dict(name=i.excluded.name), ) result = conn.execute(i, dict(id=1, name="name3")) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -239,7 +239,7 @@ class OnConflictTest(fixtures.TablesTest): ).values(id=1, name="name4") result = conn.execute(i) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -260,7 +260,7 @@ class OnConflictTest(fixtures.TablesTest): ).values(id=1, name="name4") result = conn.execute(i) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -292,7 +292,7 @@ 
class OnConflictTest(fixtures.TablesTest): ) result = conn.execute(i) - eq_(result.inserted_primary_key, [None]) + eq_(result.inserted_primary_key, (None,)) eq_(result.returned_defaults, None) eq_( @@ -349,7 +349,7 @@ class OnConflictTest(fixtures.TablesTest): lets_index_this="not", ), ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, None) eq_( @@ -383,7 +383,7 @@ class OnConflictTest(fixtures.TablesTest): lets_index_this="unique", ), ) - eq_(result.inserted_primary_key, [42]) + eq_(result.inserted_primary_key, (42,)) eq_(result.returned_defaults, None) eq_( @@ -422,7 +422,7 @@ class OnConflictTest(fixtures.TablesTest): lets_index_this="unique", ), ) - eq_(result.inserted_primary_key, [43]) + eq_(result.inserted_primary_key, (43,)) eq_(result.returned_defaults, None) eq_( @@ -454,7 +454,7 @@ class OnConflictTest(fixtures.TablesTest): result = conn.execute( i, dict(name="name3", login_email="name1@gmail.com") ) - eq_(result.inserted_primary_key, [1]) + eq_(result.inserted_primary_key, (1,)) eq_(result.returned_defaults, (1,)) eq_( diff --git a/test/dialect/postgresql/test_query.py b/test/dialect/postgresql/test_query.py index fad2ad8f5..3f16c60e9 100644 --- a/test/dialect/postgresql/test_query.py +++ b/test/dialect/postgresql/test_query.py @@ -172,12 +172,12 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): # execute with explicit id r = conn.execute(table.insert(), {"id": 30, "data": "d1"}) - eq_(r.inserted_primary_key, [30]) + eq_(r.inserted_primary_key, (30,)) # execute with prefetch id r = conn.execute(table.insert(), {"data": "d2"}) - eq_(r.inserted_primary_key, [1]) + eq_(r.inserted_primary_key, (1,)) # executemany with explicit ids @@ -254,7 +254,7 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): with engine.connect() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) r = conn.execute(table.insert(), {"data": "d2"}) - eq_(r.inserted_primary_key, [5]) + 
eq_(r.inserted_primary_key, (5,)) conn.execute( table.insert(), {"id": 31, "data": "d3"}, @@ -316,12 +316,12 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): # execute with explicit id r = conn.execute(table.insert(), {"id": 30, "data": "d1"}) - eq_(r.inserted_primary_key, [30]) + eq_(r.inserted_primary_key, (30,)) # execute with prefetch id r = conn.execute(table.insert(), {"data": "d2"}) - eq_(r.inserted_primary_key, [1]) + eq_(r.inserted_primary_key, (1,)) # executemany with explicit ids @@ -398,7 +398,7 @@ class InsertTest(fixtures.TestBase, AssertsExecutionResults): with engine.connect() as conn: conn.execute(table.insert(), {"id": 30, "data": "d1"}) r = conn.execute(table.insert(), {"data": "d2"}) - eq_(r.inserted_primary_key, [5]) + eq_(r.inserted_primary_key, (5,)) conn.execute( table.insert(), {"id": 31, "data": "d3"}, diff --git a/test/dialect/postgresql/test_reflection.py b/test/dialect/postgresql/test_reflection.py index 89d4ae081..ec9328c2f 100644 --- a/test/dialect/postgresql/test_reflection.py +++ b/test/dialect/postgresql/test_reflection.py @@ -488,7 +488,7 @@ class ReflectionTest(fixtures.TestBase): t2 = Table("t", m2, autoload=True, implicit_returning=False) eq_(t2.c.id.server_default.arg.text, "nextval('t_id_seq'::regclass)") r = t2.insert().execute() - eq_(r.inserted_primary_key, [1]) + eq_(r.inserted_primary_key, (1,)) testing.db.connect().execution_options( autocommit=True ).exec_driver_sql("alter table t_id_seq rename to foobar_id_seq") @@ -499,7 +499,7 @@ class ReflectionTest(fixtures.TestBase): "nextval('foobar_id_seq'::regclass)", ) r = t3.insert().execute() - eq_(r.inserted_primary_key, [2]) + eq_(r.inserted_primary_key, (2,)) @testing.provide_metadata def test_altered_type_autoincrement_pk_reflection(self): diff --git a/test/dialect/postgresql/test_types.py b/test/dialect/postgresql/test_types.py index d7f6faf92..b4ab2e877 100644 --- a/test/dialect/postgresql/test_types.py +++ b/test/dialect/postgresql/test_types.py @@ 
-12,7 +12,6 @@ from sqlalchemy import Column from sqlalchemy import column from sqlalchemy import DateTime from sqlalchemy import Enum -from sqlalchemy import event from sqlalchemy import exc from sqlalchemy import Float from sqlalchemy import func @@ -2171,7 +2170,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): def test_bind_serialize_default(self): - dialect = postgresql.dialect() + dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_( proc(util.OrderedDict([("key1", "value1"), ("key2", "value2")])), @@ -2179,12 +2178,12 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): ) def test_bind_serialize_with_slashes_and_quotes(self): - dialect = postgresql.dialect() + dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_bind_processor(dialect) eq_(proc({'\\"a': '\\"1'}), '"\\\\\\"a"=>"\\\\\\"1"') def test_parse_error(self): - dialect = postgresql.dialect() + dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) @@ -2198,7 +2197,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): ) def test_result_deserialize_default(self): - dialect = postgresql.dialect() + dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) @@ -2208,7 +2207,7 @@ class HStoreTest(AssertsCompiledSQL, fixtures.TestBase): ) def test_result_deserialize_with_slashes_and_quotes(self): - dialect = postgresql.dialect() + dialect = postgresql.dialect(use_native_hstore=False) proc = self.test_table.c.hash.type._cached_result_processor( dialect, None ) @@ -3123,76 +3122,24 @@ class JSONRoundTripTest(fixtures.TablesTest): ) self._assert_column_is_JSON_NULL(conn, column="nulldata") - def _non_native_engine(self, json_serializer=None, json_deserializer=None): - if json_serializer is not None or json_deserializer is 
not None: - options = { - "json_serializer": json_serializer, - "json_deserializer": json_deserializer, - } - else: - options = {} - - if testing.against( - "postgresql+psycopg2" - ) and testing.db.dialect.psycopg2_version >= (2, 5): - from psycopg2.extras import register_default_json - - engine = engines.testing_engine(options=options) - - @event.listens_for(engine, "connect") - def connect(dbapi_connection, connection_record): - engine.dialect._has_native_json = False - - def pass_(value): - return value - - register_default_json(dbapi_connection, loads=pass_) - - elif options: - engine = engines.testing_engine(options=options) - else: - engine = testing.db - engine.connect().close() - return engine - def test_reflect(self): insp = inspect(testing.db) cols = insp.get_columns("data_table") assert isinstance(cols[2]["type"], self.test_type) - @testing.requires.psycopg2_native_json - def test_insert_native(self, connection): + def test_insert(self, connection): self._test_insert(connection) - @testing.requires.psycopg2_native_json - def test_insert_native_nulls(self, connection): + def test_insert_nulls(self, connection): self._test_insert_nulls(connection) - @testing.requires.psycopg2_native_json - def test_insert_native_none_as_null(self, connection): + def test_insert_none_as_null(self, connection): self._test_insert_none_as_null(connection) - @testing.requires.psycopg2_native_json - def test_insert_native_nulljson_into_none_as_null(self, connection): + def test_insert_nulljson_into_none_as_null(self, connection): self._test_insert_nulljson_into_none_as_null(connection) - def test_insert_python(self): - engine = self._non_native_engine() - self._test_insert(engine) - - def test_insert_python_nulls(self): - engine = self._non_native_engine() - self._test_insert_nulls(engine) - - def test_insert_python_none_as_null(self): - engine = self._non_native_engine() - self._test_insert_none_as_null(engine) - - def test_insert_python_nulljson_into_none_as_null(self): - 
engine = self._non_native_engine() - self._test_insert_nulljson_into_none_as_null(engine) - - def _test_custom_serialize_deserialize(self, native): + def test_custom_serialize_deserialize(self): import json def loads(value): @@ -3205,38 +3152,19 @@ class JSONRoundTripTest(fixtures.TablesTest): value["x"] = "dumps_y" return json.dumps(value) - if native: - engine = engines.testing_engine( - options=dict(json_serializer=dumps, json_deserializer=loads) - ) - else: - engine = self._non_native_engine( - json_serializer=dumps, json_deserializer=loads - ) + engine = engines.testing_engine( + options=dict(json_serializer=dumps, json_deserializer=loads) + ) s = select([cast({"key": "value", "x": "q"}, self.test_type)]) with engine.begin() as conn: eq_(conn.scalar(s), {"key": "value", "x": "dumps_y_loads"}) - @testing.requires.psycopg2_native_json - def test_custom_native(self): - self._test_custom_serialize_deserialize(True) - - @testing.requires.psycopg2_native_json - def test_custom_python(self): - self._test_custom_serialize_deserialize(False) - - @testing.requires.psycopg2_native_json - def test_criterion_native(self): + def test_criterion(self): engine = testing.db self._fixture_data(engine) self._test_criterion(engine) - def test_criterion_python(self): - engine = self._non_native_engine() - self._fixture_data(engine) - self._test_criterion(engine) - def test_path_query(self, connection): engine = testing.db self._fixture_data(engine) @@ -3304,59 +3232,39 @@ class JSONRoundTripTest(fixtures.TablesTest): ).first() eq_(result, ({"k1": "r3v1", "k2": "r3v2"},)) - def _test_fixed_round_trip(self, engine): - with engine.begin() as conn: - s = select( - [ - cast( - {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, - self.test_type, - ) - ] - ) - eq_( - conn.scalar(s), - {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, - ) - - def test_fixed_round_trip_python(self): - engine = self._non_native_engine() - self._test_fixed_round_trip(engine) - - 
@testing.requires.psycopg2_native_json - def test_fixed_round_trip_native(self): - engine = testing.db - self._test_fixed_round_trip(engine) - - def _test_unicode_round_trip(self, engine): - with engine.begin() as conn: - s = select( - [ - cast( - { - util.u("réveillé"): util.u("réveillé"), - "data": {"k1": util.u("drôle")}, - }, - self.test_type, - ) - ] - ) - eq_( - conn.scalar(s), - { - util.u("réveillé"): util.u("réveillé"), - "data": {"k1": util.u("drôle")}, - }, - ) - - def test_unicode_round_trip_python(self): - engine = self._non_native_engine() - self._test_unicode_round_trip(engine) + def test_fixed_round_trip(self, connection): + s = select( + [ + cast( + {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, + self.test_type, + ) + ] + ) + eq_( + connection.scalar(s), + {"key": "value", "key2": {"k1": "v1", "k2": "v2"}}, + ) - @testing.requires.psycopg2_native_json - def test_unicode_round_trip_native(self): - engine = testing.db - self._test_unicode_round_trip(engine) + def test_unicode_round_trip(self, connection): + s = select( + [ + cast( + { + util.u("réveillé"): util.u("réveillé"), + "data": {"k1": util.u("drôle")}, + }, + self.test_type, + ) + ] + ) + eq_( + connection.scalar(s), + { + util.u("réveillé"): util.u("réveillé"), + "data": {"k1": util.u("drôle")}, + }, + ) def test_eval_none_flag_orm(self): Base = declarative_base() @@ -3441,12 +3349,8 @@ class JSONBRoundTripTest(JSONRoundTripTest): test_type = JSONB @testing.requires.postgresql_utf8_server_encoding - def test_unicode_round_trip_python(self): - super(JSONBRoundTripTest, self).test_unicode_round_trip_python() - - @testing.requires.postgresql_utf8_server_encoding - def test_unicode_round_trip_native(self): - super(JSONBRoundTripTest, self).test_unicode_round_trip_native() + def test_unicode_round_trip(self, connection): + super(JSONBRoundTripTest, self).test_unicode_round_trip(connection) class JSONBSuiteTest(suite.JSONTest): |
