summaryrefslogtreecommitdiff
path: root/lib/sqlalchemy/dialects/postgresql
diff options
context:
space:
mode:
author: Mike Bayer <mike_mp@zzzcomputing.com> 2019-01-06 01:14:26 -0500
committer: mike bayer <mike_mp@zzzcomputing.com> 2019-01-06 17:34:50 +0000
commit: 1e1a38e7801f410f244e4bbb44ec795ae152e04e (patch)
tree: 28e725c5c8188bd0cfd133d1e268dbca9b524978 /lib/sqlalchemy/dialects/postgresql
parent: 404e69426b05a82d905cbb3ad33adafccddb00dd (diff)
download: sqlalchemy-1e1a38e7801f410f244e4bbb44ec795ae152e04e.tar.gz
Run black -l 79 against all source files

This is a straight reformat run using black as-is, with no edits applied at all. The black run will format code consistently; however, in some cases that are prevalent in SQLAlchemy code it produces too-long lines. The too-long lines will be resolved in the following commit, which will resolve all remaining flake8 issues including shadowed builtins, long lines, import order, unused imports, duplicate imports, and docstring issues.

Change-Id: I7eda77fed3d8e73df84b3651fd6cfcfe858d4dc9
Diffstat (limited to 'lib/sqlalchemy/dialects/postgresql')
-rw-r--r--lib/sqlalchemy/dialects/postgresql/__init__.py111
-rw-r--r--lib/sqlalchemy/dialects/postgresql/array.py79
-rw-r--r--lib/sqlalchemy/dialects/postgresql/base.py1437
-rw-r--r--lib/sqlalchemy/dialects/postgresql/dml.py95
-rw-r--r--lib/sqlalchemy/dialects/postgresql/ext.py44
-rw-r--r--lib/sqlalchemy/dialects/postgresql/hstore.py129
-rw-r--r--lib/sqlalchemy/dialects/postgresql/json.py66
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pg8000.py109
-rw-r--r--lib/sqlalchemy/dialects/postgresql/psycopg2.py190
-rw-r--r--lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py10
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pygresql.py72
-rw-r--r--lib/sqlalchemy/dialects/postgresql/pypostgresql.py28
-rw-r--r--lib/sqlalchemy/dialects/postgresql/ranges.py57
-rw-r--r--lib/sqlalchemy/dialects/postgresql/zxjdbc.py9
14 files changed, 1491 insertions, 945 deletions
diff --git a/lib/sqlalchemy/dialects/postgresql/__init__.py b/lib/sqlalchemy/dialects/postgresql/__init__.py
index 84f720028..9e65484fa 100644
--- a/lib/sqlalchemy/dialects/postgresql/__init__.py
+++ b/lib/sqlalchemy/dialects/postgresql/__init__.py
@@ -5,33 +5,110 @@
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
-from . import base, psycopg2, pg8000, pypostgresql, pygresql, \
- zxjdbc, psycopg2cffi # noqa
+from . import (
+ base,
+ psycopg2,
+ pg8000,
+ pypostgresql,
+ pygresql,
+ zxjdbc,
+ psycopg2cffi,
+) # noqa
-from .base import \
- INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, \
- INET, CIDR, UUID, BIT, MACADDR, MONEY, OID, REGCLASS, DOUBLE_PRECISION, \
- TIMESTAMP, TIME, DATE, BYTEA, BOOLEAN, INTERVAL, ENUM, TSVECTOR, \
- DropEnumType, CreateEnumType
+from .base import (
+ INTEGER,
+ BIGINT,
+ SMALLINT,
+ VARCHAR,
+ CHAR,
+ TEXT,
+ NUMERIC,
+ FLOAT,
+ REAL,
+ INET,
+ CIDR,
+ UUID,
+ BIT,
+ MACADDR,
+ MONEY,
+ OID,
+ REGCLASS,
+ DOUBLE_PRECISION,
+ TIMESTAMP,
+ TIME,
+ DATE,
+ BYTEA,
+ BOOLEAN,
+ INTERVAL,
+ ENUM,
+ TSVECTOR,
+ DropEnumType,
+ CreateEnumType,
+)
from .hstore import HSTORE, hstore
from .json import JSON, JSONB
from .array import array, ARRAY, Any, All
from .ext import aggregate_order_by, ExcludeConstraint, array_agg
from .dml import insert, Insert
-from .ranges import INT4RANGE, INT8RANGE, NUMRANGE, DATERANGE, TSRANGE, \
- TSTZRANGE
+from .ranges import (
+ INT4RANGE,
+ INT8RANGE,
+ NUMRANGE,
+ DATERANGE,
+ TSRANGE,
+ TSTZRANGE,
+)
base.dialect = dialect = psycopg2.dialect
__all__ = (
- 'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC',
- 'FLOAT', 'REAL', 'INET', 'CIDR', 'UUID', 'BIT', 'MACADDR', 'MONEY', 'OID',
- 'REGCLASS', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', 'DATE', 'BYTEA',
- 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect', 'array', 'HSTORE',
- 'hstore', 'INT4RANGE', 'INT8RANGE', 'NUMRANGE', 'DATERANGE',
- 'TSRANGE', 'TSTZRANGE', 'JSON', 'JSONB', 'Any', 'All',
- 'DropEnumType', 'CreateEnumType', 'ExcludeConstraint',
- 'aggregate_order_by', 'array_agg', 'insert', 'Insert'
+ "INTEGER",
+ "BIGINT",
+ "SMALLINT",
+ "VARCHAR",
+ "CHAR",
+ "TEXT",
+ "NUMERIC",
+ "FLOAT",
+ "REAL",
+ "INET",
+ "CIDR",
+ "UUID",
+ "BIT",
+ "MACADDR",
+ "MONEY",
+ "OID",
+ "REGCLASS",
+ "DOUBLE_PRECISION",
+ "TIMESTAMP",
+ "TIME",
+ "DATE",
+ "BYTEA",
+ "BOOLEAN",
+ "INTERVAL",
+ "ARRAY",
+ "ENUM",
+ "dialect",
+ "array",
+ "HSTORE",
+ "hstore",
+ "INT4RANGE",
+ "INT8RANGE",
+ "NUMRANGE",
+ "DATERANGE",
+ "TSRANGE",
+ "TSTZRANGE",
+ "JSON",
+ "JSONB",
+ "Any",
+ "All",
+ "DropEnumType",
+ "CreateEnumType",
+ "ExcludeConstraint",
+ "aggregate_order_by",
+ "array_agg",
+ "insert",
+ "Insert",
)
diff --git a/lib/sqlalchemy/dialects/postgresql/array.py b/lib/sqlalchemy/dialects/postgresql/array.py
index b2674046e..07167f9d0 100644
--- a/lib/sqlalchemy/dialects/postgresql/array.py
+++ b/lib/sqlalchemy/dialects/postgresql/array.py
@@ -78,7 +78,8 @@ class array(expression.Tuple):
:class:`.postgresql.ARRAY`
"""
- __visit_name__ = 'array'
+
+ __visit_name__ = "array"
def __init__(self, clauses, **kw):
super(array, self).__init__(*clauses, **kw)
@@ -90,18 +91,26 @@ class array(expression.Tuple):
# a Slice object from that
assert isinstance(obj, int)
return expression.BindParameter(
- None, obj, _compared_to_operator=operator,
+ None,
+ obj,
+ _compared_to_operator=operator,
type_=type_,
- _compared_to_type=self.type, unique=True)
+ _compared_to_type=self.type,
+ unique=True,
+ )
else:
- return array([
- self._bind_param(operator, o, _assume_scalar=True, type_=type_)
- for o in obj])
+ return array(
+ [
+ self._bind_param(
+ operator, o, _assume_scalar=True, type_=type_
+ )
+ for o in obj
+ ]
+ )
def self_group(self, against=None):
- if (against in (
- operators.any_op, operators.all_op, operators.getitem)):
+ if against in (operators.any_op, operators.all_op, operators.getitem):
return expression.Grouping(self)
else:
return self
@@ -180,7 +189,8 @@ class ARRAY(sqltypes.ARRAY):
elements of the argument array expression.
"""
return self.operate(
- CONTAINED_BY, other, result_type=sqltypes.Boolean)
+ CONTAINED_BY, other, result_type=sqltypes.Boolean
+ )
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
@@ -190,8 +200,9 @@ class ARRAY(sqltypes.ARRAY):
comparator_factory = Comparator
- def __init__(self, item_type, as_tuple=False, dimensions=None,
- zero_indexes=False):
+ def __init__(
+ self, item_type, as_tuple=False, dimensions=None, zero_indexes=False
+ ):
"""Construct an ARRAY.
E.g.::
@@ -228,8 +239,10 @@ class ARRAY(sqltypes.ARRAY):
"""
if isinstance(item_type, ARRAY):
- raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
- "handles multi-dimensional arrays of basetype")
+ raise ValueError(
+ "Do not nest ARRAY types; ARRAY(basetype) "
+ "handles multi-dimensional arrays of basetype"
+ )
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
@@ -251,11 +264,17 @@ class ARRAY(sqltypes.ARRAY):
def _proc_array(self, arr, itemproc, dim, collection):
if dim is None:
arr = list(arr)
- if dim == 1 or dim is None and (
+ if (
+ dim == 1
+ or dim is None
+ and (
# this has to be (list, tuple), or at least
# not hasattr('__iter__'), since Py3K strings
# etc. have __iter__
- not arr or not isinstance(arr[0], (list, tuple))):
+ not arr
+ or not isinstance(arr[0], (list, tuple))
+ )
+ ):
if itemproc:
return collection(itemproc(x) for x in arr)
else:
@@ -263,30 +282,33 @@ class ARRAY(sqltypes.ARRAY):
else:
return collection(
self._proc_array(
- x, itemproc,
+ x,
+ itemproc,
dim - 1 if dim is not None else None,
- collection)
+ collection,
+ )
for x in arr
)
def bind_processor(self, dialect):
- item_proc = self.item_type.dialect_impl(dialect).\
- bind_processor(dialect)
+ item_proc = self.item_type.dialect_impl(dialect).bind_processor(
+ dialect
+ )
def process(value):
if value is None:
return value
else:
return self._proc_array(
- value,
- item_proc,
- self.dimensions,
- list)
+ value, item_proc, self.dimensions, list
+ )
+
return process
def result_processor(self, dialect, coltype):
- item_proc = self.item_type.dialect_impl(dialect).\
- result_processor(dialect, coltype)
+ item_proc = self.item_type.dialect_impl(dialect).result_processor(
+ dialect, coltype
+ )
def process(value):
if value is None:
@@ -296,8 +318,11 @@ class ARRAY(sqltypes.ARRAY):
value,
item_proc,
self.dimensions,
- tuple if self.as_tuple else list)
+ tuple if self.as_tuple else list,
+ )
+
return process
+
colspecs[sqltypes.ARRAY] = ARRAY
-ischema_names['_array'] = ARRAY
+ischema_names["_array"] = ARRAY
diff --git a/lib/sqlalchemy/dialects/postgresql/base.py b/lib/sqlalchemy/dialects/postgresql/base.py
index d68ab8ef5..11833da57 100644
--- a/lib/sqlalchemy/dialects/postgresql/base.py
+++ b/lib/sqlalchemy/dialects/postgresql/base.py
@@ -930,57 +930,164 @@ try:
except ImportError:
_python_UUID = None
-from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
- CHAR, TEXT, FLOAT, NUMERIC, \
- DATE, BOOLEAN, REAL
+from sqlalchemy.types import (
+ INTEGER,
+ BIGINT,
+ SMALLINT,
+ VARCHAR,
+ CHAR,
+ TEXT,
+ FLOAT,
+ NUMERIC,
+ DATE,
+ BOOLEAN,
+ REAL,
+)
AUTOCOMMIT_REGEXP = re.compile(
- r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|GRANT|REVOKE|'
- 'IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW|TRUNCATE)',
- re.I | re.UNICODE)
+ r"\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|GRANT|REVOKE|"
+ "IMPORT FOREIGN SCHEMA|REFRESH MATERIALIZED VIEW|TRUNCATE)",
+ re.I | re.UNICODE,
+)
RESERVED_WORDS = set(
- ["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
- "asymmetric", "both", "case", "cast", "check", "collate", "column",
- "constraint", "create", "current_catalog", "current_date",
- "current_role", "current_time", "current_timestamp", "current_user",
- "default", "deferrable", "desc", "distinct", "do", "else", "end",
- "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
- "having", "in", "initially", "intersect", "into", "leading", "limit",
- "localtime", "localtimestamp", "new", "not", "null", "of", "off",
- "offset", "old", "on", "only", "or", "order", "placing", "primary",
- "references", "returning", "select", "session_user", "some", "symmetric",
- "table", "then", "to", "trailing", "true", "union", "unique", "user",
- "using", "variadic", "when", "where", "window", "with", "authorization",
- "between", "binary", "cross", "current_schema", "freeze", "full",
- "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
- "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
- ])
+ [
+ "all",
+ "analyse",
+ "analyze",
+ "and",
+ "any",
+ "array",
+ "as",
+ "asc",
+ "asymmetric",
+ "both",
+ "case",
+ "cast",
+ "check",
+ "collate",
+ "column",
+ "constraint",
+ "create",
+ "current_catalog",
+ "current_date",
+ "current_role",
+ "current_time",
+ "current_timestamp",
+ "current_user",
+ "default",
+ "deferrable",
+ "desc",
+ "distinct",
+ "do",
+ "else",
+ "end",
+ "except",
+ "false",
+ "fetch",
+ "for",
+ "foreign",
+ "from",
+ "grant",
+ "group",
+ "having",
+ "in",
+ "initially",
+ "intersect",
+ "into",
+ "leading",
+ "limit",
+ "localtime",
+ "localtimestamp",
+ "new",
+ "not",
+ "null",
+ "of",
+ "off",
+ "offset",
+ "old",
+ "on",
+ "only",
+ "or",
+ "order",
+ "placing",
+ "primary",
+ "references",
+ "returning",
+ "select",
+ "session_user",
+ "some",
+ "symmetric",
+ "table",
+ "then",
+ "to",
+ "trailing",
+ "true",
+ "union",
+ "unique",
+ "user",
+ "using",
+ "variadic",
+ "when",
+ "where",
+ "window",
+ "with",
+ "authorization",
+ "between",
+ "binary",
+ "cross",
+ "current_schema",
+ "freeze",
+ "full",
+ "ilike",
+ "inner",
+ "is",
+ "isnull",
+ "join",
+ "left",
+ "like",
+ "natural",
+ "notnull",
+ "outer",
+ "over",
+ "overlaps",
+ "right",
+ "similar",
+ "verbose",
+ ]
+)
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
+
class BYTEA(sqltypes.LargeBinary):
- __visit_name__ = 'BYTEA'
+ __visit_name__ = "BYTEA"
class DOUBLE_PRECISION(sqltypes.Float):
- __visit_name__ = 'DOUBLE_PRECISION'
+ __visit_name__ = "DOUBLE_PRECISION"
class INET(sqltypes.TypeEngine):
__visit_name__ = "INET"
+
+
PGInet = INET
class CIDR(sqltypes.TypeEngine):
__visit_name__ = "CIDR"
+
+
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
__visit_name__ = "MACADDR"
+
+
PGMacAddr = MACADDR
@@ -991,6 +1098,7 @@ class MONEY(sqltypes.TypeEngine):
.. versionadded:: 1.2
"""
+
__visit_name__ = "MONEY"
@@ -1001,6 +1109,7 @@ class OID(sqltypes.TypeEngine):
.. versionadded:: 0.9.5
"""
+
__visit_name__ = "OID"
@@ -1011,18 +1120,17 @@ class REGCLASS(sqltypes.TypeEngine):
.. versionadded:: 1.2.7
"""
+
__visit_name__ = "REGCLASS"
class TIMESTAMP(sqltypes.TIMESTAMP):
-
def __init__(self, timezone=False, precision=None):
super(TIMESTAMP, self).__init__(timezone=timezone)
self.precision = precision
class TIME(sqltypes.TIME):
-
def __init__(self, timezone=False, precision=None):
super(TIME, self).__init__(timezone=timezone)
self.precision = precision
@@ -1036,7 +1144,8 @@ class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
It is known to work on psycopg2 and not pg8000 or zxjdbc.
"""
- __visit_name__ = 'INTERVAL'
+
+ __visit_name__ = "INTERVAL"
native = True
def __init__(self, precision=None, fields=None):
@@ -1065,11 +1174,12 @@ class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
def python_type(self):
return dt.timedelta
+
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
- __visit_name__ = 'BIT'
+ __visit_name__ = "BIT"
def __init__(self, length=None, varying=False):
if not varying:
@@ -1080,6 +1190,7 @@ class BIT(sqltypes.TypeEngine):
self.length = length
self.varying = varying
+
PGBit = BIT
@@ -1095,7 +1206,8 @@ class UUID(sqltypes.TypeEngine):
It is known to work on psycopg2 and not pg8000.
"""
- __visit_name__ = 'UUID'
+
+ __visit_name__ = "UUID"
def __init__(self, as_uuid=False):
"""Construct a UUID type.
@@ -1115,24 +1227,29 @@ class UUID(sqltypes.TypeEngine):
def bind_processor(self, dialect):
if self.as_uuid:
+
def process(value):
if value is not None:
value = util.text_type(value)
return value
+
return process
else:
return None
def result_processor(self, dialect, coltype):
if self.as_uuid:
+
def process(value):
if value is not None:
value = _python_UUID(value)
return value
+
return process
else:
return None
+
PGUuid = UUID
@@ -1151,7 +1268,8 @@ class TSVECTOR(sqltypes.TypeEngine):
:ref:`postgresql_match`
"""
- __visit_name__ = 'TSVECTOR'
+
+ __visit_name__ = "TSVECTOR"
class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
@@ -1273,12 +1391,12 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
"""
kw.setdefault("validate_strings", impl.validate_strings)
- kw.setdefault('name', impl.name)
- kw.setdefault('schema', impl.schema)
- kw.setdefault('inherit_schema', impl.inherit_schema)
- kw.setdefault('metadata', impl.metadata)
- kw.setdefault('_create_events', False)
- kw.setdefault('values_callable', impl.values_callable)
+ kw.setdefault("name", impl.name)
+ kw.setdefault("schema", impl.schema)
+ kw.setdefault("inherit_schema", impl.inherit_schema)
+ kw.setdefault("metadata", impl.metadata)
+ kw.setdefault("_create_events", False)
+ kw.setdefault("values_callable", impl.values_callable)
return cls(**kw)
def create(self, bind=None, checkfirst=True):
@@ -1300,9 +1418,9 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
if not bind.dialect.supports_native_enum:
return
- if not checkfirst or \
- not bind.dialect.has_type(
- bind, self.name, schema=self.schema):
+ if not checkfirst or not bind.dialect.has_type(
+ bind, self.name, schema=self.schema
+ ):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
@@ -1323,8 +1441,9 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
if not bind.dialect.supports_native_enum:
return
- if not checkfirst or \
- bind.dialect.has_type(bind, self.name, schema=self.schema):
+ if not checkfirst or bind.dialect.has_type(
+ bind, self.name, schema=self.schema
+ ):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
@@ -1338,12 +1457,12 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
"""
if not self.create_type:
return True
- if '_ddl_runner' in kw:
- ddl_runner = kw['_ddl_runner']
- if '_pg_enums' in ddl_runner.memo:
- pg_enums = ddl_runner.memo['_pg_enums']
+ if "_ddl_runner" in kw:
+ ddl_runner = kw["_ddl_runner"]
+ if "_pg_enums" in ddl_runner.memo:
+ pg_enums = ddl_runner.memo["_pg_enums"]
else:
- pg_enums = ddl_runner.memo['_pg_enums'] = set()
+ pg_enums = ddl_runner.memo["_pg_enums"] = set()
present = (self.schema, self.name) in pg_enums
pg_enums.add((self.schema, self.name))
return present
@@ -1351,16 +1470,22 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
return False
def _on_table_create(self, target, bind, checkfirst=False, **kw):
- if checkfirst or (
- not self.metadata and
- not kw.get('_is_metadata_operation', False)) and \
- not self._check_for_name_in_memos(checkfirst, kw):
+ if (
+ checkfirst
+ or (
+ not self.metadata
+ and not kw.get("_is_metadata_operation", False)
+ )
+ and not self._check_for_name_in_memos(checkfirst, kw)
+ ):
self.create(bind=bind, checkfirst=checkfirst)
def _on_table_drop(self, target, bind, checkfirst=False, **kw):
- if not self.metadata and \
- not kw.get('_is_metadata_operation', False) and \
- not self._check_for_name_in_memos(checkfirst, kw):
+ if (
+ not self.metadata
+ and not kw.get("_is_metadata_operation", False)
+ and not self._check_for_name_in_memos(checkfirst, kw)
+ ):
self.drop(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
@@ -1371,49 +1496,46 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum):
if not self._check_for_name_in_memos(checkfirst, kw):
self.drop(bind=bind, checkfirst=checkfirst)
-colspecs = {
- sqltypes.Interval: INTERVAL,
- sqltypes.Enum: ENUM,
-}
+
+colspecs = {sqltypes.Interval: INTERVAL, sqltypes.Enum: ENUM}
ischema_names = {
- 'integer': INTEGER,
- 'bigint': BIGINT,
- 'smallint': SMALLINT,
- 'character varying': VARCHAR,
- 'character': CHAR,
+ "integer": INTEGER,
+ "bigint": BIGINT,
+ "smallint": SMALLINT,
+ "character varying": VARCHAR,
+ "character": CHAR,
'"char"': sqltypes.String,
- 'name': sqltypes.String,
- 'text': TEXT,
- 'numeric': NUMERIC,
- 'float': FLOAT,
- 'real': REAL,
- 'inet': INET,
- 'cidr': CIDR,
- 'uuid': UUID,
- 'bit': BIT,
- 'bit varying': BIT,
- 'macaddr': MACADDR,
- 'money': MONEY,
- 'oid': OID,
- 'regclass': REGCLASS,
- 'double precision': DOUBLE_PRECISION,
- 'timestamp': TIMESTAMP,
- 'timestamp with time zone': TIMESTAMP,
- 'timestamp without time zone': TIMESTAMP,
- 'time with time zone': TIME,
- 'time without time zone': TIME,
- 'date': DATE,
- 'time': TIME,
- 'bytea': BYTEA,
- 'boolean': BOOLEAN,
- 'interval': INTERVAL,
- 'tsvector': TSVECTOR
+ "name": sqltypes.String,
+ "text": TEXT,
+ "numeric": NUMERIC,
+ "float": FLOAT,
+ "real": REAL,
+ "inet": INET,
+ "cidr": CIDR,
+ "uuid": UUID,
+ "bit": BIT,
+ "bit varying": BIT,
+ "macaddr": MACADDR,
+ "money": MONEY,
+ "oid": OID,
+ "regclass": REGCLASS,
+ "double precision": DOUBLE_PRECISION,
+ "timestamp": TIMESTAMP,
+ "timestamp with time zone": TIMESTAMP,
+ "timestamp without time zone": TIMESTAMP,
+ "time with time zone": TIME,
+ "time without time zone": TIME,
+ "date": DATE,
+ "time": TIME,
+ "bytea": BYTEA,
+ "boolean": BOOLEAN,
+ "interval": INTERVAL,
+ "tsvector": TSVECTOR,
}
class PGCompiler(compiler.SQLCompiler):
-
def visit_array(self, element, **kw):
return "ARRAY[%s]" % self.visit_clauselist(element, **kw)
@@ -1424,77 +1546,75 @@ class PGCompiler(compiler.SQLCompiler):
)
def visit_json_getitem_op_binary(self, binary, operator, **kw):
- kw['eager_grouping'] = True
- return self._generate_generic_binary(
- binary, " -> ", **kw
- )
+ kw["eager_grouping"] = True
+ return self._generate_generic_binary(binary, " -> ", **kw)
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
- kw['eager_grouping'] = True
- return self._generate_generic_binary(
- binary, " #> ", **kw
- )
+ kw["eager_grouping"] = True
+ return self._generate_generic_binary(binary, " #> ", **kw)
def visit_getitem_binary(self, binary, operator, **kw):
return "%s[%s]" % (
self.process(binary.left, **kw),
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw),
)
def visit_aggregate_order_by(self, element, **kw):
return "%s ORDER BY %s" % (
self.process(element.target, **kw),
- self.process(element.order_by, **kw)
+ self.process(element.order_by, **kw),
)
def visit_match_op_binary(self, binary, operator, **kw):
if "postgresql_regconfig" in binary.modifiers:
regconfig = self.render_literal_value(
- binary.modifiers['postgresql_regconfig'],
- sqltypes.STRINGTYPE)
+ binary.modifiers["postgresql_regconfig"], sqltypes.STRINGTYPE
+ )
if regconfig:
return "%s @@ to_tsquery(%s, %s)" % (
self.process(binary.left, **kw),
regconfig,
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw),
)
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left, **kw),
- self.process(binary.right, **kw)
+ self.process(binary.right, **kw),
)
def visit_ilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
- return '%s ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
- + (
- ' ESCAPE ' +
- self.render_literal_value(escape, sqltypes.STRINGTYPE)
- if escape else ''
- )
+ return "%s ILIKE %s" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw),
+ ) + (
+ " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
+ if escape
+ else ""
+ )
def visit_notilike_op_binary(self, binary, operator, **kw):
escape = binary.modifiers.get("escape", None)
- return '%s NOT ILIKE %s' % \
- (self.process(binary.left, **kw),
- self.process(binary.right, **kw)) \
- + (
- ' ESCAPE ' +
- self.render_literal_value(escape, sqltypes.STRINGTYPE)
- if escape else ''
- )
+ return "%s NOT ILIKE %s" % (
+ self.process(binary.left, **kw),
+ self.process(binary.right, **kw),
+ ) + (
+ " ESCAPE " + self.render_literal_value(escape, sqltypes.STRINGTYPE)
+ if escape
+ else ""
+ )
def visit_empty_set_expr(self, element_types):
# cast the empty set to the type we are comparing against. if
# we are comparing against the null type, pick an arbitrary
# datatype for the empty set
- return 'SELECT %s WHERE 1!=1' % (
+ return "SELECT %s WHERE 1!=1" % (
", ".join(
- "CAST(NULL AS %s)" % self.dialect.type_compiler.process(
- INTEGER() if type_._isnull else type_,
- ) for type_ in element_types or [INTEGER()]
+ "CAST(NULL AS %s)"
+ % self.dialect.type_compiler.process(
+ INTEGER() if type_._isnull else type_
+ )
+ for type_ in element_types or [INTEGER()]
),
)
@@ -1502,7 +1622,7 @@ class PGCompiler(compiler.SQLCompiler):
value = super(PGCompiler, self).render_literal_value(value, type_)
if self.dialect._backslash_escapes:
- value = value.replace('\\', '\\\\')
+ value = value.replace("\\", "\\\\")
return value
def visit_sequence(self, seq, **kw):
@@ -1519,7 +1639,7 @@ class PGCompiler(compiler.SQLCompiler):
return text
def format_from_hint_text(self, sqltext, table, hint, iscrud):
- if hint.upper() != 'ONLY':
+ if hint.upper() != "ONLY":
raise exc.CompileError("Unrecognized hint: %r" % hint)
return "ONLY " + sqltext
@@ -1528,12 +1648,19 @@ class PGCompiler(compiler.SQLCompiler):
if select._distinct is True:
return "DISTINCT "
elif isinstance(select._distinct, (list, tuple)):
- return "DISTINCT ON (" + ', '.join(
- [self.process(col, **kw) for col in select._distinct]
- ) + ") "
+ return (
+ "DISTINCT ON ("
+ + ", ".join(
+ [self.process(col, **kw) for col in select._distinct]
+ )
+ + ") "
+ )
else:
- return "DISTINCT ON (" + \
- self.process(select._distinct, **kw) + ") "
+ return (
+ "DISTINCT ON ("
+ + self.process(select._distinct, **kw)
+ + ") "
+ )
else:
return ""
@@ -1551,8 +1678,9 @@ class PGCompiler(compiler.SQLCompiler):
if select._for_update_arg.of:
tables = util.OrderedSet(
- c.table if isinstance(c, expression.ColumnClause)
- else c for c in select._for_update_arg.of)
+ c.table if isinstance(c, expression.ColumnClause) else c
+ for c in select._for_update_arg.of
+ )
tmp += " OF " + ", ".join(
self.process(table, ashint=True, use_schema=False, **kw)
for table in tables
@@ -1572,7 +1700,7 @@ class PGCompiler(compiler.SQLCompiler):
for c in expression._select_iterables(returning_cols)
]
- return 'RETURNING ' + ', '.join(columns)
+ return "RETURNING " + ", ".join(columns)
def visit_substring_func(self, func, **kw):
s = self.process(func.clauses.clauses[0], **kw)
@@ -1586,24 +1714,24 @@ class PGCompiler(compiler.SQLCompiler):
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
- target_text = 'ON CONSTRAINT %s' % clause.constraint_target
+ target_text = "ON CONSTRAINT %s" % clause.constraint_target
elif clause.inferred_target_elements is not None:
- target_text = '(%s)' % ', '.join(
- (self.preparer.quote(c)
- if isinstance(c, util.string_types)
- else
- self.process(c, include_table=False, use_schema=False))
+ target_text = "(%s)" % ", ".join(
+ (
+ self.preparer.quote(c)
+ if isinstance(c, util.string_types)
+ else self.process(c, include_table=False, use_schema=False)
+ )
for c in clause.inferred_target_elements
)
if clause.inferred_target_whereclause is not None:
- target_text += ' WHERE %s' % \
- self.process(
- clause.inferred_target_whereclause,
- include_table=False,
- use_schema=False
- )
+ target_text += " WHERE %s" % self.process(
+ clause.inferred_target_whereclause,
+ include_table=False,
+ use_schema=False,
+ )
else:
- target_text = ''
+ target_text = ""
return target_text
@@ -1627,36 +1755,35 @@ class PGCompiler(compiler.SQLCompiler):
set_parameters = dict(clause.update_values_to_set)
# create a list of column assignment clauses as tuples
- insert_statement = self.stack[-1]['selectable']
+ insert_statement = self.stack[-1]["selectable"]
cols = insert_statement.table.c
for c in cols:
col_key = c.key
if col_key in set_parameters:
value = set_parameters.pop(col_key)
if elements._is_literal(value):
- value = elements.BindParameter(
- None, value, type_=c.type
- )
+ value = elements.BindParameter(None, value, type_=c.type)
else:
- if isinstance(value, elements.BindParameter) and \
- value.type._isnull:
+ if (
+ isinstance(value, elements.BindParameter)
+ and value.type._isnull
+ ):
value = value._clone()
value.type = c.type
value_text = self.process(value.self_group(), use_schema=False)
- key_text = (
- self.preparer.quote(col_key)
- )
- action_set_ops.append('%s = %s' % (key_text, value_text))
+ key_text = self.preparer.quote(col_key)
+ action_set_ops.append("%s = %s" % (key_text, value_text))
# check for names that don't match columns
if set_parameters:
util.warn(
"Additional column names not matching "
- "any column keys in table '%s': %s" % (
+ "any column keys in table '%s': %s"
+ % (
self.statement.table.name,
- (", ".join("'%s'" % c for c in set_parameters))
+ (", ".join("'%s'" % c for c in set_parameters)),
)
)
for k, v in set_parameters.items():
@@ -1666,42 +1793,37 @@ class PGCompiler(compiler.SQLCompiler):
else self.process(k, use_schema=False)
)
value_text = self.process(
- elements._literal_as_binds(v),
- use_schema=False
+ elements._literal_as_binds(v), use_schema=False
)
- action_set_ops.append('%s = %s' % (key_text, value_text))
+ action_set_ops.append("%s = %s" % (key_text, value_text))
- action_text = ', '.join(action_set_ops)
+ action_text = ", ".join(action_set_ops)
if clause.update_whereclause is not None:
- action_text += ' WHERE %s' % \
- self.process(
- clause.update_whereclause,
- include_table=True,
- use_schema=False
- )
+ action_text += " WHERE %s" % self.process(
+ clause.update_whereclause, include_table=True, use_schema=False
+ )
- return 'ON CONFLICT %s DO UPDATE SET %s' % (target_text, action_text)
+ return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
- def update_from_clause(self, update_stmt,
- from_table, extra_froms,
- from_hints,
- **kw):
- return "FROM " + ', '.join(
- t._compiler_dispatch(self, asfrom=True,
- fromhints=from_hints, **kw)
- for t in extra_froms)
+ def update_from_clause(
+ self, update_stmt, from_table, extra_froms, from_hints, **kw
+ ):
+ return "FROM " + ", ".join(
+ t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
+ for t in extra_froms
+ )
- def delete_extra_from_clause(self, delete_stmt, from_table,
- extra_froms, from_hints, **kw):
+ def delete_extra_from_clause(
+ self, delete_stmt, from_table, extra_froms, from_hints, **kw
+ ):
"""Render the DELETE .. USING clause specific to PostgreSQL."""
- return "USING " + ', '.join(
- t._compiler_dispatch(self, asfrom=True,
- fromhints=from_hints, **kw)
- for t in extra_froms)
+ return "USING " + ", ".join(
+ t._compiler_dispatch(self, asfrom=True, fromhints=from_hints, **kw)
+ for t in extra_froms
+ )
class PGDDLCompiler(compiler.DDLCompiler):
-
def get_column_specification(self, column, **kwargs):
colspec = self.preparer.format_column(column)
@@ -1709,17 +1831,21 @@ class PGDDLCompiler(compiler.DDLCompiler):
if isinstance(impl_type, sqltypes.TypeDecorator):
impl_type = impl_type.impl
- if column.primary_key and \
- column is column.table._autoincrement_column and \
- (
- self.dialect.supports_smallserial or
- not isinstance(impl_type, sqltypes.SmallInteger)
- ) and (
- column.default is None or
- (
- isinstance(column.default, schema.Sequence) and
- column.default.optional
- )):
+ if (
+ column.primary_key
+ and column is column.table._autoincrement_column
+ and (
+ self.dialect.supports_smallserial
+ or not isinstance(impl_type, sqltypes.SmallInteger)
+ )
+ and (
+ column.default is None
+ or (
+ isinstance(column.default, schema.Sequence)
+ and column.default.optional
+ )
+ )
+ ):
if isinstance(impl_type, sqltypes.BigInteger):
colspec += " BIGSERIAL"
elif isinstance(impl_type, sqltypes.SmallInteger):
@@ -1728,7 +1854,8 @@ class PGDDLCompiler(compiler.DDLCompiler):
colspec += " SERIAL"
else:
colspec += " " + self.dialect.type_compiler.process(
- column.type, type_expression=column)
+ column.type, type_expression=column
+ )
default = self.get_column_default_string(column)
if default is not None:
colspec += " DEFAULT " + default
@@ -1744,15 +1871,14 @@ class PGDDLCompiler(compiler.DDLCompiler):
self.preparer.format_type(type_),
", ".join(
self.sql_compiler.process(sql.literal(e), literal_binds=True)
- for e in type_.enums)
+ for e in type_.enums
+ ),
)
def visit_drop_enum_type(self, drop):
type_ = drop.element
- return "DROP TYPE %s" % (
- self.preparer.format_type(type_)
- )
+ return "DROP TYPE %s" % (self.preparer.format_type(type_))
def visit_create_index(self, create):
preparer = self.preparer
@@ -1764,46 +1890,53 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += "INDEX "
if self.dialect._supports_create_index_concurrently:
- concurrently = index.dialect_options['postgresql']['concurrently']
+ concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
text += "%s ON %s " % (
- self._prepared_index_name(index,
- include_schema=False),
- preparer.format_table(index.table)
+ self._prepared_index_name(index, include_schema=False),
+ preparer.format_table(index.table),
)
- using = index.dialect_options['postgresql']['using']
+ using = index.dialect_options["postgresql"]["using"]
if using:
text += "USING %s " % preparer.quote(using)
ops = index.dialect_options["postgresql"]["ops"]
- text += "(%s)" \
- % (
- ', '.join([
- self.sql_compiler.process(
- expr.self_group()
- if not isinstance(expr, expression.ColumnClause)
- else expr,
- include_table=False, literal_binds=True) +
- (
- (' ' + ops[expr.key])
- if hasattr(expr, 'key')
- and expr.key in ops else ''
- )
- for expr in index.expressions
- ])
- )
+ text += "(%s)" % (
+ ", ".join(
+ [
+ self.sql_compiler.process(
+ expr.self_group()
+ if not isinstance(expr, expression.ColumnClause)
+ else expr,
+ include_table=False,
+ literal_binds=True,
+ )
+ + (
+ (" " + ops[expr.key])
+ if hasattr(expr, "key") and expr.key in ops
+ else ""
+ )
+ for expr in index.expressions
+ ]
+ )
+ )
- withclause = index.dialect_options['postgresql']['with']
+ withclause = index.dialect_options["postgresql"]["with"]
if withclause:
- text += " WITH (%s)" % (', '.join(
- ['%s = %s' % storage_parameter
- for storage_parameter in withclause.items()]))
+ text += " WITH (%s)" % (
+ ", ".join(
+ [
+ "%s = %s" % storage_parameter
+ for storage_parameter in withclause.items()
+ ]
+ )
+ )
- tablespace_name = index.dialect_options['postgresql']['tablespace']
+ tablespace_name = index.dialect_options["postgresql"]["tablespace"]
if tablespace_name:
text += " TABLESPACE %s" % preparer.quote(tablespace_name)
@@ -1812,8 +1945,8 @@ class PGDDLCompiler(compiler.DDLCompiler):
if whereclause is not None:
where_compiled = self.sql_compiler.process(
- whereclause, include_table=False,
- literal_binds=True)
+ whereclause, include_table=False, literal_binds=True
+ )
text += " WHERE " + where_compiled
return text
@@ -1823,7 +1956,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
text = "\nDROP INDEX "
if self.dialect._supports_drop_index_concurrently:
- concurrently = index.dialect_options['postgresql']['concurrently']
+ concurrently = index.dialect_options["postgresql"]["concurrently"]
if concurrently:
text += "CONCURRENTLY "
@@ -1833,55 +1966,59 @@ class PGDDLCompiler(compiler.DDLCompiler):
def visit_exclude_constraint(self, constraint, **kw):
text = ""
if constraint.name is not None:
- text += "CONSTRAINT %s " % \
- self.preparer.format_constraint(constraint)
+ text += "CONSTRAINT %s " % self.preparer.format_constraint(
+ constraint
+ )
elements = []
for expr, name, op in constraint._render_exprs:
- kw['include_table'] = False
+ kw["include_table"] = False
elements.append(
"%s WITH %s" % (self.sql_compiler.process(expr, **kw), op)
)
- text += "EXCLUDE USING %s (%s)" % (constraint.using,
- ', '.join(elements))
+ text += "EXCLUDE USING %s (%s)" % (
+ constraint.using,
+ ", ".join(elements),
+ )
if constraint.where is not None:
- text += ' WHERE (%s)' % self.sql_compiler.process(
- constraint.where,
- literal_binds=True)
+ text += " WHERE (%s)" % self.sql_compiler.process(
+ constraint.where, literal_binds=True
+ )
text += self.define_constraint_deferrability(constraint)
return text
def post_create_table(self, table):
table_opts = []
- pg_opts = table.dialect_options['postgresql']
+ pg_opts = table.dialect_options["postgresql"]
- inherits = pg_opts.get('inherits')
+ inherits = pg_opts.get("inherits")
if inherits is not None:
if not isinstance(inherits, (list, tuple)):
- inherits = (inherits, )
+ inherits = (inherits,)
table_opts.append(
- '\n INHERITS ( ' +
- ', '.join(self.preparer.quote(name) for name in inherits) +
- ' )')
+ "\n INHERITS ( "
+ + ", ".join(self.preparer.quote(name) for name in inherits)
+ + " )"
+ )
- if pg_opts['partition_by']:
- table_opts.append('\n PARTITION BY %s' % pg_opts['partition_by'])
+ if pg_opts["partition_by"]:
+ table_opts.append("\n PARTITION BY %s" % pg_opts["partition_by"])
- if pg_opts['with_oids'] is True:
- table_opts.append('\n WITH OIDS')
- elif pg_opts['with_oids'] is False:
- table_opts.append('\n WITHOUT OIDS')
+ if pg_opts["with_oids"] is True:
+ table_opts.append("\n WITH OIDS")
+ elif pg_opts["with_oids"] is False:
+ table_opts.append("\n WITHOUT OIDS")
- if pg_opts['on_commit']:
- on_commit_options = pg_opts['on_commit'].replace("_", " ").upper()
- table_opts.append('\n ON COMMIT %s' % on_commit_options)
+ if pg_opts["on_commit"]:
+ on_commit_options = pg_opts["on_commit"].replace("_", " ").upper()
+ table_opts.append("\n ON COMMIT %s" % on_commit_options)
- if pg_opts['tablespace']:
- tablespace_name = pg_opts['tablespace']
+ if pg_opts["tablespace"]:
+ tablespace_name = pg_opts["tablespace"]
table_opts.append(
- '\n TABLESPACE %s' % self.preparer.quote(tablespace_name)
+ "\n TABLESPACE %s" % self.preparer.quote(tablespace_name)
)
- return ''.join(table_opts)
+ return "".join(table_opts)
class PGTypeCompiler(compiler.GenericTypeCompiler):
@@ -1910,7 +2047,7 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
if not type_.precision:
return "FLOAT"
else:
- return "FLOAT(%(precision)s)" % {'precision': type_.precision}
+ return "FLOAT(%(precision)s)" % {"precision": type_.precision}
def visit_DOUBLE_PRECISION(self, type_, **kw):
return "DOUBLE PRECISION"
@@ -1960,15 +2097,17 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_TIMESTAMP(self, type_, **kw):
return "TIMESTAMP%s %s" % (
"(%d)" % type_.precision
- if getattr(type_, 'precision', None) is not None else "",
- (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
+ if getattr(type_, "precision", None) is not None
+ else "",
+ (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_TIME(self, type_, **kw):
return "TIME%s %s" % (
"(%d)" % type_.precision
- if getattr(type_, 'precision', None) is not None else "",
- (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
+ if getattr(type_, "precision", None) is not None
+ else "",
+ (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE",
)
def visit_INTERVAL(self, type_, **kw):
@@ -2002,13 +2141,16 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
# TODO: pass **kw?
inner = self.process(type_.item_type)
return re.sub(
- r'((?: COLLATE.*)?)$',
- (r'%s\1' % (
- "[]" *
- (type_.dimensions if type_.dimensions is not None else 1)
- )),
+ r"((?: COLLATE.*)?)$",
+ (
+ r"%s\1"
+ % (
+ "[]"
+ * (type_.dimensions if type_.dimensions is not None else 1)
+ )
+ ),
inner,
- count=1
+ count=1,
)
@@ -2018,8 +2160,9 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
def _unquote_identifier(self, value):
if value[0] == self.initial_quote:
- value = value[1:-1].\
- replace(self.escape_to_quote, self.escape_quote)
+ value = value[1:-1].replace(
+ self.escape_to_quote, self.escape_quote
+ )
return value
def format_type(self, type_, use_schema=True):
@@ -2029,22 +2172,25 @@ class PGIdentifierPreparer(compiler.IdentifierPreparer):
name = self.quote(type_.name)
effective_schema = self.schema_for_object(type_)
- if not self.omit_schema and use_schema and \
- effective_schema is not None:
+ if (
+ not self.omit_schema
+ and use_schema
+ and effective_schema is not None
+ ):
name = self.quote_schema(effective_schema) + "." + name
return name
class PGInspector(reflection.Inspector):
-
def __init__(self, conn):
reflection.Inspector.__init__(self, conn)
def get_table_oid(self, table_name, schema=None):
"""Return the OID for the given table name."""
- return self.dialect.get_table_oid(self.bind, table_name, schema,
- info_cache=self.info_cache)
+ return self.dialect.get_table_oid(
+ self.bind, table_name, schema, info_cache=self.info_cache
+ )
def get_enums(self, schema=None):
"""Return a list of ENUM objects.
@@ -2080,7 +2226,7 @@ class PGInspector(reflection.Inspector):
schema = schema or self.default_schema_name
return self.dialect._get_foreign_table_names(self.bind, schema)
- def get_view_names(self, schema=None, include=('plain', 'materialized')):
+ def get_view_names(self, schema=None, include=("plain", "materialized")):
"""Return all view names in `schema`.
:param schema: Optional, retrieve names from a non-default schema.
@@ -2094,9 +2240,9 @@ class PGInspector(reflection.Inspector):
"""
- return self.dialect.get_view_names(self.bind, schema,
- info_cache=self.info_cache,
- include=include)
+ return self.dialect.get_view_names(
+ self.bind, schema, info_cache=self.info_cache, include=include
+ )
class CreateEnumType(schema._CreateDropBase):
@@ -2108,25 +2254,27 @@ class DropEnumType(schema._CreateDropBase):
class PGExecutionContext(default.DefaultExecutionContext):
-
def fire_sequence(self, seq, type_):
- return self._execute_scalar((
- "select nextval('%s')" %
- self.dialect.identifier_preparer.format_sequence(seq)), type_)
+ return self._execute_scalar(
+ (
+ "select nextval('%s')"
+ % self.dialect.identifier_preparer.format_sequence(seq)
+ ),
+ type_,
+ )
def get_insert_default(self, column):
- if column.primary_key and \
- column is column.table._autoincrement_column:
+ if column.primary_key and column is column.table._autoincrement_column:
if column.server_default and column.server_default.has_argument:
# pre-execute passive defaults on primary key columns
- return self._execute_scalar("select %s" %
- column.server_default.arg,
- column.type)
+ return self._execute_scalar(
+ "select %s" % column.server_default.arg, column.type
+ )
- elif (column.default is None or
- (column.default.is_sequence and
- column.default.optional)):
+ elif column.default is None or (
+ column.default.is_sequence and column.default.optional
+ ):
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
@@ -2137,23 +2285,25 @@ class PGExecutionContext(default.DefaultExecutionContext):
except AttributeError:
tab = column.table.name
col = column.name
- tab = tab[0:29 + max(0, (29 - len(col)))]
- col = col[0:29 + max(0, (29 - len(tab)))]
+ tab = tab[0 : 29 + max(0, (29 - len(col)))]
+ col = col[0 : 29 + max(0, (29 - len(tab)))]
name = "%s_%s_seq" % (tab, col)
column._postgresql_seq_name = seq_name = name
if column.table is not None:
effective_schema = self.connection.schema_for_object(
- column.table)
+ column.table
+ )
else:
effective_schema = None
if effective_schema is not None:
- exc = "select nextval('\"%s\".\"%s\"')" % \
- (effective_schema, seq_name)
+ exc = 'select nextval(\'"%s"."%s"\')' % (
+ effective_schema,
+ seq_name,
+ )
else:
- exc = "select nextval('\"%s\"')" % \
- (seq_name, )
+ exc = "select nextval('\"%s\"')" % (seq_name,)
return self._execute_scalar(exc, column.type)
@@ -2164,7 +2314,7 @@ class PGExecutionContext(default.DefaultExecutionContext):
class PGDialect(default.DefaultDialect):
- name = 'postgresql'
+ name = "postgresql"
supports_alter = True
max_identifier_length = 63
supports_sane_rowcount = True
@@ -2182,7 +2332,7 @@ class PGDialect(default.DefaultDialect):
supports_default_values = True
supports_empty_insert = False
supports_multivalues_insert = True
- default_paramstyle = 'pyformat'
+ default_paramstyle = "pyformat"
ischema_names = ischema_names
colspecs = colspecs
@@ -2195,32 +2345,43 @@ class PGDialect(default.DefaultDialect):
isolation_level = None
construct_arguments = [
- (schema.Index, {
- "using": False,
- "where": None,
- "ops": {},
- "concurrently": False,
- "with": {},
- "tablespace": None
- }),
- (schema.Table, {
- "ignore_search_path": False,
- "tablespace": None,
- "partition_by": None,
- "with_oids": None,
- "on_commit": None,
- "inherits": None
- }),
+ (
+ schema.Index,
+ {
+ "using": False,
+ "where": None,
+ "ops": {},
+ "concurrently": False,
+ "with": {},
+ "tablespace": None,
+ },
+ ),
+ (
+ schema.Table,
+ {
+ "ignore_search_path": False,
+ "tablespace": None,
+ "partition_by": None,
+ "with_oids": None,
+ "on_commit": None,
+ "inherits": None,
+ },
+ ),
]
- reflection_options = ('postgresql_ignore_search_path', )
+ reflection_options = ("postgresql_ignore_search_path",)
_backslash_escapes = True
_supports_create_index_concurrently = True
_supports_drop_index_concurrently = True
- def __init__(self, isolation_level=None, json_serializer=None,
- json_deserializer=None, **kwargs):
+ def __init__(
+ self,
+ isolation_level=None,
+ json_serializer=None,
+ json_deserializer=None,
+ **kwargs
+ ):
default.DefaultDialect.__init__(self, **kwargs)
self.isolation_level = isolation_level
self._json_deserializer = json_deserializer
@@ -2228,8 +2389,10 @@ class PGDialect(default.DefaultDialect):
def initialize(self, connection):
super(PGDialect, self).initialize(connection)
- self.implicit_returning = self.server_version_info > (8, 2) and \
- self.__dict__.get('implicit_returning', True)
+ self.implicit_returning = self.server_version_info > (
+ 8,
+ 2,
+ ) and self.__dict__.get("implicit_returning", True)
self.supports_native_enum = self.server_version_info >= (8, 3)
if not self.supports_native_enum:
self.colspecs = self.colspecs.copy()
@@ -2241,45 +2404,57 @@ class PGDialect(default.DefaultDialect):
# http://www.postgresql.org/docs/9.3/static/release-9-2.html#AEN116689
self.supports_smallserial = self.server_version_info >= (9, 2)
- self._backslash_escapes = self.server_version_info < (8, 2) or \
- connection.scalar(
- "show standard_conforming_strings"
- ) == 'off'
+ self._backslash_escapes = (
+ self.server_version_info < (8, 2)
+ or connection.scalar("show standard_conforming_strings") == "off"
+ )
- self._supports_create_index_concurrently = \
+ self._supports_create_index_concurrently = (
self.server_version_info >= (8, 2)
- self._supports_drop_index_concurrently = \
- self.server_version_info >= (9, 2)
+ )
+ self._supports_drop_index_concurrently = self.server_version_info >= (
+ 9,
+ 2,
+ )
def on_connect(self):
if self.isolation_level is not None:
+
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
+
return connect
else:
return None
- _isolation_lookup = set(['SERIALIZABLE', 'READ UNCOMMITTED',
- 'READ COMMITTED', 'REPEATABLE READ'])
+ _isolation_lookup = set(
+ [
+ "SERIALIZABLE",
+ "READ UNCOMMITTED",
+ "READ COMMITTED",
+ "REPEATABLE READ",
+ ]
+ )
def set_isolation_level(self, connection, level):
- level = level.replace('_', ' ')
+ level = level.replace("_", " ")
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
+ "Valid isolation levels for %s are %s"
+ % (level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
- "ISOLATION LEVEL %s" % level)
+ "ISOLATION LEVEL %s" % level
+ )
cursor.execute("COMMIT")
cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
- cursor.execute('show transaction isolation level')
+ cursor.execute("show transaction isolation level")
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
@@ -2290,8 +2465,9 @@ class PGDialect(default.DefaultDialect):
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
- def do_rollback_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ def do_rollback_twophase(
+ self, connection, xid, is_prepared=True, recover=False
+ ):
if is_prepared:
if recover:
# FIXME: ugly hack to get out of transaction
@@ -2305,8 +2481,9 @@ class PGDialect(default.DefaultDialect):
else:
self.do_rollback(connection.connection)
- def do_commit_twophase(self, connection, xid,
- is_prepared=True, recover=False):
+ def do_commit_twophase(
+ self, connection, xid, is_prepared=True, recover=False
+ ):
if is_prepared:
if recover:
connection.execute("ROLLBACK")
@@ -2318,22 +2495,27 @@ class PGDialect(default.DefaultDialect):
def do_recover_twophase(self, connection):
resultset = connection.execute(
- sql.text("SELECT gid FROM pg_prepared_xacts"))
+ sql.text("SELECT gid FROM pg_prepared_xacts")
+ )
return [row[0] for row in resultset]
def _get_default_schema_name(self, connection):
return connection.scalar("select current_schema()")
def has_schema(self, connection, schema):
- query = ("select nspname from pg_namespace "
- "where lower(nspname)=:schema")
+ query = (
+ "select nspname from pg_namespace " "where lower(nspname)=:schema"
+ )
cursor = connection.execute(
sql.text(
query,
bindparams=[
sql.bindparam(
- 'schema', util.text_type(schema.lower()),
- type_=sqltypes.Unicode)]
+ "schema",
+ util.text_type(schema.lower()),
+ type_=sqltypes.Unicode,
+ )
+ ],
)
)
@@ -2349,8 +2531,12 @@ class PGDialect(default.DefaultDialect):
"pg_catalog.pg_table_is_visible(c.oid) "
"and relname=:name",
bindparams=[
- sql.bindparam('name', util.text_type(table_name),
- type_=sqltypes.Unicode)]
+ sql.bindparam(
+ "name",
+ util.text_type(table_name),
+ type_=sqltypes.Unicode,
+ )
+ ],
)
)
else:
@@ -2360,12 +2546,17 @@ class PGDialect(default.DefaultDialect):
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name",
bindparams=[
- sql.bindparam('name',
- util.text_type(table_name),
- type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema),
- type_=sqltypes.Unicode)]
+ sql.bindparam(
+ "name",
+ util.text_type(table_name),
+ type_=sqltypes.Unicode,
+ ),
+ sql.bindparam(
+ "schema",
+ util.text_type(schema),
+ type_=sqltypes.Unicode,
+ ),
+ ],
)
)
return bool(cursor.first())
@@ -2379,9 +2570,12 @@ class PGDialect(default.DefaultDialect):
"n.nspname=current_schema() "
"and relname=:name",
bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode)
- ]
+ sql.bindparam(
+ "name",
+ util.text_type(sequence_name),
+ type_=sqltypes.Unicode,
+ )
+ ],
)
)
else:
@@ -2391,12 +2585,17 @@ class PGDialect(default.DefaultDialect):
"n.oid=c.relnamespace where relkind='S' and "
"n.nspname=:schema and relname=:name",
bindparams=[
- sql.bindparam('name', util.text_type(sequence_name),
- type_=sqltypes.Unicode),
- sql.bindparam('schema',
- util.text_type(schema),
- type_=sqltypes.Unicode)
- ]
+ sql.bindparam(
+ "name",
+ util.text_type(sequence_name),
+ type_=sqltypes.Unicode,
+ ),
+ sql.bindparam(
+ "schema",
+ util.text_type(schema),
+ type_=sqltypes.Unicode,
+ ),
+ ],
)
)
@@ -2423,13 +2622,15 @@ class PGDialect(default.DefaultDialect):
"""
query = sql.text(query)
query = query.bindparams(
- sql.bindparam('typname',
- util.text_type(type_name), type_=sqltypes.Unicode),
+ sql.bindparam(
+ "typname", util.text_type(type_name), type_=sqltypes.Unicode
+ )
)
if schema is not None:
query = query.bindparams(
- sql.bindparam('nspname',
- util.text_type(schema), type_=sqltypes.Unicode),
+ sql.bindparam(
+ "nspname", util.text_type(schema), type_=sqltypes.Unicode
+ )
)
cursor = connection.execute(query)
return bool(cursor.scalar())
@@ -2437,12 +2638,14 @@ class PGDialect(default.DefaultDialect):
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match(
- r'.*(?:PostgreSQL|EnterpriseDB) '
- r'(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?',
- v)
+ r".*(?:PostgreSQL|EnterpriseDB) "
+ r"(\d+)\.?(\d+)?(?:\.(\d+))?(?:\.\d+)?(?:devel|beta)?",
+ v,
+ )
if not m:
raise AssertionError(
- "Could not determine version from string '%s'" % v)
+ "Could not determine version from string '%s'" % v
+ )
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
@reflection.cache
@@ -2459,14 +2662,17 @@ class PGDialect(default.DefaultDialect):
schema_where_clause = "n.nspname = :schema"
else:
schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
- query = """
+ query = (
+ """
SELECT c.oid
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE (%s)
AND c.relname = :table_name AND c.relkind in
('r', 'v', 'm', 'f', 'p')
- """ % schema_where_clause
+ """
+ % schema_where_clause
+ )
# Since we're binding to unicode, table_name and schema_name must be
# unicode.
table_name = util.text_type(table_name)
@@ -2475,7 +2681,7 @@ class PGDialect(default.DefaultDialect):
s = sql.text(query).bindparams(table_name=sqltypes.Unicode)
s = s.columns(oid=sqltypes.Integer)
if schema:
- s = s.bindparams(sql.bindparam('schema', type_=sqltypes.Unicode))
+ s = s.bindparams(sql.bindparam("schema", type_=sqltypes.Unicode))
c = connection.execute(s, table_name=table_name, schema=schema)
table_oid = c.scalar()
if table_oid is None:
@@ -2485,75 +2691,88 @@ class PGDialect(default.DefaultDialect):
@reflection.cache
def get_schema_names(self, connection, **kw):
result = connection.execute(
- sql.text("SELECT nspname FROM pg_namespace "
- "WHERE nspname NOT LIKE 'pg_%' "
- "ORDER BY nspname"
- ).columns(nspname=sqltypes.Unicode))
+ sql.text(
+ "SELECT nspname FROM pg_namespace "
+ "WHERE nspname NOT LIKE 'pg_%' "
+ "ORDER BY nspname"
+ ).columns(nspname=sqltypes.Unicode)
+ )
return [name for name, in result]
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
result = connection.execute(
- sql.text("SELECT c.relname FROM pg_class c "
- "JOIN pg_namespace n ON n.oid = c.relnamespace "
- "WHERE n.nspname = :schema AND c.relkind in ('r', 'p')"
- ).columns(relname=sqltypes.Unicode),
- schema=schema if schema is not None else self.default_schema_name)
+ sql.text(
+ "SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind in ('r', 'p')"
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name,
+ )
return [name for name, in result]
@reflection.cache
def _get_foreign_table_names(self, connection, schema=None, **kw):
result = connection.execute(
- sql.text("SELECT c.relname FROM pg_class c "
- "JOIN pg_namespace n ON n.oid = c.relnamespace "
- "WHERE n.nspname = :schema AND c.relkind = 'f'"
- ).columns(relname=sqltypes.Unicode),
- schema=schema if schema is not None else self.default_schema_name)
+ sql.text(
+ "SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind = 'f'"
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name,
+ )
return [name for name, in result]
@reflection.cache
def get_view_names(
- self, connection, schema=None,
- include=('plain', 'materialized'), **kw):
+ self, connection, schema=None, include=("plain", "materialized"), **kw
+ ):
- include_kind = {'plain': 'v', 'materialized': 'm'}
+ include_kind = {"plain": "v", "materialized": "m"}
try:
kinds = [include_kind[i] for i in util.to_list(include)]
except KeyError:
raise ValueError(
"include %r unknown, needs to be a sequence containing "
- "one or both of 'plain' and 'materialized'" % (include,))
+ "one or both of 'plain' and 'materialized'" % (include,)
+ )
if not kinds:
raise ValueError(
"empty include, needs to be a sequence containing "
- "one or both of 'plain' and 'materialized'")
+ "one or both of 'plain' and 'materialized'"
+ )
result = connection.execute(
- sql.text("SELECT c.relname FROM pg_class c "
- "JOIN pg_namespace n ON n.oid = c.relnamespace "
- "WHERE n.nspname = :schema AND c.relkind IN (%s)" %
- (", ".join("'%s'" % elem for elem in kinds))
- ).columns(relname=sqltypes.Unicode),
- schema=schema if schema is not None else self.default_schema_name)
+ sql.text(
+ "SELECT c.relname FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relkind IN (%s)"
+ % (", ".join("'%s'" % elem for elem in kinds))
+ ).columns(relname=sqltypes.Unicode),
+ schema=schema if schema is not None else self.default_schema_name,
+ )
return [name for name, in result]
@reflection.cache
def get_view_definition(self, connection, view_name, schema=None, **kw):
view_def = connection.scalar(
- sql.text("SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
- "JOIN pg_namespace n ON n.oid = c.relnamespace "
- "WHERE n.nspname = :schema AND c.relname = :view_name "
- "AND c.relkind IN ('v', 'm')"
- ).columns(view_def=sqltypes.Unicode),
+ sql.text(
+ "SELECT pg_get_viewdef(c.oid) view_def FROM pg_class c "
+ "JOIN pg_namespace n ON n.oid = c.relnamespace "
+ "WHERE n.nspname = :schema AND c.relname = :view_name "
+ "AND c.relkind IN ('v', 'm')"
+ ).columns(view_def=sqltypes.Unicode),
schema=schema if schema is not None else self.default_schema_name,
- view_name=view_name)
+ view_name=view_name,
+ )
return view_def
@reflection.cache
def get_columns(self, connection, table_name, schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
@@ -2571,13 +2790,11 @@ class PGDialect(default.DefaultDialect):
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
- s = sql.text(SQL_COLS,
- bindparams=[
- sql.bindparam('table_oid', type_=sqltypes.Integer)],
- typemap={
- 'attname': sqltypes.Unicode,
- 'default': sqltypes.Unicode}
- )
+ s = sql.text(
+ SQL_COLS,
+ bindparams=[sql.bindparam("table_oid", type_=sqltypes.Integer)],
+ typemap={"attname": sqltypes.Unicode, "default": sqltypes.Unicode},
+ )
c = connection.execute(s, table_oid=table_oid)
rows = c.fetchall()
@@ -2588,34 +2805,58 @@ class PGDialect(default.DefaultDialect):
# dictionary with (name, ) if default search path or (schema, name)
# as keys
enums = dict(
- ((rec['name'], ), rec)
- if rec['visible'] else ((rec['schema'], rec['name']), rec)
- for rec in self._load_enums(connection, schema='*')
+ ((rec["name"],), rec)
+ if rec["visible"]
+ else ((rec["schema"], rec["name"]), rec)
+ for rec in self._load_enums(connection, schema="*")
)
# format columns
columns = []
- for name, format_type, default_, notnull, attnum, table_oid, \
- comment in rows:
+ for (
+ name,
+ format_type,
+ default_,
+ notnull,
+ attnum,
+ table_oid,
+ comment,
+ ) in rows:
column_info = self._get_column_info(
- name, format_type, default_, notnull, domains, enums,
- schema, comment)
+ name,
+ format_type,
+ default_,
+ notnull,
+ domains,
+ enums,
+ schema,
+ comment,
+ )
columns.append(column_info)
return columns
- def _get_column_info(self, name, format_type, default,
- notnull, domains, enums, schema, comment):
+ def _get_column_info(
+ self,
+ name,
+ format_type,
+ default,
+ notnull,
+ domains,
+ enums,
+ schema,
+ comment,
+ ):
def _handle_array_type(attype):
return (
# strip '[]' from integer[], etc.
- re.sub(r'\[\]$', '', attype),
- attype.endswith('[]'),
+ re.sub(r"\[\]$", "", attype),
+ attype.endswith("[]"),
)
# strip (*) from character varying(5), timestamp(5)
# with time zone, geometry(POLYGON), etc.
- attype = re.sub(r'\(.*\)', '', format_type)
+ attype = re.sub(r"\(.*\)", "", format_type)
# strip '[]' from integer[], etc. and check if an array
attype, is_array = _handle_array_type(attype)
@@ -2625,50 +2866,52 @@ class PGDialect(default.DefaultDialect):
nullable = not notnull
- charlen = re.search(r'\(([\d,]+)\)', format_type)
+ charlen = re.search(r"\(([\d,]+)\)", format_type)
if charlen:
charlen = charlen.group(1)
- args = re.search(r'\((.*)\)', format_type)
+ args = re.search(r"\((.*)\)", format_type)
if args and args.group(1):
- args = tuple(re.split(r'\s*,\s*', args.group(1)))
+ args = tuple(re.split(r"\s*,\s*", args.group(1)))
else:
args = ()
kwargs = {}
- if attype == 'numeric':
+ if attype == "numeric":
if charlen:
- prec, scale = charlen.split(',')
+ prec, scale = charlen.split(",")
args = (int(prec), int(scale))
else:
args = ()
- elif attype == 'double precision':
- args = (53, )
- elif attype == 'integer':
+ elif attype == "double precision":
+ args = (53,)
+ elif attype == "integer":
args = ()
- elif attype in ('timestamp with time zone',
- 'time with time zone'):
- kwargs['timezone'] = True
+ elif attype in ("timestamp with time zone", "time with time zone"):
+ kwargs["timezone"] = True
if charlen:
- kwargs['precision'] = int(charlen)
+ kwargs["precision"] = int(charlen)
args = ()
- elif attype in ('timestamp without time zone',
- 'time without time zone', 'time'):
- kwargs['timezone'] = False
+ elif attype in (
+ "timestamp without time zone",
+ "time without time zone",
+ "time",
+ ):
+ kwargs["timezone"] = False
if charlen:
- kwargs['precision'] = int(charlen)
+ kwargs["precision"] = int(charlen)
args = ()
- elif attype == 'bit varying':
- kwargs['varying'] = True
+ elif attype == "bit varying":
+ kwargs["varying"] = True
if charlen:
args = (int(charlen),)
else:
args = ()
- elif attype.startswith('interval'):
- field_match = re.match(r'interval (.+)', attype, re.I)
+ elif attype.startswith("interval"):
+ field_match = re.match(r"interval (.+)", attype, re.I)
if charlen:
- kwargs['precision'] = int(charlen)
+ kwargs["precision"] = int(charlen)
if field_match:
- kwargs['fields'] = field_match.group(1)
+ kwargs["fields"] = field_match.group(1)
attype = "interval"
args = ()
elif charlen:
@@ -2682,23 +2925,23 @@ class PGDialect(default.DefaultDialect):
elif enum_or_domain_key in enums:
enum = enums[enum_or_domain_key]
coltype = ENUM
- kwargs['name'] = enum['name']
- if not enum['visible']:
- kwargs['schema'] = enum['schema']
- args = tuple(enum['labels'])
+ kwargs["name"] = enum["name"]
+ if not enum["visible"]:
+ kwargs["schema"] = enum["schema"]
+ args = tuple(enum["labels"])
break
elif enum_or_domain_key in domains:
domain = domains[enum_or_domain_key]
- attype = domain['attype']
+ attype = domain["attype"]
attype, is_array = _handle_array_type(attype)
# strip quotes from case sensitive enum or domain names
enum_or_domain_key = tuple(util.quoted_token_parser(attype))
# A table can't override whether the domain is nullable.
- nullable = domain['nullable']
- if domain['default'] and not default:
+ nullable = domain["nullable"]
+ if domain["default"] and not default:
# It can, however, override the default
# value, but can't set it to null.
- default = domain['default']
+ default = domain["default"]
continue
else:
coltype = None
@@ -2707,10 +2950,11 @@ class PGDialect(default.DefaultDialect):
if coltype:
coltype = coltype(*args, **kwargs)
if is_array:
- coltype = self.ischema_names['_array'](coltype)
+ coltype = self.ischema_names["_array"](coltype)
else:
- util.warn("Did not recognize type '%s' of column '%s'" %
- (attype, name))
+ util.warn(
+ "Did not recognize type '%s' of column '%s'" % (attype, name)
+ )
coltype = sqltypes.NULLTYPE
# adjust the default value
autoincrement = False
@@ -2721,23 +2965,33 @@ class PGDialect(default.DefaultDialect):
autoincrement = True
# the default is related to a Sequence
sch = schema
- if '.' not in match.group(2) and sch is not None:
+ if "." not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# "quote schema"
- default = match.group(1) + \
- ('"%s"' % sch) + '.' + \
- match.group(2) + match.group(3)
+ default = (
+ match.group(1)
+ + ('"%s"' % sch)
+ + "."
+ + match.group(2)
+ + match.group(3)
+ )
- column_info = dict(name=name, type=coltype, nullable=nullable,
- default=default, autoincrement=autoincrement,
- comment=comment)
+ column_info = dict(
+ name=name,
+ type=coltype,
+ nullable=nullable,
+ default=default,
+ autoincrement=autoincrement,
+ comment=comment,
+ )
return column_info
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
if self.server_version_info < (8, 4):
PK_SQL = """
@@ -2750,7 +3004,9 @@ class PGDialect(default.DefaultDialect):
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
- """ % self._pg_index_any("a.attnum", "ix.indkey")
+ """ % self._pg_index_any(
+ "a.attnum", "ix.indkey"
+ )
else:
# unnest() and generate_subscripts() both introduced in
@@ -2766,7 +3022,7 @@ class PGDialect(default.DefaultDialect):
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
- t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
+ t = sql.text(PK_SQL, typemap={"attname": sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
cols = [r[0] for r in c.fetchall()]
@@ -2776,18 +3032,25 @@ class PGDialect(default.DefaultDialect):
WHERE r.conrelid = :table_oid AND r.contype = 'p'
ORDER BY 1
"""
- t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
+ t = sql.text(PK_CONS_SQL, typemap={"conname": sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
name = c.scalar()
- return {'constrained_columns': cols, 'name': name}
+ return {"constrained_columns": cols, "name": name}
@reflection.cache
- def get_foreign_keys(self, connection, table_name, schema=None,
- postgresql_ignore_search_path=False, **kw):
+ def get_foreign_keys(
+ self,
+ connection,
+ table_name,
+ schema=None,
+ postgresql_ignore_search_path=False,
+ **kw
+ ):
preparer = self.identifier_preparer
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
FK_SQL = """
SELECT r.conname,
@@ -2805,34 +3068,35 @@ class PGDialect(default.DefaultDialect):
"""
# http://www.postgresql.org/docs/9.0/static/sql-createtable.html
FK_REGEX = re.compile(
- r'FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)'
- r'[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?'
- r'[\s]?(ON UPDATE '
- r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(ON DELETE '
- r'(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?'
- r'[\s]?(DEFERRABLE|NOT DEFERRABLE)?'
- r'[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?'
+ r"FOREIGN KEY \((.*?)\) REFERENCES (?:(.*?)\.)?(.*?)\((.*?)\)"
+ r"[\s]?(MATCH (FULL|PARTIAL|SIMPLE)+)?"
+ r"[\s]?(ON UPDATE "
+ r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
+ r"[\s]?(ON DELETE "
+ r"(CASCADE|RESTRICT|NO ACTION|SET NULL|SET DEFAULT)+)?"
+ r"[\s]?(DEFERRABLE|NOT DEFERRABLE)?"
+ r"[\s]?(INITIALLY (DEFERRED|IMMEDIATE)+)?"
)
- t = sql.text(FK_SQL, typemap={
- 'conname': sqltypes.Unicode,
- 'condef': sqltypes.Unicode})
+ t = sql.text(
+ FK_SQL,
+ typemap={"conname": sqltypes.Unicode, "condef": sqltypes.Unicode},
+ )
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search(FK_REGEX, condef).groups()
- constrained_columns, referred_schema, \
- referred_table, referred_columns, \
- _, match, _, onupdate, _, ondelete, \
- deferrable, _, initially = m
+ constrained_columns, referred_schema, referred_table, referred_columns, _, match, _, onupdate, _, ondelete, deferrable, _, initially = (
+ m
+ )
if deferrable is not None:
- deferrable = True if deferrable == 'DEFERRABLE' else False
- constrained_columns = [preparer._unquote_identifier(x)
- for x in re.split(
- r'\s*,\s*', constrained_columns)]
+ deferrable = True if deferrable == "DEFERRABLE" else False
+ constrained_columns = [
+ preparer._unquote_identifier(x)
+ for x in re.split(r"\s*,\s*", constrained_columns)
+ ]
if postgresql_ignore_search_path:
# when ignoring search path, we use the actual schema
@@ -2845,30 +3109,30 @@ class PGDialect(default.DefaultDialect):
# referred_schema is the schema that we regexp'ed from
# pg_get_constraintdef(). If the schema is in the search
# path, pg_get_constraintdef() will give us None.
- referred_schema = \
- preparer._unquote_identifier(referred_schema)
+ referred_schema = preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# If the actual schema matches the schema of the table
# we're reflecting, then we will use that.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
- referred_columns = [preparer._unquote_identifier(x)
- for x in
- re.split(r'\s*,\s', referred_columns)]
+ referred_columns = [
+ preparer._unquote_identifier(x)
+ for x in re.split(r"\s*,\s", referred_columns)
+ ]
fkey_d = {
- 'name': conname,
- 'constrained_columns': constrained_columns,
- 'referred_schema': referred_schema,
- 'referred_table': referred_table,
- 'referred_columns': referred_columns,
- 'options': {
- 'onupdate': onupdate,
- 'ondelete': ondelete,
- 'deferrable': deferrable,
- 'initially': initially,
- 'match': match
- }
+ "name": conname,
+ "constrained_columns": constrained_columns,
+ "referred_schema": referred_schema,
+ "referred_table": referred_table,
+ "referred_columns": referred_columns,
+ "options": {
+ "onupdate": onupdate,
+ "ondelete": ondelete,
+ "deferrable": deferrable,
+ "initially": initially,
+ "match": match,
+ },
}
fkeys.append(fkey_d)
return fkeys
@@ -2882,16 +3146,16 @@ class PGDialect(default.DefaultDialect):
# for now.
# regards, tom lane"
return "(%s)" % " OR ".join(
- "%s[%d] = %s" % (compare_to, ind, col)
- for ind in range(0, 10)
+ "%s[%d] = %s" % (compare_to, ind, col) for ind in range(0, 10)
)
else:
return "%s = ANY(%s)" % (col, compare_to)
@reflection.cache
def get_indexes(self, connection, table_name, schema, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
# cast indkey as varchar since it's an int2vector,
# returned as a list by some drivers such as pypostgresql
@@ -2925,9 +3189,10 @@ class PGDialect(default.DefaultDialect):
# cast does not work in PG 8.2.4, does work in 8.3.0.
# nothing in PG changelogs regarding this.
"::varchar" if self.server_version_info >= (8, 3) else "",
- "i.reloptions" if self.server_version_info >= (8, 2)
+ "i.reloptions"
+ if self.server_version_info >= (8, 2)
else "NULL",
- self._pg_index_any("a.attnum", "ix.indkey")
+ self._pg_index_any("a.attnum", "ix.indkey"),
)
else:
IDX_SQL = """
@@ -2960,76 +3225,93 @@ class PGDialect(default.DefaultDialect):
i.relname
"""
- t = sql.text(IDX_SQL, typemap={
- 'relname': sqltypes.Unicode,
- 'attname': sqltypes.Unicode})
+ t = sql.text(
+ IDX_SQL,
+ typemap={"relname": sqltypes.Unicode, "attname": sqltypes.Unicode},
+ )
c = connection.execute(t, table_oid=table_oid)
indexes = defaultdict(lambda: defaultdict(dict))
sv_idx_name = None
for row in c.fetchall():
- (idx_name, unique, expr, prd, col,
- col_num, conrelid, idx_key, options, amname) = row
+ (
+ idx_name,
+ unique,
+ expr,
+ prd,
+ col,
+ col_num,
+ conrelid,
+ idx_key,
+ options,
+ amname,
+ ) = row
if expr:
if idx_name != sv_idx_name:
util.warn(
"Skipped unsupported reflection of "
- "expression-based index %s"
- % idx_name)
+ "expression-based index %s" % idx_name
+ )
sv_idx_name = idx_name
continue
if prd and not idx_name == sv_idx_name:
util.warn(
"Predicate of partial index %s ignored during reflection"
- % idx_name)
+ % idx_name
+ )
sv_idx_name = idx_name
has_idx = idx_name in indexes
index = indexes[idx_name]
if col is not None:
- index['cols'][col_num] = col
+ index["cols"][col_num] = col
if not has_idx:
- index['key'] = [int(k.strip()) for k in idx_key.split()]
- index['unique'] = unique
+ index["key"] = [int(k.strip()) for k in idx_key.split()]
+ index["unique"] = unique
if conrelid is not None:
- index['duplicates_constraint'] = idx_name
+ index["duplicates_constraint"] = idx_name
if options:
- index['options'] = dict(
- [option.split("=") for option in options])
+ index["options"] = dict(
+ [option.split("=") for option in options]
+ )
# it *might* be nice to include that this is 'btree' in the
# reflection info. But we don't want an Index object
# to have a ``postgresql_using`` in it that is just the
# default, so for the moment leaving this out.
- if amname and amname != 'btree':
- index['amname'] = amname
+ if amname and amname != "btree":
+ index["amname"] = amname
result = []
for name, idx in indexes.items():
entry = {
- 'name': name,
- 'unique': idx['unique'],
- 'column_names': [idx['cols'][i] for i in idx['key']]
+ "name": name,
+ "unique": idx["unique"],
+ "column_names": [idx["cols"][i] for i in idx["key"]],
}
- if 'duplicates_constraint' in idx:
- entry['duplicates_constraint'] = idx['duplicates_constraint']
- if 'options' in idx:
- entry.setdefault(
- 'dialect_options', {})["postgresql_with"] = idx['options']
- if 'amname' in idx:
- entry.setdefault(
- 'dialect_options', {})["postgresql_using"] = idx['amname']
+ if "duplicates_constraint" in idx:
+ entry["duplicates_constraint"] = idx["duplicates_constraint"]
+ if "options" in idx:
+ entry.setdefault("dialect_options", {})[
+ "postgresql_with"
+ ] = idx["options"]
+ if "amname" in idx:
+ entry.setdefault("dialect_options", {})[
+ "postgresql_using"
+ ] = idx["amname"]
result.append(entry)
return result
@reflection.cache
- def get_unique_constraints(self, connection, table_name,
- schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ def get_unique_constraints(
+ self, connection, table_name, schema=None, **kw
+ ):
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
UNIQUE_SQL = """
SELECT
@@ -3047,7 +3329,7 @@ class PGDialect(default.DefaultDialect):
cons.contype = 'u'
"""
- t = sql.text(UNIQUE_SQL, typemap={'col_name': sqltypes.Unicode})
+ t = sql.text(UNIQUE_SQL, typemap={"col_name": sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
uniques = defaultdict(lambda: defaultdict(dict))
@@ -3057,15 +3339,15 @@ class PGDialect(default.DefaultDialect):
uc["cols"][row.col_num] = row.col_name
return [
- {'name': name,
- 'column_names': [uc["cols"][i] for i in uc["key"]]}
+ {"name": name, "column_names": [uc["cols"][i] for i in uc["key"]]}
for name, uc in uniques.items()
]
@reflection.cache
def get_table_comment(self, connection, table_name, schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
COMMENT_SQL = """
SELECT
@@ -3081,10 +3363,10 @@ class PGDialect(default.DefaultDialect):
return {"text": c.scalar()}
@reflection.cache
- def get_check_constraints(
- self, connection, table_name, schema=None, **kw):
- table_oid = self.get_table_oid(connection, table_name, schema,
- info_cache=kw.get('info_cache'))
+ def get_check_constraints(self, connection, table_name, schema=None, **kw):
+ table_oid = self.get_table_oid(
+ connection, table_name, schema, info_cache=kw.get("info_cache")
+ )
CHECK_SQL = """
SELECT
@@ -3100,10 +3382,8 @@ class PGDialect(default.DefaultDialect):
c = connection.execute(sql.text(CHECK_SQL), table_oid=table_oid)
return [
- {'name': name,
- 'sqltext': src[1:-1]}
- for name, src in c.fetchall()
- ]
+ {"name": name, "sqltext": src[1:-1]} for name, src in c.fetchall()
+ ]
def _load_enums(self, connection, schema=None):
schema = schema or self.default_schema_name
@@ -3124,17 +3404,18 @@ class PGDialect(default.DefaultDialect):
WHERE t.typtype = 'e'
"""
- if schema != '*':
+ if schema != "*":
SQL_ENUMS += "AND n.nspname = :schema "
# e.oid gives us label order within an enum
SQL_ENUMS += 'ORDER BY "schema", "name", e.oid'
- s = sql.text(SQL_ENUMS, typemap={
- 'attname': sqltypes.Unicode,
- 'label': sqltypes.Unicode})
+ s = sql.text(
+ SQL_ENUMS,
+ typemap={"attname": sqltypes.Unicode, "label": sqltypes.Unicode},
+ )
- if schema != '*':
+ if schema != "*":
s = s.bindparams(schema=schema)
c = connection.execute(s)
@@ -3142,15 +3423,15 @@ class PGDialect(default.DefaultDialect):
enums = []
enum_by_name = {}
for enum in c.fetchall():
- key = (enum['schema'], enum['name'])
+ key = (enum["schema"], enum["name"])
if key in enum_by_name:
- enum_by_name[key]['labels'].append(enum['label'])
+ enum_by_name[key]["labels"].append(enum["label"])
else:
enum_by_name[key] = enum_rec = {
- 'name': enum['name'],
- 'schema': enum['schema'],
- 'visible': enum['visible'],
- 'labels': [enum['label']],
+ "name": enum["name"],
+ "schema": enum["schema"],
+ "visible": enum["visible"],
+ "labels": [enum["label"]],
}
enums.append(enum_rec)
return enums
@@ -3169,26 +3450,26 @@ class PGDialect(default.DefaultDialect):
WHERE t.typtype = 'd'
"""
- s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
+ s = sql.text(SQL_DOMAINS, typemap={"attname": sqltypes.Unicode})
c = connection.execute(s)
domains = {}
for domain in c.fetchall():
# strip (30) from character varying(30)
- attype = re.search(r'([^\(]+)', domain['attype']).group(1)
+ attype = re.search(r"([^\(]+)", domain["attype"]).group(1)
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
- if domain['visible']:
- key = (domain['name'], )
+ if domain["visible"]:
+ key = (domain["name"],)
else:
- key = (domain['schema'], domain['name'])
+ key = (domain["schema"], domain["name"])
domains[key] = {
- 'attype': attype,
- 'nullable': domain['nullable'],
- 'default': domain['default']
+ "attype": attype,
+ "nullable": domain["nullable"],
+ "default": domain["default"],
}
return domains
diff --git a/lib/sqlalchemy/dialects/postgresql/dml.py b/lib/sqlalchemy/dialects/postgresql/dml.py
index 555a9006c..825f13238 100644
--- a/lib/sqlalchemy/dialects/postgresql/dml.py
+++ b/lib/sqlalchemy/dialects/postgresql/dml.py
@@ -14,7 +14,7 @@ from ...sql.base import _generative
from ... import util
from . import ext
-__all__ = ('Insert', 'insert')
+__all__ = ("Insert", "insert")
class Insert(StandardInsert):
@@ -40,13 +40,17 @@ class Insert(StandardInsert):
to use :attr:`.Insert.excluded`
"""
- return alias(self.table, name='excluded').columns
+ return alias(self.table, name="excluded").columns
@_generative
def on_conflict_do_update(
- self,
- constraint=None, index_elements=None,
- index_where=None, set_=None, where=None):
+ self,
+ constraint=None,
+ index_elements=None,
+ index_where=None,
+ set_=None,
+ where=None,
+ ):
"""
Specifies a DO UPDATE SET action for ON CONFLICT clause.
@@ -96,13 +100,14 @@ class Insert(StandardInsert):
"""
self._post_values_clause = OnConflictDoUpdate(
- constraint, index_elements, index_where, set_, where)
+ constraint, index_elements, index_where, set_, where
+ )
return self
@_generative
def on_conflict_do_nothing(
- self,
- constraint=None, index_elements=None, index_where=None):
+ self, constraint=None, index_elements=None, index_where=None
+ ):
"""
Specifies a DO NOTHING action for ON CONFLICT clause.
@@ -130,30 +135,29 @@ class Insert(StandardInsert):
"""
self._post_values_clause = OnConflictDoNothing(
- constraint, index_elements, index_where)
+ constraint, index_elements, index_where
+ )
return self
-insert = public_factory(Insert, '.dialects.postgresql.insert')
+
+insert = public_factory(Insert, ".dialects.postgresql.insert")
class OnConflictClause(ClauseElement):
- def __init__(
- self,
- constraint=None,
- index_elements=None,
- index_where=None):
+ def __init__(self, constraint=None, index_elements=None, index_where=None):
if constraint is not None:
- if not isinstance(constraint, util.string_types) and \
- isinstance(constraint, (
- schema.Index, schema.Constraint,
- ext.ExcludeConstraint)):
- constraint = getattr(constraint, 'name') or constraint
+ if not isinstance(constraint, util.string_types) and isinstance(
+ constraint,
+ (schema.Index, schema.Constraint, ext.ExcludeConstraint),
+ ):
+ constraint = getattr(constraint, "name") or constraint
if constraint is not None:
if index_elements is not None:
raise ValueError(
- "'constraint' and 'index_elements' are mutually exclusive")
+ "'constraint' and 'index_elements' are mutually exclusive"
+ )
if isinstance(constraint, util.string_types):
self.constraint_target = constraint
@@ -161,54 +165,61 @@ class OnConflictClause(ClauseElement):
self.inferred_target_whereclause = None
elif isinstance(constraint, schema.Index):
index_elements = constraint.expressions
- index_where = \
- constraint.dialect_options['postgresql'].get("where")
+ index_where = constraint.dialect_options["postgresql"].get(
+ "where"
+ )
elif isinstance(constraint, ext.ExcludeConstraint):
index_elements = constraint.columns
index_where = constraint.where
else:
index_elements = constraint.columns
- index_where = \
- constraint.dialect_options['postgresql'].get("where")
+ index_where = constraint.dialect_options["postgresql"].get(
+ "where"
+ )
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
elif constraint is None:
- self.constraint_target = self.inferred_target_elements = \
- self.inferred_target_whereclause = None
+ self.constraint_target = (
+ self.inferred_target_elements
+ ) = self.inferred_target_whereclause = None
class OnConflictDoNothing(OnConflictClause):
- __visit_name__ = 'on_conflict_do_nothing'
+ __visit_name__ = "on_conflict_do_nothing"
class OnConflictDoUpdate(OnConflictClause):
- __visit_name__ = 'on_conflict_do_update'
+ __visit_name__ = "on_conflict_do_update"
def __init__(
- self,
- constraint=None,
- index_elements=None,
- index_where=None,
- set_=None,
- where=None):
+ self,
+ constraint=None,
+ index_elements=None,
+ index_where=None,
+ set_=None,
+ where=None,
+ ):
super(OnConflictDoUpdate, self).__init__(
constraint=constraint,
index_elements=index_elements,
- index_where=index_where)
+ index_where=index_where,
+ )
- if self.inferred_target_elements is None and \
- self.constraint_target is None:
+ if (
+ self.inferred_target_elements is None
+ and self.constraint_target is None
+ ):
raise ValueError(
"Either constraint or index_elements, "
- "but not both, must be specified unless DO NOTHING")
+ "but not both, must be specified unless DO NOTHING"
+ )
- if (not isinstance(set_, dict) or not set_):
+ if not isinstance(set_, dict) or not set_:
raise ValueError("set parameter must be a non-empty dictionary")
self.update_values_to_set = [
- (key, value)
- for key, value in set_.items()
+ (key, value) for key, value in set_.items()
]
self.update_whereclause = where
diff --git a/lib/sqlalchemy/dialects/postgresql/ext.py b/lib/sqlalchemy/dialects/postgresql/ext.py
index a588eafdd..da0c6250c 100644
--- a/lib/sqlalchemy/dialects/postgresql/ext.py
+++ b/lib/sqlalchemy/dialects/postgresql/ext.py
@@ -47,7 +47,7 @@ class aggregate_order_by(expression.ColumnElement):
"""
- __visit_name__ = 'aggregate_order_by'
+ __visit_name__ = "aggregate_order_by"
def __init__(self, target, *order_by):
self.target = elements._literal_as_binds(target)
@@ -59,8 +59,8 @@ class aggregate_order_by(expression.ColumnElement):
self.order_by = elements._literal_as_binds(order_by[0])
else:
self.order_by = elements.ClauseList(
- *order_by,
- _literal_as_text=elements._literal_as_binds)
+ *order_by, _literal_as_text=elements._literal_as_binds
+ )
def self_group(self, against=None):
return self
@@ -87,7 +87,7 @@ class ExcludeConstraint(ColumnCollectionConstraint):
static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
- __visit_name__ = 'exclude_constraint'
+ __visit_name__ = "exclude_constraint"
where = None
@@ -173,8 +173,7 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
- self._extract_col_expression_collection(expressions),
- operators
+ self._extract_col_expression_collection(expressions), operators
):
if add_element is not None:
columns.append(add_element)
@@ -187,32 +186,31 @@ static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
expr = expression._literal_as_text(expr)
- render_exprs.append(
- (expr, name, operator)
- )
+ render_exprs.append((expr, name, operator))
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
- name=kw.get('name'),
- deferrable=kw.get('deferrable'),
- initially=kw.get('initially')
+ name=kw.get("name"),
+ deferrable=kw.get("deferrable"),
+ initially=kw.get("initially")
)
- self.using = kw.get('using', 'gist')
- where = kw.get('where')
+ self.using = kw.get("using", "gist")
+ where = kw.get("where")
if where is not None:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
- elements = [(col, self.operators[col])
- for col in self.columns.keys()]
- c = self.__class__(*elements,
- name=self.name,
- deferrable=self.deferrable,
- initially=self.initially,
- where=self.where,
- using=self.using)
+ elements = [(col, self.operators[col]) for col in self.columns.keys()]
+ c = self.__class__(
+ *elements,
+ name=self.name,
+ deferrable=self.deferrable,
+ initially=self.initially,
+ where=self.where,
+ using=self.using
+ )
c.dispatch._update(self.dispatch)
return c
@@ -226,5 +224,5 @@ def array_agg(*arg, **kw):
.. versionadded:: 1.1
"""
- kw['_default_array_type'] = ARRAY
+ kw["_default_array_type"] = ARRAY
return functions.func.array_agg(*arg, **kw)
diff --git a/lib/sqlalchemy/dialects/postgresql/hstore.py b/lib/sqlalchemy/dialects/postgresql/hstore.py
index b6c9e7124..e4bac692a 100644
--- a/lib/sqlalchemy/dialects/postgresql/hstore.py
+++ b/lib/sqlalchemy/dialects/postgresql/hstore.py
@@ -14,38 +14,50 @@ from ...sql import functions as sqlfunc
from ...sql import operators
from ... import util
-__all__ = ('HSTORE', 'hstore')
+__all__ = ("HSTORE", "hstore")
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
GETITEM = operators.custom_op(
- "->", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "->",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_KEY = operators.custom_op(
- "?", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_ALL = operators.custom_op(
- "?&", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?&",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_ANY = operators.custom_op(
- "?|", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?|",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
CONTAINS = operators.custom_op(
- "@>", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "@>",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
CONTAINED_BY = operators.custom_op(
- "<@", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "<@",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
@@ -122,7 +134,7 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
"""
- __visit_name__ = 'HSTORE'
+ __visit_name__ = "HSTORE"
hashable = False
text_type = sqltypes.Text()
@@ -139,7 +151,8 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
self.text_type = text_type
class Comparator(
- sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator):
+ sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
+ ):
"""Define comparison operations for :class:`.HSTORE`."""
def has_key(self, other):
@@ -169,7 +182,8 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
keys of the argument jsonb expression.
"""
return self.operate(
- CONTAINED_BY, other, result_type=sqltypes.Boolean)
+ CONTAINED_BY, other, result_type=sqltypes.Boolean
+ )
def _setup_getitem(self, index):
return GETITEM, index, self.type.text_type
@@ -223,12 +237,15 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
return _serialize_hstore(value).encode(encoding)
else:
return value
+
else:
+
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
else:
return value
+
return process
def result_processor(self, dialect, coltype):
@@ -240,16 +257,19 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
return _parse_hstore(value.decode(encoding))
else:
return value
+
else:
+
def process(value):
if value is not None:
return _parse_hstore(value)
else:
return value
+
return process
-ischema_names['hstore'] = HSTORE
+ischema_names["hstore"] = HSTORE
class hstore(sqlfunc.GenericFunction):
@@ -279,43 +299,44 @@ class hstore(sqlfunc.GenericFunction):
:class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.
"""
+
type = HSTORE
- name = 'hstore'
+ name = "hstore"
class _HStoreDefinedFunction(sqlfunc.GenericFunction):
type = sqltypes.Boolean
- name = 'defined'
+ name = "defined"
class _HStoreDeleteFunction(sqlfunc.GenericFunction):
type = HSTORE
- name = 'delete'
+ name = "delete"
class _HStoreSliceFunction(sqlfunc.GenericFunction):
type = HSTORE
- name = 'slice'
+ name = "slice"
class _HStoreKeysFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
- name = 'akeys'
+ name = "akeys"
class _HStoreValsFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
- name = 'avals'
+ name = "avals"
class _HStoreArrayFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
- name = 'hstore_to_array'
+ name = "hstore_to_array"
class _HStoreMatrixFunction(sqlfunc.GenericFunction):
type = ARRAY(sqltypes.Text)
- name = 'hstore_to_matrix'
+ name = "hstore_to_matrix"
#
@@ -326,7 +347,8 @@ class _HStoreMatrixFunction(sqlfunc.GenericFunction):
# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
-HSTORE_PAIR_RE = re.compile(r"""
+HSTORE_PAIR_RE = re.compile(
+ r"""
(
"(?P<key> (\\ . | [^"])* )" # Quoted key
)
@@ -335,11 +357,16 @@ HSTORE_PAIR_RE = re.compile(r"""
(?P<value_null> NULL ) # NULL value
| "(?P<value> (\\ . | [^"])* )" # Quoted value
)
-""", re.VERBOSE)
+""",
+ re.VERBOSE,
+)
-HSTORE_DELIMITER_RE = re.compile(r"""
+HSTORE_DELIMITER_RE = re.compile(
+ r"""
[ ]* , [ ]*
-""", re.VERBOSE)
+""",
+ re.VERBOSE,
+)
def _parse_error(hstore_str, pos):
@@ -348,16 +375,19 @@ def _parse_error(hstore_str, pos):
ctx = 20
hslen = len(hstore_str)
- parsed_tail = hstore_str[max(pos - ctx - 1, 0):min(pos, hslen)]
- residual = hstore_str[min(pos, hslen):min(pos + ctx + 1, hslen)]
+ parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
+ residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]
if len(parsed_tail) > ctx:
- parsed_tail = '[...]' + parsed_tail[1:]
+ parsed_tail = "[...]" + parsed_tail[1:]
if len(residual) > ctx:
- residual = residual[:-1] + '[...]'
+ residual = residual[:-1] + "[...]"
return "After %r, could not parse residual at position %d: %r" % (
- parsed_tail, pos, residual)
+ parsed_tail,
+ pos,
+ residual,
+ )
def _parse_hstore(hstore_str):
@@ -377,13 +407,15 @@ def _parse_hstore(hstore_str):
pair_match = HSTORE_PAIR_RE.match(hstore_str)
while pair_match is not None:
- key = pair_match.group('key').replace(r'\"', '"').replace(
- "\\\\", "\\")
- if pair_match.group('value_null'):
+ key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
+ if pair_match.group("value_null"):
value = None
else:
- value = pair_match.group('value').replace(
- r'\"', '"').replace("\\\\", "\\")
+ value = (
+ pair_match.group("value")
+ .replace(r"\"", '"')
+ .replace("\\\\", "\\")
+ )
result[key] = value
pos += pair_match.end()
@@ -405,16 +437,17 @@ def _serialize_hstore(val):
both be strings (except None for values).
"""
+
def esc(s, position):
- if position == 'value' and s is None:
- return 'NULL'
+ if position == "value" and s is None:
+ return "NULL"
elif isinstance(s, util.string_types):
- return '"%s"' % s.replace("\\", "\\\\").replace('"', r'\"')
+ return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
else:
- raise ValueError("%r in %s position is not a string." %
- (s, position))
-
- return ', '.join('%s=>%s' % (esc(k, 'key'), esc(v, 'value'))
- for k, v in val.items())
-
+ raise ValueError(
+ "%r in %s position is not a string." % (s, position)
+ )
+ return ", ".join(
+ "%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
+ )
diff --git a/lib/sqlalchemy/dialects/postgresql/json.py b/lib/sqlalchemy/dialects/postgresql/json.py
index e9256daf3..f9421de37 100644
--- a/lib/sqlalchemy/dialects/postgresql/json.py
+++ b/lib/sqlalchemy/dialects/postgresql/json.py
@@ -12,44 +12,58 @@ from ...sql import operators
from ...sql import elements
from ... import util
-__all__ = ('JSON', 'JSONB')
+__all__ = ("JSON", "JSONB")
idx_precedence = operators._PRECEDENCE[operators.json_getitem_op]
ASTEXT = operators.custom_op(
- "->>", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "->>",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
JSONPATH_ASTEXT = operators.custom_op(
- "#>>", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "#>>",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_KEY = operators.custom_op(
- "?", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_ALL = operators.custom_op(
- "?&", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?&",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
HAS_ANY = operators.custom_op(
- "?|", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "?|",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
CONTAINS = operators.custom_op(
- "@>", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "@>",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
CONTAINED_BY = operators.custom_op(
- "<@", precedence=idx_precedence, natural_self_precedent=True,
- eager_grouping=True
+ "<@",
+ precedence=idx_precedence,
+ natural_self_precedent=True,
+ eager_grouping=True,
)
@@ -59,7 +73,7 @@ class JSONPathType(sqltypes.JSON.JSONPathType):
def process(value):
assert isinstance(value, util.collections_abc.Sequence)
- tokens = [util.text_type(elem)for elem in value]
+ tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
@@ -72,7 +86,7 @@ class JSONPathType(sqltypes.JSON.JSONPathType):
def process(value):
assert isinstance(value, util.collections_abc.Sequence)
- tokens = [util.text_type(elem)for elem in value]
+ tokens = [util.text_type(elem) for elem in value]
value = "{%s}" % (", ".join(tokens))
if super_proc:
value = super_proc(value)
@@ -80,6 +94,7 @@ class JSONPathType(sqltypes.JSON.JSONPathType):
return process
+
colspecs[sqltypes.JSON.JSONPathType] = JSONPathType
@@ -203,16 +218,19 @@ class JSON(sqltypes.JSON):
if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
return self.expr.left.operate(
JSONPATH_ASTEXT,
- self.expr.right, result_type=self.type.astext_type)
+ self.expr.right,
+ result_type=self.type.astext_type,
+ )
else:
return self.expr.left.operate(
- ASTEXT, self.expr.right, result_type=self.type.astext_type)
+ ASTEXT, self.expr.right, result_type=self.type.astext_type
+ )
comparator_factory = Comparator
colspecs[sqltypes.JSON] = JSON
-ischema_names['json'] = JSON
+ischema_names["json"] = JSON
class JSONB(JSON):
@@ -259,7 +277,7 @@ class JSONB(JSON):
"""
- __visit_name__ = 'JSONB'
+ __visit_name__ = "JSONB"
class Comparator(JSON.Comparator):
"""Define comparison operations for :class:`.JSON`."""
@@ -291,8 +309,10 @@ class JSONB(JSON):
keys of the argument jsonb expression.
"""
return self.operate(
- CONTAINED_BY, other, result_type=sqltypes.Boolean)
+ CONTAINED_BY, other, result_type=sqltypes.Boolean
+ )
comparator_factory = Comparator
-ischema_names['jsonb'] = JSONB
+
+ischema_names["jsonb"] = JSONB
diff --git a/lib/sqlalchemy/dialects/postgresql/pg8000.py b/lib/sqlalchemy/dialects/postgresql/pg8000.py
index 80929b808..fef09e0eb 100644
--- a/lib/sqlalchemy/dialects/postgresql/pg8000.py
+++ b/lib/sqlalchemy/dialects/postgresql/pg8000.py
@@ -69,8 +69,15 @@ import decimal
from ... import processors
from ... import types as sqltypes
from .base import (
- PGDialect, PGCompiler, PGIdentifierPreparer, PGExecutionContext,
- _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES, UUID)
+ PGDialect,
+ PGCompiler,
+ PGIdentifierPreparer,
+ PGExecutionContext,
+ _DECIMAL_TYPES,
+ _FLOAT_TYPES,
+ _INT_TYPES,
+ UUID,
+)
import re
from sqlalchemy.dialects.postgresql.json import JSON
from ...sql.elements import quoted_name
@@ -86,13 +93,15 @@ class _PGNumeric(sqltypes.Numeric):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
- decimal.Decimal, self._effective_decimal_return_scale)
+ decimal.Decimal, self._effective_decimal_return_scale
+ )
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
@@ -101,7 +110,8 @@ class _PGNumeric(sqltypes.Numeric):
return processors.to_float
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
class _PGNumericNoBind(_PGNumeric):
@@ -110,7 +120,6 @@ class _PGNumericNoBind(_PGNumeric):
class _PGJSON(JSON):
-
def result_processor(self, dialect, coltype):
if dialect._dbapi_version > (1, 10, 1):
return None # Has native JSON
@@ -121,18 +130,22 @@ class _PGJSON(JSON):
class _PGUUID(UUID):
def bind_processor(self, dialect):
if not self.as_uuid:
+
def process(value):
if value is not None:
value = _python_UUID(value)
return value
+
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid:
+
def process(value):
if value is not None:
value = str(value)
return value
+
return process
@@ -142,36 +155,41 @@ class PGExecutionContext_pg8000(PGExecutionContext):
class PGCompiler_pg8000(PGCompiler):
def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ return (
+ self.process(binary.left, **kw)
+ + " %% "
+ + self.process(binary.right, **kw)
+ )
def post_process_text(self, text):
- if '%%' in text:
- util.warn("The SQLAlchemy postgresql dialect "
- "now automatically escapes '%' in text() "
- "expressions to '%%'.")
- return text.replace('%', '%%')
+ if "%%" in text:
+ util.warn(
+ "The SQLAlchemy postgresql dialect "
+ "now automatically escapes '%' in text() "
+ "expressions to '%%'."
+ )
+ return text.replace("%", "%%")
class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace('%', '%%')
+ return value.replace("%", "%%")
class PGDialect_pg8000(PGDialect):
- driver = 'pg8000'
+ driver = "pg8000"
supports_unicode_statements = True
supports_unicode_binds = True
- default_paramstyle = 'format'
+ default_paramstyle = "format"
supports_sane_multi_rowcount = True
execution_ctx_cls = PGExecutionContext_pg8000
statement_compiler = PGCompiler_pg8000
preparer = PGIdentifierPreparer_pg8000
- description_encoding = 'use_encoding'
+ description_encoding = "use_encoding"
colspecs = util.update_copy(
PGDialect.colspecs,
@@ -180,8 +198,8 @@ class PGDialect_pg8000(PGDialect):
sqltypes.Float: _PGNumeric,
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
- UUID: _PGUUID
- }
+ UUID: _PGUUID,
+ },
)
def __init__(self, client_encoding=None, **kwargs):
@@ -194,22 +212,26 @@ class PGDialect_pg8000(PGDialect):
@util.memoized_property
def _dbapi_version(self):
- if self.dbapi and hasattr(self.dbapi, '__version__'):
+ if self.dbapi and hasattr(self.dbapi, "__version__"):
return tuple(
[
- int(x) for x in re.findall(
- r'(\d+)(?:[-\.]?|$)', self.dbapi.__version__)])
+ int(x)
+ for x in re.findall(
+ r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
+ )
+ ]
+ )
else:
return (99, 99, 99)
@classmethod
def dbapi(cls):
- return __import__('pg8000')
+ return __import__("pg8000")
def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
+ opts = url.translate_connect_args(username="user")
+ if "port" in opts:
+ opts["port"] = int(opts["port"])
opts.update(url.query)
return ([], opts)
@@ -217,32 +239,33 @@ class PGDialect_pg8000(PGDialect):
return "connection is closed" in str(e)
def set_isolation_level(self, connection, level):
- level = level.replace('_', ' ')
+ level = level.replace("_", " ")
# adjust for ConnectionFairy possibly being present
- if hasattr(connection, 'connection'):
+ if hasattr(connection, "connection"):
connection = connection.connection
- if level == 'AUTOCOMMIT':
+ if level == "AUTOCOMMIT":
connection.autocommit = True
elif level in self._isolation_lookup:
connection.autocommit = False
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
- "ISOLATION LEVEL %s" % level)
+ "ISOLATION LEVEL %s" % level
+ )
cursor.execute("COMMIT")
cursor.close()
else:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s or AUTOCOMMIT" %
- (level, self.name, ", ".join(self._isolation_lookup))
+ "Valid isolation levels for %s are %s or AUTOCOMMIT"
+ % (level, self.name, ", ".join(self._isolation_lookup))
)
def set_client_encoding(self, connection, client_encoding):
# adjust for ConnectionFairy possibly being present
- if hasattr(connection, 'connection'):
+ if hasattr(connection, "connection"):
connection = connection.connection
cursor = connection.cursor()
@@ -251,18 +274,20 @@ class PGDialect_pg8000(PGDialect):
cursor.close()
def do_begin_twophase(self, connection, xid):
- connection.connection.tpc_begin((0, xid, ''))
+ connection.connection.tpc_begin((0, xid, ""))
def do_prepare_twophase(self, connection, xid):
connection.connection.tpc_prepare()
def do_rollback_twophase(
- self, connection, xid, is_prepared=True, recover=False):
- connection.connection.tpc_rollback((0, xid, ''))
+ self, connection, xid, is_prepared=True, recover=False
+ ):
+ connection.connection.tpc_rollback((0, xid, ""))
def do_commit_twophase(
- self, connection, xid, is_prepared=True, recover=False):
- connection.connection.tpc_commit((0, xid, ''))
+ self, connection, xid, is_prepared=True, recover=False
+ ):
+ connection.connection.tpc_commit((0, xid, ""))
def do_recover_twophase(self, connection):
return [row[1] for row in connection.connection.tpc_recover()]
@@ -272,24 +297,32 @@ class PGDialect_pg8000(PGDialect):
def on_connect(conn):
conn.py_types[quoted_name] = conn.py_types[util.text_type]
+
fns.append(on_connect)
if self.client_encoding is not None:
+
def on_connect(conn):
self.set_client_encoding(conn, self.client_encoding)
+
fns.append(on_connect)
if self.isolation_level is not None:
+
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
+
fns.append(on_connect)
if len(fns) > 0:
+
def on_connect(conn):
for fn in fns:
fn(conn)
+
return on_connect
else:
return None
+
dialect = PGDialect_pg8000
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2.py b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
index baa0e00d5..2c27c6919 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2.py
@@ -353,10 +353,17 @@ from ... import processors
from ...engine import result as _result
from ...sql import expression
from ... import types as sqltypes
-from .base import PGDialect, PGCompiler, \
- PGIdentifierPreparer, PGExecutionContext, \
- ENUM, _DECIMAL_TYPES, _FLOAT_TYPES,\
- _INT_TYPES, UUID
+from .base import (
+ PGDialect,
+ PGCompiler,
+ PGIdentifierPreparer,
+ PGExecutionContext,
+ ENUM,
+ _DECIMAL_TYPES,
+ _FLOAT_TYPES,
+ _INT_TYPES,
+ UUID,
+)
from .hstore import HSTORE
from .json import JSON, JSONB
@@ -366,7 +373,7 @@ except ImportError:
_python_UUID = None
-logger = logging.getLogger('sqlalchemy.dialects.postgresql')
+logger = logging.getLogger("sqlalchemy.dialects.postgresql")
class _PGNumeric(sqltypes.Numeric):
@@ -377,14 +384,15 @@ class _PGNumeric(sqltypes.Numeric):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
+ decimal.Decimal, self._effective_decimal_return_scale
+ )
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# pg8000 returns Decimal natively for 1700
return None
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
else:
if coltype in _FLOAT_TYPES:
# pg8000 returns float natively for 701
@@ -393,7 +401,8 @@ class _PGNumeric(sqltypes.Numeric):
return processors.to_float
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
class _PGEnum(ENUM):
@@ -421,7 +430,6 @@ class _PGHStore(HSTORE):
class _PGJSON(JSON):
-
def result_processor(self, dialect, coltype):
if dialect._has_native_json:
return None
@@ -430,7 +438,6 @@ class _PGJSON(JSON):
class _PGJSONB(JSONB):
-
def result_processor(self, dialect, coltype):
if dialect._has_native_jsonb:
return None
@@ -447,14 +454,17 @@ class _PGUUID(UUID):
if value is not None:
value = _python_UUID(value)
return value
+
return process
def result_processor(self, dialect, coltype):
if not self.as_uuid and dialect.use_native_uuid:
+
def process(value):
if value is not None:
value = str(value)
return value
+
return process
@@ -465,8 +475,7 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
def create_server_side_cursor(self):
# use server-side cursors:
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
- ident = "c_%s_%s" % (hex(id(self))[2:],
- hex(_server_side_id())[2:])
+ ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
return self._dbapi_connection.cursor(ident)
def get_result_proxy(self):
@@ -497,13 +506,13 @@ class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
class PGDialect_psycopg2(PGDialect):
- driver = 'psycopg2'
+ driver = "psycopg2"
if util.py2k:
supports_unicode_statements = False
supports_server_side_cursors = True
- default_paramstyle = 'pyformat'
+ default_paramstyle = "pyformat"
# set to true based on psycopg2 version
supports_sane_multi_rowcount = False
execution_ctx_cls = PGExecutionContext_psycopg2
@@ -516,16 +525,16 @@ class PGDialect_psycopg2(PGDialect):
native_jsonb=(2, 5, 4),
sane_multi_rowcount=(2, 0, 9),
array_oid=(2, 4, 3),
- hstore_adapter=(2, 4)
+ hstore_adapter=(2, 4),
)
_has_native_hstore = False
_has_native_json = False
_has_native_jsonb = False
- engine_config_types = PGDialect.engine_config_types.union([
- ('use_native_unicode', util.asbool),
- ])
+ engine_config_types = PGDialect.engine_config_types.union(
+ [("use_native_unicode", util.asbool)]
+ )
colspecs = util.update_copy(
PGDialect.colspecs,
@@ -537,15 +546,20 @@ class PGDialect_psycopg2(PGDialect):
JSON: _PGJSON,
sqltypes.JSON: _PGJSON,
JSONB: _PGJSONB,
- UUID: _PGUUID
- }
+ UUID: _PGUUID,
+ },
)
- def __init__(self, server_side_cursors=False, use_native_unicode=True,
- client_encoding=None,
- use_native_hstore=True, use_native_uuid=True,
- use_batch_mode=False,
- **kwargs):
+ def __init__(
+ self,
+ server_side_cursors=False,
+ use_native_unicode=True,
+ client_encoding=None,
+ use_native_hstore=True,
+ use_native_uuid=True,
+ use_batch_mode=False,
+ **kwargs
+ ):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
self.use_native_unicode = use_native_unicode
@@ -554,65 +568,70 @@ class PGDialect_psycopg2(PGDialect):
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
self.psycopg2_batch_mode = use_batch_mode
- if self.dbapi and hasattr(self.dbapi, '__version__'):
- m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
- self.dbapi.__version__)
+ if self.dbapi and hasattr(self.dbapi, "__version__"):
+ m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
- int(x)
- for x in m.group(1, 2, 3)
- if x is not None)
+ int(x) for x in m.group(1, 2, 3) if x is not None
+ )
def initialize(self, connection):
super(PGDialect_psycopg2, self).initialize(connection)
- self._has_native_hstore = self.use_native_hstore and \
- self._hstore_oids(connection.connection) \
- is not None
- self._has_native_json = \
- self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_json']
- self._has_native_jsonb = \
- self.psycopg2_version >= self.FEATURE_VERSION_MAP['native_jsonb']
+ self._has_native_hstore = (
+ self.use_native_hstore
+ and self._hstore_oids(connection.connection) is not None
+ )
+ self._has_native_json = (
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_json"]
+ )
+ self._has_native_jsonb = (
+ self.psycopg2_version >= self.FEATURE_VERSION_MAP["native_jsonb"]
+ )
# http://initd.org/psycopg/docs/news.html#what-s-new-in-psycopg-2-0-9
- self.supports_sane_multi_rowcount = \
- self.psycopg2_version >= \
- self.FEATURE_VERSION_MAP['sane_multi_rowcount'] and \
- not self.psycopg2_batch_mode
+ self.supports_sane_multi_rowcount = (
+ self.psycopg2_version
+ >= self.FEATURE_VERSION_MAP["sane_multi_rowcount"]
+ and not self.psycopg2_batch_mode
+ )
@classmethod
def dbapi(cls):
import psycopg2
+
return psycopg2
@classmethod
def _psycopg2_extensions(cls):
from psycopg2 import extensions
+
return extensions
@classmethod
def _psycopg2_extras(cls):
from psycopg2 import extras
+
return extras
@util.memoized_property
def _isolation_lookup(self):
extensions = self._psycopg2_extensions()
return {
- 'AUTOCOMMIT': extensions.ISOLATION_LEVEL_AUTOCOMMIT,
- 'READ COMMITTED': extensions.ISOLATION_LEVEL_READ_COMMITTED,
- 'READ UNCOMMITTED': extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
- 'REPEATABLE READ': extensions.ISOLATION_LEVEL_REPEATABLE_READ,
- 'SERIALIZABLE': extensions.ISOLATION_LEVEL_SERIALIZABLE
+ "AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
+ "READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
+ "READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
+ "REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
+ "SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
}
def set_isolation_level(self, connection, level):
try:
- level = self._isolation_lookup[level.replace('_', ' ')]
+ level = self._isolation_lookup[level.replace("_", " ")]
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
- "Valid isolation levels for %s are %s" %
- (level, self.name, ", ".join(self._isolation_lookup))
+ "Valid isolation levels for %s are %s"
+ % (level, self.name, ", ".join(self._isolation_lookup))
)
connection.set_isolation_level(level)
@@ -623,54 +642,72 @@ class PGDialect_psycopg2(PGDialect):
fns = []
if self.client_encoding is not None:
+
def on_connect(conn):
conn.set_client_encoding(self.client_encoding)
+
fns.append(on_connect)
if self.isolation_level is not None:
+
def on_connect(conn):
self.set_isolation_level(conn, self.isolation_level)
+
fns.append(on_connect)
if self.dbapi and self.use_native_uuid:
+
def on_connect(conn):
extras.register_uuid(None, conn)
+
fns.append(on_connect)
if self.dbapi and self.use_native_unicode:
+
def on_connect(conn):
extensions.register_type(extensions.UNICODE, conn)
extensions.register_type(extensions.UNICODEARRAY, conn)
+
fns.append(on_connect)
if self.dbapi and self.use_native_hstore:
+
def on_connect(conn):
hstore_oids = self._hstore_oids(conn)
if hstore_oids is not None:
oid, array_oid = hstore_oids
- kw = {'oid': oid}
+ kw = {"oid": oid}
if util.py2k:
- kw['unicode'] = True
- if self.psycopg2_version >= \
- self.FEATURE_VERSION_MAP['array_oid']:
- kw['array_oid'] = array_oid
+ kw["unicode"] = True
+ if (
+ self.psycopg2_version
+ >= self.FEATURE_VERSION_MAP["array_oid"]
+ ):
+ kw["array_oid"] = array_oid
extras.register_hstore(conn, **kw)
+
fns.append(on_connect)
if self.dbapi and self._json_deserializer:
+
def on_connect(conn):
if self._has_native_json:
extras.register_default_json(
- conn, loads=self._json_deserializer)
+ conn, loads=self._json_deserializer
+ )
if self._has_native_jsonb:
extras.register_default_jsonb(
- conn, loads=self._json_deserializer)
+ conn, loads=self._json_deserializer
+ )
+
fns.append(on_connect)
if fns:
+
def on_connect(conn):
for fn in fns:
fn(conn)
+
return on_connect
else:
return None
@@ -684,7 +721,7 @@ class PGDialect_psycopg2(PGDialect):
@util.memoized_instancemethod
def _hstore_oids(self, conn):
- if self.psycopg2_version >= self.FEATURE_VERSION_MAP['hstore_adapter']:
+ if self.psycopg2_version >= self.FEATURE_VERSION_MAP["hstore_adapter"]:
extras = self._psycopg2_extras()
oids = extras.HstoreAdapter.get_oids(conn)
if oids is not None and oids[0]:
@@ -692,9 +729,9 @@ class PGDialect_psycopg2(PGDialect):
return None
def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
+ opts = url.translate_connect_args(username="user")
+ if "port" in opts:
+ opts["port"] = int(opts["port"])
opts.update(url.query)
return ([], opts)
@@ -704,7 +741,7 @@ class PGDialect_psycopg2(PGDialect):
# present on old psycopg2 versions. Also,
# this flag doesn't actually help in a lot of disconnect
# situations, so don't rely on it.
- if getattr(connection, 'closed', False):
+ if getattr(connection, "closed", False):
return True
# checks based on strings. in the case that .closed
@@ -713,28 +750,29 @@ class PGDialect_psycopg2(PGDialect):
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
- 'terminating connection',
- 'closed the connection',
- 'connection not open',
- 'could not receive data from server',
- 'could not send data to server',
+ "terminating connection",
+ "closed the connection",
+ "connection not open",
+ "could not receive data from server",
+ "could not send data to server",
# psycopg2 client errors, psycopg2/conenction.h,
# psycopg2/cursor.h
- 'connection already closed',
- 'cursor already closed',
+ "connection already closed",
+ "cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
- 'losed the connection unexpectedly',
+ "losed the connection unexpectedly",
# these can occur in newer SSL
- 'connection has been closed unexpectedly',
- 'SSL SYSCALL error: Bad file descriptor',
- 'SSL SYSCALL error: EOF detected',
- 'SSL error: decryption failed or bad record mac',
- 'SSL SYSCALL error: Operation timed out',
+ "connection has been closed unexpectedly",
+ "SSL SYSCALL error: Bad file descriptor",
+ "SSL SYSCALL error: EOF detected",
+ "SSL error: decryption failed or bad record mac",
+ "SSL SYSCALL error: Operation timed out",
]:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
+
dialect = PGDialect_psycopg2
diff --git a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
index a1141a90e..7343bc973 100644
--- a/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
+++ b/lib/sqlalchemy/dialects/postgresql/psycopg2cffi.py
@@ -28,7 +28,7 @@ from .psycopg2 import PGDialect_psycopg2
class PGDialect_psycopg2cffi(PGDialect_psycopg2):
- driver = 'psycopg2cffi'
+ driver = "psycopg2cffi"
supports_unicode_statements = True
# psycopg2cffi's first release is 2.5.0, but reports
@@ -40,21 +40,21 @@ class PGDialect_psycopg2cffi(PGDialect_psycopg2):
native_jsonb=(2, 7, 1),
sane_multi_rowcount=(2, 4, 4),
array_oid=(2, 4, 4),
- hstore_adapter=(2, 4, 4)
+ hstore_adapter=(2, 4, 4),
)
@classmethod
def dbapi(cls):
- return __import__('psycopg2cffi')
+ return __import__("psycopg2cffi")
@classmethod
def _psycopg2_extensions(cls):
- root = __import__('psycopg2cffi', fromlist=['extensions'])
+ root = __import__("psycopg2cffi", fromlist=["extensions"])
return root.extensions
@classmethod
def _psycopg2_extras(cls):
- root = __import__('psycopg2cffi', fromlist=['extras'])
+ root = __import__("psycopg2cffi", fromlist=["extras"])
return root.extras
diff --git a/lib/sqlalchemy/dialects/postgresql/pygresql.py b/lib/sqlalchemy/dialects/postgresql/pygresql.py
index 304afca44..c7edb8fc3 100644
--- a/lib/sqlalchemy/dialects/postgresql/pygresql.py
+++ b/lib/sqlalchemy/dialects/postgresql/pygresql.py
@@ -20,14 +20,20 @@ import re
from ... import exc, processors, util
from ...types import Numeric, JSON as Json
from ...sql.elements import Null
-from .base import PGDialect, PGCompiler, PGIdentifierPreparer, \
- _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES, UUID
+from .base import (
+ PGDialect,
+ PGCompiler,
+ PGIdentifierPreparer,
+ _DECIMAL_TYPES,
+ _FLOAT_TYPES,
+ _INT_TYPES,
+ UUID,
+)
from .hstore import HSTORE
from .json import JSON, JSONB
class _PGNumeric(Numeric):
-
def bind_processor(self, dialect):
return None
@@ -37,14 +43,15 @@ class _PGNumeric(Numeric):
if self.asdecimal:
if coltype in _FLOAT_TYPES:
return processors.to_decimal_processor_factory(
- decimal.Decimal,
- self._effective_decimal_return_scale)
+ decimal.Decimal, self._effective_decimal_return_scale
+ )
elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
# PyGreSQL returns Decimal natively for 1700 (numeric)
return None
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
else:
if coltype in _FLOAT_TYPES:
# PyGreSQL returns float natively for 701 (float8)
@@ -53,19 +60,21 @@ class _PGNumeric(Numeric):
return processors.to_float
else:
raise exc.InvalidRequestError(
- "Unknown PG numeric type: %d" % coltype)
+ "Unknown PG numeric type: %d" % coltype
+ )
class _PGHStore(HSTORE):
-
def bind_processor(self, dialect):
if not dialect.has_native_hstore:
return super(_PGHStore, self).bind_processor(dialect)
hstore = dialect.dbapi.Hstore
+
def process(value):
if isinstance(value, dict):
return hstore(value)
return value
+
return process
def result_processor(self, dialect, coltype):
@@ -74,7 +83,6 @@ class _PGHStore(HSTORE):
class _PGJSON(JSON):
-
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSON, self).bind_processor(dialect)
@@ -84,7 +92,8 @@ class _PGJSON(JSON):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
- value is None and self.none_as_null):
+ value is None and self.none_as_null
+ ):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
@@ -98,7 +107,6 @@ class _PGJSON(JSON):
class _PGJSONB(JSONB):
-
def bind_processor(self, dialect):
if not dialect.has_native_json:
return super(_PGJSONB, self).bind_processor(dialect)
@@ -108,7 +116,8 @@ class _PGJSONB(JSONB):
if value is self.NULL:
value = None
elif isinstance(value, Null) or (
- value is None and self.none_as_null):
+ value is None and self.none_as_null
+ ):
return None
if value is None or isinstance(value, (dict, list)):
return json(value)
@@ -122,7 +131,6 @@ class _PGJSONB(JSONB):
class _PGUUID(UUID):
-
def bind_processor(self, dialect):
if not dialect.has_native_uuid:
return super(_PGUUID, self).bind_processor(dialect)
@@ -145,32 +153,35 @@ class _PGUUID(UUID):
if not dialect.has_native_uuid:
return super(_PGUUID, self).result_processor(dialect, coltype)
if not self.as_uuid:
+
def process(value):
if value is not None:
return str(value)
+
return process
class _PGCompiler(PGCompiler):
-
def visit_mod_binary(self, binary, operator, **kw):
- return self.process(binary.left, **kw) + " %% " + \
- self.process(binary.right, **kw)
+ return (
+ self.process(binary.left, **kw)
+ + " %% "
+ + self.process(binary.right, **kw)
+ )
def post_process_text(self, text):
- return text.replace('%', '%%')
+ return text.replace("%", "%%")
class _PGIdentifierPreparer(PGIdentifierPreparer):
-
def _escape_identifier(self, value):
value = value.replace(self.escape_quote, self.escape_to_quote)
- return value.replace('%', '%%')
+ return value.replace("%", "%%")
class PGDialect_pygresql(PGDialect):
- driver = 'pygresql'
+ driver = "pygresql"
statement_compiler = _PGCompiler
preparer = _PGIdentifierPreparer
@@ -178,6 +189,7 @@ class PGDialect_pygresql(PGDialect):
@classmethod
def dbapi(cls):
import pgdb
+
return pgdb
colspecs = util.update_copy(
@@ -189,14 +201,14 @@ class PGDialect_pygresql(PGDialect):
JSON: _PGJSON,
JSONB: _PGJSONB,
UUID: _PGUUID,
- }
+ },
)
def __init__(self, **kwargs):
super(PGDialect_pygresql, self).__init__(**kwargs)
try:
version = self.dbapi.version
- m = re.match(r'(\d+)\.(\d+)', version)
+ m = re.match(r"(\d+)\.(\d+)", version)
version = (int(m.group(1)), int(m.group(2)))
except (AttributeError, ValueError, TypeError):
version = (0, 0)
@@ -204,8 +216,10 @@ class PGDialect_pygresql(PGDialect):
if version < (5, 0):
has_native_hstore = has_native_json = has_native_uuid = False
if version != (0, 0):
- util.warn("PyGreSQL is only fully supported by SQLAlchemy"
- " since version 5.0.")
+ util.warn(
+ "PyGreSQL is only fully supported by SQLAlchemy"
+ " since version 5.0."
+ )
else:
self.supports_unicode_statements = True
self.supports_unicode_binds = True
@@ -215,10 +229,12 @@ class PGDialect_pygresql(PGDialect):
self.has_native_uuid = has_native_uuid
def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['host'] = '%s:%s' % (
- opts.get('host', '').rsplit(':', 1)[0], opts.pop('port'))
+ opts = url.translate_connect_args(username="user")
+ if "port" in opts:
+ opts["host"] = "%s:%s" % (
+ opts.get("host", "").rsplit(":", 1)[0],
+ opts.pop("port"),
+ )
opts.update(url.query)
return [], opts
diff --git a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
index b633323b4..93bf653a4 100644
--- a/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
+++ b/lib/sqlalchemy/dialects/postgresql/pypostgresql.py
@@ -37,12 +37,12 @@ class PGExecutionContext_pypostgresql(PGExecutionContext):
class PGDialect_pypostgresql(PGDialect):
- driver = 'pypostgresql'
+ driver = "pypostgresql"
supports_unicode_statements = True
supports_unicode_binds = True
description_encoding = None
- default_paramstyle = 'pyformat'
+ default_paramstyle = "pyformat"
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropriately
@@ -54,22 +54,27 @@ class PGDialect_pypostgresql(PGDialect):
PGDialect.colspecs,
{
sqltypes.Numeric: PGNumeric,
-
# prevents PGNumeric from being used
sqltypes.Float: sqltypes.Float,
- }
+ },
)
@classmethod
def dbapi(cls):
from postgresql.driver import dbapi20
+
return dbapi20
_DBAPI_ERROR_NAMES = [
"Error",
- "InterfaceError", "DatabaseError", "DataError",
- "OperationalError", "IntegrityError", "InternalError",
- "ProgrammingError", "NotSupportedError"
+ "InterfaceError",
+ "DatabaseError",
+ "DataError",
+ "OperationalError",
+ "IntegrityError",
+ "InternalError",
+ "ProgrammingError",
+ "NotSupportedError",
]
@util.memoized_property
@@ -83,15 +88,16 @@ class PGDialect_pypostgresql(PGDialect):
)
def create_connect_args(self, url):
- opts = url.translate_connect_args(username='user')
- if 'port' in opts:
- opts['port'] = int(opts['port'])
+ opts = url.translate_connect_args(username="user")
+ if "port" in opts:
+ opts["port"] = int(opts["port"])
else:
- opts['port'] = 5432
+ opts["port"] = 5432
opts.update(url.query)
return ([], opts)
def is_disconnect(self, e, connection, cursor):
return "connection is closed" in str(e)
+
dialect = PGDialect_pypostgresql
diff --git a/lib/sqlalchemy/dialects/postgresql/ranges.py b/lib/sqlalchemy/dialects/postgresql/ranges.py
index eb2d86bbd..62d1275a6 100644
--- a/lib/sqlalchemy/dialects/postgresql/ranges.py
+++ b/lib/sqlalchemy/dialects/postgresql/ranges.py
@@ -7,7 +7,7 @@
from .base import ischema_names
from ... import types as sqltypes
-__all__ = ('INT4RANGE', 'INT8RANGE', 'NUMRANGE')
+__all__ = ("INT4RANGE", "INT8RANGE", "NUMRANGE")
class RangeOperators(object):
@@ -34,35 +34,36 @@ class RangeOperators(object):
def __ne__(self, other):
"Boolean expression. Returns true if two ranges are not equal"
if other is None:
- return super(
- RangeOperators.comparator_factory, self).__ne__(other)
+ return super(RangeOperators.comparator_factory, self).__ne__(
+ other
+ )
else:
- return self.expr.op('<>')(other)
+ return self.expr.op("<>")(other)
def contains(self, other, **kw):
"""Boolean expression. Returns true if the right hand operand,
which can be an element or a range, is contained within the
column.
"""
- return self.expr.op('@>')(other)
+ return self.expr.op("@>")(other)
def contained_by(self, other):
"""Boolean expression. Returns true if the column is contained
within the right hand operand.
"""
- return self.expr.op('<@')(other)
+ return self.expr.op("<@")(other)
def overlaps(self, other):
"""Boolean expression. Returns true if the column overlaps
(has points in common with) the right hand operand.
"""
- return self.expr.op('&&')(other)
+ return self.expr.op("&&")(other)
def strictly_left_of(self, other):
"""Boolean expression. Returns true if the column is strictly
left of the right hand operand.
"""
- return self.expr.op('<<')(other)
+ return self.expr.op("<<")(other)
__lshift__ = strictly_left_of
@@ -70,7 +71,7 @@ class RangeOperators(object):
"""Boolean expression. Returns true if the column is strictly
right of the right hand operand.
"""
- return self.expr.op('>>')(other)
+ return self.expr.op(">>")(other)
__rshift__ = strictly_right_of
@@ -78,26 +79,26 @@ class RangeOperators(object):
"""Boolean expression. Returns true if the range in the column
does not extend right of the range in the operand.
"""
- return self.expr.op('&<')(other)
+ return self.expr.op("&<")(other)
def not_extend_left_of(self, other):
"""Boolean expression. Returns true if the range in the column
does not extend left of the range in the operand.
"""
- return self.expr.op('&>')(other)
+ return self.expr.op("&>")(other)
def adjacent_to(self, other):
"""Boolean expression. Returns true if the range in the column
is adjacent to the range in the operand.
"""
- return self.expr.op('-|-')(other)
+ return self.expr.op("-|-")(other)
def __add__(self, other):
"""Range expression. Returns the union of the two ranges.
Will raise an exception if the resulting range is not
contigous.
"""
- return self.expr.op('+')(other)
+ return self.expr.op("+")(other)
class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
@@ -107,9 +108,10 @@ class INT4RANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'INT4RANGE'
+ __visit_name__ = "INT4RANGE"
-ischema_names['int4range'] = INT4RANGE
+
+ischema_names["int4range"] = INT4RANGE
class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
@@ -119,9 +121,10 @@ class INT8RANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'INT8RANGE'
+ __visit_name__ = "INT8RANGE"
+
-ischema_names['int8range'] = INT8RANGE
+ischema_names["int8range"] = INT8RANGE
class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
@@ -131,9 +134,10 @@ class NUMRANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'NUMRANGE'
+ __visit_name__ = "NUMRANGE"
+
-ischema_names['numrange'] = NUMRANGE
+ischema_names["numrange"] = NUMRANGE
class DATERANGE(RangeOperators, sqltypes.TypeEngine):
@@ -143,9 +147,10 @@ class DATERANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'DATERANGE'
+ __visit_name__ = "DATERANGE"
-ischema_names['daterange'] = DATERANGE
+
+ischema_names["daterange"] = DATERANGE
class TSRANGE(RangeOperators, sqltypes.TypeEngine):
@@ -155,9 +160,10 @@ class TSRANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'TSRANGE'
+ __visit_name__ = "TSRANGE"
+
-ischema_names['tsrange'] = TSRANGE
+ischema_names["tsrange"] = TSRANGE
class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
@@ -167,6 +173,7 @@ class TSTZRANGE(RangeOperators, sqltypes.TypeEngine):
"""
- __visit_name__ = 'TSTZRANGE'
+ __visit_name__ = "TSTZRANGE"
+
-ischema_names['tstzrange'] = TSTZRANGE
+ischema_names["tstzrange"] = TSTZRANGE
diff --git a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py b/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
index ef6e8f1f9..4d984443a 100644
--- a/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
+++ b/lib/sqlalchemy/dialects/postgresql/zxjdbc.py
@@ -19,7 +19,6 @@ from .base import PGDialect, PGExecutionContext
class PGExecutionContext_zxjdbc(PGExecutionContext):
-
def create_cursor(self):
cursor = self._dbapi_connection.cursor()
cursor.datahandler = self.dialect.DataHandler(cursor.datahandler)
@@ -27,8 +26,8 @@ class PGExecutionContext_zxjdbc(PGExecutionContext):
class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
- jdbc_db_name = 'postgresql'
- jdbc_driver_name = 'org.postgresql.Driver'
+ jdbc_db_name = "postgresql"
+ jdbc_driver_name = "org.postgresql.Driver"
execution_ctx_cls = PGExecutionContext_zxjdbc
@@ -37,10 +36,12 @@ class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect):
def __init__(self, *args, **kwargs):
super(PGDialect_zxjdbc, self).__init__(*args, **kwargs)
from com.ziclix.python.sql.handler import PostgresqlDataHandler
+
self.DataHandler = PostgresqlDataHandler
def _get_server_version_info(self, connection):
- parts = connection.connection.dbversion.split('.')
+ parts = connection.connection.dbversion.split(".")
return tuple(int(x) for x in parts)
+
dialect = PGDialect_zxjdbc