Update 2025-04-13_16:25:39
@@ -0,0 +1,167 @@
# dialects/postgresql/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from types import ModuleType

from . import array as arraylib  # noqa # keep above base and other dialects
from . import asyncpg  # noqa
from . import base
from . import pg8000  # noqa
from . import psycopg  # noqa
from . import psycopg2  # noqa
from . import psycopg2cffi  # noqa
from .array import All
from .array import Any
from .array import ARRAY
from .array import array
from .base import BIGINT
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DOMAIN
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTEGER
from .base import NUMERIC
from .base import REAL
from .base import SMALLINT
from .base import TEXT
from .base import UUID
from .base import VARCHAR
from .dml import Insert
from .dml import insert
from .ext import aggregate_order_by
from .ext import array_agg
from .ext import ExcludeConstraint
from .ext import phraseto_tsquery
from .ext import plainto_tsquery
from .ext import to_tsquery
from .ext import to_tsvector
from .ext import ts_headline
from .ext import websearch_to_tsquery
from .hstore import HSTORE
from .hstore import hstore
from .json import JSON
from .json import JSONB
from .json import JSONPATH
from .named_types import CreateDomainType
from .named_types import CreateEnumType
from .named_types import DropDomainType
from .named_types import DropEnumType
from .named_types import ENUM
from .named_types import NamedType
from .ranges import AbstractMultiRange
from .ranges import AbstractRange
from .ranges import AbstractSingleRange
from .ranges import DATEMULTIRANGE
from .ranges import DATERANGE
from .ranges import INT4MULTIRANGE
from .ranges import INT4RANGE
from .ranges import INT8MULTIRANGE
from .ranges import INT8RANGE
from .ranges import MultiRange
from .ranges import NUMMULTIRANGE
from .ranges import NUMRANGE
from .ranges import Range
from .ranges import TSMULTIRANGE
from .ranges import TSRANGE
from .ranges import TSTZMULTIRANGE
from .ranges import TSTZRANGE
from .types import BIT
from .types import BYTEA
from .types import CIDR
from .types import CITEXT
from .types import INET
from .types import INTERVAL
from .types import MACADDR
from .types import MACADDR8
from .types import MONEY
from .types import OID
from .types import REGCLASS
from .types import REGCONFIG
from .types import TIME
from .types import TIMESTAMP
from .types import TSQUERY
from .types import TSVECTOR


# Alias psycopg also as psycopg_async
psycopg_async = type(
    "psycopg_async", (ModuleType,), {"dialect": psycopg.dialect_async}
)

base.dialect = dialect = psycopg2.dialect


__all__ = (
    "INTEGER",
    "BIGINT",
    "SMALLINT",
    "VARCHAR",
    "CHAR",
    "TEXT",
    "NUMERIC",
    "FLOAT",
    "REAL",
    "INET",
    "CIDR",
    "CITEXT",
    "UUID",
    "BIT",
    "MACADDR",
    "MACADDR8",
    "MONEY",
    "OID",
    "REGCLASS",
    "REGCONFIG",
    "TSQUERY",
    "TSVECTOR",
    "DOUBLE_PRECISION",
    "TIMESTAMP",
    "TIME",
    "DATE",
    "BYTEA",
    "BOOLEAN",
    "INTERVAL",
    "ARRAY",
    "ENUM",
    "DOMAIN",
    "dialect",
    "array",
    "HSTORE",
    "hstore",
    "INT4RANGE",
    "INT8RANGE",
    "NUMRANGE",
    "DATERANGE",
    "INT4MULTIRANGE",
    "INT8MULTIRANGE",
    "NUMMULTIRANGE",
    "DATEMULTIRANGE",
    "TSVECTOR",
    "TSRANGE",
    "TSTZRANGE",
    "TSMULTIRANGE",
    "TSTZMULTIRANGE",
    "JSON",
    "JSONB",
    "JSONPATH",
    "Any",
    "All",
    "DropEnumType",
    "DropDomainType",
    "CreateDomainType",
    "NamedType",
    "CreateEnumType",
    "ExcludeConstraint",
    "Range",
    "aggregate_order_by",
    "array_agg",
    "insert",
    "Insert",
)
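The module above consists of re-exports plus the dialect aliasing at the bottom. A minimal consumption sketch, assuming SQLAlchemy is installed; the table definition is hypothetical:

from sqlalchemy import Column, Integer, MetaData, Table
from sqlalchemy.dialects import postgresql

metadata = MetaData()
tbl = Table(
    "example",  # hypothetical table
    metadata,
    Column("id", Integer, primary_key=True),
    Column("tags", postgresql.ARRAY(postgresql.TEXT)),
    Column("doc", postgresql.JSONB),
)

# The module-level "dialect" name resolves to the psycopg2 dialect class,
# per "base.dialect = dialect = psycopg2.dialect" above.
print(postgresql.dialect)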
@@ -0,0 +1,187 @@
# dialects/postgresql/_psycopg_common.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations

import decimal

from .array import ARRAY as PGARRAY
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGDialect
from .base import PGExecutionContext
from .hstore import HSTORE
from .pg_catalog import _SpaceVector
from .pg_catalog import INT2VECTOR
from .pg_catalog import OIDVECTOR
from ... import exc
from ... import types as sqltypes
from ... import util
from ...engine import processors

_server_side_id = util.counter()


class _PsycopgNumeric(sqltypes.Numeric):
    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in _FLOAT_TYPES:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale
                )
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # psycopg returns Decimal natively for 1700
                return None
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )
        else:
            if coltype in _FLOAT_TYPES:
                # psycopg returns float natively for 701
                return None
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                return processors.to_float
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )


class _PsycopgFloat(_PsycopgNumeric):
    __visit_name__ = "float"


class _PsycopgHStore(HSTORE):
    def bind_processor(self, dialect):
        if dialect._has_native_hstore:
            return None
        else:
            return super().bind_processor(dialect)

    def result_processor(self, dialect, coltype):
        if dialect._has_native_hstore:
            return None
        else:
            return super().result_processor(dialect, coltype)


class _PsycopgARRAY(PGARRAY):
    render_bind_cast = True


class _PsycopgINT2VECTOR(_SpaceVector, INT2VECTOR):
    pass


class _PsycopgOIDVECTOR(_SpaceVector, OIDVECTOR):
    pass


class _PGExecutionContext_common_psycopg(PGExecutionContext):
    def create_server_side_cursor(self):
        # use server-side cursors:
        # psycopg
        # https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#server-side-cursors
        # psycopg2
        # https://www.psycopg.org/docs/usage.html#server-side-cursors
        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
        return self._dbapi_connection.cursor(ident)


class _PGDialect_common_psycopg(PGDialect):
    supports_statement_cache = True
    supports_server_side_cursors = True

    default_paramstyle = "pyformat"

    _has_native_hstore = True

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PsycopgNumeric,
            sqltypes.Float: _PsycopgFloat,
            HSTORE: _PsycopgHStore,
            sqltypes.ARRAY: _PsycopgARRAY,
            INT2VECTOR: _PsycopgINT2VECTOR,
            OIDVECTOR: _PsycopgOIDVECTOR,
        },
    )

    def __init__(
        self,
        client_encoding=None,
        use_native_hstore=True,
        **kwargs,
    ):
        PGDialect.__init__(self, **kwargs)
        if not use_native_hstore:
            self._has_native_hstore = False
        self.use_native_hstore = use_native_hstore
        self.client_encoding = client_encoding

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username="user", database="dbname")

        multihosts, multiports = self._split_multihost_from_url(url)

        if opts or url.query:
            if not opts:
                opts = {}
            if "port" in opts:
                opts["port"] = int(opts["port"])
            opts.update(url.query)

            if multihosts:
                opts["host"] = ",".join(multihosts)
                comma_ports = ",".join(str(p) if p else "" for p in multiports)
                if comma_ports:
                    opts["port"] = comma_ports
            return ([], opts)
        else:
            # no connection arguments whatsoever; psycopg2.connect()
            # requires that "dsn" be present as a blank string.
            return ([""], opts)

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "AUTOCOMMIT",
            "READ COMMITTED",
            "READ UNCOMMITTED",
            "REPEATABLE READ",
            "SERIALIZABLE",
        )

    def set_deferrable(self, connection, value):
        connection.deferrable = value

    def get_deferrable(self, connection):
        return connection.deferrable

    def _do_autocommit(self, connection, value):
        connection.autocommit = value

    def do_ping(self, dbapi_connection):
        cursor = None
        before_autocommit = dbapi_connection.autocommit

        if not before_autocommit:
            dbapi_connection.autocommit = True
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute(self._dialect_specific_select_one)
        finally:
            cursor.close()
            if not before_autocommit and not dbapi_connection.closed:
                dbapi_connection.autocommit = before_autocommit

        return True
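A sketch of what ``create_connect_args()`` above yields for a multi-host URL, using the psycopg2 dialect (a subclass of ``_PGDialect_common_psycopg``); the host names and credentials are hypothetical, and the printed shape is approximate:

from sqlalchemy.dialects import postgresql
from sqlalchemy.engine import make_url

url = make_url(
    "postgresql+psycopg2://scott:tiger@host1:5432,host2:5433/testdb"
)
cargs, cparams = postgresql.psycopg2.dialect().create_connect_args(url)

# "username"/"database" are renamed to "user"/"dbname", and the multihost
# values are joined into comma-separated host/port strings, e.g.:
# ([], {'user': 'scott', 'password': 'tiger', 'dbname': 'testdb',
#       'host': 'host1,host2', 'port': '5432,5433'})
print(cargs, cparams)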
@@ -0,0 +1,509 @@
# dialects/postgresql/array.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php


from __future__ import annotations

import re
from typing import Any as typing_Any
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union

from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import OVERLAP
from ... import types as sqltypes
from ... import util
from ...sql import expression
from ...sql import operators
from ...sql.visitors import InternalTraversal

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql._typing import _ColumnExpressionArgument
    from ...sql._typing import _TypeEngineArgument
    from ...sql.elements import ColumnElement
    from ...sql.elements import Grouping
    from ...sql.expression import BindParameter
    from ...sql.operators import OperatorType
    from ...sql.selectable import _SelectIterable
    from ...sql.type_api import _BindProcessorType
    from ...sql.type_api import _LiteralProcessorType
    from ...sql.type_api import _ResultProcessorType
    from ...sql.type_api import TypeEngine
    from ...sql.visitors import _TraverseInternalsType
    from ...util.typing import Self


_T = TypeVar("_T", bound=typing_Any)


def Any(
    other: typing_Any,
    arrexpr: _ColumnExpressionArgument[_T],
    operator: OperatorType = operators.eq,
) -> ColumnElement[bool]:
    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.any` method.
    See that method for details.

    """

    return arrexpr.any(other, operator)  # type: ignore[no-any-return, union-attr]  # noqa: E501


def All(
    other: typing_Any,
    arrexpr: _ColumnExpressionArgument[_T],
    operator: OperatorType = operators.eq,
) -> ColumnElement[bool]:
    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.all` method.
    See that method for details.

    """

    return arrexpr.all(other, operator)  # type: ignore[no-any-return, union-attr]  # noqa: E501


class array(expression.ExpressionClauseList[_T]):
    """A PostgreSQL ARRAY literal.

    This is used to produce ARRAY literals in SQL expressions, e.g.::

        from sqlalchemy.dialects.postgresql import array
        from sqlalchemy.dialects import postgresql
        from sqlalchemy import select, func

        stmt = select(array([1, 2]) + array([3, 4, 5]))

        print(stmt.compile(dialect=postgresql.dialect()))

    Produces the SQL:

    .. sourcecode:: sql

        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1

    An instance of :class:`.array` will always have the datatype
    :class:`_types.ARRAY`.  The "inner" type of the array is inferred from
    the values present, unless the :paramref:`_postgresql.array.type_`
    keyword argument is passed::

        array(["foo", "bar"], type_=CHAR)

    When constructing an empty array, the :paramref:`_postgresql.array.type_`
    argument is particularly important as PostgreSQL server typically
    requires a cast to be rendered for the inner type in order to render an
    empty array.  SQLAlchemy's compilation for the empty array will produce
    this cast so that::

        stmt = array([], type_=Integer)
        print(stmt.compile(dialect=postgresql.dialect()))

    Produces:

    .. sourcecode:: sql

        ARRAY[]::INTEGER[]

    As required by PostgreSQL for empty arrays.

    .. versionadded:: 2.0.40 added support to render empty PostgreSQL array
       literals with a required cast.

    Multidimensional arrays are produced by nesting :class:`.array`
    constructs.  The dimensionality of the final :class:`_types.ARRAY`
    type is calculated by recursively adding the dimensions of the inner
    :class:`_types.ARRAY` type::

        stmt = select(
            array(
                [array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
            )
        )
        print(stmt.compile(dialect=postgresql.dialect()))

    Produces:

    .. sourcecode:: sql

        SELECT ARRAY[
            ARRAY[%(param_1)s, %(param_2)s],
            ARRAY[%(param_3)s, %(param_4)s],
            ARRAY[q, x]
        ] AS anon_1

    .. versionadded:: 1.3.6 added support for multidimensional array literals

    .. seealso::

        :class:`_postgresql.ARRAY`

    """  # noqa: E501

    __visit_name__ = "array"

    stringify_dialect = "postgresql"

    _traverse_internals: _TraverseInternalsType = [
        ("clauses", InternalTraversal.dp_clauseelement_tuple),
        ("type", InternalTraversal.dp_type),
    ]

    def __init__(
        self,
        clauses: Iterable[_T],
        *,
        type_: Optional[_TypeEngineArgument[_T]] = None,
        **kw: typing_Any,
    ):
        r"""Construct an ARRAY literal.

        :param clauses: iterable, such as a list, containing elements to be
         rendered in the array
        :param type\_: optional type.  If omitted, the type is inferred
         from the contents of the array.

        """
        super().__init__(operators.comma_op, *clauses, **kw)

        main_type = (
            type_
            if type_ is not None
            else self.clauses[0].type if self.clauses else sqltypes.NULLTYPE
        )

        if isinstance(main_type, ARRAY):
            self.type = ARRAY(
                main_type.item_type,
                dimensions=(
                    main_type.dimensions + 1
                    if main_type.dimensions is not None
                    else 2
                ),
            )  # type: ignore[assignment]
        else:
            self.type = ARRAY(main_type)  # type: ignore[assignment]

    @property
    def _select_iterable(self) -> _SelectIterable:
        return (self,)

    def _bind_param(
        self,
        operator: OperatorType,
        obj: typing_Any,
        type_: Optional[TypeEngine[_T]] = None,
        _assume_scalar: bool = False,
    ) -> BindParameter[_T]:
        if _assume_scalar or operator is operators.getitem:
            return expression.BindParameter(
                None,
                obj,
                _compared_to_operator=operator,
                type_=type_,
                _compared_to_type=self.type,
                unique=True,
            )

        else:
            return array(
                [
                    self._bind_param(
                        operator, o, _assume_scalar=True, type_=type_
                    )
                    for o in obj
                ]
            )  # type: ignore[return-value]

    def self_group(
        self, against: Optional[OperatorType] = None
    ) -> Union[Self, Grouping[_T]]:
        if against in (operators.any_op, operators.all_op, operators.getitem):
            return expression.Grouping(self)
        else:
            return self


class ARRAY(sqltypes.ARRAY[_T]):
    """PostgreSQL ARRAY type.

    The :class:`_postgresql.ARRAY` type is constructed in the same way
    as the core :class:`_types.ARRAY` type; a member type is required, and a
    number of dimensions is recommended if the type is to be used for more
    than one dimension::

        from sqlalchemy.dialects import postgresql

        mytable = Table(
            "mytable",
            metadata,
            Column("data", postgresql.ARRAY(Integer, dimensions=2)),
        )

    The :class:`_postgresql.ARRAY` type provides all operations defined on
    the core :class:`_types.ARRAY` type, including support for "dimensions",
    indexed access, and simple matching such as
    :meth:`.types.ARRAY.Comparator.any` and
    :meth:`.types.ARRAY.Comparator.all`.  The :class:`_postgresql.ARRAY`
    class also
    provides PostgreSQL-specific methods for containment operations,
    including :meth:`.postgresql.ARRAY.Comparator.contains`,
    :meth:`.postgresql.ARRAY.Comparator.contained_by`, and
    :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::

        mytable.c.data.contains([1, 2])

    Indexed access is one-based by default, to match that of PostgreSQL;
    for zero-based indexed access, set
    :paramref:`_postgresql.ARRAY.zero_indexes`.

    Additionally, the :class:`_postgresql.ARRAY`
    type does not work directly in
    conjunction with the :class:`.ENUM` type.  For a workaround, see the
    special type at :ref:`postgresql_array_of_enum`.

    .. container:: topic

        **Detecting Changes in ARRAY columns when using the ORM**

        The :class:`_postgresql.ARRAY` type, when used with the SQLAlchemy
        ORM, does not detect in-place mutations to the array.  In order to
        detect these, the :mod:`sqlalchemy.ext.mutable` extension must be
        used, using the :class:`.MutableList` class::

            from sqlalchemy.dialects.postgresql import ARRAY
            from sqlalchemy.ext.mutable import MutableList


            class SomeOrmClass(Base):
                # ...

                data = Column(MutableList.as_mutable(ARRAY(Integer)))

        This extension will allow "in-place" changes to the array, such as
        ``.append()``, to produce events which will be detected by the
        unit of work.  Note that changes to elements **inside** the array,
        including subarrays that are mutated in place, are **not** detected.

        Alternatively, assigning a new array value to an ORM element that
        replaces the old one will always trigger a change event.

    .. seealso::

        :class:`_types.ARRAY` - base array type

        :class:`_postgresql.array` - produces a literal array value.

    """

    def __init__(
        self,
        item_type: _TypeEngineArgument[_T],
        as_tuple: bool = False,
        dimensions: Optional[int] = None,
        zero_indexes: bool = False,
    ):
        """Construct an ARRAY.

        E.g.::

          Column("myarray", ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array.  Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such.

        :param as_tuple=False: Specify whether return results
          should be converted to tuples from lists.  DBAPIs such
          as psycopg2 return lists by default.  When tuples are
          returned, the results are hashable.

        :param dimensions: if non-None, the ARRAY will assume a fixed
         number of dimensions.  This will cause the DDL emitted for this
         ARRAY to include the exact number of bracket clauses ``[]``,
         and will also optimize the performance of the type overall.
         Note that PG arrays are always implicitly "non-dimensioned",
         meaning they can store any number of dimensions no matter how
         they were declared.

        :param zero_indexes=False: when True, index values will be converted
         between Python zero-based and PostgreSQL one-based indexes, e.g.
         a value of one will be added to all index values before passing
         to the database.

        """
        if isinstance(item_type, ARRAY):
            raise ValueError(
                "Do not nest ARRAY types; ARRAY(basetype) "
                "handles multi-dimensional arrays of basetype"
            )
        if isinstance(item_type, type):
            item_type = item_type()
        self.item_type = item_type
        self.as_tuple = as_tuple
        self.dimensions = dimensions
        self.zero_indexes = zero_indexes

    class Comparator(sqltypes.ARRAY.Comparator[_T]):
        """Define comparison operations for :class:`_types.ARRAY`.

        Note that these operations are in addition to those provided
        by the base :class:`.types.ARRAY.Comparator` class, including
        :meth:`.types.ARRAY.Comparator.any` and
        :meth:`.types.ARRAY.Comparator.all`.

        """

        def contains(
            self, other: typing_Any, **kwargs: typing_Any
        ) -> ColumnElement[bool]:
            """Boolean expression.  Test if elements are a superset of the
            elements of the argument array expression.

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other: typing_Any) -> ColumnElement[bool]:
            """Boolean expression.  Test if elements are a proper subset of
            the elements of the argument array expression.
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def overlap(self, other: typing_Any) -> ColumnElement[bool]:
            """Boolean expression.  Test if array has elements in common
            with an argument array expression.
            """
            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)

    comparator_factory = Comparator

    @util.memoized_property
    def _against_native_enum(self) -> bool:
        return (
            isinstance(self.item_type, sqltypes.Enum)
            and self.item_type.native_enum  # type: ignore[attr-defined]
        )

    def literal_processor(
        self, dialect: Dialect
    ) -> Optional[_LiteralProcessorType[_T]]:
        item_proc = self.item_type.dialect_impl(dialect).literal_processor(
            dialect
        )
        if item_proc is None:
            return None

        def to_str(elements: Iterable[typing_Any]) -> str:
            return f"ARRAY[{', '.join(elements)}]"

        def process(value: Sequence[typing_Any]) -> str:
            inner = self._apply_item_processor(
                value, item_proc, self.dimensions, to_str
            )
            return inner

        return process

    def bind_processor(
        self, dialect: Dialect
    ) -> Optional[_BindProcessorType[Sequence[typing_Any]]]:
        item_proc = self.item_type.dialect_impl(dialect).bind_processor(
            dialect
        )

        def process(
            value: Optional[Sequence[typing_Any]],
        ) -> Optional[list[typing_Any]]:
            if value is None:
                return value
            else:
                return self._apply_item_processor(
                    value, item_proc, self.dimensions, list
                )

        return process

    def result_processor(
        self, dialect: Dialect, coltype: object
    ) -> _ResultProcessorType[Sequence[typing_Any]]:
        item_proc = self.item_type.dialect_impl(dialect).result_processor(
            dialect, coltype
        )

        def process(
            value: Sequence[typing_Any],
        ) -> Optional[Sequence[typing_Any]]:
            if value is None:
                return value
            else:
                return self._apply_item_processor(
                    value,
                    item_proc,
                    self.dimensions,
                    tuple if self.as_tuple else list,
                )

        if self._against_native_enum:
            super_rp = process
            pattern = re.compile(r"^{(.*)}$")

            def handle_raw_string(value: str) -> list[str]:
                inner = pattern.match(value).group(1)  # type: ignore[union-attr]  # noqa: E501
                return _split_enum_values(inner)

            def process(
                value: Sequence[typing_Any],
            ) -> Optional[Sequence[typing_Any]]:
                if value is None:
                    return value
                # isinstance(value, str) is required to handle
                # the case where a TypeDecorator for an ARRAY of Enum is
                # used, as was required in sa < 1.3.17
                return super_rp(
                    handle_raw_string(value)
                    if isinstance(value, str)
                    else value
                )

        return process


def _split_enum_values(array_string: str) -> list[str]:
    if '"' not in array_string:
        # no escape char is present so it can just split on the comma
        return array_string.split(",") if array_string else []

    # handles quoted strings from:
    # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr'
    # returns
    # ['abc', 'quoted', 'also\\quoted', 'quoted, comma', 'esc " quot', 'qpr']
    text = array_string.replace(r"\"", "_$ESC_QUOTE$_")
    text = text.replace(r"\\", "\\")
    result = []
    on_quotes = re.split(r'(")', text)
    in_quotes = False
    for tok in on_quotes:
        if tok == '"':
            in_quotes = not in_quotes
        elif in_quotes:
            result.append(tok.replace("_$ESC_QUOTE$_", '"'))
        else:
            result.extend(re.findall(r"([^\s,]+),?", tok))
    return result
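A compile-only sketch of the ``array`` literal and ``Comparator.contains()`` defined above; ``data`` is a hypothetical column:

from sqlalchemy import Integer, column, select
from sqlalchemy.dialects import postgresql
from sqlalchemy.dialects.postgresql import ARRAY, array

data = column("data", ARRAY(Integer))  # hypothetical ARRAY column

stmt = select(array([1, 2]) + array([3, 4])).where(
    data.contains([1, 2])  # renders the "@>" containment operator
)
print(stmt.compile(dialect=postgresql.dialect()))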
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,339 @@
# dialects/postgresql/dml.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations

from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

from . import ext
from .._typing import _OnConflictConstraintT
from .._typing import _OnConflictIndexElementsT
from .._typing import _OnConflictIndexWhereT
from .._typing import _OnConflictSetT
from .._typing import _OnConflictWhereT
from ... import util
from ...sql import coercions
from ...sql import roles
from ...sql import schema
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import ColumnElement
from ...sql.elements import KeyedColumnElement
from ...sql.elements import TextClause
from ...sql.expression import alias
from ...util.typing import Self


__all__ = ("Insert", "insert")


def insert(table: _DMLTableArgument) -> Insert:
    """Construct a PostgreSQL-specific variant :class:`_postgresql.Insert`
    construct.

    .. container:: inherited_member

        The :func:`sqlalchemy.dialects.postgresql.insert` function creates
        a :class:`sqlalchemy.dialects.postgresql.Insert`.  This class is
        based on the dialect-agnostic :class:`_sql.Insert` construct which
        may be constructed using the :func:`_sql.insert` function in
        SQLAlchemy Core.

        The :class:`_postgresql.Insert` construct includes additional
        methods :meth:`_postgresql.Insert.on_conflict_do_update`,
        :meth:`_postgresql.Insert.on_conflict_do_nothing`.

    """
    return Insert(table)


class Insert(StandardInsert):
    """PostgreSQL-specific implementation of INSERT.

    Adds methods for PG-specific syntaxes such as ON CONFLICT.

    The :class:`_postgresql.Insert` object is created using the
    :func:`sqlalchemy.dialects.postgresql.insert` function.

    """

    stringify_dialect = "postgresql"
    inherit_cache = False

    @util.memoized_property
    def excluded(
        self,
    ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
        """Provide the ``excluded`` namespace for an ON CONFLICT statement

        PG's ON CONFLICT clause allows reference to the row that would
        be inserted, known as ``excluded``.  This attribute provides
        all columns in this row to be referenceable.

        .. tip::  The :attr:`_postgresql.Insert.excluded` attribute is an
            instance of :class:`_expression.ColumnCollection`, which provides
            an interface the same as that of the :attr:`_schema.Table.c`
            collection described at :ref:`metadata_tables_and_columns`.
            With this collection, ordinary names are accessible like
            attributes (e.g. ``stmt.excluded.some_column``), but special
            names and dictionary method names should be accessed using
            indexed access, such as ``stmt.excluded["column name"]`` or
            ``stmt.excluded["values"]``.  See the docstring for
            :class:`_expression.ColumnCollection` for further examples.

        .. seealso::

            :ref:`postgresql_insert_on_conflict` - example of how
            to use :attr:`_expression.Insert.excluded`

        """
        return alias(self.table, name="excluded").columns

    _on_conflict_exclusive = _exclusive_against(
        "_post_values_clause",
        msgs={
            "_post_values_clause": "This Insert construct already has "
            "an ON CONFLICT clause established"
        },
    )

    @_generative
    @_on_conflict_exclusive
    def on_conflict_do_update(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
        set_: _OnConflictSetT = None,
        where: _OnConflictWhereT = None,
    ) -> Self:
        r"""
        Specifies a DO UPDATE SET action for ON CONFLICT clause.

        Either the ``constraint`` or ``index_elements`` argument is
        required, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`_schema.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        :param set\_:
         A dictionary or other mapping object
         where the keys are either names of columns in the target table,
         or :class:`_schema.Column` objects or other ORM-mapped columns
         matching that of the target table, and expressions or literals
         as values, specifying the ``SET`` actions to take.

         .. versionadded:: 1.4 The
            :paramref:`_postgresql.Insert.on_conflict_do_update.set_`
            parameter supports :class:`_schema.Column` objects from the
            target :class:`_schema.Table` as keys.

         .. warning:: This dictionary does **not** take into account
            Python-specified default UPDATE values or generation functions,
            e.g. those specified using :paramref:`_schema.Column.onupdate`.
            These values will not be exercised for an ON CONFLICT style of
            UPDATE, unless they are manually specified in the
            :paramref:`.Insert.on_conflict_do_update.set_` dictionary.

        :param where:
         Optional argument.  An expression object representing a ``WHERE``
         clause that restricts the rows affected by ``DO UPDATE SET``.  Rows
         not meeting the ``WHERE`` condition will not be updated (effectively
         a ``DO NOTHING`` for those rows).

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        self._post_values_clause = OnConflictDoUpdate(
            constraint, index_elements, index_where, set_, where
        )
        return self

    @_generative
    @_on_conflict_exclusive
    def on_conflict_do_nothing(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
    ) -> Self:
        """
        Specifies a DO NOTHING action for ON CONFLICT clause.

        The ``constraint`` and ``index_elements`` arguments
        are optional, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`_schema.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        self._post_values_clause = OnConflictDoNothing(
            constraint, index_elements, index_where
        )
        return self


class OnConflictClause(ClauseElement):
    stringify_dialect = "postgresql"

    constraint_target: Optional[str]
    inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
    inferred_target_whereclause: Optional[
        Union[ColumnElement[Any], TextClause]
    ]

    def __init__(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
    ):
        if constraint is not None:
            if not isinstance(constraint, str) and isinstance(
                constraint,
                (schema.Constraint, ext.ExcludeConstraint),
            ):
                constraint = getattr(constraint, "name") or constraint

        if constraint is not None:
            if index_elements is not None:
                raise ValueError(
                    "'constraint' and 'index_elements' are mutually exclusive"
                )

            if isinstance(constraint, str):
                self.constraint_target = constraint
                self.inferred_target_elements = None
                self.inferred_target_whereclause = None
            elif isinstance(constraint, schema.Index):
                index_elements = constraint.expressions
                index_where = constraint.dialect_options["postgresql"].get(
                    "where"
                )
            elif isinstance(constraint, ext.ExcludeConstraint):
                index_elements = constraint.columns
                index_where = constraint.where
            else:
                index_elements = constraint.columns
                index_where = constraint.dialect_options["postgresql"].get(
                    "where"
                )

        if index_elements is not None:
            self.constraint_target = None
            self.inferred_target_elements = [
                coercions.expect(roles.DDLConstraintColumnRole, column)
                for column in index_elements
            ]

            self.inferred_target_whereclause = (
                coercions.expect(
                    (
                        roles.StatementOptionRole
                        if isinstance(constraint, ext.ExcludeConstraint)
                        else roles.WhereHavingRole
                    ),
                    index_where,
                )
                if index_where is not None
                else None
            )

        elif constraint is None:
            self.constraint_target = self.inferred_target_elements = (
                self.inferred_target_whereclause
            ) = None


class OnConflictDoNothing(OnConflictClause):
    __visit_name__ = "on_conflict_do_nothing"


class OnConflictDoUpdate(OnConflictClause):
    __visit_name__ = "on_conflict_do_update"

    update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
    update_whereclause: Optional[ColumnElement[Any]]

    def __init__(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
        set_: _OnConflictSetT = None,
        where: _OnConflictWhereT = None,
    ):
        super().__init__(
            constraint=constraint,
            index_elements=index_elements,
            index_where=index_where,
        )

        if (
            self.inferred_target_elements is None
            and self.constraint_target is None
        ):
            raise ValueError(
                "Either constraint or index_elements, "
                "but not both, must be specified unless DO NOTHING"
            )

        if isinstance(set_, dict):
            if not set_:
                raise ValueError("set parameter dictionary must not be empty")
        elif isinstance(set_, ColumnCollection):
            set_ = dict(set_)
        else:
            raise ValueError(
                "set parameter must be a non-empty dictionary "
                "or a ColumnCollection such as the `.c.` collection "
                "of a Table object"
            )
        self.update_values_to_set = [
            (coercions.expect(roles.DMLColumnRole, key), value)
            for key, value in set_.items()
        ]
        self.update_whereclause = (
            coercions.expect(roles.WhereHavingRole, where)
            if where is not None
            else None
        )
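A sketch of the ON CONFLICT methods above against a hypothetical ``users`` table; ``stringify_dialect = "postgresql"`` lets the statement print without an engine:

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.dialects.postgresql import insert

users = Table(  # hypothetical table
    "users",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("name", String),
)

stmt = insert(users).values(id=1, name="spongebob")
upsert = stmt.on_conflict_do_update(
    index_elements=[users.c.id],
    # "excluded" refers to the row proposed for insertion
    set_={"name": stmt.excluded.name},
)
print(upsert)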
@@ -0,0 +1,501 @@
|
||||
# dialects/postgresql/ext.py
|
||||
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING
|
||||
from typing import TypeVar
|
||||
|
||||
from . import types
|
||||
from .array import ARRAY
|
||||
from ...sql import coercions
|
||||
from ...sql import elements
|
||||
from ...sql import expression
|
||||
from ...sql import functions
|
||||
from ...sql import roles
|
||||
from ...sql import schema
|
||||
from ...sql.schema import ColumnCollectionConstraint
|
||||
from ...sql.sqltypes import TEXT
|
||||
from ...sql.visitors import InternalTraversal
|
||||
|
||||
_T = TypeVar("_T", bound=Any)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from ...sql.visitors import _TraverseInternalsType
|
||||
|
||||
|
||||
class aggregate_order_by(expression.ColumnElement):
|
||||
"""Represent a PostgreSQL aggregate order by expression.
|
||||
|
||||
E.g.::
|
||||
|
||||
from sqlalchemy.dialects.postgresql import aggregate_order_by
|
||||
|
||||
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
|
||||
stmt = select(expr)
|
||||
|
||||
would represent the expression:
|
||||
|
||||
.. sourcecode:: sql
|
||||
|
||||
SELECT array_agg(a ORDER BY b DESC) FROM table;
|
||||
|
||||
Similarly::
|
||||
|
||||
expr = func.string_agg(
|
||||
table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
|
||||
)
|
||||
stmt = select(expr)
|
||||
|
||||
Would represent:
|
||||
|
||||
.. sourcecode:: sql
|
||||
|
||||
SELECT string_agg(a, ',' ORDER BY a) FROM table;
|
||||
|
||||
.. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms
|
||||
|
||||
.. seealso::
|
||||
|
||||
:class:`_functions.array_agg`
|
||||
|
||||
"""
|
||||
|
||||
__visit_name__ = "aggregate_order_by"
|
||||
|
||||
stringify_dialect = "postgresql"
|
||||
_traverse_internals: _TraverseInternalsType = [
|
||||
("target", InternalTraversal.dp_clauseelement),
|
||||
("type", InternalTraversal.dp_type),
|
||||
("order_by", InternalTraversal.dp_clauseelement),
|
||||
]
|
||||
|
||||
def __init__(self, target, *order_by):
|
||||
self.target = coercions.expect(roles.ExpressionElementRole, target)
|
||||
self.type = self.target.type
|
||||
|
||||
_lob = len(order_by)
|
||||
if _lob == 0:
|
||||
raise TypeError("at least one ORDER BY element is required")
|
||||
elif _lob == 1:
|
||||
self.order_by = coercions.expect(
|
||||
roles.ExpressionElementRole, order_by[0]
|
||||
)
|
||||
else:
|
||||
self.order_by = elements.ClauseList(
|
||||
*order_by, _literal_as_text_role=roles.ExpressionElementRole
|
||||
)
|
||||
|
||||
def self_group(self, against=None):
|
||||
return self
|
||||
|
||||
def get_children(self, **kwargs):
|
||||
return self.target, self.order_by
|
||||
|
||||
def _copy_internals(self, clone=elements._clone, **kw):
|
||||
self.target = clone(self.target, **kw)
|
||||
self.order_by = clone(self.order_by, **kw)
|
||||
|
||||
@property
|
||||
def _from_objects(self):
|
||||
return self.target._from_objects + self.order_by._from_objects
|
||||
|
||||
|
||||
class ExcludeConstraint(ColumnCollectionConstraint):
|
||||
"""A table-level EXCLUDE constraint.
|
||||
|
||||
Defines an EXCLUDE constraint as described in the `PostgreSQL
|
||||
documentation`__.
|
||||
|
||||
__ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
|
||||
|
||||
""" # noqa
|
||||
|
||||
__visit_name__ = "exclude_constraint"
|
||||
|
||||
where = None
|
||||
inherit_cache = False
|
||||
|
||||
create_drop_stringify_dialect = "postgresql"
|
||||
|
||||
@elements._document_text_coercion(
|
||||
"where",
|
||||
":class:`.ExcludeConstraint`",
|
||||
":paramref:`.ExcludeConstraint.where`",
|
||||
)
|
||||
def __init__(self, *elements, **kw):
|
||||
r"""
|
||||
Create an :class:`.ExcludeConstraint` object.
|
||||
|
||||
E.g.::
|
||||
|
||||
const = ExcludeConstraint(
|
||||
(Column("period"), "&&"),
|
||||
(Column("group"), "="),
|
||||
where=(Column("group") != "some group"),
|
||||
ops={"group": "my_operator_class"},
|
||||
)
|
||||
|
||||
The constraint is normally embedded into the :class:`_schema.Table`
|
||||
construct
|
||||
directly, or added later using :meth:`.append_constraint`::
|
||||
|
||||
some_table = Table(
|
||||
"some_table",
|
||||
metadata,
|
||||
Column("id", Integer, primary_key=True),
|
||||
Column("period", TSRANGE()),
|
||||
Column("group", String),
|
||||
)
|
||||
|
||||
some_table.append_constraint(
|
||||
ExcludeConstraint(
|
||||
(some_table.c.period, "&&"),
|
||||
(some_table.c.group, "="),
|
||||
where=some_table.c.group != "some group",
|
||||
name="some_table_excl_const",
|
||||
ops={"group": "my_operator_class"},
|
||||
)
|
||||
)
|
||||
|
||||
The exclude constraint defined in this example requires the
|
||||
``btree_gist`` extension, that can be created using the
|
||||
command ``CREATE EXTENSION btree_gist;``.
|
||||
|
||||
:param \*elements:
|
||||
|
||||
A sequence of two tuples of the form ``(column, operator)`` where
|
||||
"column" is either a :class:`_schema.Column` object, or a SQL
|
||||
expression element (e.g. ``func.int8range(table.from, table.to)``)
|
||||
or the name of a column as string, and "operator" is a string
|
||||
containing the operator to use (e.g. `"&&"` or `"="`).
|
||||
|
||||
In order to specify a column name when a :class:`_schema.Column`
|
||||
object is not available, while ensuring
|
||||
that any necessary quoting rules take effect, an ad-hoc
|
||||
:class:`_schema.Column` or :func:`_expression.column`
|
||||
object should be used.
|
||||
The ``column`` may also be a string SQL expression when
|
||||
passed as :func:`_expression.literal_column` or
|
||||
:func:`_expression.text`
|
||||
|
||||
:param name:
|
||||
Optional, the in-database name of this constraint.
|
||||
|
||||
:param deferrable:
|
||||
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
|
||||
issuing DDL for this constraint.
|
||||
|
||||
:param initially:
|
||||
Optional string. If set, emit INITIALLY <value> when issuing DDL
|
||||
for this constraint.
|
||||
|
||||
:param using:
|
||||
Optional string. If set, emit USING <index_method> when issuing DDL
|
||||
for this constraint. Defaults to 'gist'.
|
||||
|
||||
:param where:
|
||||
Optional SQL expression construct or literal SQL string.
|
||||
If set, emit WHERE <predicate> when issuing DDL
|
||||
for this constraint.
|
||||
|
||||
:param ops:
|
||||
Optional dictionary. Used to define operator classes for the
|
||||
elements; works the same way as that of the
|
||||
:ref:`postgresql_ops <postgresql_operator_classes>`
|
||||
parameter specified to the :class:`_schema.Index` construct.
|
||||
|
||||
.. versionadded:: 1.3.21
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ref:`postgresql_operator_classes` - general description of how
|
||||
PostgreSQL operator classes are specified.
|
||||
|
||||
"""
|
||||
columns = []
|
||||
render_exprs = []
|
||||
self.operators = {}
|
||||
|
||||
expressions, operators = zip(*elements)
|
||||
|
||||
for (expr, column, strname, add_element), operator in zip(
|
||||
coercions.expect_col_expression_collection(
|
||||
roles.DDLConstraintColumnRole, expressions
|
||||
),
|
||||
operators,
|
||||
):
|
||||
if add_element is not None:
|
||||
columns.append(add_element)
|
||||
|
||||
name = column.name if column is not None else strname
|
||||
|
||||
if name is not None:
|
||||
# backwards compat
|
||||
self.operators[name] = operator
|
||||
|
||||
render_exprs.append((expr, name, operator))
|
||||
|
||||
self._render_exprs = render_exprs
|
||||
|
||||
ColumnCollectionConstraint.__init__(
|
||||
self,
|
||||
*columns,
|
||||
name=kw.get("name"),
|
||||
deferrable=kw.get("deferrable"),
|
||||
initially=kw.get("initially"),
|
||||
)
|
||||
self.using = kw.get("using", "gist")
|
||||
where = kw.get("where")
|
||||
if where is not None:
|
||||
self.where = coercions.expect(roles.StatementOptionRole, where)
|
||||
|
||||
self.ops = kw.get("ops", {})
|
||||
|
||||
def _set_parent(self, table, **kw):
|
||||
super()._set_parent(table)
|
||||
|
||||
self._render_exprs = [
|
||||
(
|
||||
expr if not isinstance(expr, str) else table.c[expr],
|
||||
name,
|
||||
operator,
|
||||
)
|
||||
for expr, name, operator in (self._render_exprs)
|
||||
]
|
||||
|
||||
def _copy(self, target_table=None, **kw):
|
||||
elements = [
|
||||
(
|
||||
schema._copy_expression(expr, self.parent, target_table),
|
||||
operator,
|
||||
)
|
||||
for expr, _, operator in self._render_exprs
|
||||
]
|
||||
c = self.__class__(
|
||||
*elements,
|
||||
name=self.name,
|
||||
deferrable=self.deferrable,
|
||||
initially=self.initially,
|
||||
where=self.where,
|
||||
using=self.using,
|
||||
)
|
||||
c.dispatch._update(self.dispatch)
|
||||
return c
|
||||
|
||||
|
||||
def array_agg(*arg, **kw):
|
||||
"""PostgreSQL-specific form of :class:`_functions.array_agg`, ensures
|
||||
return type is :class:`_postgresql.ARRAY` and not
|
||||
the plain :class:`_types.ARRAY`, unless an explicit ``type_``
|
||||
is passed.
|
||||
|
||||
"""
|
||||
kw["_default_array_type"] = ARRAY
|
||||
return functions.func.array_agg(*arg, **kw)
|
||||
|
||||
|
||||
class _regconfig_fn(functions.GenericFunction[_T]):
|
||||
inherit_cache = True
|
||||
|
||||
def __init__(self, *args, **kwargs):
|
||||
args = list(args)
|
||||
if len(args) > 1:
|
||||
initial_arg = coercions.expect(
|
||||
roles.ExpressionElementRole,
|
||||
args.pop(0),
|
||||
name=getattr(self, "name", None),
|
||||
apply_propagate_attrs=self,
|
||||
type_=types.REGCONFIG,
|
||||
)
|
||||
initial_arg = [initial_arg]
|
||||
else:
|
||||
initial_arg = []
|
||||
|
||||
addtl_args = [
|
||||
coercions.expect(
|
||||
roles.ExpressionElementRole,
|
||||
c,
|
||||
name=getattr(self, "name", None),
|
||||
apply_propagate_attrs=self,
|
||||
)
|
||||
for c in args
|
||||
]
|
||||
super().__init__(*(initial_arg + addtl_args), **kwargs)
|
||||
|
||||
|
||||
class to_tsvector(_regconfig_fn):
|
||||
"""The PostgreSQL ``to_tsvector`` SQL function.
|
||||
|
||||
This function applies automatic casting of the REGCONFIG argument
|
||||
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
|
||||
and applies a return type of :class:`_postgresql.TSVECTOR`.
|
||||
|
||||
Assuming the PostgreSQL dialect has been imported, either by invoking
|
||||
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
|
||||
engine using ``create_engine("postgresql...")``,
|
||||
:class:`_postgresql.to_tsvector` will be used automatically when invoking
|
||||
``sqlalchemy.func.to_tsvector()``, ensuring the correct argument and return
|
||||
type handlers are used at compile and execution time.
|
||||
|
||||
.. versionadded:: 2.0.0rc1
|
||||
|
||||
"""
|
||||
|
||||
inherit_cache = True
|
||||
type = types.TSVECTOR
|
||||
|
||||
|
||||
class to_tsquery(_regconfig_fn):
|
||||
"""The PostgreSQL ``to_tsquery`` SQL function.
|
||||
|
||||
This function applies automatic casting of the REGCONFIG argument
|
||||
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
|
||||
and applies a return type of :class:`_postgresql.TSQUERY`.
|
||||
|
||||
Assuming the PostgreSQL dialect has been imported, either by invoking
|
||||
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
|
||||
engine using ``create_engine("postgresql...")``,
|
||||
:class:`_postgresql.to_tsquery` will be used automatically when invoking
|
||||
``sqlalchemy.func.to_tsquery()``, ensuring the correct argument and return
|
||||
type handlers are used at compile and execution time.
|
||||
|
||||
.. versionadded:: 2.0.0rc1
|
||||
|
||||
"""
|
||||
|
||||
inherit_cache = True
|
||||
type = types.TSQUERY
|
||||
|
||||
|
||||
class plainto_tsquery(_regconfig_fn):
|
||||
"""The PostgreSQL ``plainto_tsquery`` SQL function.
|
||||
|
||||
This function applies automatic casting of the REGCONFIG argument
|
||||
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
|
||||
and applies a return type of :class:`_postgresql.TSQUERY`.
|
||||
|
||||
Assuming the PostgreSQL dialect has been imported, either by invoking
|
||||
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
|
||||
engine using ``create_engine("postgresql...")``,
|
||||
:class:`_postgresql.plainto_tsquery` will be used automatically when
|
||||
invoking ``sqlalchemy.func.plainto_tsquery()``, ensuring the correct
|
||||
argument and return type handlers are used at compile and execution time.
|
||||
|
||||
.. versionadded:: 2.0.0rc1
|
||||
|
||||
"""
|
||||
|
||||
inherit_cache = True
|
||||
type = types.TSQUERY
|
||||
|
||||
|
||||
class phraseto_tsquery(_regconfig_fn):
|
||||
"""The PostgreSQL ``phraseto_tsquery`` SQL function.
|
||||
|
||||
This function applies automatic casting of the REGCONFIG argument
|
||||
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
|
||||
and applies a return type of :class:`_postgresql.TSQUERY`.
|
||||
|
||||
Assuming the PostgreSQL dialect has been imported, either by invoking
|
||||
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
|
||||
engine using ``create_engine("postgresql...")``,
|
||||
:class:`_postgresql.phraseto_tsquery` will be used automatically when
|
||||
invoking ``sqlalchemy.func.phraseto_tsquery()``, ensuring the correct
|
||||
argument and return type handlers are used at compile and execution time.
|
||||
|
||||
.. versionadded:: 2.0.0rc1
|
||||
|
||||
"""
|
||||
|
||||
inherit_cache = True
|
||||
type = types.TSQUERY
|
||||
|
||||
|
||||
class websearch_to_tsquery(_regconfig_fn):
|
||||
"""The PostgreSQL ``websearch_to_tsquery`` SQL function.
|
||||
|
||||
This function applies automatic casting of the REGCONFIG argument
|
||||
to use the :class:`_postgresql.REGCONFIG` datatype automatically,
|
||||
and applies a return type of :class:`_postgresql.TSQUERY`.
|
||||
|
||||
Assuming the PostgreSQL dialect has been imported, either by invoking
|
||||
``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
|
||||
engine using ``create_engine("postgresql...")``,
|
||||
:class:`_postgresql.websearch_to_tsquery` will be used automatically when
|
||||
invoking ``sqlalchemy.func.websearch_to_tsquery()``, ensuring the correct
|
||||
argument and return type handlers are used at compile and execution time.
|
||||
|
||||
.. versionadded:: 2.0.0rc1
|
||||
|
||||
"""
|
||||
|
||||
inherit_cache = True
|
||||
type = types.TSQUERY
|
||||
|
||||
|
||||
class ts_headline(_regconfig_fn):
    """The PostgreSQL ``ts_headline`` SQL function.

    This function applies automatic casting of the REGCONFIG argument
    to use the :class:`_postgresql.REGCONFIG` datatype, and applies a
    return type of :class:`_types.TEXT`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.ts_headline` will be used automatically when invoking
    ``sqlalchemy.func.ts_headline()``, ensuring the correct argument and return
    type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = TEXT

    def __init__(self, *args, **kwargs):
        args = list(args)

        # parse types according to
        # https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-HEADLINE
        if len(args) < 2:
            # invalid args; don't do anything
            has_regconfig = False
        elif (
            isinstance(args[1], elements.ColumnElement)
            and args[1].type._type_affinity is types.TSQUERY
        ):
            # tsquery is second argument, no regconfig argument
            has_regconfig = False
        else:
            has_regconfig = True

        if has_regconfig:
            initial_arg = coercions.expect(
                roles.ExpressionElementRole,
                args.pop(0),
                apply_propagate_attrs=self,
                name=getattr(self, "name", None),
                type_=types.REGCONFIG,
            )
            initial_arg = [initial_arg]
        else:
            initial_arg = []

        addtl_args = [
            coercions.expect(
                roles.ExpressionElementRole,
                c,
                name=getattr(self, "name", None),
                apply_propagate_attrs=self,
            )
            for c in args
        ]
        super().__init__(*(initial_arg + addtl_args), **kwargs)
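
# A quick sketch of how the argument handling above plays out (illustrative
# only, not part of the original module; "doc" is an assumed table):
#
#   from sqlalchemy import func, select
#
#   # two-argument form: second argument is a TSQUERY, so no REGCONFIG
#   # argument is prepended
#   select(func.ts_headline(doc.c.body, func.to_tsquery("cat & rat")))
#
#   # three-argument form: the leading "english" is coerced to REGCONFIG
#   select(func.ts_headline("english", doc.c.body, func.to_tsquery("cat & rat")))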
@ -0,0 +1,406 @@
# dialects/postgresql/hstore.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


import re

from .array import ARRAY
from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import GETITEM
from .operators import HAS_ALL
from .operators import HAS_ANY
from .operators import HAS_KEY
from ... import types as sqltypes
from ...sql import functions as sqlfunc


__all__ = ("HSTORE", "hstore")


class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
    """Represent the PostgreSQL HSTORE type.

    The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::

        data_table = Table(
            "data_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", HSTORE),
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(), data={"key1": "value1", "key2": "value2"}
            )

    :class:`.HSTORE` provides for a wide range of operations, including:

    * Index operations::

        data_table.c.data["some key"] == "some value"

    * Containment operations::

        data_table.c.data.has_key("some key")

        data_table.c.data.has_all(["one", "two", "three"])

    * Concatenation::

        data_table.c.data + {"k1": "v1"}

    For a full list of special methods see
    :class:`.HSTORE.comparator_factory`.

    .. container:: topic

        **Detecting Changes in HSTORE columns when using the ORM**

        For usage with the SQLAlchemy ORM, it may be desirable to combine the
        usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
        provided by the :mod:`sqlalchemy.ext.mutable` extension. This
        extension will allow "in-place" changes to the dictionary, e.g.
        addition of new keys or replacement/removal of existing keys to/from
        the current dictionary, to produce events which will be detected by
        the unit of work::

            from sqlalchemy.ext.mutable import MutableDict


            class MyClass(Base):
                __tablename__ = "data_table"

                id = Column(Integer, primary_key=True)
                data = Column(MutableDict.as_mutable(HSTORE))


            my_object = session.query(MyClass).one()

            # in-place mutation, requires Mutable extension
            # in order for the ORM to detect
            my_object.data["some_key"] = "some value"

            session.commit()

        When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
        will not be alerted to any changes to the contents of an existing
        dictionary, unless that dictionary value is re-assigned to the
        HSTORE-attribute itself, thus generating a change event.

    .. seealso::

        :class:`.hstore` - render the PostgreSQL ``hstore()`` function.


    """  # noqa: E501

    __visit_name__ = "HSTORE"
    hashable = False
    text_type = sqltypes.Text()

    def __init__(self, text_type=None):
        """Construct a new :class:`.HSTORE`.

        :param text_type: the type that should be used for indexed values.
         Defaults to :class:`_types.Text`.

        """
        if text_type is not None:
            self.text_type = text_type

    class Comparator(
        sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
    ):
        """Define comparison operations for :class:`.HSTORE`."""

        def has_key(self, other):
            """Boolean expression. Test for presence of a key. Note that the
            key may be a SQLA expression.
            """
            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)

        def has_all(self, other):
            """Boolean expression. Test for presence of all keys in the
            hstore.
            """
            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)

        def has_any(self, other):
            """Boolean expression. Test for presence of any key in the
            hstore.
            """
            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)

        def contains(self, other, **kwargs):
            """Boolean expression. Test if keys (or array) are a superset
            of / contain the keys of the argument hstore expression.

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other):
            """Boolean expression. Test if keys are a proper subset of the
            keys of the argument hstore expression.
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def _setup_getitem(self, index):
            return GETITEM, index, self.type.text_type

        def defined(self, key):
            """Boolean expression. Test for presence of a non-NULL value for
            the key. Note that the key may be a SQLA expression.
            """
            return _HStoreDefinedFunction(self.expr, key)

        def delete(self, key):
            """HStore expression. Returns the contents of this hstore with the
            given key deleted. Note that the key may be a SQLA expression.
            """
            if isinstance(key, dict):
                key = _serialize_hstore(key)
            return _HStoreDeleteFunction(self.expr, key)

        def slice(self, array):
            """HStore expression. Returns a subset of an hstore defined by
            an array of keys.
            """
            return _HStoreSliceFunction(self.expr, array)

        def keys(self):
            """Text array expression. Returns array of keys."""
            return _HStoreKeysFunction(self.expr)

        def vals(self):
            """Text array expression. Returns array of values."""
            return _HStoreValsFunction(self.expr)

        def array(self):
            """Text array expression. Returns array of alternating keys and
            values.
            """
            return _HStoreArrayFunction(self.expr)

        def matrix(self):
            """Text array expression. Returns array of [key, value] pairs."""
            return _HStoreMatrixFunction(self.expr)

    comparator_factory = Comparator

    def bind_processor(self, dialect):
        # note that dialect-specific types like that of psycopg and
        # psycopg2 will override this method to allow driver-level conversion
        # instead, see _PsycopgHStore
        def process(value):
            if isinstance(value, dict):
                return _serialize_hstore(value)
            else:
                return value

        return process

    def result_processor(self, dialect, coltype):
        # note that dialect-specific types like that of psycopg and
        # psycopg2 will override this method to allow driver-level conversion
        # instead, see _PsycopgHStore
        def process(value):
            if value is not None:
                return _parse_hstore(value)
            else:
                return value

        return process


class hstore(sqlfunc.GenericFunction):
    """Construct an hstore value within a SQL expression using the
    PostgreSQL ``hstore()`` function.

    The :class:`.hstore` function accepts one or two arguments as described
    in the PostgreSQL documentation.

    E.g.::

        from sqlalchemy.dialects.postgresql import array, hstore

        select(hstore("key1", "value1"))

        select(
            hstore(
                array(["key1", "key2", "key3"]),
                array(["value1", "value2", "value3"]),
            )
        )

    .. seealso::

        :class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.

    """

    type = HSTORE
    name = "hstore"
    inherit_cache = True


class _HStoreDefinedFunction(sqlfunc.GenericFunction):
    type = sqltypes.Boolean
    name = "defined"
    inherit_cache = True


class _HStoreDeleteFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = "delete"
    inherit_cache = True


class _HStoreSliceFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = "slice"
    inherit_cache = True


class _HStoreKeysFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "akeys"
    inherit_cache = True


class _HStoreValsFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "avals"
    inherit_cache = True


class _HStoreArrayFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "hstore_to_array"
    inherit_cache = True


class _HStoreMatrixFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "hstore_to_matrix"
    inherit_cache = True


#
# parsing. note that none of this is used with the psycopg2 backend,
# which provides its own native extensions.
#

# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given. This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(
    r"""
    (
        "(?P<key> (\\ . | [^"])* )"     # Quoted key
    )
    [ ]* => [ ]*    # Pair operator, optional adjoining whitespace
    (
        (?P<value_null> NULL )          # NULL value
        | "(?P<value> (\\ . | [^"])* )" # Quoted value
    )
    """,
    re.VERBOSE,
)

HSTORE_DELIMITER_RE = re.compile(
    r"""
    [ ]* , [ ]*
    """,
    re.VERBOSE,
)


def _parse_error(hstore_str, pos):
    """format an unmarshalling error."""

    ctx = 20
    hslen = len(hstore_str)

    parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
    residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]

    if len(parsed_tail) > ctx:
        parsed_tail = "[...]" + parsed_tail[1:]
    if len(residual) > ctx:
        residual = residual[:-1] + "[...]"

    return "After %r, could not parse residual at position %d: %r" % (
        parsed_tail,
        pos,
        residual,
    )


def _parse_hstore(hstore_str):
    """Parse an hstore from its literal string representation.

    Attempts to approximate PG's hstore input parsing rules as closely as
    possible. Although currently this is not strictly necessary, since the
    current implementation of hstore's output syntax is stricter than what it
    accepts as input, the documentation makes no guarantee that this will
    always be the case.

    """
    result = {}
    pos = 0
    pair_match = HSTORE_PAIR_RE.match(hstore_str)

    while pair_match is not None:
        key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
        if pair_match.group("value_null"):
            value = None
        else:
            value = (
                pair_match.group("value")
                .replace(r"\"", '"')
                .replace("\\\\", "\\")
            )
        result[key] = value

        pos += pair_match.end()

        delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
        if delim_match is not None:
            pos += delim_match.end()

        pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])

    if pos != len(hstore_str):
        raise ValueError(_parse_error(hstore_str, pos))

    return result


def _serialize_hstore(val):
    """Serialize a dictionary into an hstore literal. Keys and values must
    both be strings (except None for values).

    """

    def esc(s, position):
        if position == "value" and s is None:
            return "NULL"
        elif isinstance(s, str):
            return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
        else:
            raise ValueError(
                "%r in %s position is not a string." % (s, position)
            )

    return ", ".join(
        "%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
    )
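
# Illustrative round trip through the two helpers above (not part of the
# original module), assuming plain string keys and values:
#
#   _parse_hstore('"a"=>"1", "b"=>NULL')      # -> {'a': '1', 'b': None}
#   _serialize_hstore({'a': '1', 'b': None})  # -> '"a"=>"1", "b"=>NULL'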
@ -0,0 +1,367 @@
# dialects/postgresql/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

from __future__ import annotations

from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union

from .array import ARRAY
from .array import array as _pg_array
from .operators import ASTEXT
from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import DELETE_PATH
from .operators import HAS_ALL
from .operators import HAS_ANY
from .operators import HAS_KEY
from .operators import JSONPATH_ASTEXT
from .operators import PATH_EXISTS
from .operators import PATH_MATCH
from ... import types as sqltypes
from ...sql import cast
from ...sql._typing import _T

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql.elements import ColumnElement
    from ...sql.type_api import _BindProcessorType
    from ...sql.type_api import _LiteralProcessorType
    from ...sql.type_api import TypeEngine

__all__ = ("JSON", "JSONB")


class JSONPathType(sqltypes.JSON.JSONPathType):
    def _processor(
        self, dialect: Dialect, super_proc: Optional[Callable[[Any], Any]]
    ) -> Callable[[Any], Any]:
        def process(value: Any) -> Any:
            if isinstance(value, str):
                # If it's already a string assume that it's in json path
                # format. This allows using cast with json path literals
                return value
            elif value:
                # Convert the sequence of path elements into PostgreSQL's
                # brace-delimited path format, e.g. ("a", "b", 1) -> "{a, b, 1}"
                value = "{%s}" % (", ".join(map(str, value)))
            else:
                value = "{}"
            if super_proc:
                value = super_proc(value)
            return value

        return process

    def bind_processor(self, dialect: Dialect) -> _BindProcessorType[Any]:
        return self._processor(dialect, self.string_bind_processor(dialect))  # type: ignore[return-value]  # noqa: E501

    def literal_processor(
        self, dialect: Dialect
    ) -> _LiteralProcessorType[Any]:
        return self._processor(dialect, self.string_literal_processor(dialect))  # type: ignore[return-value]  # noqa: E501


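# Illustrative sketch (not part of this module): what the processor above
# produces. A string is passed through untouched; a sequence of path elements
# is rendered into PostgreSQL's brace format (the quoting shown below comes
# from the string literal processor and is an assumption):
#
#   proc = JSONPATH().literal_processor(some_dialect)
#   proc(("key_1", "key_2", 5))  # -> "'{key_1, key_2, 5}'"
#   proc("$.key_1.key_2[5]")     # returned as-is: already a json path string

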
class JSONPATH(JSONPathType):
    """JSON Path Type.

    This is usually required to cast literal values to json path when using
    json search functions, such as ``jsonb_path_query_array`` or
    ``jsonb_path_exists``::

        stmt = sa.select(
            sa.func.jsonb_path_query_array(
                table.c.jsonb_col, cast("$.address.id", JSONPATH)
            )
        )

    """

    __visit_name__ = "JSONPATH"


class JSON(sqltypes.JSON):
    """Represent the PostgreSQL JSON type.

    :class:`_postgresql.JSON` is used automatically whenever the base
    :class:`_types.JSON` datatype is used against a PostgreSQL backend;
    however, the base :class:`_types.JSON` datatype does not provide Python
    accessors for PostgreSQL-specific comparison methods such as
    :meth:`_postgresql.JSON.Comparator.astext`; additionally, to use
    PostgreSQL ``JSONB``, the :class:`_postgresql.JSONB` datatype should
    be used explicitly.

    .. seealso::

        :class:`_types.JSON` - main documentation for the generic
        cross-platform JSON datatype.

    The operators provided by the PostgreSQL version of :class:`_types.JSON`
    include:

    * Index operations (the ``->`` operator)::

        data_table.c.data["some key"]

        data_table.c.data[5]

    * Index operations returning text
      (the ``->>`` operator)::

        data_table.c.data["some key"].astext == "some value"

      Note that equivalent functionality is available via the
      :attr:`.JSON.Comparator.as_string` accessor.

    * Index operations with CAST
      (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::

        data_table.c.data["some key"].astext.cast(Integer) == 5

      Note that equivalent functionality is available via the
      :attr:`.JSON.Comparator.as_integer` and similar accessors.

    * Path index operations (the ``#>`` operator)::

        data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]

    * Path index operations returning text (the ``#>>`` operator)::

        data_table.c.data[
            ("key_1", "key_2", 5, ..., "key_n")
        ].astext == "some value"

    Index operations return an expression object whose type defaults to
    :class:`_types.JSON`, so that further JSON-oriented instructions
    may be called upon the result type.

    Custom serializers and deserializers are specified at the dialect level,
    that is using :func:`_sa.create_engine`. The reason for this is that when
    using psycopg2, the DBAPI only allows serializers at the per-cursor
    or per-connection level. E.g.::

        engine = create_engine(
            "postgresql+psycopg2://scott:tiger@localhost/test",
            json_serializer=my_serialize_fn,
            json_deserializer=my_deserialize_fn,
        )

    When using the psycopg2 dialect, the json_deserializer is registered
    against the database using ``psycopg2.extras.register_default_json``.

    .. seealso::

        :class:`_types.JSON` - Core level JSON type

        :class:`_postgresql.JSONB`

    """  # noqa

    render_bind_cast = True
    astext_type: TypeEngine[str] = sqltypes.Text()

    def __init__(
        self,
        none_as_null: bool = False,
        astext_type: Optional[TypeEngine[str]] = None,
    ):
        """Construct a :class:`_types.JSON` type.

        :param none_as_null: if True, persist the value ``None`` as a
         SQL NULL value, not the JSON encoding of ``null``. Note that
         when this flag is False, the :func:`.null` construct can still
         be used to persist a NULL value::

             from sqlalchemy import null

             conn.execute(table.insert(), {"data": null()})

         .. seealso::

             :attr:`_types.JSON.NULL`

        :param astext_type: the type to use for the
         :attr:`.JSON.Comparator.astext`
         accessor on indexed attributes. Defaults to :class:`_types.Text`.

        """
        super().__init__(none_as_null=none_as_null)
        if astext_type is not None:
            self.astext_type = astext_type

    class Comparator(sqltypes.JSON.Comparator[_T]):
        """Define comparison operations for :class:`_types.JSON`."""

        type: JSON

        @property
        def astext(self) -> ColumnElement[str]:
            """On an indexed expression, use the "astext" (e.g. "->>")
            conversion when rendered in SQL.

            E.g.::

                select(data_table.c.data["some key"].astext)

            .. seealso::

                :meth:`_expression.ColumnElement.cast`

            """
            if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
                return self.expr.left.operate(  # type: ignore[no-any-return]
                    JSONPATH_ASTEXT,
                    self.expr.right,
                    result_type=self.type.astext_type,
                )
            else:
                return self.expr.left.operate(  # type: ignore[no-any-return]
                    ASTEXT, self.expr.right, result_type=self.type.astext_type
                )

    comparator_factory = Comparator


class JSONB(JSON):
    """Represent the PostgreSQL JSONB type.

    The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
    e.g.::

        data_table = Table(
            "data_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", JSONB),
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(), data={"key1": "value1", "key2": "value2"}
            )

    The :class:`_postgresql.JSONB` type includes all operations provided by
    :class:`_types.JSON`, including the same behaviors for indexing
    operations.
    It also adds additional operators specific to JSONB, including
    :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
    :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
    :meth:`.JSONB.Comparator.contained_by`,
    :meth:`.JSONB.Comparator.delete_path`,
    :meth:`.JSONB.Comparator.path_exists` and
    :meth:`.JSONB.Comparator.path_match`.

    Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB`
    type does not detect
    in-place changes when used with the ORM, unless the
    :mod:`sqlalchemy.ext.mutable` extension is used.

    Custom serializers and deserializers
    are shared with the :class:`_types.JSON` class,
    using the ``json_serializer``
    and ``json_deserializer`` keyword arguments. These must be specified
    at the dialect level using :func:`_sa.create_engine`. When using
    psycopg2, the serializers are associated with the jsonb type using
    ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
    in the same way that ``psycopg2.extras.register_default_json`` is used
    to register these handlers with the json type.

    .. seealso::

        :class:`_types.JSON`

    """

    __visit_name__ = "JSONB"

    class Comparator(JSON.Comparator[_T]):
        """Define comparison operations for :class:`_postgresql.JSONB`."""

        type: JSONB

        def has_key(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test for presence of a key (equivalent of
            the ``?`` operator). Note that the key may be a SQLA expression.
            """
            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)

        def has_all(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test for presence of all keys in jsonb
            (equivalent of the ``?&`` operator)
            """
            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)

        def has_any(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test for presence of any key in jsonb
            (equivalent of the ``?|`` operator)
            """
            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)

        def contains(self, other: Any, **kwargs: Any) -> ColumnElement[bool]:
            """Boolean expression. Test if keys (or array) are a superset
            of / contain the keys of the argument jsonb expression
            (equivalent of the ``@>`` operator).

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test if keys are a proper subset of the
            keys of the argument jsonb expression
            (equivalent of the ``<@`` operator).
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def delete_path(
            self, array: Union[List[str], _pg_array[str]]
        ) -> ColumnElement[JSONB]:
            """JSONB expression. Deletes field or array element specified in
            the argument array (equivalent of the ``#-`` operator).

            The input may be a list of strings that will be coerced to an
            ``ARRAY`` or an instance of :class:`_postgresql.array`.

            .. versionadded:: 2.0
            """
            if not isinstance(array, _pg_array):
                array = _pg_array(array)
            right_side = cast(array, ARRAY(sqltypes.TEXT))
            return self.operate(DELETE_PATH, right_side, result_type=JSONB)

        def path_exists(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test for presence of item given by the
            argument JSONPath expression (equivalent of the ``@?`` operator).

            .. versionadded:: 2.0
            """
            return self.operate(
                PATH_EXISTS, other, result_type=sqltypes.Boolean
            )

        def path_match(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression. Test if the JSONPath predicate given by
            the argument JSONPath expression matches
            (equivalent of the ``@@`` operator).

            Only the first item of the result is taken into account.

            .. versionadded:: 2.0
            """
            return self.operate(
                PATH_MATCH, other, result_type=sqltypes.Boolean
            )

    comparator_factory = Comparator
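
# Illustrative sketch (not part of this module): the JSONB-specific operators
# defined above, as applied against an assumed ``data_table``:
#
#   from sqlalchemy import select
#
#   # ``#-`` operator: remove the nested element at path {a, b}
#   select(data_table.c.data.delete_path(["a", "b"]))
#
#   # ``@?`` operator: filter rows where a JSONPath expression matches
#   select(data_table).where(
#       data_table.c.data.path_exists("$.key1")
#   )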
@ -0,0 +1,505 @@
# dialects/postgresql/named_types.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations

from typing import Any
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING
from typing import Union

from ... import schema
from ... import util
from ...sql import coercions
from ...sql import elements
from ...sql import roles
from ...sql import sqltypes
from ...sql import type_api
from ...sql.base import _NoArg
from ...sql.ddl import InvokeCreateDDLBase
from ...sql.ddl import InvokeDropDDLBase

if TYPE_CHECKING:
    from ...sql._typing import _TypeEngineArgument


class NamedType(sqltypes.TypeEngine):
    """Base for named types."""

    __abstract__ = True
    DDLGenerator: Type[NamedTypeGenerator]
    DDLDropper: Type[NamedTypeDropper]
    create_type: bool

    def create(self, bind, checkfirst=True, **kw):
        """Emit ``CREATE`` DDL for this type.

        :param bind: a connectable :class:`_engine.Engine`,
         :class:`_engine.Connection`, or similar object to emit
         SQL.
        :param checkfirst: if ``True``, a query against
         the PG catalog will be first performed to see
         if the type does not exist already before
         creating.

        """
        bind._run_ddl_visitor(self.DDLGenerator, self, checkfirst=checkfirst)

    def drop(self, bind, checkfirst=True, **kw):
        """Emit ``DROP`` DDL for this type.

        :param bind: a connectable :class:`_engine.Engine`,
         :class:`_engine.Connection`, or similar object to emit
         SQL.
        :param checkfirst: if ``True``, a query against
         the PG catalog will be first performed to see
         if the type actually exists before dropping.

        """
        bind._run_ddl_visitor(self.DDLDropper, self, checkfirst=checkfirst)

    def _check_for_name_in_memos(self, checkfirst, kw):
        """Look in the 'ddl runner' for 'memos', then
        note our name in that collection.

        This is to ensure a particular named type is operated
        upon only once within any kind of create/drop
        sequence without relying upon "checkfirst".

        """
        if not self.create_type:
            return True
        if "_ddl_runner" in kw:
            ddl_runner = kw["_ddl_runner"]
            type_name = f"pg_{self.__visit_name__}"
            if type_name in ddl_runner.memo:
                existing = ddl_runner.memo[type_name]
            else:
                existing = ddl_runner.memo[type_name] = set()
            present = (self.schema, self.name) in existing
            existing.add((self.schema, self.name))
            return present
        else:
            return False

    def _on_table_create(self, target, bind, checkfirst=False, **kw):
        if (
            checkfirst
            or (
                not self.metadata
                and not kw.get("_is_metadata_operation", False)
            )
        ) and not self._check_for_name_in_memos(checkfirst, kw):
            self.create(bind=bind, checkfirst=checkfirst)

    def _on_table_drop(self, target, bind, checkfirst=False, **kw):
        if (
            not self.metadata
            and not kw.get("_is_metadata_operation", False)
            and not self._check_for_name_in_memos(checkfirst, kw)
        ):
            self.drop(bind=bind, checkfirst=checkfirst)

    def _on_metadata_create(self, target, bind, checkfirst=False, **kw):
        if not self._check_for_name_in_memos(checkfirst, kw):
            self.create(bind=bind, checkfirst=checkfirst)

    def _on_metadata_drop(self, target, bind, checkfirst=False, **kw):
        if not self._check_for_name_in_memos(checkfirst, kw):
            self.drop(bind=bind, checkfirst=checkfirst)


class NamedTypeGenerator(InvokeCreateDDLBase):
    def __init__(self, dialect, connection, checkfirst=False, **kwargs):
        super().__init__(connection, **kwargs)
        self.checkfirst = checkfirst

    def _can_create_type(self, type_):
        if not self.checkfirst:
            return True

        effective_schema = self.connection.schema_for_object(type_)
        return not self.connection.dialect.has_type(
            self.connection, type_.name, schema=effective_schema
        )


class NamedTypeDropper(InvokeDropDDLBase):
    def __init__(self, dialect, connection, checkfirst=False, **kwargs):
        super().__init__(connection, **kwargs)
        self.checkfirst = checkfirst

    def _can_drop_type(self, type_):
        if not self.checkfirst:
            return True

        effective_schema = self.connection.schema_for_object(type_)
        return self.connection.dialect.has_type(
            self.connection, type_.name, schema=effective_schema
        )


class EnumGenerator(NamedTypeGenerator):
    def visit_enum(self, enum):
        if not self._can_create_type(enum):
            return

        with self.with_ddl_events(enum):
            self.connection.execute(CreateEnumType(enum))


class EnumDropper(NamedTypeDropper):
    def visit_enum(self, enum):
        if not self._can_drop_type(enum):
            return

        with self.with_ddl_events(enum):
            self.connection.execute(DropEnumType(enum))


class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
    """PostgreSQL ENUM type.

    This is a subclass of :class:`_types.Enum` which includes
    support for PG's ``CREATE TYPE`` and ``DROP TYPE``.

    When the builtin type :class:`_types.Enum` is used and the
    :paramref:`.Enum.native_enum` flag is left at its default of
    True, the PostgreSQL backend will use a :class:`_postgresql.ENUM`
    type as the implementation, so the special create/drop rules
    will be used.

    The create/drop behavior of ENUM is necessarily intricate, due to the
    awkward relationship the ENUM type has with the
    parent table, in that it may be "owned" by just a single table, or
    may be shared among many tables.

    When using :class:`_types.Enum` or :class:`_postgresql.ENUM`
    in an "inline" fashion, the ``CREATE TYPE`` and ``DROP TYPE`` are emitted
    corresponding to when the :meth:`_schema.Table.create` and
    :meth:`_schema.Table.drop`
    methods are called::

        table = Table(
            "sometable",
            metadata,
            Column("some_enum", ENUM("a", "b", "c", name="myenum")),
        )

        table.create(engine)  # will emit CREATE ENUM and CREATE TABLE
        table.drop(engine)  # will emit DROP TABLE and DROP ENUM

    To use a common enumerated type between multiple tables, the best
    practice is to declare the :class:`_types.Enum` or
    :class:`_postgresql.ENUM` independently, and associate it with the
    :class:`_schema.MetaData` object itself::

        my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)

        t1 = Table("sometable_one", metadata, Column("some_enum", myenum))

        t2 = Table("sometable_two", metadata, Column("some_enum", myenum))

    When this pattern is used, care must still be taken at the level
    of individual table creates. Emitting CREATE TABLE without also
    specifying ``checkfirst=True`` will still cause issues::

        t1.create(engine)  # will fail: no such type 'myenum'

    If we specify ``checkfirst=True``, the individual table-level create
    operation will check for the ``ENUM`` and create it if not exists::

        # will check if enum exists, and emit CREATE TYPE if not
        t1.create(engine, checkfirst=True)

    When using a metadata-level ENUM type, the type will always be created
    and dropped when the metadata-wide create/drop is called::

        metadata.create_all(engine)  # will emit CREATE TYPE
        metadata.drop_all(engine)  # will emit DROP TYPE

    The type can also be created and dropped directly::

        my_enum.create(engine)
        my_enum.drop(engine)

    """

    native_enum = True
    DDLGenerator = EnumGenerator
    DDLDropper = EnumDropper

    def __init__(
        self,
        *enums,
        name: Union[str, _NoArg, None] = _NoArg.NO_ARG,
        create_type: bool = True,
        **kw,
    ):
        """Construct an :class:`_postgresql.ENUM`.

        Arguments are the same as that of
        :class:`_types.Enum`, but also including
        the following parameters.

        :param create_type: Defaults to True.
         Indicates that ``CREATE TYPE`` should be
         emitted, after optionally checking for the
         presence of the type, when the parent
         table is being created; and additionally
         that ``DROP TYPE`` is called when the table
         is dropped. When ``False``, no check
         will be performed and no ``CREATE TYPE``
         or ``DROP TYPE`` is emitted, unless
         :meth:`~.postgresql.ENUM.create`
         or :meth:`~.postgresql.ENUM.drop`
         are called directly.
         Setting to ``False`` is helpful
         when emitting a creation scheme to a SQL file
         without access to the actual database -
         the :meth:`~.postgresql.ENUM.create` and
         :meth:`~.postgresql.ENUM.drop` methods can
         be used to emit SQL to a target bind.

        """
        native_enum = kw.pop("native_enum", None)
        if native_enum is False:
            util.warn(
                "the native_enum flag does not apply to the "
                "sqlalchemy.dialects.postgresql.ENUM datatype; this type "
                "always refers to ENUM. Use sqlalchemy.types.Enum for "
                "non-native enum."
            )
        self.create_type = create_type
        if name is not _NoArg.NO_ARG:
            kw["name"] = name
        super().__init__(*enums, **kw)

    def coerce_compared_value(self, op, value):
        super_coerced_type = super().coerce_compared_value(op, value)
        if (
            super_coerced_type._type_affinity
            is type_api.STRINGTYPE._type_affinity
        ):
            return self
        else:
            return super_coerced_type

    @classmethod
    def __test_init__(cls):
        return cls(name="name")

    @classmethod
    def adapt_emulated_to_native(cls, impl, **kw):
        """Produce a PostgreSQL native :class:`_postgresql.ENUM` from plain
        :class:`.Enum`.

        """
        kw.setdefault("validate_strings", impl.validate_strings)
        kw.setdefault("name", impl.name)
        kw.setdefault("schema", impl.schema)
        kw.setdefault("inherit_schema", impl.inherit_schema)
        kw.setdefault("metadata", impl.metadata)
        kw.setdefault("_create_events", False)
        kw.setdefault("values_callable", impl.values_callable)
        kw.setdefault("omit_aliases", impl._omit_aliases)
        kw.setdefault("_adapted_from", impl)
        if type_api._is_native_for_emulated(impl.__class__):
            kw.setdefault("create_type", impl.create_type)

        return cls(**kw)

    def create(self, bind=None, checkfirst=True):
        """Emit ``CREATE TYPE`` for this
        :class:`_postgresql.ENUM`.

        If the underlying dialect does not support
        PostgreSQL CREATE TYPE, no action is taken.

        :param bind: a connectable :class:`_engine.Engine`,
         :class:`_engine.Connection`, or similar object to emit
         SQL.
        :param checkfirst: if ``True``, a query against
         the PG catalog will be first performed to see
         if the type does not exist already before
         creating.

        """
        if not bind.dialect.supports_native_enum:
            return

        super().create(bind, checkfirst=checkfirst)

    def drop(self, bind=None, checkfirst=True):
        """Emit ``DROP TYPE`` for this
        :class:`_postgresql.ENUM`.

        If the underlying dialect does not support
        PostgreSQL DROP TYPE, no action is taken.

        :param bind: a connectable :class:`_engine.Engine`,
         :class:`_engine.Connection`, or similar object to emit
         SQL.
        :param checkfirst: if ``True``, a query against
         the PG catalog will be first performed to see
         if the type actually exists before dropping.

        """
        if not bind.dialect.supports_native_enum:
            return

        super().drop(bind, checkfirst=checkfirst)

    def get_dbapi_type(self, dbapi):
        """don't return dbapi.STRING for ENUM in PostgreSQL, since that's
        a different type"""

        return None


class DomainGenerator(NamedTypeGenerator):
    def visit_DOMAIN(self, domain):
        if not self._can_create_type(domain):
            return
        with self.with_ddl_events(domain):
            self.connection.execute(CreateDomainType(domain))


class DomainDropper(NamedTypeDropper):
    def visit_DOMAIN(self, domain):
        if not self._can_drop_type(domain):
            return

        with self.with_ddl_events(domain):
            self.connection.execute(DropDomainType(domain))


class DOMAIN(NamedType, sqltypes.SchemaType):
    r"""Represent the DOMAIN PostgreSQL type.

    A domain is essentially a data type with optional constraints
    that restrict the allowed set of values. E.g.::

        PositiveInt = DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True)

        UsPostalCode = DOMAIN(
            "us_postal_code",
            Text,
            check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'",
        )

    See the `PostgreSQL documentation`__ for additional details.

    __ https://www.postgresql.org/docs/current/sql-createdomain.html

    .. versionadded:: 2.0

    """  # noqa: E501

    DDLGenerator = DomainGenerator
    DDLDropper = DomainDropper

    __visit_name__ = "DOMAIN"

    def __init__(
        self,
        name: str,
        data_type: _TypeEngineArgument[Any],
        *,
        collation: Optional[str] = None,
        default: Union[elements.TextClause, str, None] = None,
        constraint_name: Optional[str] = None,
        not_null: Optional[bool] = None,
        check: Union[elements.TextClause, str, None] = None,
        create_type: bool = True,
        **kw: Any,
    ):
        """
        Construct a DOMAIN.

        :param name: the name of the domain
        :param data_type: The underlying data type of the domain.
         This can include array specifiers.
        :param collation: An optional collation for the domain.
         If no collation is specified, the underlying data type's default
         collation is used. The underlying type must be collatable if
         ``collation`` is specified.
        :param default: The DEFAULT clause specifies a default value for
         columns of the domain data type. The default should be a string
         or a :func:`_expression.text` value.
         If no default value is specified, then the default value is
         the null value.
        :param constraint_name: An optional name for a constraint.
         If not specified, the backend generates a name.
        :param not_null: Values of this domain are prevented from being null.
         By default domains are allowed to be null. If not specified,
         no nullability clause will be emitted.
        :param check: A CHECK clause specifies an integrity constraint or
         test which values of the domain must satisfy. A constraint must be
         an expression producing a Boolean result that can use the key
         word VALUE to refer to the value being tested.
         Differently from PostgreSQL, only a single check clause is
         currently allowed in SQLAlchemy.
        :param schema: optional schema name
        :param metadata: optional :class:`_schema.MetaData` object which
         this :class:`_postgresql.DOMAIN` will be directly associated with
        :param create_type: Defaults to True.
         Indicates that ``CREATE TYPE`` should be emitted, after optionally
         checking for the presence of the type, when the parent table is
         being created; and additionally that ``DROP TYPE`` is called
         when the table is dropped.

        """
        self.data_type = type_api.to_instance(data_type)
        self.default = default
        self.collation = collation
        self.constraint_name = constraint_name
        self.not_null = bool(not_null)
        if check is not None:
            check = coercions.expect(roles.DDLExpressionRole, check)
        self.check = check
        self.create_type = create_type
        super().__init__(name=name, **kw)

    @classmethod
    def __test_init__(cls):
        return cls("name", sqltypes.Integer)

    def adapt(self, impl, **kw):
        if self.default:
            kw["default"] = self.default
        if self.constraint_name is not None:
            kw["constraint_name"] = self.constraint_name
        if self.not_null:
            kw["not_null"] = self.not_null
        if self.check is not None:
            kw["check"] = str(self.check)
        if self.create_type:
            kw["create_type"] = self.create_type

        return super().adapt(impl, **kw)


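# Illustrative sketch (not part of this module): using the DOMAIN from the
# docstring above within a table definition; names are assumed for the
# example.
#
#   from sqlalchemy import Column, Integer, MetaData, Table
#
#   metadata = MetaData()
#   positive_int = DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True)
#   stock = Table("stock", metadata, Column("quantity", positive_int))
#
#   # emits CREATE DOMAIN pos_int ... followed by CREATE TABLE stock
#   metadata.create_all(engine)

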
class CreateEnumType(schema._CreateDropBase):
    __visit_name__ = "create_enum_type"


class DropEnumType(schema._CreateDropBase):
    __visit_name__ = "drop_enum_type"


class CreateDomainType(schema._CreateDropBase):
    """Represent a CREATE DOMAIN statement."""

    __visit_name__ = "create_domain_type"


class DropDomainType(schema._CreateDropBase):
    """Represent a DROP DOMAIN statement."""

    __visit_name__ = "drop_domain_type"
@ -0,0 +1,129 @@
# dialects/postgresql/operators.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from ...sql import operators


_getitem_precedence = operators._PRECEDENCE[operators.json_getitem_op]
_eq_precedence = operators._PRECEDENCE[operators.eq]

# JSON + JSONB
ASTEXT = operators.custom_op(
    "->>",
    precedence=_getitem_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
)

JSONPATH_ASTEXT = operators.custom_op(
    "#>>",
    precedence=_getitem_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
)

# JSONB + HSTORE
HAS_KEY = operators.custom_op(
    "?",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

HAS_ALL = operators.custom_op(
    "?&",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

HAS_ANY = operators.custom_op(
    "?|",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

# JSONB
DELETE_PATH = operators.custom_op(
    "#-",
    precedence=_getitem_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
)

PATH_EXISTS = operators.custom_op(
    "@?",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

PATH_MATCH = operators.custom_op(
    "@@",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

# JSONB + ARRAY + HSTORE + RANGE
CONTAINS = operators.custom_op(
    "@>",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

CONTAINED_BY = operators.custom_op(
    "<@",
    precedence=_eq_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
    is_comparison=True,
)

# ARRAY + RANGE
OVERLAP = operators.custom_op(
    "&&",
    precedence=_eq_precedence,
    is_comparison=True,
)

# RANGE
STRICTLY_LEFT_OF = operators.custom_op(
    "<<", precedence=_eq_precedence, is_comparison=True
)

STRICTLY_RIGHT_OF = operators.custom_op(
    ">>", precedence=_eq_precedence, is_comparison=True
)

NOT_EXTEND_RIGHT_OF = operators.custom_op(
    "&<", precedence=_eq_precedence, is_comparison=True
)

NOT_EXTEND_LEFT_OF = operators.custom_op(
    "&>", precedence=_eq_precedence, is_comparison=True
)

ADJACENT_TO = operators.custom_op(
    "-|-", precedence=_eq_precedence, is_comparison=True
)

# HSTORE
GETITEM = operators.custom_op(
    "->",
    precedence=_getitem_precedence,
    natural_self_precedent=True,
    eager_grouping=True,
)
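
# Illustrative sketch (not part of this module): each custom_op above is
# applied through ``operate()`` by the comparator methods in the sibling
# type modules; the PostgreSQL compiler then renders the raw operator text.
# ``data_table`` is an assumed table and ``sqltypes`` refers to
# ``sqlalchemy.types``:
#
#   expr = data_table.c.data.operate(
#       CONTAINS, {"k": "v"}, result_type=sqltypes.Boolean
#   )
#   # renders: data_table.data @> %(data_1)s
#
# ``is_comparison=True`` marks the result as boolean-valued for use in WHERE
# clauses; ``precedence`` and ``eager_grouping`` control parenthesization.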
@ -0,0 +1,666 @@
# dialects/postgresql/pg8000.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""
.. dialect:: postgresql+pg8000
    :name: pg8000
    :dbapi: pg8000
    :connectstring: postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]
    :url: https://pypi.org/project/pg8000/

.. versionchanged:: 1.4 The pg8000 dialect has been updated for version
   1.16.6 and higher, and is again part of SQLAlchemy's continuous integration
   with full feature support.

.. _pg8000_unicode:

Unicode
-------

pg8000 will encode / decode string values between it and the server using the
PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::

    # client_encoding = sql_ascii # actually, defaults to database encoding
    client_encoding = utf8

The ``client_encoding`` can be overridden for a session by executing the SQL:

.. sourcecode:: sql

    SET CLIENT_ENCODING TO 'utf8';

SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::

    engine = create_engine(
        "postgresql+pg8000://user:pass@host/dbname", client_encoding="utf8"
    )

.. _pg8000_ssl:

SSL Connections
---------------

pg8000 accepts a Python ``SSLContext`` object which may be specified using the
:paramref:`_sa.create_engine.connect_args` dictionary::

    import ssl

    ssl_context = ssl.create_default_context()
    engine = sa.create_engine(
        "postgresql+pg8000://scott:tiger@192.168.0.199/test",
        connect_args={"ssl_context": ssl_context},
    )

If the server uses an automatically-generated certificate that is self-signed
or does not match the host name (as seen from the client), it may also be
necessary to disable hostname checking::

    import ssl

    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    ssl_context.verify_mode = ssl.CERT_NONE
    engine = sa.create_engine(
        "postgresql+pg8000://scott:tiger@192.168.0.199/test",
        connect_args={"ssl_context": ssl_context},
    )

.. _pg8000_isolation_level:

pg8000 Transaction Isolation Level
----------------------------------

The pg8000 dialect offers the same isolation level settings as that
of the :ref:`psycopg2 <psycopg2_isolation_level>` dialect:

* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``

.. seealso::

    :ref:`postgresql_isolation_level`

    :ref:`psycopg2_isolation_level`


"""  # noqa
import decimal
import re

from . import ranges
from .array import ARRAY as PGARRAY
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import ENUM
from .base import INTERVAL
from .base import PGCompiler
from .base import PGDialect
from .base import PGExecutionContext
from .base import PGIdentifierPreparer
from .json import JSON
from .json import JSONB
from .json import JSONPathType
from .pg_catalog import _SpaceVector
from .pg_catalog import OIDVECTOR
from .types import CITEXT
from ... import exc
from ... import util
from ...engine import processors
from ...sql import sqltypes
from ...sql.elements import quoted_name


class _PGString(sqltypes.String):
    render_bind_cast = True


class _PGNumeric(sqltypes.Numeric):
    render_bind_cast = True

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in _FLOAT_TYPES:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale
                )
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # pg8000 returns Decimal natively for 1700
                return None
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )
        else:
            if coltype in _FLOAT_TYPES:
                # pg8000 returns float natively for 701
                return None
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                return processors.to_float
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )


class _PGFloat(_PGNumeric, sqltypes.Float):
    __visit_name__ = "float"
    render_bind_cast = True


class _PGNumericNoBind(_PGNumeric):
    def bind_processor(self, dialect):
        return None


class _PGJSON(JSON):
    render_bind_cast = True

    def result_processor(self, dialect, coltype):
        return None


class _PGJSONB(JSONB):
    render_bind_cast = True

    def result_processor(self, dialect, coltype):
        return None


class _PGJSONIndexType(sqltypes.JSON.JSONIndexType):
    def get_dbapi_type(self, dbapi):
        raise NotImplementedError("should not be here")


class _PGJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
    __visit_name__ = "json_int_index"

    render_bind_cast = True


class _PGJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
    __visit_name__ = "json_str_index"

    render_bind_cast = True


class _PGJSONPathType(JSONPathType):
    pass

    # DBAPI type 1009


class _PGEnum(ENUM):
    def get_dbapi_type(self, dbapi):
        return dbapi.UNKNOWN


class _PGInterval(INTERVAL):
    render_bind_cast = True

    def get_dbapi_type(self, dbapi):
        return dbapi.INTERVAL

    @classmethod
    def adapt_emulated_to_native(cls, interval, **kw):
        return _PGInterval(precision=interval.second_precision)


class _PGTimeStamp(sqltypes.DateTime):
    render_bind_cast = True


class _PGDate(sqltypes.Date):
    render_bind_cast = True


class _PGTime(sqltypes.Time):
    render_bind_cast = True


class _PGInteger(sqltypes.Integer):
    render_bind_cast = True


class _PGSmallInteger(sqltypes.SmallInteger):
    render_bind_cast = True


class _PGNullType(sqltypes.NullType):
    pass


class _PGBigInteger(sqltypes.BigInteger):
    render_bind_cast = True


class _PGBoolean(sqltypes.Boolean):
    render_bind_cast = True


class _PGARRAY(PGARRAY):
    render_bind_cast = True


class _PGOIDVECTOR(_SpaceVector, OIDVECTOR):
    pass


class _Pg8000Range(ranges.AbstractSingleRangeImpl):
    def bind_processor(self, dialect):
        pg8000_Range = dialect.dbapi.Range

        def to_range(value):
            if isinstance(value, ranges.Range):
                value = pg8000_Range(
                    value.lower, value.upper, value.bounds, value.empty
                )
            return value

        return to_range

    def result_processor(self, dialect, coltype):
        def to_range(value):
            if value is not None:
                value = ranges.Range(
                    value.lower,
                    value.upper,
                    bounds=value.bounds,
                    empty=value.is_empty,
                )
            return value

        return to_range


class _Pg8000MultiRange(ranges.AbstractMultiRangeImpl):
    def bind_processor(self, dialect):
        pg8000_Range = dialect.dbapi.Range

        def to_multirange(value):
            if isinstance(value, list):
                mr = []
                for v in value:
                    if isinstance(v, ranges.Range):
                        mr.append(
                            pg8000_Range(v.lower, v.upper, v.bounds, v.empty)
                        )
                    else:
                        mr.append(v)
                return mr
            else:
                return value

        return to_multirange

    def result_processor(self, dialect, coltype):
        def to_multirange(value):
            if value is None:
                return None
            else:
                return ranges.MultiRange(
                    ranges.Range(
                        v.lower, v.upper, bounds=v.bounds, empty=v.is_empty
                    )
                    for v in value
                )

        return to_multirange


_server_side_id = util.counter()
|
||||
|
||||
|
||||
class PGExecutionContext_pg8000(PGExecutionContext):
|
||||
def create_server_side_cursor(self):
|
||||
ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
|
||||
return ServerSideCursor(self._dbapi_connection.cursor(), ident)
|
||||
|
||||
def pre_exec(self):
|
||||
if not self.compiled:
|
||||
return
|
||||
|
||||
|
||||
class ServerSideCursor:
|
||||
server_side = True
|
||||
|
||||
def __init__(self, cursor, ident):
|
||||
self.ident = ident
|
||||
self.cursor = cursor
|
||||
|
||||
@property
|
||||
def connection(self):
|
||||
return self.cursor.connection
|
||||
|
||||
@property
|
||||
def rowcount(self):
|
||||
return self.cursor.rowcount
|
||||
|
||||
@property
|
||||
def description(self):
|
||||
return self.cursor.description
|
||||
|
||||
def execute(self, operation, args=(), stream=None):
|
||||
op = "DECLARE " + self.ident + " NO SCROLL CURSOR FOR " + operation
|
||||
self.cursor.execute(op, args, stream=stream)
|
||||
return self
|
||||
|
||||
def executemany(self, operation, param_sets):
|
||||
self.cursor.executemany(operation, param_sets)
|
||||
return self
|
||||
|
||||
def fetchone(self):
|
||||
self.cursor.execute("FETCH FORWARD 1 FROM " + self.ident)
|
||||
return self.cursor.fetchone()
|
||||
|
||||
def fetchmany(self, num=None):
|
||||
if num is None:
|
||||
return self.fetchall()
|
||||
else:
|
||||
self.cursor.execute(
|
||||
"FETCH FORWARD " + str(int(num)) + " FROM " + self.ident
|
||||
)
|
||||
return self.cursor.fetchall()
|
||||
|
||||
def fetchall(self):
|
||||
self.cursor.execute("FETCH FORWARD ALL FROM " + self.ident)
|
||||
return self.cursor.fetchall()
|
||||
|
||||
def close(self):
|
||||
self.cursor.execute("CLOSE " + self.ident)
|
||||
self.cursor.close()
|
||||
|
||||
def setinputsizes(self, *sizes):
|
||||
self.cursor.setinputsizes(*sizes)
|
||||
|
||||
def setoutputsize(self, size, column=None):
|
||||
pass
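
    # Sketch of the statements this wrapper emits for one server-side
    # result (the cursor name shown is illustrative):
    #
    #   DECLARE c_7f3a_1 NO SCROLL CURSOR FOR SELECT ...
    #   FETCH FORWARD 50 FROM c_7f3a_1    -- fetchmany(50)
    #   CLOSE c_7f3a_1
    #
    # i.e. the DBAPI cursor protocol is emulated with explicit
    # DECLARE/FETCH/CLOSE statements driven through a plain pg8000 cursor.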


class PGCompiler_pg8000(PGCompiler):
    def visit_mod_binary(self, binary, operator, **kw):
        return (
            self.process(binary.left, **kw)
            + " %% "
            + self.process(binary.right, **kw)
        )
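
    # The dialect's "format" paramstyle treats "%" as the bind-parameter
    # escape character, so a literal modulo operator must be doubled to
    # "%%" to survive parameter interpolation.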


class PGIdentifierPreparer_pg8000(PGIdentifierPreparer):
    def __init__(self, *args, **kwargs):
        PGIdentifierPreparer.__init__(self, *args, **kwargs)
        self._double_percents = False


class PGDialect_pg8000(PGDialect):
    driver = "pg8000"
    supports_statement_cache = True

    supports_unicode_statements = True

    supports_unicode_binds = True

    default_paramstyle = "format"
    supports_sane_multi_rowcount = True
    execution_ctx_cls = PGExecutionContext_pg8000
    statement_compiler = PGCompiler_pg8000
    preparer = PGIdentifierPreparer_pg8000
    supports_server_side_cursors = True

    render_bind_cast = True

    # reversed as of pg8000 1.16.6; 1.16.5 and lower
    # are no longer compatible
    description_encoding = None
    # description_encoding = "use_encoding"

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.String: _PGString,
            sqltypes.Numeric: _PGNumericNoBind,
            sqltypes.Float: _PGFloat,
            sqltypes.JSON: _PGJSON,
            sqltypes.Boolean: _PGBoolean,
            sqltypes.NullType: _PGNullType,
            JSONB: _PGJSONB,
            CITEXT: CITEXT,
            sqltypes.JSON.JSONPathType: _PGJSONPathType,
            sqltypes.JSON.JSONIndexType: _PGJSONIndexType,
            sqltypes.JSON.JSONIntIndexType: _PGJSONIntIndexType,
            sqltypes.JSON.JSONStrIndexType: _PGJSONStrIndexType,
            sqltypes.Interval: _PGInterval,
            INTERVAL: _PGInterval,
            sqltypes.DateTime: _PGTimeStamp,
            sqltypes.Date: _PGDate,
            sqltypes.Time: _PGTime,
            sqltypes.Integer: _PGInteger,
            sqltypes.SmallInteger: _PGSmallInteger,
            sqltypes.BigInteger: _PGBigInteger,
            sqltypes.Enum: _PGEnum,
            sqltypes.ARRAY: _PGARRAY,
            OIDVECTOR: _PGOIDVECTOR,
            ranges.INT4RANGE: _Pg8000Range,
            ranges.INT8RANGE: _Pg8000Range,
            ranges.NUMRANGE: _Pg8000Range,
            ranges.DATERANGE: _Pg8000Range,
            ranges.TSRANGE: _Pg8000Range,
            ranges.TSTZRANGE: _Pg8000Range,
            ranges.INT4MULTIRANGE: _Pg8000MultiRange,
            ranges.INT8MULTIRANGE: _Pg8000MultiRange,
            ranges.NUMMULTIRANGE: _Pg8000MultiRange,
            ranges.DATEMULTIRANGE: _Pg8000MultiRange,
            ranges.TSMULTIRANGE: _Pg8000MultiRange,
            ranges.TSTZMULTIRANGE: _Pg8000MultiRange,
        },
    )

    def __init__(self, client_encoding=None, **kwargs):
        PGDialect.__init__(self, **kwargs)
        self.client_encoding = client_encoding

        if self._dbapi_version < (1, 16, 6):
            raise NotImplementedError("pg8000 1.16.6 or greater is required")

        if self._native_inet_types:
            raise NotImplementedError(
                "The pg8000 dialect does not fully implement "
                "ipaddress type handling; INET is supported by default, "
                "CIDR is not"
            )

    @util.memoized_property
    def _dbapi_version(self):
        if self.dbapi and hasattr(self.dbapi, "__version__"):
            return tuple(
                [
                    int(x)
                    for x in re.findall(
                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
                    )
                ]
            )
        else:
            return (99, 99, 99)
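
    # e.g. a pg8000 __version__ string of "1.16.6" parses to (1, 16, 6);
    # the (99, 99, 99) fallback deliberately satisfies the minimum-version
    # check above when no version string is available to inspect.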

    @classmethod
    def import_dbapi(cls):
        return __import__("pg8000")

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username="user")
        if "port" in opts:
            opts["port"] = int(opts["port"])
        opts.update(url.query)
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.InterfaceError) and "network error" in str(
            e
        ):
            # new as of pg8000 1.19.0 for broken connections
            return True

        # connection was closed normally
        return "connection is closed" in str(e)

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "AUTOCOMMIT",
            "READ COMMITTED",
            "READ UNCOMMITTED",
            "REPEATABLE READ",
            "SERIALIZABLE",
        )

    def set_isolation_level(self, dbapi_connection, level):
        level = level.replace("_", " ")

        if level == "AUTOCOMMIT":
            dbapi_connection.autocommit = True
        else:
            dbapi_connection.autocommit = False
            cursor = dbapi_connection.cursor()
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION "
                f"ISOLATION LEVEL {level}"
            )
            cursor.execute("COMMIT")
            cursor.close()

    def set_readonly(self, connection, value):
        cursor = connection.cursor()
        try:
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
                % ("READ ONLY" if value else "READ WRITE")
            )
            cursor.execute("COMMIT")
        finally:
            cursor.close()

    def get_readonly(self, connection):
        cursor = connection.cursor()
        try:
            cursor.execute("show transaction_read_only")
            val = cursor.fetchone()[0]
        finally:
            cursor.close()

        return val == "on"

    def set_deferrable(self, connection, value):
        cursor = connection.cursor()
        try:
            cursor.execute(
                "SET SESSION CHARACTERISTICS AS TRANSACTION %s"
                % ("DEFERRABLE" if value else "NOT DEFERRABLE")
            )
            cursor.execute("COMMIT")
        finally:
            cursor.close()

    def get_deferrable(self, connection):
        cursor = connection.cursor()
        try:
            cursor.execute("show transaction_deferrable")
            val = cursor.fetchone()[0]
        finally:
            cursor.close()

        return val == "on"

    def _set_client_encoding(self, dbapi_connection, client_encoding):
        cursor = dbapi_connection.cursor()
        cursor.execute(
            f"""SET CLIENT_ENCODING TO '{
                client_encoding.replace("'", "''")
            }'"""
        )
        cursor.execute("COMMIT")
        cursor.close()

    def do_begin_twophase(self, connection, xid):
        connection.connection.tpc_begin((0, xid, ""))

    def do_prepare_twophase(self, connection, xid):
        connection.connection.tpc_prepare()

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        connection.connection.tpc_rollback((0, xid, ""))

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        connection.connection.tpc_commit((0, xid, ""))

    def do_recover_twophase(self, connection):
        return [row[1] for row in connection.connection.tpc_recover()]

    def on_connect(self):
        fns = []

        def on_connect(conn):
            conn.py_types[quoted_name] = conn.py_types[str]

        fns.append(on_connect)

        if self.client_encoding is not None:

            def on_connect(conn):
                self._set_client_encoding(conn, self.client_encoding)

            fns.append(on_connect)

        if self._native_inet_types is False:

            def on_connect(conn):
                # inet
                conn.register_in_adapter(869, lambda s: s)

                # cidr
                conn.register_in_adapter(650, lambda s: s)

            fns.append(on_connect)

        if self._json_deserializer:

            def on_connect(conn):
                # json
                conn.register_in_adapter(114, self._json_deserializer)

                # jsonb
                conn.register_in_adapter(3802, self._json_deserializer)

            fns.append(on_connect)

        if len(fns) > 0:

            def on_connect(conn):
                for fn in fns:
                    fn(conn)

            return on_connect
        else:
            return None

    @util.memoized_property
    def _dialect_specific_select_one(self):
        return ";"


dialect = PGDialect_pg8000
@ -0,0 +1,300 @@
# dialects/postgresql/pg_catalog.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from .array import ARRAY
from .types import OID
from .types import REGCLASS
from ... import Column
from ... import func
from ... import MetaData
from ... import Table
from ...types import BigInteger
from ...types import Boolean
from ...types import CHAR
from ...types import Float
from ...types import Integer
from ...types import SmallInteger
from ...types import String
from ...types import Text
from ...types import TypeDecorator


# types
class NAME(TypeDecorator):
    impl = String(64, collation="C")
    cache_ok = True


class PG_NODE_TREE(TypeDecorator):
    impl = Text(collation="C")
    cache_ok = True


class INT2VECTOR(TypeDecorator):
    impl = ARRAY(SmallInteger)
    cache_ok = True


class OIDVECTOR(TypeDecorator):
    impl = ARRAY(OID)
    cache_ok = True


class _SpaceVector:
    def result_processor(self, dialect, coltype):
        def process(value):
            if value is None:
                return value
            return [int(p) for p in value.split(" ")]

        return process
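
    # e.g. an oidvector value arrives from the driver as the space-separated
    # string "1259 16384" and is converted to [1259, 16384].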


REGPROC = REGCLASS  # seems an alias

# functions
_pg_cat = func.pg_catalog
quote_ident = _pg_cat.quote_ident
pg_table_is_visible = _pg_cat.pg_table_is_visible
pg_type_is_visible = _pg_cat.pg_type_is_visible
pg_get_viewdef = _pg_cat.pg_get_viewdef
pg_get_serial_sequence = _pg_cat.pg_get_serial_sequence
format_type = _pg_cat.format_type
pg_get_expr = _pg_cat.pg_get_expr
pg_get_constraintdef = _pg_cat.pg_get_constraintdef
pg_get_indexdef = _pg_cat.pg_get_indexdef
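
# Attribute access on ``func`` builds dotted SQL function names, so e.g.
# format_type(col, None) compiles to "pg_catalog.format_type(col, NULL)";
# the aliases above simply pin the pg_catalog schema qualification in one
# place for the reflection queries.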

# constants
RELKINDS_TABLE_NO_FOREIGN = ("r", "p")
RELKINDS_TABLE = RELKINDS_TABLE_NO_FOREIGN + ("f",)
RELKINDS_VIEW = ("v",)
RELKINDS_MAT_VIEW = ("m",)
RELKINDS_ALL_TABLE_LIKE = RELKINDS_TABLE + RELKINDS_VIEW + RELKINDS_MAT_VIEW

# tables
pg_catalog_meta = MetaData(schema="pg_catalog")

pg_namespace = Table(
    "pg_namespace",
    pg_catalog_meta,
    Column("oid", OID),
    Column("nspname", NAME),
    Column("nspowner", OID),
)

pg_class = Table(
    "pg_class",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("relname", NAME),
    Column("relnamespace", OID),
    Column("reltype", OID),
    Column("reloftype", OID),
    Column("relowner", OID),
    Column("relam", OID),
    Column("relfilenode", OID),
    Column("reltablespace", OID),
    Column("relpages", Integer),
    Column("reltuples", Float),
    Column("relallvisible", Integer, info={"server_version": (9, 2)}),
    Column("reltoastrelid", OID),
    Column("relhasindex", Boolean),
    Column("relisshared", Boolean),
    Column("relpersistence", CHAR, info={"server_version": (9, 1)}),
    Column("relkind", CHAR),
    Column("relnatts", SmallInteger),
    Column("relchecks", SmallInteger),
    Column("relhasrules", Boolean),
    Column("relhastriggers", Boolean),
    Column("relhassubclass", Boolean),
    Column("relrowsecurity", Boolean),
    Column("relforcerowsecurity", Boolean, info={"server_version": (9, 5)}),
    Column("relispopulated", Boolean, info={"server_version": (9, 3)}),
    Column("relreplident", CHAR, info={"server_version": (9, 4)}),
    Column("relispartition", Boolean, info={"server_version": (10,)}),
    Column("relrewrite", OID, info={"server_version": (11,)}),
    Column("reloptions", ARRAY(Text)),
)

pg_type = Table(
    "pg_type",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("typname", NAME),
    Column("typnamespace", OID),
    Column("typowner", OID),
    Column("typlen", SmallInteger),
    Column("typbyval", Boolean),
    Column("typtype", CHAR),
    Column("typcategory", CHAR),
    Column("typispreferred", Boolean),
    Column("typisdefined", Boolean),
    Column("typdelim", CHAR),
    Column("typrelid", OID),
    Column("typelem", OID),
    Column("typarray", OID),
    Column("typinput", REGPROC),
    Column("typoutput", REGPROC),
    Column("typreceive", REGPROC),
    Column("typsend", REGPROC),
    Column("typmodin", REGPROC),
    Column("typmodout", REGPROC),
    Column("typanalyze", REGPROC),
    Column("typalign", CHAR),
    Column("typstorage", CHAR),
    Column("typnotnull", Boolean),
    Column("typbasetype", OID),
    Column("typtypmod", Integer),
    Column("typndims", Integer),
    Column("typcollation", OID, info={"server_version": (9, 1)}),
    Column("typdefault", Text),
)

pg_index = Table(
    "pg_index",
    pg_catalog_meta,
    Column("indexrelid", OID),
    Column("indrelid", OID),
    Column("indnatts", SmallInteger),
    Column("indnkeyatts", SmallInteger, info={"server_version": (11,)}),
    Column("indisunique", Boolean),
    Column("indnullsnotdistinct", Boolean, info={"server_version": (15,)}),
    Column("indisprimary", Boolean),
    Column("indisexclusion", Boolean, info={"server_version": (9, 1)}),
    Column("indimmediate", Boolean),
    Column("indisclustered", Boolean),
    Column("indisvalid", Boolean),
    Column("indcheckxmin", Boolean),
    Column("indisready", Boolean),
    Column("indislive", Boolean, info={"server_version": (9, 3)}),
    Column("indisreplident", Boolean),
    Column("indkey", INT2VECTOR),
    Column("indcollation", OIDVECTOR, info={"server_version": (9, 1)}),
    Column("indclass", OIDVECTOR),
    Column("indoption", INT2VECTOR),
    Column("indexprs", PG_NODE_TREE),
    Column("indpred", PG_NODE_TREE),
)

pg_attribute = Table(
    "pg_attribute",
    pg_catalog_meta,
    Column("attrelid", OID),
    Column("attname", NAME),
    Column("atttypid", OID),
    Column("attstattarget", Integer),
    Column("attlen", SmallInteger),
    Column("attnum", SmallInteger),
    Column("attndims", Integer),
    Column("attcacheoff", Integer),
    Column("atttypmod", Integer),
    Column("attbyval", Boolean),
    Column("attstorage", CHAR),
    Column("attalign", CHAR),
    Column("attnotnull", Boolean),
    Column("atthasdef", Boolean),
    Column("atthasmissing", Boolean, info={"server_version": (11,)}),
    Column("attidentity", CHAR, info={"server_version": (10,)}),
    Column("attgenerated", CHAR, info={"server_version": (12,)}),
    Column("attisdropped", Boolean),
    Column("attislocal", Boolean),
    Column("attinhcount", Integer),
    Column("attcollation", OID, info={"server_version": (9, 1)}),
)

pg_constraint = Table(
    "pg_constraint",
    pg_catalog_meta,
    Column("oid", OID),  # 9.3
    Column("conname", NAME),
    Column("connamespace", OID),
    Column("contype", CHAR),
    Column("condeferrable", Boolean),
    Column("condeferred", Boolean),
    Column("convalidated", Boolean, info={"server_version": (9, 1)}),
    Column("conrelid", OID),
    Column("contypid", OID),
    Column("conindid", OID),
    Column("conparentid", OID, info={"server_version": (11,)}),
    Column("confrelid", OID),
    Column("confupdtype", CHAR),
    Column("confdeltype", CHAR),
    Column("confmatchtype", CHAR),
    Column("conislocal", Boolean),
    Column("coninhcount", Integer),
    Column("connoinherit", Boolean, info={"server_version": (9, 2)}),
    Column("conkey", ARRAY(SmallInteger)),
    Column("confkey", ARRAY(SmallInteger)),
)

pg_sequence = Table(
    "pg_sequence",
    pg_catalog_meta,
    Column("seqrelid", OID),
    Column("seqtypid", OID),
    Column("seqstart", BigInteger),
    Column("seqincrement", BigInteger),
    Column("seqmax", BigInteger),
    Column("seqmin", BigInteger),
    Column("seqcache", BigInteger),
    Column("seqcycle", Boolean),
    info={"server_version": (10,)},
)

pg_attrdef = Table(
    "pg_attrdef",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("adrelid", OID),
    Column("adnum", SmallInteger),
    Column("adbin", PG_NODE_TREE),
)

pg_description = Table(
    "pg_description",
    pg_catalog_meta,
    Column("objoid", OID),
    Column("classoid", OID),
    Column("objsubid", Integer),
    Column("description", Text(collation="C")),
)

pg_enum = Table(
    "pg_enum",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("enumtypid", OID),
    Column("enumsortorder", Float(), info={"server_version": (9, 1)}),
    Column("enumlabel", NAME),
)

pg_am = Table(
    "pg_am",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("amname", NAME),
    Column("amhandler", REGPROC, info={"server_version": (9, 6)}),
    Column("amtype", CHAR, info={"server_version": (9, 6)}),
)

pg_collation = Table(
    "pg_collation",
    pg_catalog_meta,
    Column("oid", OID, info={"server_version": (9, 3)}),
    Column("collname", NAME),
    Column("collnamespace", OID),
    Column("collowner", OID),
    Column("collprovider", CHAR, info={"server_version": (10,)}),
    Column("collisdeterministic", Boolean, info={"server_version": (12,)}),
    Column("collencoding", Integer),
    Column("collcollate", Text),
    Column("collctype", Text),
    Column("colliculocale", Text),
    Column("collicurules", Text, info={"server_version": (16,)}),
    Column("collversion", Text, info={"server_version": (10,)}),
)
@ -0,0 +1,175 @@
# dialects/postgresql/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

import time

from ... import exc
from ... import inspect
from ... import text
from ...testing import warn_test_suite
from ...testing.provision import create_db
from ...testing.provision import drop_all_schema_objects_post_tables
from ...testing.provision import drop_all_schema_objects_pre_tables
from ...testing.provision import drop_db
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import prepare_for_drop_tables
from ...testing.provision import set_default_schema_on_connection
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import upsert


@create_db.for_db("postgresql")
def _pg_create_db(cfg, eng, ident):
    template_db = cfg.options.postgresql_templatedb

    with eng.execution_options(isolation_level="AUTOCOMMIT").begin() as conn:
        if not template_db:
            template_db = conn.exec_driver_sql(
                "select current_database()"
            ).scalar()

        attempt = 0
        while True:
            try:
                conn.exec_driver_sql(
                    "CREATE DATABASE %s TEMPLATE %s" % (ident, template_db)
                )
            except exc.OperationalError as err:
                attempt += 1
                if attempt >= 3:
                    raise
                if "accessed by other users" in str(err):
                    log.info(
                        "Waiting to create %s, URI %r, "
                        "template DB %s is in use, sleeping for .5",
                        ident,
                        eng.url,
                        template_db,
                    )
                    time.sleep(0.5)
            except:
                raise
            else:
                break
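
    # Creation is retried up to three times on OperationalError, sleeping
    # 0.5 seconds whenever PostgreSQL reports the template database is
    # "accessed by other users"; the third failure propagates.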


@drop_db.for_db("postgresql")
def _pg_drop_db(cfg, eng, ident):
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        with conn.begin():
            conn.execute(
                text(
                    "select pg_terminate_backend(pid) from pg_stat_activity "
                    "where usename=current_user and pid != pg_backend_pid() "
                    "and datname=:dname"
                ),
                dict(dname=ident),
            )
            conn.exec_driver_sql("DROP DATABASE %s" % ident)


@temp_table_keyword_args.for_db("postgresql")
def _postgresql_temp_table_keyword_args(cfg, eng):
    return {"prefixes": ["TEMPORARY"]}


@set_default_schema_on_connection.for_db("postgresql")
def _postgresql_set_default_schema_on_connection(
    cfg, dbapi_connection, schema_name
):
    existing_autocommit = dbapi_connection.autocommit
    dbapi_connection.autocommit = True
    cursor = dbapi_connection.cursor()
    cursor.execute("SET SESSION search_path='%s'" % schema_name)
    cursor.close()
    dbapi_connection.autocommit = existing_autocommit


@drop_all_schema_objects_pre_tables.for_db("postgresql")
def drop_all_schema_objects_pre_tables(cfg, eng):
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        for xid in conn.exec_driver_sql(
            "select gid from pg_prepared_xacts"
        ).scalars():
            conn.exec_driver_sql("ROLLBACK PREPARED '%s'" % xid)


@drop_all_schema_objects_post_tables.for_db("postgresql")
def drop_all_schema_objects_post_tables(cfg, eng):
    from sqlalchemy.dialects import postgresql

    inspector = inspect(eng)
    with eng.begin() as conn:
        for enum in inspector.get_enums("*"):
            conn.execute(
                postgresql.DropEnumType(
                    postgresql.ENUM(name=enum["name"], schema=enum["schema"])
                )
            )


@prepare_for_drop_tables.for_db("postgresql")
def prepare_for_drop_tables(config, connection):
    """Ensure there are no locks on the current username/database."""

    result = connection.exec_driver_sql(
        "select pid, state, wait_event_type, query "
        # "select pg_terminate_backend(pid), state, wait_event_type "
        "from pg_stat_activity where "
        "usename=current_user "
        "and datname=current_database() and state='idle in transaction' "
        "and pid != pg_backend_pid()"
    )
    rows = result.all()  # noqa
    if rows:
        warn_test_suite(
            "PostgreSQL may not be able to DROP tables due to "
            "idle in transaction: %s"
            % ("; ".join(row._mapping["query"] for row in rows))
        )


@upsert.for_db("postgresql")
def _upsert(
    cfg, table, returning, *, set_lambda=None, sort_by_parameter_order=False
):
    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(table)

    table_pk = inspect(table).selectable

    if set_lambda:
        stmt = stmt.on_conflict_do_update(
            index_elements=table_pk.primary_key, set_=set_lambda(stmt.excluded)
        )
    else:
        stmt = stmt.on_conflict_do_nothing()

    stmt = stmt.returning(
        *returning, sort_by_parameter_order=sort_by_parameter_order
    )
    return stmt
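
    # Illustrative compile only (hypothetical table "t" with primary key
    # "id" and set_lambda=lambda excluded: {"data": excluded.data}):
    #
    #   INSERT INTO t (id, data) VALUES (...)
    #   ON CONFLICT (id) DO UPDATE SET data = excluded.data RETURNING t.id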


_extensions = [
    ("citext", (13,)),
    ("hstore", (13,)),
]


@post_configure_engine.for_db("postgresql")
def _create_citext_extension(url, engine, follower_ident):
    with engine.connect() as conn:
        for extension, min_version in _extensions:
            if conn.dialect.server_version_info >= min_version:
                conn.execute(
                    text(f"CREATE EXTENSION IF NOT EXISTS {extension}")
                )
                conn.commit()
@ -0,0 +1,783 @@
# dialects/postgresql/psycopg.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""
.. dialect:: postgresql+psycopg
    :name: psycopg (a.k.a. psycopg 3)
    :dbapi: psycopg
    :connectstring: postgresql+psycopg://user:password@host:port/dbname[?key=value&key=value...]
    :url: https://pypi.org/project/psycopg/

``psycopg`` is the package and module name for version 3 of the ``psycopg``
database driver, formerly known as ``psycopg2``. This driver is different
enough from its ``psycopg2`` predecessor that SQLAlchemy supports it
via a totally separate dialect; support for ``psycopg2`` is expected to remain
for as long as that package continues to function for modern Python versions,
and also remains the default dialect for the ``postgresql://`` dialect
series.

The SQLAlchemy ``psycopg`` dialect provides both a sync and an async
implementation under the same dialect name. The proper version is
selected depending on how the engine is created:

* calling :func:`_sa.create_engine` with ``postgresql+psycopg://...`` will
  automatically select the sync version, e.g.::

    from sqlalchemy import create_engine

    sync_engine = create_engine(
        "postgresql+psycopg://scott:tiger@localhost/test"
    )

* calling :func:`_asyncio.create_async_engine` with
  ``postgresql+psycopg://...`` will automatically select the async version,
  e.g.::

    from sqlalchemy.ext.asyncio import create_async_engine

    asyncio_engine = create_async_engine(
        "postgresql+psycopg://scott:tiger@localhost/test"
    )

The asyncio version of the dialect may also be specified explicitly using the
``psycopg_async`` suffix, as::

    from sqlalchemy.ext.asyncio import create_async_engine

    asyncio_engine = create_async_engine(
        "postgresql+psycopg_async://scott:tiger@localhost/test"
    )

.. seealso::

    :ref:`postgresql_psycopg2` - The SQLAlchemy ``psycopg``
    dialect shares most of its behavior with the ``psycopg2`` dialect.
    Further documentation is available there.

Using a different Cursor class
------------------------------

One of the differences between ``psycopg`` and the older ``psycopg2``
is how bound parameters are handled: ``psycopg2`` would bind them
client side, while ``psycopg`` by default will bind them server side.

It's possible to configure ``psycopg`` to do client side binding by
specifying the ``cursor_factory`` to be ``ClientCursor`` when creating
the engine::

    from psycopg import ClientCursor

    client_side_engine = create_engine(
        "postgresql+psycopg://...",
        connect_args={"cursor_factory": ClientCursor},
    )

Similarly when using an async engine the ``AsyncClientCursor`` can be
specified::

    from psycopg import AsyncClientCursor

    client_side_engine = create_async_engine(
        "postgresql+psycopg://...",
        connect_args={"cursor_factory": AsyncClientCursor},
    )

.. seealso::

    `Client-side-binding cursors <https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#client-side-binding-cursors>`_

"""  # noqa
from __future__ import annotations

from collections import deque
import logging
import re
from typing import cast
from typing import TYPE_CHECKING

from . import ranges
from ._psycopg_common import _PGDialect_common_psycopg
from ._psycopg_common import _PGExecutionContext_common_psycopg
from .base import INTERVAL
from .base import PGCompiler
from .base import PGIdentifierPreparer
from .base import REGCONFIG
from .json import JSON
from .json import JSONB
from .json import JSONPathType
from .types import CITEXT
from ... import pool
from ... import util
from ...engine import AdaptedConnection
from ...sql import sqltypes
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only

if TYPE_CHECKING:
    from typing import Iterable

    from psycopg import AsyncConnection

logger = logging.getLogger("sqlalchemy.dialects.postgresql")


class _PGString(sqltypes.String):
    render_bind_cast = True


class _PGREGCONFIG(REGCONFIG):
    render_bind_cast = True


class _PGJSON(JSON):
    def bind_processor(self, dialect):
        return self._make_bind_processor(None, dialect._psycopg_Json)

    def result_processor(self, dialect, coltype):
        return None


class _PGJSONB(JSONB):
    def bind_processor(self, dialect):
        return self._make_bind_processor(None, dialect._psycopg_Jsonb)

    def result_processor(self, dialect, coltype):
        return None


class _PGJSONIntIndexType(sqltypes.JSON.JSONIntIndexType):
    __visit_name__ = "json_int_index"

    render_bind_cast = True


class _PGJSONStrIndexType(sqltypes.JSON.JSONStrIndexType):
    __visit_name__ = "json_str_index"

    render_bind_cast = True


class _PGJSONPathType(JSONPathType):
    pass


class _PGInterval(INTERVAL):
    render_bind_cast = True


class _PGTimeStamp(sqltypes.DateTime):
    render_bind_cast = True


class _PGDate(sqltypes.Date):
    render_bind_cast = True


class _PGTime(sqltypes.Time):
    render_bind_cast = True


class _PGInteger(sqltypes.Integer):
    render_bind_cast = True


class _PGSmallInteger(sqltypes.SmallInteger):
    render_bind_cast = True


class _PGNullType(sqltypes.NullType):
    render_bind_cast = True


class _PGBigInteger(sqltypes.BigInteger):
    render_bind_cast = True


class _PGBoolean(sqltypes.Boolean):
    render_bind_cast = True


class _PsycopgRange(ranges.AbstractSingleRangeImpl):
    def bind_processor(self, dialect):
        psycopg_Range = cast(PGDialect_psycopg, dialect)._psycopg_Range

        def to_range(value):
            if isinstance(value, ranges.Range):
                value = psycopg_Range(
                    value.lower, value.upper, value.bounds, value.empty
                )
            return value

        return to_range

    def result_processor(self, dialect, coltype):
        def to_range(value):
            if value is not None:
                value = ranges.Range(
                    value._lower,
                    value._upper,
                    bounds=value._bounds if value._bounds else "[)",
                    empty=not value._bounds,
                )
            return value

        return to_range


class _PsycopgMultiRange(ranges.AbstractMultiRangeImpl):
    def bind_processor(self, dialect):
        psycopg_Range = cast(PGDialect_psycopg, dialect)._psycopg_Range
        psycopg_Multirange = cast(
            PGDialect_psycopg, dialect
        )._psycopg_Multirange

        NoneType = type(None)

        def to_range(value):
            if isinstance(value, (str, NoneType, psycopg_Multirange)):
                return value

            return psycopg_Multirange(
                [
                    psycopg_Range(
                        element.lower,
                        element.upper,
                        element.bounds,
                        element.empty,
                    )
                    for element in cast("Iterable[ranges.Range]", value)
                ]
            )

        return to_range

    def result_processor(self, dialect, coltype):
        def to_range(value):
            if value is None:
                return None
            else:
                return ranges.MultiRange(
                    ranges.Range(
                        elem._lower,
                        elem._upper,
                        bounds=elem._bounds if elem._bounds else "[)",
                        empty=not elem._bounds,
                    )
                    for elem in value
                )

        return to_range
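
    # The processors above assume psycopg Range objects carry their state in
    # _lower/_upper/_bounds, with a falsy _bounds marking an empty range;
    # hence bounds falls back to "[)" and empty=not _bounds on the way out.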


class PGExecutionContext_psycopg(_PGExecutionContext_common_psycopg):
    pass


class PGCompiler_psycopg(PGCompiler):
    pass


class PGIdentifierPreparer_psycopg(PGIdentifierPreparer):
    pass


def _log_notices(diagnostic):
    logger.info("%s: %s", diagnostic.severity, diagnostic.message_primary)


class PGDialect_psycopg(_PGDialect_common_psycopg):
    driver = "psycopg"

    supports_statement_cache = True
    supports_server_side_cursors = True
    default_paramstyle = "pyformat"
    supports_sane_multi_rowcount = True

    execution_ctx_cls = PGExecutionContext_psycopg
    statement_compiler = PGCompiler_psycopg
    preparer = PGIdentifierPreparer_psycopg
    psycopg_version = (0, 0)

    _has_native_hstore = True
    _psycopg_adapters_map = None

    colspecs = util.update_copy(
        _PGDialect_common_psycopg.colspecs,
        {
            sqltypes.String: _PGString,
            REGCONFIG: _PGREGCONFIG,
            JSON: _PGJSON,
            CITEXT: CITEXT,
            sqltypes.JSON: _PGJSON,
            JSONB: _PGJSONB,
            sqltypes.JSON.JSONPathType: _PGJSONPathType,
            sqltypes.JSON.JSONIntIndexType: _PGJSONIntIndexType,
            sqltypes.JSON.JSONStrIndexType: _PGJSONStrIndexType,
            sqltypes.Interval: _PGInterval,
            INTERVAL: _PGInterval,
            sqltypes.Date: _PGDate,
            sqltypes.DateTime: _PGTimeStamp,
            sqltypes.Time: _PGTime,
            sqltypes.Integer: _PGInteger,
            sqltypes.SmallInteger: _PGSmallInteger,
            sqltypes.BigInteger: _PGBigInteger,
            ranges.AbstractSingleRange: _PsycopgRange,
            ranges.AbstractMultiRange: _PsycopgMultiRange,
        },
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        if self.dbapi:
            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
            if m:
                self.psycopg_version = tuple(
                    int(x) for x in m.group(1, 2, 3) if x is not None
                )

            if self.psycopg_version < (3, 0, 2):
                raise ImportError(
                    "psycopg version 3.0.2 or higher is required."
                )

            from psycopg.adapt import AdaptersMap

            self._psycopg_adapters_map = adapters_map = AdaptersMap(
                self.dbapi.adapters
            )

            if self._native_inet_types is False:
                import psycopg.types.string

                adapters_map.register_loader(
                    "inet", psycopg.types.string.TextLoader
                )
                adapters_map.register_loader(
                    "cidr", psycopg.types.string.TextLoader
                )

            if self._json_deserializer:
                from psycopg.types.json import set_json_loads

                set_json_loads(self._json_deserializer, adapters_map)

            if self._json_serializer:
                from psycopg.types.json import set_json_dumps

                set_json_dumps(self._json_serializer, adapters_map)

    def create_connect_args(self, url):
        # see https://github.com/psycopg/psycopg/issues/83
        cargs, cparams = super().create_connect_args(url)

        if self._psycopg_adapters_map:
            cparams["context"] = self._psycopg_adapters_map
        if self.client_encoding is not None:
            cparams["client_encoding"] = self.client_encoding
        return cargs, cparams

    def _type_info_fetch(self, connection, name):
        from psycopg.types import TypeInfo

        return TypeInfo.fetch(connection.connection.driver_connection, name)

    def initialize(self, connection):
        super().initialize(connection)

        # PGDialect.initialize() checks server version for <= 8.2 and sets
        # this flag to False if so
        if not self.insert_returning:
            self.insert_executemany_returning = False

        # HSTORE can't be registered until we have a connection so that
        # we can look up its OID, so we set up this adapter in
        # initialize()
        if self.use_native_hstore:
            info = self._type_info_fetch(connection, "hstore")
            self._has_native_hstore = info is not None
            if self._has_native_hstore:
                from psycopg.types.hstore import register_hstore

                # register the adapter for connections made subsequent to
                # this one
                assert self._psycopg_adapters_map
                register_hstore(info, self._psycopg_adapters_map)

                # register the adapter for this connection
                assert connection.connection
                register_hstore(info, connection.connection.driver_connection)

    @classmethod
    def import_dbapi(cls):
        import psycopg

        return psycopg

    @classmethod
    def get_async_dialect_cls(cls, url):
        return PGDialectAsync_psycopg

    @util.memoized_property
    def _isolation_lookup(self):
        return {
            "READ COMMITTED": self.dbapi.IsolationLevel.READ_COMMITTED,
            "READ UNCOMMITTED": self.dbapi.IsolationLevel.READ_UNCOMMITTED,
            "REPEATABLE READ": self.dbapi.IsolationLevel.REPEATABLE_READ,
            "SERIALIZABLE": self.dbapi.IsolationLevel.SERIALIZABLE,
        }

    @util.memoized_property
    def _psycopg_Json(self):
        from psycopg.types import json

        return json.Json

    @util.memoized_property
    def _psycopg_Jsonb(self):
        from psycopg.types import json

        return json.Jsonb

    @util.memoized_property
    def _psycopg_TransactionStatus(self):
        from psycopg.pq import TransactionStatus

        return TransactionStatus

    @util.memoized_property
    def _psycopg_Range(self):
        from psycopg.types.range import Range

        return Range

    @util.memoized_property
    def _psycopg_Multirange(self):
        from psycopg.types.multirange import Multirange

        return Multirange

    def _do_isolation_level(self, connection, autocommit, isolation_level):
        connection.autocommit = autocommit
        connection.isolation_level = isolation_level

    def get_isolation_level(self, dbapi_connection):
        status_before = dbapi_connection.info.transaction_status
        value = super().get_isolation_level(dbapi_connection)

        # don't rely on psycopg providing enum symbols, compare with
        # eq/ne
        if status_before == self._psycopg_TransactionStatus.IDLE:
            dbapi_connection.rollback()
        return value

    def set_isolation_level(self, dbapi_connection, level):
        if level == "AUTOCOMMIT":
            self._do_isolation_level(
                dbapi_connection, autocommit=True, isolation_level=None
            )
        else:
            self._do_isolation_level(
                dbapi_connection,
                autocommit=False,
                isolation_level=self._isolation_lookup[level],
            )

    def set_readonly(self, connection, value):
        connection.read_only = value

    def get_readonly(self, connection):
        return connection.read_only

    def on_connect(self):
        def notices(conn):
            conn.add_notice_handler(_log_notices)

        fns = [notices]

        if self.isolation_level is not None:

            def on_connect(conn):
                self.set_isolation_level(conn, self.isolation_level)

            fns.append(on_connect)

        # fns always has the notices function
        def on_connect(conn):
            for fn in fns:
                fn(conn)

        return on_connect

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.Error) and connection is not None:
            if connection.closed or connection.broken:
                return True
        return False

    def _do_prepared_twophase(self, connection, command, recover=False):
        dbapi_conn = connection.connection.dbapi_connection
        if (
            recover
            # don't rely on psycopg providing enum symbols, compare with
            # eq/ne
            or dbapi_conn.info.transaction_status
            != self._psycopg_TransactionStatus.IDLE
        ):
            dbapi_conn.rollback()
        before_autocommit = dbapi_conn.autocommit
        try:
            if not before_autocommit:
                self._do_autocommit(dbapi_conn, True)
            dbapi_conn.execute(command)
        finally:
            if not before_autocommit:
                self._do_autocommit(dbapi_conn, before_autocommit)

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if is_prepared:
            self._do_prepared_twophase(
                connection, f"ROLLBACK PREPARED '{xid}'", recover=recover
            )
        else:
            self.do_rollback(connection.connection)

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if is_prepared:
            self._do_prepared_twophase(
                connection, f"COMMIT PREPARED '{xid}'", recover=recover
            )
        else:
            self.do_commit(connection.connection)

    @util.memoized_property
    def _dialect_specific_select_one(self):
        return ";"


class AsyncAdapt_psycopg_cursor:
    __slots__ = ("_cursor", "await_", "_rows")

    _psycopg_ExecStatus = None

    def __init__(self, cursor, await_) -> None:
        self._cursor = cursor
        self.await_ = await_
        self._rows = deque()

    def __getattr__(self, name):
        return getattr(self._cursor, name)

    @property
    def arraysize(self):
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value):
        self._cursor.arraysize = value

    def close(self):
        self._rows.clear()
        # a normal cursor just calls _close() in a non-sync way
        self._cursor._close()

    def execute(self, query, params=None, **kw):
        result = self.await_(self._cursor.execute(query, params, **kw))
        # sqlalchemy result is not async, so need to pull all rows here
        res = self._cursor.pgresult

        # don't rely on psycopg providing enum symbols, compare with
        # eq/ne
        if res and res.status == self._psycopg_ExecStatus.TUPLES_OK:
            rows = self.await_(self._cursor.fetchall())
            self._rows = deque(rows)
        return result

    def executemany(self, query, params_seq):
        return self.await_(self._cursor.executemany(query, params_seq))

    def __iter__(self):
        while self._rows:
            yield self._rows.popleft()

    def fetchone(self):
        if self._rows:
            return self._rows.popleft()
        else:
            return None

    def fetchmany(self, size=None):
        if size is None:
            size = self._cursor.arraysize

        rr = self._rows
        return [rr.popleft() for _ in range(min(size, len(rr)))]

    def fetchall(self):
        retval = list(self._rows)
        self._rows.clear()
        return retval
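
    # Design note: execute() has already pulled every row into self._rows,
    # so the fetch methods above only drain the local deque; no further
    # awaiting is needed once the statement has run.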


class AsyncAdapt_psycopg_ss_cursor(AsyncAdapt_psycopg_cursor):
    def execute(self, query, params=None, **kw):
        self.await_(self._cursor.execute(query, params, **kw))
        return self

    def close(self):
        self.await_(self._cursor.close())

    def fetchone(self):
        return self.await_(self._cursor.fetchone())

    def fetchmany(self, size=0):
        return self.await_(self._cursor.fetchmany(size))

    def fetchall(self):
        return self.await_(self._cursor.fetchall())

    def __iter__(self):
        iterator = self._cursor.__aiter__()
        while True:
            try:
                yield self.await_(iterator.__anext__())
            except StopAsyncIteration:
                break


class AsyncAdapt_psycopg_connection(AdaptedConnection):
    _connection: AsyncConnection
    __slots__ = ()
    await_ = staticmethod(await_only)

    def __init__(self, connection) -> None:
        self._connection = connection

    def __getattr__(self, name):
        return getattr(self._connection, name)

    def execute(self, query, params=None, **kw):
        cursor = self.await_(self._connection.execute(query, params, **kw))
        return AsyncAdapt_psycopg_cursor(cursor, self.await_)

    def cursor(self, *args, **kw):
        cursor = self._connection.cursor(*args, **kw)
        if hasattr(cursor, "name"):
            return AsyncAdapt_psycopg_ss_cursor(cursor, self.await_)
        else:
            return AsyncAdapt_psycopg_cursor(cursor, self.await_)

    def commit(self):
        self.await_(self._connection.commit())

    def rollback(self):
        self.await_(self._connection.rollback())

    def close(self):
        self.await_(self._connection.close())

    @property
    def autocommit(self):
        return self._connection.autocommit

    @autocommit.setter
    def autocommit(self, value):
        self.set_autocommit(value)

    def set_autocommit(self, value):
        self.await_(self._connection.set_autocommit(value))

    def set_isolation_level(self, value):
        self.await_(self._connection.set_isolation_level(value))

    def set_read_only(self, value):
        self.await_(self._connection.set_read_only(value))

    def set_deferrable(self, value):
        self.await_(self._connection.set_deferrable(value))


class AsyncAdaptFallback_psycopg_connection(AsyncAdapt_psycopg_connection):
    __slots__ = ()
    await_ = staticmethod(await_fallback)


class PsycopgAdaptDBAPI:
    def __init__(self, psycopg) -> None:
        self.psycopg = psycopg

        for k, v in self.psycopg.__dict__.items():
            if k != "connect":
                self.__dict__[k] = v

    def connect(self, *arg, **kw):
        async_fallback = kw.pop("async_fallback", False)
        creator_fn = kw.pop(
            "async_creator_fn", self.psycopg.AsyncConnection.connect
        )
        if util.asbool(async_fallback):
            return AsyncAdaptFallback_psycopg_connection(
                await_fallback(creator_fn(*arg, **kw))
            )
        else:
            return AsyncAdapt_psycopg_connection(
                await_only(creator_fn(*arg, **kw))
            )


class PGDialectAsync_psycopg(PGDialect_psycopg):
    is_async = True
    supports_statement_cache = True

    @classmethod
    def import_dbapi(cls):
        import psycopg
        from psycopg.pq import ExecStatus

        AsyncAdapt_psycopg_cursor._psycopg_ExecStatus = ExecStatus

        return PsycopgAdaptDBAPI(psycopg)

    @classmethod
    def get_pool_class(cls, url):
        async_fallback = url.query.get("async_fallback", False)

        if util.asbool(async_fallback):
            return pool.FallbackAsyncAdaptedQueuePool
        else:
            return pool.AsyncAdaptedQueuePool

    def _type_info_fetch(self, connection, name):
        from psycopg.types import TypeInfo

        adapted = connection.connection
        return adapted.await_(TypeInfo.fetch(adapted.driver_connection, name))

    def _do_isolation_level(self, connection, autocommit, isolation_level):
        connection.set_autocommit(autocommit)
        connection.set_isolation_level(isolation_level)

    def _do_autocommit(self, connection, value):
        connection.set_autocommit(value)

    def set_readonly(self, connection, value):
        connection.set_read_only(value)

    def set_deferrable(self, connection, value):
        connection.set_deferrable(value)

    def get_driver_connection(self, connection):
        return connection._connection


dialect = PGDialect_psycopg
dialect_async = PGDialectAsync_psycopg
@ -0,0 +1,892 @@
|
||||
# dialects/postgresql/psycopg2.py
|
||||
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
# mypy: ignore-errors
|
||||
|
||||
r"""
|
||||
.. dialect:: postgresql+psycopg2
|
||||
:name: psycopg2
|
||||
:dbapi: psycopg2
|
||||
:connectstring: postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]
|
||||
:url: https://pypi.org/project/psycopg2/
|
||||
|
||||
.. _psycopg2_toplevel:
|
||||
|
||||
psycopg2 Connect Arguments
|
||||
--------------------------
|
||||
|
||||
Keyword arguments that are specific to the SQLAlchemy psycopg2 dialect
|
||||
may be passed to :func:`_sa.create_engine()`, and include the following:
|
||||
|
||||
|
||||
* ``isolation_level``: This option, available for all PostgreSQL dialects,
|
||||
includes the ``AUTOCOMMIT`` isolation level when using the psycopg2
|
||||
dialect. This option sets the **default** isolation level for the
|
||||
connection that is set immediately upon connection to the database before
|
||||
the connection is pooled. This option is generally superseded by the more
|
||||
modern :paramref:`_engine.Connection.execution_options.isolation_level`
|
||||
execution option, detailed at :ref:`dbapi_autocommit`.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ref:`psycopg2_isolation_level`
|
||||
|
||||
:ref:`dbapi_autocommit`
|
||||
|
||||
|
||||
* ``client_encoding``: sets the client encoding in a libpq-agnostic way,
|
||||
using psycopg2's ``set_client_encoding()`` method.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ref:`psycopg2_unicode`
|
||||
|
||||
|
||||
* ``executemany_mode``, ``executemany_batch_page_size``,
|
||||
``executemany_values_page_size``: Allows use of psycopg2
|
||||
extensions for optimizing "executemany"-style queries. See the referenced
|
||||
section below for details.
|
||||
|
||||
.. seealso::
|
||||
|
||||
:ref:`psycopg2_executemany_mode`
|
||||
|
||||
.. tip::
|
||||
|
||||
The above keyword arguments are **dialect** keyword arguments, meaning
|
||||
that they are passed as explicit keyword arguments to :func:`_sa.create_engine()`::
|
||||
|
||||
engine = create_engine(
|
||||
"postgresql+psycopg2://scott:tiger@localhost/test",
|
||||
isolation_level="SERIALIZABLE",
|
||||
)
|
||||
|
||||
These should not be confused with **DBAPI** connect arguments, which
|
||||
are passed as part of the :paramref:`_sa.create_engine.connect_args`
|
||||
dictionary and/or are passed in the URL query string, as detailed in
|
||||
the section :ref:`custom_dbapi_args`.
|
||||
|
||||
.. _psycopg2_ssl:
|
||||
|
||||
SSL Connections
|
||||
---------------
|
||||
|
||||
The psycopg2 module has a connection argument named ``sslmode`` for
|
||||
controlling its behavior regarding secure (SSL) connections. The default is
|
||||
``sslmode=prefer``; it will attempt an SSL connection and if that fails it
|
||||
will fall back to an unencrypted connection. ``sslmode=require`` may be used
|
||||
to ensure that only secure connections are established. Consult the
|
||||
psycopg2 / libpq documentation for further options that are available.
|
||||
|
||||
Note that ``sslmode`` is specific to psycopg2 so it is included in the
|
||||
connection URI::
|
||||
|
||||
engine = sa.create_engine(
|
||||
"postgresql+psycopg2://scott:tiger@192.168.0.199:5432/test?sslmode=require"
|
||||
)
|
||||
|
||||
Unix Domain Connections
|
||||
------------------------
|
||||
|
||||
psycopg2 supports connecting via Unix domain connections. When the ``host``
|
||||
portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
|
||||
which specifies Unix-domain communication rather than TCP/IP communication::
|
||||
|
||||
create_engine("postgresql+psycopg2://user:password@/dbname")
|
||||
|
||||
By default, the socket file used is to connect to a Unix-domain socket
|
||||
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
|
||||
was built. This value can be overridden by passing a pathname to psycopg2,
|
||||
using ``host`` as an additional keyword argument::
|
||||
|
||||
create_engine(
|
||||
"postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql"
|
||||
)
|
||||
|
||||
.. warning:: The format accepted here allows for a hostname in the main URL
|
||||
in addition to the "host" query string argument. **When using this URL
|
||||
format, the initial host is silently ignored**. That is, this URL::
|
||||
|
||||
engine = create_engine(
|
||||
"postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2"
|
||||
)
|
||||
|
||||
Above, the hostname ``myhost1`` is **silently ignored and discarded.** The
|
||||
host which is connected is the ``myhost2`` host.
|
||||
|
||||
This is to maintain some degree of compatibility with PostgreSQL's own URL
|
||||
format which has been tested to behave the same way and for which tools like
|
||||
PifPaf hardcode two hostnames.
|
||||
|
||||
.. seealso::
|
||||
|
||||
`PQconnectdbParams \
|
||||
<https://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-PQCONNECTDBPARAMS>`_
|
||||
|
||||

.. _psycopg2_multi_host:

Specifying multiple fallback hosts
-----------------------------------

psycopg2 supports multiple connection points in the connection string.
When the ``host`` parameter is used multiple times in the query section of
the URL, SQLAlchemy combines the host and port information provided into a
single string used to make the connections.  Tokens may consist of
``host:port`` or just ``host``; in the latter case, the default port
is selected by libpq.  In the example below, three host connections
are specified, for ``HostA:PortA``, ``HostB`` connecting to the default
port, and ``HostC:PortC``::

    create_engine(
        "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC"
    )

As an alternative, the libpq query string format also may be used; this
specifies ``host`` and ``port`` as single query string arguments with
comma-separated lists - the default port can be chosen by indicating an
empty value in the comma separated list::

    create_engine(
        "postgresql+psycopg2://user:password@/dbname?host=HostA,HostB,HostC&port=PortA,,PortC"
    )

With either URL style, connections to each host are attempted based on a
configurable strategy, which may be configured using the libpq
``target_session_attrs`` parameter.  Per libpq, this defaults to ``any``,
which indicates a connection to each host is attempted until a connection
is successful.  Other strategies include ``primary``, ``prefer-standby``,
etc.  The complete list is documented by PostgreSQL at
`libpq connection strings <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING>`_.

For example, to indicate three hosts using the ``primary`` strategy::

    create_engine(
        "postgresql+psycopg2://user:password@/dbname?host=HostA:PortA&host=HostB&host=HostC:PortC&target_session_attrs=primary"
    )

.. versionchanged:: 1.4.40  Port specification in psycopg2 multiple host
   format is repaired; previously, ports were not correctly interpreted in
   this context.  libpq comma-separated format is also now supported.

.. versionadded:: 1.3.20  Support for multiple hosts in PostgreSQL
   connection string.

.. seealso::

    `libpq connection strings <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING>`_ - please refer
    to this section in the libpq documentation for complete background on
    multiple host support.

Empty DSN Connections / Environment Variable Connections
---------------------------------------------------------

The psycopg2 DBAPI can connect to PostgreSQL by passing an empty DSN to the
libpq client library, which by default indicates to connect to a localhost
PostgreSQL database that is open for "trust" connections.  This behavior
can be further tailored using a particular set of environment variables
which are prefixed with ``PG...`` (e.g. ``PGHOST``, ``PGPORT``), which are
consumed by ``libpq`` to take the place of any or all elements of the
connection string.

For this form, the URL can be passed without any elements other than the
initial scheme::

    engine = create_engine("postgresql+psycopg2://")

In the above form, a blank "dsn" string is passed to the
``psycopg2.connect()`` function which in turn represents an empty DSN
passed to libpq.
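
As a minimal sketch, connection information may then be supplied entirely
through the environment (the values here are illustrative)::

    import os

    os.environ["PGHOST"] = "localhost"
    os.environ["PGDATABASE"] = "test"

    engine = create_engine("postgresql+psycopg2://")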

.. versionadded:: 1.3.2  Support for parameter-less connections with
   psycopg2.

.. seealso::

    `Environment Variables \
    <https://www.postgresql.org/docs/current/libpq-envars.html>`_ -
    PostgreSQL documentation on how to use ``PG...``
    environment variables for connections.

.. _psycopg2_execution_options:

Per-Statement/Connection Execution Options
-------------------------------------------

The following DBAPI-specific options are respected when used with
:meth:`_engine.Connection.execution_options`,
:meth:`.Executable.execution_options`,
:meth:`_query.Query.execution_options`,
in addition to those not specific to DBAPIs:

* ``isolation_level`` - Set the transaction isolation level for the lifespan
  of a :class:`_engine.Connection` (can only be set on a connection, not a
  statement or query).  See :ref:`psycopg2_isolation_level`.

* ``stream_results`` - Enable or disable usage of psycopg2 server side
  cursors - this feature makes use of "named" cursors in combination with
  special result handling methods so that result rows are not fully
  buffered.  Defaults to False, meaning cursors are buffered by default.
  See the example following this list.

* ``max_row_buffer`` - when using ``stream_results``, an integer value that
  specifies the maximum number of rows to buffer at a time.  This is
  interpreted by the :class:`.BufferedRowCursorResult`, and if omitted the
  buffer will grow to ultimately store 1000 rows at a time.

  .. versionchanged:: 1.4  The ``max_row_buffer`` size can now be greater
     than 1000, and the buffer will grow to that size.
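
As a minimal sketch (assuming a table named ``some_table``), server side
cursors and the row buffer size may be set per connection as follows::

    from sqlalchemy import text

    with engine.connect() as conn:
        result = conn.execution_options(
            stream_results=True, max_row_buffer=500
        ).execute(text("SELECT * FROM some_table"))
        for row in result:
            ...  # rows are fetched from the server in buffered batches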

.. _psycopg2_batch_mode:

.. _psycopg2_executemany_mode:

Psycopg2 Fast Execution Helpers
-------------------------------

Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<https://www.psycopg.org/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by at least
an order of magnitude.

SQLAlchemy implements a native form of the "insert many values"
handler that will rewrite a single-row INSERT statement to accommodate
many values at once within an extended VALUES clause; this handler is
equivalent to psycopg2's ``execute_values()`` handler; an overview of this
feature and its configuration is at :ref:`engine_insertmanyvalues`.

.. versionadded:: 2.0  Replaced psycopg2's ``execute_values()`` fast
   execution helper with a native SQLAlchemy mechanism known as
   :ref:`insertmanyvalues <engine_insertmanyvalues>`.
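
For example, under this feature a single-row INSERT executed with a list of
parameter dictionaries is rewritten into batched multi-row INSERT
statements, including when RETURNING is present (a minimal sketch assuming
a table ``t`` with columns ``id`` and ``x``)::

    from sqlalchemy import insert

    with engine.begin() as conn:
        result = conn.execute(
            insert(t).returning(t.c.id),
            [{"x": 1}, {"x": 2}, {"x": 3}],
        )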

The psycopg2 dialect retains the ability to use the psycopg2-specific
``execute_batch()`` feature, although it is not expected that this is a
widely used feature.  The use of this extension may be enabled using the
``executemany_mode`` flag which may be passed to :func:`_sa.create_engine`::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@host/dbname",
        executemany_mode="values_plus_batch",
    )

Possible options for ``executemany_mode`` include:

* ``values_only`` - this is the default value.  SQLAlchemy's native
  :ref:`insertmanyvalues <engine_insertmanyvalues>` handler is used for
  qualifying INSERT statements, assuming
  :paramref:`_sa.create_engine.use_insertmanyvalues` is left at
  its default value of ``True``.  This handler rewrites simple
  INSERT statements to include multiple VALUES clauses so that many
  parameter sets can be inserted with one statement.

* ``values_plus_batch`` - SQLAlchemy's native
  :ref:`insertmanyvalues <engine_insertmanyvalues>` handler is used for
  qualifying INSERT statements, assuming
  :paramref:`_sa.create_engine.use_insertmanyvalues` is left at its default
  value of ``True``.  Then, psycopg2's ``execute_batch()`` handler is used
  for qualifying UPDATE and DELETE statements when executed with multiple
  parameter sets.  When using this mode, the
  :attr:`_engine.CursorResult.rowcount` attribute will not contain a value
  for executemany-style executions against UPDATE and DELETE statements.

.. versionchanged:: 2.0  Removed the ``'batch'`` and ``'None'`` options
   from psycopg2 ``executemany_mode``.  Control over batching for INSERT
   statements is now configured via the
   :paramref:`_sa.create_engine.use_insertmanyvalues` engine-level parameter.

The term "qualifying statements" refers to the statement being executed
being a Core :func:`_expression.insert`, :func:`_expression.update`
or :func:`_expression.delete` construct, and **not** a plain textual SQL
string or one constructed using :func:`_expression.text`.  It also may
**not** be a special "extension" statement such as an "ON CONFLICT"
"upsert" statement.  When using the ORM, all insert/update/delete
statements used by the ORM flush process are qualifying.
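
As a brief illustration (a sketch assuming a table ``t`` with a column
``x``), the first execution below uses a qualifying statement while the
second, using plain textual SQL, does not::

    from sqlalchemy import insert, text

    with engine.begin() as conn:
        # qualifying: a Core insert() construct executed with a list of
        # parameter sets is rewritten into a multi-row VALUES clause
        conn.execute(insert(t), [{"x": 1}, {"x": 2}])

        # not qualifying: textual SQL is passed to the DBAPI as-is
        conn.execute(
            text("INSERT INTO t (x) VALUES (:x)"), [{"x": 3}, {"x": 4}]
        )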
The "page size" for the psycopg2 "batch" strategy can be affected
|
||||
by using the ``executemany_batch_page_size`` parameter, which defaults to
|
||||
100.
|
||||
|
||||
For the "insertmanyvalues" feature, the page size can be controlled using the
|
||||
:paramref:`_sa.create_engine.insertmanyvalues_page_size` parameter,
|
||||
which defaults to 1000. An example of modifying both parameters
|
||||
is below::
|
||||
|
||||
engine = create_engine(
|
||||
"postgresql+psycopg2://scott:tiger@host/dbname",
|
||||
executemany_mode="values_plus_batch",
|
||||
insertmanyvalues_page_size=5000,
|
||||
executemany_batch_page_size=500,
|
||||
)
|
||||
|
||||

.. seealso::

    :ref:`engine_insertmanyvalues` - background on "insertmanyvalues"

    :ref:`tutorial_multiple_parameters` - General information on using the
    :class:`_engine.Connection`
    object to execute statements in such a way as to make
    use of the DBAPI ``.executemany()`` method.

.. _psycopg2_unicode:

Unicode with Psycopg2
----------------------

The psycopg2 DBAPI driver supports Unicode data transparently.

The client character encoding can be controlled for the psycopg2 dialect
in the following ways:

* For PostgreSQL 9.1 and above, the ``client_encoding`` parameter may be
  passed in the database URL; this parameter is consumed by the underlying
  ``libpq`` PostgreSQL client library::

      engine = create_engine(
          "postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8"
      )

  Alternatively, the above ``client_encoding`` value may be passed using
  :paramref:`_sa.create_engine.connect_args` for programmatic establishment
  with ``libpq``::

      engine = create_engine(
          "postgresql+psycopg2://user:pass@host/dbname",
          connect_args={"client_encoding": "utf8"},
      )

* For all PostgreSQL versions, psycopg2 supports a client-side encoding
  value that will be passed to database connections when they are first
  established.  The SQLAlchemy psycopg2 dialect supports this using the
  ``client_encoding`` parameter passed to :func:`_sa.create_engine`::

      engine = create_engine(
          "postgresql+psycopg2://user:pass@host/dbname", client_encoding="utf8"
      )

  .. tip::  The above ``client_encoding`` parameter admittedly is very
     similar in appearance to usage of the parameter within the
     :paramref:`_sa.create_engine.connect_args` dictionary; the difference
     above is that the parameter is consumed by psycopg2 and is
     passed to the database connection using ``SET client_encoding TO
     'utf8'``; in the previously mentioned style, the parameter is instead
     passed through psycopg2 and consumed by the ``libpq`` library.

* A common way to set up client encoding with PostgreSQL databases is to
  ensure it is configured within the server-side postgresql.conf file;
  this is the recommended way to set encoding for a server that is
  consistently of one encoding in all databases::

      # postgresql.conf file

      # client_encoding = sql_ascii  # actually, defaults to database
                                     # encoding
      client_encoding = utf8

Transactions
------------

The psycopg2 dialect fully supports SAVEPOINT and two-phase commit
operations.

.. _psycopg2_isolation_level:

Psycopg2 Transaction Isolation Level
-------------------------------------

As discussed in :ref:`postgresql_isolation_level`,
all PostgreSQL dialects support setting of transaction isolation level
both via the ``isolation_level`` parameter passed to
:func:`_sa.create_engine`, as well as the ``isolation_level`` argument
used by :meth:`_engine.Connection.execution_options`.  When using the
psycopg2 dialect, these options make use of psycopg2's
``set_isolation_level()`` connection method, rather than emitting a
PostgreSQL directive; this is because psycopg2's API-level setting is
always emitted at the start of each transaction in any case.

The psycopg2 dialect supports these constants for isolation level:

* ``READ COMMITTED``
* ``READ UNCOMMITTED``
* ``REPEATABLE READ``
* ``SERIALIZABLE``
* ``AUTOCOMMIT``
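
For example, to apply an isolation level to a single connection (a minimal
sketch)::

    with engine.connect() as conn:
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")
        ...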

.. seealso::

    :ref:`postgresql_isolation_level`

    :ref:`pg8000_isolation_level`

NOTICE logging
---------------

The psycopg2 dialect will log PostgreSQL NOTICE messages
via the ``sqlalchemy.dialects.postgresql`` logger.  When this logger
is set to the ``logging.INFO`` level, notice messages will be logged::

    import logging

    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)

Above, it is assumed that logging is configured externally.  If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::

    import logging

    logging.basicConfig()  # log messages to the console
    logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)

.. seealso::

    `Logging HOWTO <https://docs.python.org/3/howto/logging.html>`_ - on the python.org website

.. _psycopg2_hstore:

HSTORE type
------------

The ``psycopg2`` DBAPI includes an extension to natively handle marshalling
of the HSTORE type.  The SQLAlchemy psycopg2 dialect will enable this
extension by default when psycopg2 version 2.4 or greater is used, and
it is detected that the target database has the HSTORE type set up for use.
In other words, when the dialect makes the first
connection, a sequence like the following is performed:

1. Request the available HSTORE oids using
   ``psycopg2.extras.HstoreAdapter.get_oids()``.
   If this function returns a list of HSTORE identifiers, we then determine
   that the ``HSTORE`` extension is present.
   This function is **skipped** if the version of psycopg2 installed is
   less than version 2.4.

2. If the ``use_native_hstore`` flag is at its default of ``True``, and
   we've detected that ``HSTORE`` oids are available, the
   ``psycopg2.extensions.register_hstore()`` extension is invoked for all
   connections.

The ``register_hstore()`` extension has the effect of **all Python
dictionaries being accepted as parameters regardless of the type of target
column in SQL**.  The dictionaries are converted by this extension into a
textual HSTORE expression.  If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False``
as follows::

    engine = create_engine(
        "postgresql+psycopg2://scott:tiger@localhost/test",
        use_native_hstore=False,
    )

The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used.  It merely
means that the coercion between Python dictionaries and the HSTORE
string format, on both the parameter side and the result side, will take
place within SQLAlchemy's own marshalling logic, and not that of
``psycopg2``, which may be more performant.
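
As a short sketch (assuming a table ``t`` with an ``HSTORE`` column named
``data``), dictionary values round-trip the same way in either mode::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects.postgresql import HSTORE

    t = Table(
        "t",
        MetaData(),
        Column("id", Integer, primary_key=True),
        Column("data", HSTORE),
    )

    with engine.begin() as conn:
        conn.execute(t.insert(), {"id": 1, "data": {"key1": "value1"}})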
""" # noqa
|
||||
from __future__ import annotations
|
||||
|
||||
import collections.abc as collections_abc
|
||||
import logging
|
||||
import re
|
||||
from typing import cast
|
||||
|
||||
from . import ranges
|
||||
from ._psycopg_common import _PGDialect_common_psycopg
|
||||
from ._psycopg_common import _PGExecutionContext_common_psycopg
|
||||
from .base import PGIdentifierPreparer
|
||||
from .json import JSON
|
||||
from .json import JSONB
|
||||
from ... import types as sqltypes
|
||||
from ... import util
|
||||
from ...util import FastIntFlag
|
||||
from ...util import parse_user_argument_for_enum
|
||||
|
||||
logger = logging.getLogger("sqlalchemy.dialects.postgresql")
|
||||
|
||||
|
||||


class _PGJSON(JSON):
    def result_processor(self, dialect, coltype):
        # psycopg2 deserializes JSON values natively via its extension
        # module, so no result processing is needed here
        return None


class _PGJSONB(JSONB):
    def result_processor(self, dialect, coltype):
        # as with JSON, psycopg2 deserializes JSONB natively
        return None


class _Psycopg2Range(ranges.AbstractSingleRangeImpl):
    _psycopg2_range_cls = "none"

    def bind_processor(self, dialect):
        # look up the psycopg2 extras Range subclass named by this type,
        # e.g. psycopg2.extras.NumericRange
        psycopg2_Range = getattr(
            cast(PGDialect_psycopg2, dialect)._psycopg2_extras,
            self._psycopg2_range_cls,
        )

        def to_range(value):
            if isinstance(value, ranges.Range):
                value = psycopg2_Range(
                    value.lower, value.upper, value.bounds, value.empty
                )
            return value

        return to_range

    def result_processor(self, dialect, coltype):
        def to_range(value):
            # convert psycopg2's Range object back into SQLAlchemy's Range
            if value is not None:
                value = ranges.Range(
                    value._lower,
                    value._upper,
                    bounds=value._bounds if value._bounds else "[)",
                    empty=not value._bounds,
                )
            return value

        return to_range


class _Psycopg2NumericRange(_Psycopg2Range):
    _psycopg2_range_cls = "NumericRange"


class _Psycopg2DateRange(_Psycopg2Range):
    _psycopg2_range_cls = "DateRange"


class _Psycopg2DateTimeRange(_Psycopg2Range):
    _psycopg2_range_cls = "DateTimeRange"


class _Psycopg2DateTimeTZRange(_Psycopg2Range):
    _psycopg2_range_cls = "DateTimeTZRange"


class PGExecutionContext_psycopg2(_PGExecutionContext_common_psycopg):
    _psycopg2_fetched_rows = None

    def post_exec(self):
        self._log_notices(self.cursor)

    def _log_notices(self, cursor):
        # check also that notices is an iterable, after it's already
        # established that we will be iterating through it.  This is to get
        # around test suites such as SQLAlchemy's using a Mock object for
        # cursor
        if not cursor.connection.notices or not isinstance(
            cursor.connection.notices, collections_abc.Iterable
        ):
            return

        for notice in cursor.connection.notices:
            # NOTICE messages have a
            # newline character at the end
            logger.info(notice.rstrip())

        cursor.connection.notices[:] = []


class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer):
    pass


class ExecutemanyMode(FastIntFlag):
    EXECUTEMANY_VALUES = 0
    EXECUTEMANY_VALUES_PLUS_BATCH = 1


# unpack the flag members as module-level names
(
    EXECUTEMANY_VALUES,
    EXECUTEMANY_VALUES_PLUS_BATCH,
) = ExecutemanyMode.__members__.values()


class PGDialect_psycopg2(_PGDialect_common_psycopg):
    driver = "psycopg2"

    supports_statement_cache = True
    supports_server_side_cursors = True

    default_paramstyle = "pyformat"
    # set to true based on psycopg2 version
    supports_sane_multi_rowcount = False
    execution_ctx_cls = PGExecutionContext_psycopg2
    preparer = PGIdentifierPreparer_psycopg2
    psycopg2_version = (0, 0)
    use_insertmanyvalues_wo_returning = True

    returns_native_bytes = False

    _has_native_hstore = True

    colspecs = util.update_copy(
        _PGDialect_common_psycopg.colspecs,
        {
            JSON: _PGJSON,
            sqltypes.JSON: _PGJSON,
            JSONB: _PGJSONB,
            ranges.INT4RANGE: _Psycopg2NumericRange,
            ranges.INT8RANGE: _Psycopg2NumericRange,
            ranges.NUMRANGE: _Psycopg2NumericRange,
            ranges.DATERANGE: _Psycopg2DateRange,
            ranges.TSRANGE: _Psycopg2DateTimeRange,
            ranges.TSTZRANGE: _Psycopg2DateTimeTZRange,
        },
    )

    def __init__(
        self,
        executemany_mode="values_only",
        executemany_batch_page_size=100,
        **kwargs,
    ):
        _PGDialect_common_psycopg.__init__(self, **kwargs)

        if self._native_inet_types:
            raise NotImplementedError(
                "The psycopg2 dialect does not implement "
                "ipaddress type handling; native_inet_types cannot be set "
                "to ``True`` when using this dialect."
            )

        # Parse executemany_mode argument, allowing it to be only one of the
        # symbol names
        self.executemany_mode = parse_user_argument_for_enum(
            executemany_mode,
            {
                EXECUTEMANY_VALUES: ["values_only"],
                EXECUTEMANY_VALUES_PLUS_BATCH: ["values_plus_batch"],
            },
            "executemany_mode",
        )

        self.executemany_batch_page_size = executemany_batch_page_size

        if self.dbapi and hasattr(self.dbapi, "__version__"):
            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
            if m:
                self.psycopg2_version = tuple(
                    int(x) for x in m.group(1, 2, 3) if x is not None
                )

                if self.psycopg2_version < (2, 7):
                    raise ImportError(
                        "psycopg2 version 2.7 or higher is required."
                    )

    def initialize(self, connection):
        super().initialize(connection)
        self._has_native_hstore = (
            self.use_native_hstore
            and self._hstore_oids(connection.connection.dbapi_connection)
            is not None
        )

        self.supports_sane_multi_rowcount = (
            self.executemany_mode is not EXECUTEMANY_VALUES_PLUS_BATCH
        )

    @classmethod
    def import_dbapi(cls):
        import psycopg2

        return psycopg2

    @util.memoized_property
    def _psycopg2_extensions(cls):
        from psycopg2 import extensions

        return extensions

    @util.memoized_property
    def _psycopg2_extras(cls):
        from psycopg2 import extras

        return extras

    @util.memoized_property
    def _isolation_lookup(self):
        extensions = self._psycopg2_extensions
        return {
            "AUTOCOMMIT": extensions.ISOLATION_LEVEL_AUTOCOMMIT,
            "READ COMMITTED": extensions.ISOLATION_LEVEL_READ_COMMITTED,
            "READ UNCOMMITTED": extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
            "REPEATABLE READ": extensions.ISOLATION_LEVEL_REPEATABLE_READ,
            "SERIALIZABLE": extensions.ISOLATION_LEVEL_SERIALIZABLE,
        }

    def set_isolation_level(self, dbapi_connection, level):
        dbapi_connection.set_isolation_level(self._isolation_lookup[level])

    def set_readonly(self, connection, value):
        connection.readonly = value

    def get_readonly(self, connection):
        return connection.readonly

    def set_deferrable(self, connection, value):
        connection.deferrable = value

    def get_deferrable(self, connection):
        return connection.deferrable

    def on_connect(self):
        extras = self._psycopg2_extras

        # assemble the list of per-connection setup steps, then return a
        # single callable that runs them all
        fns = []
        if self.client_encoding is not None:

            def on_connect(dbapi_conn):
                dbapi_conn.set_client_encoding(self.client_encoding)

            fns.append(on_connect)

        if self.dbapi:

            def on_connect(dbapi_conn):
                extras.register_uuid(None, dbapi_conn)

            fns.append(on_connect)

        if self.dbapi and self.use_native_hstore:

            def on_connect(dbapi_conn):
                hstore_oids = self._hstore_oids(dbapi_conn)
                if hstore_oids is not None:
                    oid, array_oid = hstore_oids
                    kw = {"oid": oid}
                    kw["array_oid"] = array_oid
                    extras.register_hstore(dbapi_conn, **kw)

            fns.append(on_connect)

        if self.dbapi and self._json_deserializer:

            def on_connect(dbapi_conn):
                extras.register_default_json(
                    dbapi_conn, loads=self._json_deserializer
                )
                extras.register_default_jsonb(
                    dbapi_conn, loads=self._json_deserializer
                )

            fns.append(on_connect)

        if fns:

            def on_connect(dbapi_conn):
                for fn in fns:
                    fn(dbapi_conn)

            return on_connect
        else:
            return None

    def do_executemany(self, cursor, statement, parameters, context=None):
        if self.executemany_mode is EXECUTEMANY_VALUES_PLUS_BATCH:
            if self.executemany_batch_page_size:
                kwargs = {"page_size": self.executemany_batch_page_size}
            else:
                kwargs = {}
            self._psycopg2_extras.execute_batch(
                cursor, statement, parameters, **kwargs
            )
        else:
            cursor.executemany(statement, parameters)

    def do_begin_twophase(self, connection, xid):
        connection.connection.tpc_begin(xid)

    def do_prepare_twophase(self, connection, xid):
        connection.connection.tpc_prepare()

    def _do_twophase(self, dbapi_conn, operation, xid, recover=False):
        if recover:
            if dbapi_conn.status != self._psycopg2_extensions.STATUS_READY:
                dbapi_conn.rollback()
            operation(xid)
        else:
            operation()

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        dbapi_conn = connection.connection.dbapi_connection
        self._do_twophase(
            dbapi_conn, dbapi_conn.tpc_rollback, xid, recover=recover
        )

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        dbapi_conn = connection.connection.dbapi_connection
        self._do_twophase(
            dbapi_conn, dbapi_conn.tpc_commit, xid, recover=recover
        )

    @util.memoized_instancemethod
    def _hstore_oids(self, dbapi_connection):
        extras = self._psycopg2_extras
        oids = extras.HstoreAdapter.get_oids(dbapi_connection)
        if oids is not None and oids[0]:
            # (hstore oid, hstore array oid)
            return oids[0:2]
        else:
            return None

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.Error):
            # check the "closed" flag.  this might not be
            # present on old psycopg2 versions.   Also,
            # this flag doesn't actually help in a lot of disconnect
            # situations, so don't rely on it.
            if getattr(connection, "closed", False):
                return True

            # checks based on strings.  in the case that .closed
            # didn't cut it, fall back onto these.
            str_e = str(e).partition("\n")[0]
            for msg in self._is_disconnect_messages:
                idx = str_e.find(msg)
                if idx >= 0 and '"' not in str_e[:idx]:
                    return True
        return False

    @util.memoized_property
    def _is_disconnect_messages(self):
        return (
            # these error messages from libpq: interfaces/libpq/fe-misc.c
            # and interfaces/libpq/fe-secure.c.
            "terminating connection",
            "closed the connection",
            "connection not open",
            "could not receive data from server",
            "could not send data to server",
            # psycopg2 client errors, psycopg2/connection.h,
            # psycopg2/cursor.h
            "connection already closed",
            "cursor already closed",
            # not sure where this path is originally from, it may
            # be obsolete.   It really says "losed", not "closed".
            "losed the connection unexpectedly",
            # these can occur in newer SSL
            "connection has been closed unexpectedly",
            "SSL error: decryption failed or bad record mac",
            "SSL SYSCALL error: Bad file descriptor",
            "SSL SYSCALL error: EOF detected",
            "SSL SYSCALL error: Operation timed out",
            "SSL SYSCALL error: Bad address",
            # This can occur in OpenSSL 1 when an unexpected EOF occurs.
            # https://www.openssl.org/docs/man1.1.1/man3/SSL_get_error.html#BUGS
            # It may also occur in newer OpenSSL for a non-recoverable I/O
            # error as a result of a system call that does not set 'errno'
            # in libc.
            "SSL SYSCALL error: Success",
        )


dialect = PGDialect_psycopg2
@ -0,0 +1,61 @@
# dialects/postgresql/psycopg2cffi.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
|
||||
.. dialect:: postgresql+psycopg2cffi
|
||||
:name: psycopg2cffi
|
||||
:dbapi: psycopg2cffi
|
||||
:connectstring: postgresql+psycopg2cffi://user:password@host:port/dbname[?key=value&key=value...]
|
||||
:url: https://pypi.org/project/psycopg2cffi/
|
||||
|
||||
``psycopg2cffi`` is an adaptation of ``psycopg2``, using CFFI for the C
|
||||
layer. This makes it suitable for use in e.g. PyPy. Documentation
|
||||
is as per ``psycopg2``.
|
||||
|
||||
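
An engine using this dialect is created in the same way as with psycopg2
(a minimal sketch; credentials here are illustrative)::

    engine = create_engine(
        "postgresql+psycopg2cffi://scott:tiger@localhost/test"
    )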

.. seealso::

    :mod:`sqlalchemy.dialects.postgresql.psycopg2`

"""  # noqa
from .psycopg2 import PGDialect_psycopg2
from ... import util


class PGDialect_psycopg2cffi(PGDialect_psycopg2):
    driver = "psycopg2cffi"
    supports_unicode_statements = True
    supports_statement_cache = True

    # psycopg2cffi's first release is 2.5.0, but reports
    # __version__ as 2.4.4.  Subsequent releases seem to have
    # fixed this.

    FEATURE_VERSION_MAP = dict(
        native_json=(2, 4, 4),
        native_jsonb=(2, 7, 1),
        sane_multi_rowcount=(2, 4, 4),
        array_oid=(2, 4, 4),
        hstore_adapter=(2, 4, 4),
    )

    @classmethod
    def import_dbapi(cls):
        return __import__("psycopg2cffi")

    @util.memoized_property
    def _psycopg2_extensions(cls):
        root = __import__("psycopg2cffi", fromlist=["extensions"])
        return root.extensions

    @util.memoized_property
    def _psycopg2_extras(cls):
        root = __import__("psycopg2cffi", fromlist=["extras"])
        return root.extras


dialect = PGDialect_psycopg2cffi
@ -0,0 +1,313 @@
# dialects/postgresql/types.py
# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

from __future__ import annotations

import datetime as dt
from typing import Any
from typing import Optional
from typing import overload
from typing import Type
from typing import TYPE_CHECKING
from uuid import UUID as _python_UUID

from ...sql import sqltypes
from ...sql import type_api
from ...util.typing import Literal

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql.operators import OperatorType
    from ...sql.type_api import _LiteralProcessorType
    from ...sql.type_api import TypeEngine

_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)


class PGUuid(sqltypes.UUID[sqltypes._UUID_RETURN]):
    render_bind_cast = True
    render_literal_cast = True

    if TYPE_CHECKING:

        @overload
        def __init__(
            self: PGUuid[_python_UUID], as_uuid: Literal[True] = ...
        ) -> None: ...

        @overload
        def __init__(
            self: PGUuid[str], as_uuid: Literal[False] = ...
        ) -> None: ...

        def __init__(self, as_uuid: bool = True) -> None: ...


class BYTEA(sqltypes.LargeBinary):
    __visit_name__ = "BYTEA"


class _NetworkAddressTypeMixin:
    # return this network address type for comparison operations, so that
    # compared values are coerced to this type rather than to a datatype
    # derived from the Python literal
    def coerce_compared_value(
        self, op: Optional[OperatorType], value: Any
    ) -> TypeEngine[Any]:
        if TYPE_CHECKING:
            assert isinstance(self, TypeEngine)
        return self


class INET(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
    __visit_name__ = "INET"


PGInet = INET


class CIDR(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
    __visit_name__ = "CIDR"


PGCidr = CIDR


class MACADDR(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
    __visit_name__ = "MACADDR"


PGMacAddr = MACADDR


class MACADDR8(_NetworkAddressTypeMixin, sqltypes.TypeEngine[str]):
    __visit_name__ = "MACADDR8"


PGMacAddr8 = MACADDR8


class MONEY(sqltypes.TypeEngine[str]):
    r"""Provide the PostgreSQL MONEY type.

    Depending on driver, result rows using this type may return a
    string value which includes currency symbols.

    For this reason, it may be preferable to provide conversion to a
    numerically-based currency datatype using :class:`_types.TypeDecorator`::

        import re
        import decimal
        from typing import Any
        from sqlalchemy import Dialect
        from sqlalchemy import TypeDecorator


        class NumericMoney(TypeDecorator):
            impl = MONEY

            def process_result_value(self, value: Any, dialect: Dialect) -> Any:
                if value is not None:
                    # adjust this for the currency and numeric
                    m = re.match(r"\$([\d.]+)", value)
                    if m:
                        value = decimal.Decimal(m.group(1))
                return value

    Alternatively, the conversion may be applied as a CAST using
    the :meth:`_types.TypeDecorator.column_expression` method as follows::

        import decimal
        from typing import Any
        from sqlalchemy import cast
        from sqlalchemy import Numeric
        from sqlalchemy import TypeDecorator


        class NumericMoney(TypeDecorator):
            impl = MONEY

            def column_expression(self, column: Any):
                return cast(column, Numeric())

    .. versionadded:: 1.2

    """  # noqa: E501

    __visit_name__ = "MONEY"


class OID(sqltypes.TypeEngine[int]):
    """Provide the PostgreSQL OID type."""

    __visit_name__ = "OID"


class REGCONFIG(sqltypes.TypeEngine[str]):
    """Provide the PostgreSQL REGCONFIG type.

    .. versionadded:: 2.0.0rc1

    """

    __visit_name__ = "REGCONFIG"


class TSQUERY(sqltypes.TypeEngine[str]):
    """Provide the PostgreSQL TSQUERY type.

    .. versionadded:: 2.0.0rc1

    """

    __visit_name__ = "TSQUERY"


class REGCLASS(sqltypes.TypeEngine[str]):
    """Provide the PostgreSQL REGCLASS type.

    .. versionadded:: 1.2.7

    """

    __visit_name__ = "REGCLASS"


class TIMESTAMP(sqltypes.TIMESTAMP):
    """Provide the PostgreSQL TIMESTAMP type."""

    __visit_name__ = "TIMESTAMP"

    def __init__(
        self, timezone: bool = False, precision: Optional[int] = None
    ) -> None:
        """Construct a TIMESTAMP.

        :param timezone: boolean value if timezone present, default False
        :param precision: optional integer precision value

        .. versionadded:: 1.4

        """
        super().__init__(timezone=timezone)
        self.precision = precision


class TIME(sqltypes.TIME):
    """PostgreSQL TIME type."""

    __visit_name__ = "TIME"

    def __init__(
        self, timezone: bool = False, precision: Optional[int] = None
    ) -> None:
        """Construct a TIME.

        :param timezone: boolean value if timezone present, default False
        :param precision: optional integer precision value

        .. versionadded:: 1.4

        """
        super().__init__(timezone=timezone)
        self.precision = precision


class INTERVAL(type_api.NativeForEmulated, sqltypes._AbstractInterval):
    """PostgreSQL INTERVAL type."""

    __visit_name__ = "INTERVAL"
    native = True

    def __init__(
        self, precision: Optional[int] = None, fields: Optional[str] = None
    ) -> None:
        """Construct an INTERVAL.

        :param precision: optional integer precision value
        :param fields: string fields specifier.  allows storage of fields
         to be limited, such as ``"YEAR"``, ``"MONTH"``, ``"DAY TO HOUR"``,
         etc.

        .. versionadded:: 1.2

        """
        self.precision = precision
        self.fields = fields

    @classmethod
    def adapt_emulated_to_native(
        cls, interval: sqltypes.Interval, **kw: Any  # type: ignore[override]
    ) -> INTERVAL:
        return INTERVAL(precision=interval.second_precision)

    @property
    def _type_affinity(self) -> Type[sqltypes.Interval]:
        return sqltypes.Interval

    def as_generic(self, allow_nulltype: bool = False) -> sqltypes.Interval:
        return sqltypes.Interval(native=True, second_precision=self.precision)

    @property
    def python_type(self) -> Type[dt.timedelta]:
        return dt.timedelta

    def literal_processor(
        self, dialect: Dialect
    ) -> Optional[_LiteralProcessorType[dt.timedelta]]:
        def process(value: dt.timedelta) -> str:
            return f"make_interval(secs=>{value.total_seconds()})"

        return process


PGInterval = INTERVAL


class BIT(sqltypes.TypeEngine[int]):
    __visit_name__ = "BIT"

    def __init__(
        self, length: Optional[int] = None, varying: bool = False
    ) -> None:
        if varying:
            # BIT VARYING can be unlimited-length, so no default
            self.length = length
        else:
            # BIT without VARYING defaults to length 1
            self.length = length or 1
        self.varying = varying


PGBit = BIT


class TSVECTOR(sqltypes.TypeEngine[str]):
    """The :class:`_postgresql.TSVECTOR` type implements the PostgreSQL
    text search type TSVECTOR.

    It can be used to do full text queries on natural language
    documents.

    .. seealso::

        :ref:`postgresql_match`

    """

    __visit_name__ = "TSVECTOR"


class CITEXT(sqltypes.TEXT):
    """Provide the PostgreSQL CITEXT type.

    .. versionadded:: 2.0.7

    """

    __visit_name__ = "CITEXT"

    def coerce_compared_value(
        self, op: Optional[OperatorType], value: Any
    ) -> TypeEngine[Any]:
        # comparisons against CITEXT remain CITEXT, so that case-insensitive
        # behavior is preserved when comparing against plain strings
        return self