Update 2025-04-24_11:44:19
@@ -0,0 +1,61 @@
# dialects/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

from __future__ import annotations

from typing import Callable
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING

from .. import util

if TYPE_CHECKING:
    from ..engine.interfaces import Dialect

__all__ = ("mssql", "mysql", "oracle", "postgresql", "sqlite")


def _auto_fn(name: str) -> Optional[Callable[[], Type[Dialect]]]:
    """default dialect importer.

    plugs into the :class:`.PluginLoader`
    as a first-hit system.

    """
    if "." in name:
        dialect, driver = name.split(".")
    else:
        dialect = name
        driver = "base"

    try:
        if dialect == "mariadb":
            # it's "OK" for us to hardcode here since _auto_fn is already
            # hardcoded. if mysql / mariadb etc were third party dialects
            # they would just publish all the entrypoints, which would
            # actually look much nicer.
            module = __import__(
                "sqlalchemy.dialects.mysql.mariadb"
            ).dialects.mysql.mariadb
            return module.loader(driver)  # type: ignore
        else:
            module = __import__("sqlalchemy.dialects.%s" % (dialect,)).dialects
            module = getattr(module, dialect)
    except ImportError:
        return None

    if hasattr(module, driver):
        module = getattr(module, driver)
        return lambda: module.dialect
    else:
        return None


registry = util.PluginLoader("sqlalchemy.dialects", auto_fn=_auto_fn)

plugins = util.PluginLoader("sqlalchemy.plugins")
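A hedged usage sketch, not part of the commit: given ``_auto_fn`` above, the registry resolves dialect entrypoint names lazily, along these lines:

    from sqlalchemy.dialects import registry

    # "postgresql.psycopg2" imports sqlalchemy.dialects.postgresql.psycopg2
    # and returns its `dialect` class
    dialect_cls = registry.load("postgresql.psycopg2")

    # a bare name falls back to the "base" driver module
    sqlite_cls = registry.load("sqlite")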
@@ -0,0 +1,30 @@
# dialects/_typing.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations

from typing import Any
from typing import Iterable
from typing import Mapping
from typing import Optional
from typing import Union

from ..sql import roles
from ..sql.base import ColumnCollection
from ..sql.schema import Column
from ..sql.schema import ColumnCollectionConstraint
from ..sql.schema import Index


_OnConflictConstraintT = Union[str, ColumnCollectionConstraint, Index, None]
_OnConflictIndexElementsT = Optional[
    Iterable[Union[Column[Any], str, roles.DDLConstraintColumnRole]]
]
_OnConflictIndexWhereT = Optional[roles.WhereHavingRole]
_OnConflictSetT = Optional[
    Union[Mapping[Any, Any], ColumnCollection[Any, Any]]
]
_OnConflictWhereT = Optional[roles.WhereHavingRole]
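A hedged sketch of how these aliases are meant to be consumed; the function below is illustrative only, echoing the signature style of the dialect-level ``Insert.on_conflict_do_update()`` methods:

    from sqlalchemy.dialects._typing import (
        _OnConflictIndexElementsT,
        _OnConflictSetT,
        _OnConflictWhereT,
    )


    def on_conflict_do_update(
        index_elements: _OnConflictIndexElementsT = None,
        set_: _OnConflictSetT = None,
        where: _OnConflictWhereT = None,
    ) -> None:
        # illustrative stub only
        ...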
@@ -0,0 +1,88 @@
# dialects/mssql/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from . import aioodbc  # noqa
from . import base  # noqa
from . import pymssql  # noqa
from . import pyodbc  # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DATETIME2
from .base import DATETIMEOFFSET
from .base import DECIMAL
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import IMAGE
from .base import INTEGER
from .base import JSON
from .base import MONEY
from .base import NCHAR
from .base import NTEXT
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import ROWVERSION
from .base import SMALLDATETIME
from .base import SMALLINT
from .base import SMALLMONEY
from .base import SQL_VARIANT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYINT
from .base import UNIQUEIDENTIFIER
from .base import VARBINARY
from .base import VARCHAR
from .base import XML
from ...sql import try_cast


base.dialect = dialect = pyodbc.dialect


__all__ = (
    "JSON",
    "INTEGER",
    "BIGINT",
    "SMALLINT",
    "TINYINT",
    "VARCHAR",
    "NVARCHAR",
    "CHAR",
    "NCHAR",
    "TEXT",
    "NTEXT",
    "DECIMAL",
    "NUMERIC",
    "FLOAT",
    "DATETIME",
    "DATETIME2",
    "DATETIMEOFFSET",
    "DATE",
    "DOUBLE_PRECISION",
    "TIME",
    "SMALLDATETIME",
    "BINARY",
    "VARBINARY",
    "BIT",
    "REAL",
    "IMAGE",
    "TIMESTAMP",
    "ROWVERSION",
    "MONEY",
    "SMALLMONEY",
    "UNIQUEIDENTIFIER",
    "SQL_VARIANT",
    "XML",
    "dialect",
    "try_cast",
)
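For context, a hedged sketch of how the re-exported names are typically used (table and column names here are made up):

    from sqlalchemy import Column, MetaData, Table
    from sqlalchemy.dialects import mssql

    t = Table(
        "example",
        MetaData(),
        Column("id", mssql.UNIQUEIDENTIFIER),
        Column("payload", mssql.NVARCHAR(100)),
    )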
@@ -0,0 +1,63 @@
# dialects/mssql/aioodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
.. dialect:: mssql+aioodbc
    :name: aioodbc
    :dbapi: aioodbc
    :connectstring: mssql+aioodbc://<username>:<password>@<dsnname>
    :url: https://pypi.org/project/aioodbc/


Support for the SQL Server database in asyncio style, using the aioodbc
driver which itself is a thread-wrapper around pyodbc.

.. versionadded:: 2.0.23 Added the mssql+aioodbc dialect which builds
   on top of the pyodbc and general aio* dialect architecture.

Using a special asyncio mediation layer, the aioodbc dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.

Most behaviors and caveats for this driver are the same as those of the
pyodbc dialect used on SQL Server; see :ref:`mssql_pyodbc` for general
background.

This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function; connection
styles are otherwise equivalent to those documented in the pyodbc section::

    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine(
        "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
        "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
    )

"""

from __future__ import annotations

from .pyodbc import MSDialect_pyodbc
from .pyodbc import MSExecutionContext_pyodbc
from ...connectors.aioodbc import aiodbcConnector


class MSExecutionContext_aioodbc(MSExecutionContext_pyodbc):
    def create_server_side_cursor(self):
        return self._dbapi_connection.cursor(server_side=True)


class MSDialectAsync_aioodbc(aiodbcConnector, MSDialect_pyodbc):
    driver = "aioodbc"

    supports_statement_cache = True

    execution_ctx_cls = MSExecutionContext_aioodbc


dialect = MSDialectAsync_aioodbc
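A hedged usage sketch (the DSN is hypothetical): ``create_server_side_cursor`` above is exercised when streaming results on the async engine:

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("mssql+aioodbc://scott:tiger@some_dsn")


    async def stream_rows():
        async with engine.connect() as conn:
            # conn.stream() asks the dialect for a server-side cursor,
            # routing through create_server_side_cursor() above
            result = await conn.stream(text("SELECT 1 AS x"))
            async for row in result:
                print(row.x)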
venv/lib/python3.11/site-packages/sqlalchemy/dialects/mssql/base.py (new file, 4058 lines)
File diff suppressed because it is too large.
@@ -0,0 +1,254 @@
# dialects/mssql/information_schema.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from ... import cast
from ... import Column
from ... import MetaData
from ... import Table
from ...ext.compiler import compiles
from ...sql import expression
from ...types import Boolean
from ...types import Integer
from ...types import Numeric
from ...types import NVARCHAR
from ...types import String
from ...types import TypeDecorator
from ...types import Unicode


ischema = MetaData()


class CoerceUnicode(TypeDecorator):
    impl = Unicode
    cache_ok = True

    def bind_expression(self, bindvalue):
        return _cast_on_2005(bindvalue)


class _cast_on_2005(expression.ColumnElement):
    def __init__(self, bindvalue):
        self.bindvalue = bindvalue


@compiles(_cast_on_2005)
def _compile(element, compiler, **kw):
    from . import base

    if (
        compiler.dialect.server_version_info is None
        or compiler.dialect.server_version_info < base.MS_2005_VERSION
    ):
        return compiler.process(element.bindvalue, **kw)
    else:
        return compiler.process(cast(element.bindvalue, Unicode), **kw)


schemata = Table(
    "SCHEMATA",
    ischema,
    Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"),
    Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"),
    Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"),
    schema="INFORMATION_SCHEMA",
)

tables = Table(
    "TABLES",
    ischema,
    Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("TABLE_TYPE", CoerceUnicode, key="table_type"),
    schema="INFORMATION_SCHEMA",
)

columns = Table(
    "COLUMNS",
    ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("IS_NULLABLE", Integer, key="is_nullable"),
    Column("DATA_TYPE", String, key="data_type"),
    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
    Column(
        "CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"
    ),
    Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
    Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
    Column("COLUMN_DEFAULT", Integer, key="column_default"),
    Column("COLLATION_NAME", String, key="collation_name"),
    schema="INFORMATION_SCHEMA",
)

mssql_temp_table_columns = Table(
    "COLUMNS",
    ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("IS_NULLABLE", Integer, key="is_nullable"),
    Column("DATA_TYPE", String, key="data_type"),
    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
    Column(
        "CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"
    ),
    Column("NUMERIC_PRECISION", Integer, key="numeric_precision"),
    Column("NUMERIC_SCALE", Integer, key="numeric_scale"),
    Column("COLUMN_DEFAULT", Integer, key="column_default"),
    Column("COLLATION_NAME", String, key="collation_name"),
    schema="tempdb.INFORMATION_SCHEMA",
)

constraints = Table(
    "TABLE_CONSTRAINTS",
    ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    Column("CONSTRAINT_TYPE", CoerceUnicode, key="constraint_type"),
    schema="INFORMATION_SCHEMA",
)

column_constraints = Table(
    "CONSTRAINT_COLUMN_USAGE",
    ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    schema="INFORMATION_SCHEMA",
)

key_constraints = Table(
    "KEY_COLUMN_USAGE",
    ischema,
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("COLUMN_NAME", CoerceUnicode, key="column_name"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
    Column("ORDINAL_POSITION", Integer, key="ordinal_position"),
    schema="INFORMATION_SCHEMA",
)

ref_constraints = Table(
    "REFERENTIAL_CONSTRAINTS",
    ischema,
    Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"),
    Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"),
    Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"),
    # TODO: is CATLOG misspelled ?
    Column(
        "UNIQUE_CONSTRAINT_CATLOG",
        CoerceUnicode,
        key="unique_constraint_catalog",
    ),
    Column(
        "UNIQUE_CONSTRAINT_SCHEMA",
        CoerceUnicode,
        key="unique_constraint_schema",
    ),
    Column(
        "UNIQUE_CONSTRAINT_NAME", CoerceUnicode, key="unique_constraint_name"
    ),
    Column("MATCH_OPTION", String, key="match_option"),
    Column("UPDATE_RULE", String, key="update_rule"),
    Column("DELETE_RULE", String, key="delete_rule"),
    schema="INFORMATION_SCHEMA",
)

views = Table(
    "VIEWS",
    ischema,
    Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"),
    Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"),
    Column("TABLE_NAME", CoerceUnicode, key="table_name"),
    Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"),
    Column("CHECK_OPTION", String, key="check_option"),
    Column("IS_UPDATABLE", String, key="is_updatable"),
    schema="INFORMATION_SCHEMA",
)

computed_columns = Table(
    "computed_columns",
    ischema,
    Column("object_id", Integer),
    Column("name", CoerceUnicode),
    Column("is_computed", Boolean),
    Column("is_persisted", Boolean),
    Column("definition", CoerceUnicode),
    schema="sys",
)

sequences = Table(
    "SEQUENCES",
    ischema,
    Column("SEQUENCE_CATALOG", CoerceUnicode, key="sequence_catalog"),
    Column("SEQUENCE_SCHEMA", CoerceUnicode, key="sequence_schema"),
    Column("SEQUENCE_NAME", CoerceUnicode, key="sequence_name"),
    schema="INFORMATION_SCHEMA",
)


class NumericSqlVariant(TypeDecorator):
    r"""This type casts sql_variant columns in the identity_columns view
    to numeric. This is required because:

    * pyodbc does not support sql_variant
    * pymssql under python 2 returns the byte representation of the number,
      int 1 is returned as "\x01\x00\x00\x00". On python 3 it returns the
      correct value as string.
    """

    impl = Unicode
    cache_ok = True

    def column_expression(self, colexpr):
        return cast(colexpr, Numeric(38, 0))


identity_columns = Table(
    "identity_columns",
    ischema,
    Column("object_id", Integer),
    Column("name", CoerceUnicode),
    Column("is_identity", Boolean),
    Column("seed_value", NumericSqlVariant),
    Column("increment_value", NumericSqlVariant),
    Column("last_value", NumericSqlVariant),
    Column("is_not_for_replication", Boolean),
    schema="sys",
)


class NVarcharSqlVariant(TypeDecorator):
    """This type casts sql_variant columns in the extended_properties view
    to nvarchar. This is required because pyodbc does not support sql_variant
    """

    impl = Unicode
    cache_ok = True

    def column_expression(self, colexpr):
        return cast(colexpr, NVARCHAR)


extended_properties = Table(
    "extended_properties",
    ischema,
    Column("class", Integer),  # TINYINT
    Column("class_desc", CoerceUnicode),
    Column("major_id", Integer),
    Column("minor_id", Integer),
    Column("name", CoerceUnicode),
    Column("value", NVarcharSqlVariant),
    schema="sys",
)
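Roughly how the dialect's reflection code consumes these Table objects; a hedged sketch, not taken from this commit:

    from sqlalchemy import select
    from sqlalchemy.dialects.mssql import information_schema as info_schema

    # list table names in a schema, the way get_table_names() might
    stmt = (
        select(info_schema.tables.c.table_name)
        .where(info_schema.tables.c.table_schema == "dbo")
        .where(info_schema.tables.c.table_type == "BASE TABLE")
    )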
@@ -0,0 +1,129 @@
# dialects/mssql/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from ... import types as sqltypes

# technically, all the dialect-specific datatypes that don't have any special
# behaviors would be private with names like _MSJson. However, we haven't been
# doing this for mysql.JSON or sqlite.JSON which both have JSON / JSONIndexType
# / JSONPathType in their json.py files, so keep consistent with that
# sub-convention for now. A future change can update them all to be
# package-private at once.


class JSON(sqltypes.JSON):
    """MSSQL JSON type.

    MSSQL supports JSON-formatted data as of SQL Server 2016.

    The :class:`_mssql.JSON` datatype at the DDL level will represent the
    datatype as ``NVARCHAR(max)``, but provides for JSON-level comparison
    functions as well as Python coercion behavior.

    :class:`_mssql.JSON` is used automatically whenever the base
    :class:`_types.JSON` datatype is used against a SQL Server backend.

    .. seealso::

        :class:`_types.JSON` - main documentation for the generic
        cross-platform JSON datatype.

    The :class:`_mssql.JSON` type supports persistence of JSON values
    as well as the core index operations provided by the :class:`_types.JSON`
    datatype, by adapting the operations to render the ``JSON_VALUE``
    or ``JSON_QUERY`` functions at the database level.

    The SQL Server :class:`_mssql.JSON` type necessarily makes use of the
    ``JSON_QUERY`` and ``JSON_VALUE`` functions when querying for elements
    of a JSON object. These two functions have a major restriction in that
    they are **mutually exclusive** based on the type of object to be returned.
    The ``JSON_QUERY`` function **only** returns a JSON dictionary or list,
    but not an individual string, numeric, or boolean element; the
    ``JSON_VALUE`` function **only** returns an individual string, numeric,
    or boolean element. **Both functions either return NULL or raise
    an error if they are not used against the correct expected value.**

    To handle this awkward requirement, indexed access rules are as follows:

    1. When extracting a sub element from a JSON that is itself a JSON
       dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor
       should be used::

           stmt = select(data_table.c.data["some key"].as_json()).where(
               data_table.c.data["some key"].as_json() == {"sub": "structure"}
           )

    2. When extracting a sub element from a JSON that is a plain boolean,
       string, integer, or float, use the appropriate method among
       :meth:`_types.JSON.Comparator.as_boolean`,
       :meth:`_types.JSON.Comparator.as_string`,
       :meth:`_types.JSON.Comparator.as_integer`,
       :meth:`_types.JSON.Comparator.as_float`::

           stmt = select(data_table.c.data["some key"].as_string()).where(
               data_table.c.data["some key"].as_string() == "some string"
           )

    .. versionadded:: 1.4


    """

    # note there was a result processor here that was looking for "number",
    # but none of the tests seem to exercise it.


# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, they remain
# separately implemented for each dialect.
class _FormatTypeMixin:
    def _format_value(self, value):
        raise NotImplementedError()

    def bind_processor(self, dialect):
        super_proc = self.string_bind_processor(dialect)

        def process(value):
            value = self._format_value(value)
            if super_proc:
                value = super_proc(value)
            return value

        return process

    def literal_processor(self, dialect):
        super_proc = self.string_literal_processor(dialect)

        def process(value):
            value = self._format_value(value)
            if super_proc:
                value = super_proc(value)
            return value

        return process


class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
    def _format_value(self, value):
        if isinstance(value, int):
            value = "$[%s]" % value
        else:
            value = '$."%s"' % value
        return value


class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
    def _format_value(self, value):
        return "$%s" % (
            "".join(
                [
                    "[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
                    for elem in value
                ]
            )
        )
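A hedged illustration of the JSON path formatting implemented above (the values shown are examples only):

    from sqlalchemy.dialects.mssql.json import JSONIndexType, JSONPathType

    JSONIndexType()._format_value(3)  # -> "$[3]"
    JSONIndexType()._format_value("name")  # -> '$."name"'
    JSONPathType()._format_value(["some key", 5, "sub"])
    # -> '$."some key"[5]."sub"'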
@@ -0,0 +1,162 @@
# dialects/mssql/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from sqlalchemy import inspect
from sqlalchemy import Integer
from ... import create_engine
from ... import exc
from ...schema import Column
from ...schema import DropConstraint
from ...schema import ForeignKeyConstraint
from ...schema import MetaData
from ...schema import Table
from ...testing.provision import create_db
from ...testing.provision import drop_all_schema_objects_pre_tables
from ...testing.provision import drop_db
from ...testing.provision import generate_driver_url
from ...testing.provision import get_temp_table_name
from ...testing.provision import log
from ...testing.provision import normalize_sequence
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args


@post_configure_engine.for_db("mssql")
def post_configure_engine(url, engine, follower_ident):
    if engine.driver == "pyodbc":
        engine.dialect.dbapi.pooling = False


@generate_driver_url.for_db("mssql")
def generate_driver_url(url, driver, query_str):
    backend = url.get_backend_name()

    new_url = url.set(drivername="%s+%s" % (backend, driver))

    if driver not in ("pyodbc", "aioodbc"):
        new_url = new_url.set(query="")

    if driver == "aioodbc":
        new_url = new_url.update_query_dict({"MARS_Connection": "Yes"})

    if query_str:
        new_url = new_url.update_query_string(query_str)

    try:
        new_url.get_dialect()
    except exc.NoSuchModuleError:
        return None
    else:
        return new_url


@create_db.for_db("mssql")
def _mssql_create_db(cfg, eng, ident):
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        conn.exec_driver_sql("create database %s" % ident)
        conn.exec_driver_sql(
            "ALTER DATABASE %s SET ALLOW_SNAPSHOT_ISOLATION ON" % ident
        )
        conn.exec_driver_sql(
            "ALTER DATABASE %s SET READ_COMMITTED_SNAPSHOT ON" % ident
        )
        conn.exec_driver_sql("use %s" % ident)
        conn.exec_driver_sql("create schema test_schema")
        conn.exec_driver_sql("create schema test_schema_2")


@drop_db.for_db("mssql")
def _mssql_drop_db(cfg, eng, ident):
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        _mssql_drop_ignore(conn, ident)


def _mssql_drop_ignore(conn, ident):
    try:
        # typically when this happens, we can't KILL the session anyway,
        # so let the cleanup process drop the DBs
        # for row in conn.exec_driver_sql(
        #     "select session_id from sys.dm_exec_sessions "
        #     "where database_id=db_id('%s')" % ident):
        #     log.info("killing SQL server session %s", row['session_id'])
        #     conn.exec_driver_sql("kill %s" % row['session_id'])
        conn.exec_driver_sql("drop database %s" % ident)
        log.info("Reaped db: %s", ident)
        return True
    except exc.DatabaseError as err:
        log.warning("couldn't drop db: %s", err)
        return False


@run_reap_dbs.for_db("mssql")
def _reap_mssql_dbs(url, idents):
    log.info("db reaper connecting to %r", url)
    eng = create_engine(url)
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        log.info("identifiers in file: %s", ", ".join(idents))

        to_reap = conn.exec_driver_sql(
            "select d.name from sys.databases as d where name "
            "like 'TEST_%' and not exists (select session_id "
            "from sys.dm_exec_sessions "
            "where database_id=d.database_id)"
        )
        all_names = {dbname.lower() for (dbname,) in to_reap}
        to_drop = set()
        for name in all_names:
            if name in idents:
                to_drop.add(name)

        dropped = total = 0
        for total, dbname in enumerate(to_drop, 1):
            if _mssql_drop_ignore(conn, dbname):
                dropped += 1
        log.info(
            "Dropped %d out of %d stale databases detected", dropped, total
        )


@temp_table_keyword_args.for_db("mssql")
def _mssql_temp_table_keyword_args(cfg, eng):
    return {}


@get_temp_table_name.for_db("mssql")
def _mssql_get_temp_table_name(cfg, eng, base_name):
    return "##" + base_name


@drop_all_schema_objects_pre_tables.for_db("mssql")
def drop_all_schema_objects_pre_tables(cfg, eng):
    with eng.connect().execution_options(isolation_level="AUTOCOMMIT") as conn:
        inspector = inspect(conn)
        for schema in (None, "dbo", cfg.test_schema, cfg.test_schema_2):
            for tname in inspector.get_table_names(schema=schema):
                tb = Table(
                    tname,
                    MetaData(),
                    Column("x", Integer),
                    Column("y", Integer),
                    schema=schema,
                )
                for fk in inspect(conn).get_foreign_keys(tname, schema=schema):
                    conn.execute(
                        DropConstraint(
                            ForeignKeyConstraint(
                                [tb.c.x], [tb.c.y], name=fk["name"]
                            )
                        )
                    )


@normalize_sequence.for_db("mssql")
def normalize_sequence(cfg, sequence):
    if sequence.start is None:
        sequence.start = 1
    return sequence
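A hedged illustration of ``generate_driver_url`` above (the URLs are invented):

    from sqlalchemy.engine import make_url

    base = make_url(
        "mssql://scott:tiger@host/db?driver=ODBC+Driver+18+for+SQL+Server"
    )
    # driver="pyodbc"  -> mssql+pyodbc://scott:tiger@host/db?driver=...
    # driver="pymssql" -> mssql+pymssql://scott:tiger@host/db  (query dropped)
    # driver="aioodbc" -> query additionally gains MARS_Connection=Yes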
@@ -0,0 +1,126 @@
# dialects/mssql/pymssql.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


"""
.. dialect:: mssql+pymssql
    :name: pymssql
    :dbapi: pymssql
    :connectstring: mssql+pymssql://<username>:<password>@<freetds_name>/?charset=utf8

pymssql is a Python module that provides a DBAPI interface around
`FreeTDS <https://www.freetds.org/>`_.

.. versionchanged:: 2.0.5

    pymssql was restored to SQLAlchemy's continuous integration testing.


"""  # noqa
import re

from .base import MSDialect
from .base import MSIdentifierPreparer
from ... import types as sqltypes
from ... import util
from ...engine import processors


class _MSNumeric_pymssql(sqltypes.Numeric):
    def result_processor(self, dialect, type_):
        if not self.asdecimal:
            return processors.to_float
        else:
            return sqltypes.Numeric.result_processor(self, dialect, type_)


class MSIdentifierPreparer_pymssql(MSIdentifierPreparer):
    def __init__(self, dialect):
        super().__init__(dialect)
        # pymssql has the very unusual behavior that it uses pyformat
        # yet does not require that percent signs be doubled
        self._double_percents = False


class MSDialect_pymssql(MSDialect):
    supports_statement_cache = True
    supports_native_decimal = True
    supports_native_uuid = True
    driver = "pymssql"

    preparer = MSIdentifierPreparer_pymssql

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {sqltypes.Numeric: _MSNumeric_pymssql, sqltypes.Float: sqltypes.Float},
    )

    @classmethod
    def import_dbapi(cls):
        module = __import__("pymssql")
        # pymssql < 2.1.1 doesn't have a Binary method. we use string
        client_ver = tuple(int(x) for x in module.__version__.split("."))
        if client_ver < (2, 1, 1):
            # TODO: monkeypatching here is less than ideal
            module.Binary = lambda x: x if hasattr(x, "decode") else str(x)

        if client_ver < (1,):
            util.warn(
                "The pymssql dialect expects at least "
                "the 1.0 series of the pymssql DBAPI."
            )
        return module

    def _get_server_version_info(self, connection):
        vers = connection.exec_driver_sql("select @@version").scalar()
        m = re.match(r"Microsoft .*? - (\d+)\.(\d+)\.(\d+)\.(\d+)", vers)
        if m:
            return tuple(int(x) for x in m.group(1, 2, 3, 4))
        else:
            return None

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username="user")
        opts.update(url.query)
        port = opts.pop("port", None)
        if port and "host" in opts:
            opts["host"] = "%s:%s" % (opts["host"], port)
        return ([], opts)

    def is_disconnect(self, e, connection, cursor):
        for msg in (
            "Adaptive Server connection timed out",
            "Net-Lib error during Connection reset by peer",
            "message 20003",  # connection timeout
            "Error 10054",
            "Not connected to any MS SQL server",
            "Connection is closed",
            "message 20006",  # Write to the server failed
            "message 20017",  # Unexpected EOF from the server
            "message 20047",  # DBPROCESS is dead or not enabled
            "The server failed to resume the transaction",
        ):
            if msg in str(e):
                return True
        else:
            return False

    def get_isolation_level_values(self, dbapi_connection):
        return super().get_isolation_level_values(dbapi_connection) + [
            "AUTOCOMMIT"
        ]

    def set_isolation_level(self, dbapi_connection, level):
        if level == "AUTOCOMMIT":
            dbapi_connection.autocommit(True)
        else:
            dbapi_connection.autocommit(False)
            super().set_isolation_level(dbapi_connection, level)


dialect = MSDialect_pymssql
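A hedged illustration of ``create_connect_args`` above: pymssql takes host and port combined, so the URL port is folded into the host string:

    from sqlalchemy.engine import make_url

    url = make_url("mssql+pymssql://scott:tiger@host:1433/db")
    # create_connect_args(url) yields, roughly:
    #   ([], {"user": "scott", "password": "tiger",
    #         "host": "host:1433", "database": "db"})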
@@ -0,0 +1,760 @@
# dialects/mssql/pyodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""
.. dialect:: mssql+pyodbc
    :name: PyODBC
    :dbapi: pyodbc
    :connectstring: mssql+pyodbc://<username>:<password>@<dsnname>
    :url: https://pypi.org/project/pyodbc/

Connecting to PyODBC
--------------------

The URL here is translated to a PyODBC connection string, as
detailed in `ConnectionStrings <https://code.google.com/p/pyodbc/wiki/ConnectionStrings>`_.

DSN Connections
^^^^^^^^^^^^^^^

A DSN connection in ODBC means that a pre-existing ODBC datasource is
configured on the client machine. The application then specifies the name
of this datasource, which encompasses details such as the specific ODBC driver
in use as well as the network address of the database. Assuming a datasource
is configured on the client, a basic DSN-based connection looks like::

    engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")

The above will pass the following connection string to PyODBC:

.. sourcecode:: text

    DSN=some_dsn;UID=scott;PWD=tiger

If the username and password are omitted, the DSN form will also add
the ``Trusted_Connection=yes`` directive to the ODBC string.

Hostname Connections
^^^^^^^^^^^^^^^^^^^^

Hostname-based connections are also supported by pyodbc. These are often
easier to use than a DSN and have the additional advantage that the specific
database name to connect towards may be specified locally in the URL, rather
than it being fixed as part of a datasource configuration.

When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
    )

The ``driver`` keyword is significant to the pyodbc dialect and must be
specified in lowercase.

Any other names passed in the query string are passed through in the pyodbc
connect string, such as ``authentication``, ``TrustServerCertificate``, etc.
Multiple keyword arguments must be separated by an ampersand (``&``); these
will be translated to semicolons when the pyodbc connect string is generated
internally::

    e = create_engine(
        "mssql+pyodbc://scott:tiger@mssql2017:1433/test?"
        "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
        "&authentication=ActiveDirectoryIntegrated"
    )

The equivalent URL can be constructed using :class:`_sa.engine.URL`::

    from sqlalchemy.engine import URL

    connection_url = URL.create(
        "mssql+pyodbc",
        username="scott",
        password="tiger",
        host="mssql2017",
        port=1433,
        database="test",
        query={
            "driver": "ODBC Driver 18 for SQL Server",
            "TrustServerCertificate": "yes",
            "authentication": "ActiveDirectoryIntegrated",
        },
    )

Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

A PyODBC connection string can also be sent in pyodbc's format directly, as
specified in `the PyODBC documentation
<https://github.com/mkleehammer/pyodbc/wiki/Connecting-to-databases>`_,
using the parameter ``odbc_connect``. A :class:`_sa.engine.URL` object
can help make this easier::

    from sqlalchemy.engine import URL

    connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
    connection_url = URL.create(
        "mssql+pyodbc", query={"odbc_connect": connection_string}
    )

    engine = create_engine(connection_url)

.. _mssql_pyodbc_access_tokens:

Connecting to databases with access tokens
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Some database servers are set up to only accept access tokens for login. For
example, SQL Server allows the use of Azure Active Directory tokens to connect
to databases. This requires creating a credential object using the
``azure-identity`` library. More information about the authentication step can be
found in `Microsoft's documentation
<https://docs.microsoft.com/en-us/azure/developer/python/azure-sdk-authenticate?tabs=bash>`_.

After getting an engine, the credentials need to be sent to ``pyodbc.connect``
each time a connection is requested. One way to do this is to set up an event
listener on the engine that adds the credential token to the dialect's connect
call. This is discussed more generally in :ref:`engines_dynamic_tokens`. For
SQL Server in particular, this is passed as an ODBC connection attribute with
a data structure `described by Microsoft
<https://docs.microsoft.com/en-us/sql/connect/odbc/using-azure-active-directory#authenticating-with-an-access-token>`_.

The following code snippet will create an engine that connects to an Azure SQL
database using Azure credentials::

    import struct

    from sqlalchemy import create_engine, event
    from sqlalchemy.engine.url import URL
    from azure import identity

    # Connection option for access tokens, as defined in msodbcsql.h
    SQL_COPT_SS_ACCESS_TOKEN = 1256
    TOKEN_URL = "https://database.windows.net/"  # The token URL for any Azure SQL database

    connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"

    engine = create_engine(connection_string)

    azure_credentials = identity.DefaultAzureCredential()


    @event.listens_for(engine, "do_connect")
    def provide_token(dialect, conn_rec, cargs, cparams):
        # remove the "Trusted_Connection" parameter that SQLAlchemy adds
        cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")

        # create token credential
        raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
            "utf-16-le"
        )
        token_struct = struct.pack(
            f"<I{len(raw_token)}s", len(raw_token), raw_token
        )

        # apply it to keyword arguments
        cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}

.. tip::

    The ``Trusted_Connection`` token is currently added by the SQLAlchemy
    pyodbc dialect when no username or password is present. This needs
    to be removed per Microsoft's
    `documentation for Azure access tokens
    <https://docs.microsoft.com/en-us/sql/connect/odbc/using-azure-active-directory#authenticating-with-an-access-token>`_,
    stating that a connection string when using an access token must not contain
    ``UID``, ``PWD``, ``Authentication`` or ``Trusted_Connection`` parameters.

.. _azure_synapse_ignore_no_transaction_on_rollback:

Avoiding transaction-related exceptions on Azure Synapse Analytics
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Azure Synapse Analytics has a significant difference in its transaction
handling compared to plain SQL Server; in some cases an error within a Synapse
transaction can cause it to be arbitrarily terminated on the server side, which
then causes the DBAPI ``.rollback()`` method (as well as ``.commit()``) to
fail. The issue prevents the usual DBAPI contract of allowing ``.rollback()``
to pass silently if no transaction is present as the driver does not expect
this condition. The symptom of this failure is an exception with a message
resembling 'No corresponding transaction found. (111214)' when attempting to
emit a ``.rollback()`` after an operation had a failure of some kind.

This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
the SQL Server dialect via the :func:`_sa.create_engine` function as follows::

    engine = create_engine(
        connection_url, ignore_no_transaction_on_rollback=True
    )

Using the above parameter, the dialect will catch ``ProgrammingError``
exceptions raised during ``connection.rollback()`` and emit a warning
if the error message contains code ``111214``, but will not raise
an exception.

.. versionadded:: 1.4.40 Added the
   ``ignore_no_transaction_on_rollback=True`` parameter.

Enable autocommit for Azure SQL Data Warehouse (DW) connections
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Azure SQL Data Warehouse does not support transactions,
and that can cause problems with SQLAlchemy's "autobegin" (and implicit
commit/rollback) behavior. We can avoid these problems by enabling autocommit
at both the pyodbc and engine levels::

    connection_url = sa.engine.URL.create(
        "mssql+pyodbc",
        username="scott",
        password="tiger",
        host="dw.azure.example.com",
        database="mydb",
        query={
            "driver": "ODBC Driver 17 for SQL Server",
            "autocommit": "True",
        },
    )

    engine = create_engine(connection_url).execution_options(
        isolation_level="AUTOCOMMIT"
    )

Avoiding sending large string parameters as TEXT/NTEXT
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

By default, for historical reasons, Microsoft's ODBC drivers for SQL Server
send long string parameters (greater than 4000 SBCS characters or 2000 Unicode
characters) as TEXT/NTEXT values. TEXT and NTEXT have been deprecated for many
years and are starting to cause compatibility issues with newer versions of
SQL Server/Azure. For example, see `this
issue <https://github.com/mkleehammer/pyodbc/issues/835>`_.

Starting with ODBC Driver 18 for SQL Server we can override the legacy
behavior and pass long strings as varchar(max)/nvarchar(max) using the
``LongAsMax=Yes`` connection string parameter::

    connection_url = sa.engine.URL.create(
        "mssql+pyodbc",
        username="scott",
        password="tiger",
        host="mssqlserver.example.com",
        database="mydb",
        query={
            "driver": "ODBC Driver 18 for SQL Server",
            "LongAsMax": "Yes",
        },
    )

Pyodbc Pooling / connection close behavior
------------------------------------------

PyODBC uses internal `pooling
<https://github.com/mkleehammer/pyodbc/wiki/The-pyodbc-Module#pooling>`_ by
default, which means connections will be longer lived than they are within
SQLAlchemy itself. As SQLAlchemy has its own pooling behavior, it is often
preferable to disable this behavior. This behavior can only be disabled
globally at the PyODBC module level, **before** any connections are made::

    import pyodbc

    pyodbc.pooling = False

    # don't use the engine before pooling is set to False
    engine = create_engine("mssql+pyodbc://user:pass@dsn")

If this variable is left at its default value of ``True``, **the application
will continue to maintain active database connections**, even when the
SQLAlchemy engine itself fully discards a connection or if the engine is
disposed.

.. seealso::

    `pooling <https://github.com/mkleehammer/pyodbc/wiki/The-pyodbc-Module#pooling>`_ -
    in the PyODBC documentation.

Driver / Unicode Support
-------------------------

PyODBC works best with Microsoft ODBC drivers, particularly in the area
of Unicode support on both Python 2 and Python 3.

Using the FreeTDS ODBC drivers on Linux or OSX with PyODBC is **not**
recommended; there have historically been many Unicode-related issues
in this area, including before Microsoft offered ODBC drivers for Linux
and OSX. Now that Microsoft offers drivers for all platforms, these are
recommended for PyODBC use. FreeTDS remains relevant for
non-ODBC drivers such as pymssql where it works very well.


Rowcount Support
----------------

Previous limitations with the SQLAlchemy ORM's "versioned rows" feature with
Pyodbc have been resolved as of SQLAlchemy 2.0.5. See the notes at
:ref:`mssql_rowcount_versioning`.

.. _mssql_pyodbc_fastexecutemany:

Fast Executemany Mode
---------------------

The PyODBC driver includes support for a "fast executemany" mode of execution
which greatly reduces round trips for a DBAPI ``executemany()`` call when using
Microsoft ODBC drivers, for **limited size batches that fit in memory**. The
feature is enabled by setting the attribute ``.fast_executemany`` on the DBAPI
cursor when an executemany call is to be used. The SQLAlchemy PyODBC SQL
Server dialect supports this parameter by passing the
``fast_executemany`` parameter to
:func:`_sa.create_engine`, when using the **Microsoft ODBC driver only**::

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
        fast_executemany=True,
    )

.. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
   intended effect of this PyODBC feature taking effect for all INSERT
   statements that are executed with multiple parameter sets, which don't
   include RETURNING. Previously, SQLAlchemy 2.0's :term:`insertmanyvalues`
   feature would cause ``fast_executemany`` to not be used in most cases
   even if specified.

.. versionadded:: 1.3

.. seealso::

    `fast executemany <https://github.com/mkleehammer/pyodbc/wiki/Features-beyond-the-DB-API#fast_executemany>`_
    - on github

.. _mssql_pyodbc_setinputsizes:

Setinputsizes Support
-----------------------

As of version 2.0, the pyodbc ``cursor.setinputsizes()`` method is used for
all statement executions, except for ``cursor.executemany()`` calls when
fast_executemany=True where it is not supported (assuming
:ref:`insertmanyvalues <engine_insertmanyvalues>` is kept enabled,
"fastexecutemany" will not take place for INSERT statements in any case).

The use of ``cursor.setinputsizes()`` can be disabled by passing
``use_setinputsizes=False`` to :func:`_sa.create_engine`.

When ``use_setinputsizes`` is left at its default of ``True``, the
specific per-type symbols passed to ``cursor.setinputsizes()`` can be
programmatically customized using the :meth:`.DialectEvents.do_setinputsizes`
hook. See that method for usage examples.

.. versionchanged:: 2.0 The mssql+pyodbc dialect now defaults to using
   ``use_setinputsizes=True`` for all statement executions with the exception of
   cursor.executemany() calls when fast_executemany=True. The behavior can
   be turned off by passing ``use_setinputsizes=False`` to
   :func:`_sa.create_engine`.

"""  # noqa

import datetime
import decimal
import re
import struct

from .base import _MSDateTime
from .base import _MSUnicode
from .base import _MSUnicodeText
from .base import BINARY
from .base import DATETIMEOFFSET
from .base import MSDialect
from .base import MSExecutionContext
from .base import VARBINARY
from .json import JSON as _MSJson
from .json import JSONIndexType as _MSJsonIndexType
from .json import JSONPathType as _MSJsonPathType
from ... import exc
from ... import types as sqltypes
from ... import util
from ...connectors.pyodbc import PyODBCConnector
from ...engine import cursor as _cursor


class _ms_numeric_pyodbc:
    """Turns Decimals with adjusted() < 0 or > 7 into strings.

    The routines here are needed for older pyodbc versions
    as well as current mxODBC versions.

    """

    def bind_processor(self, dialect):
        super_process = super().bind_processor(dialect)

        if not dialect._need_decimal_fix:
            return super_process

        def process(value):
            if self.asdecimal and isinstance(value, decimal.Decimal):
                adjusted = value.adjusted()
                if adjusted < 0:
                    return self._small_dec_to_string(value)
                elif adjusted > 7:
                    return self._large_dec_to_string(value)

            if super_process:
                return super_process(value)
            else:
                return value

        return process

    # these routines needed for older versions of pyodbc.
    # as of 2.1.8 this logic is integrated.

    def _small_dec_to_string(self, value):
        return "%s0.%s%s" % (
            (value < 0 and "-" or ""),
            "0" * (abs(value.adjusted()) - 1),
            "".join([str(nint) for nint in value.as_tuple()[1]]),
        )

    def _large_dec_to_string(self, value):
        _int = value.as_tuple()[1]
        if "E" in str(value):
            result = "%s%s%s" % (
                (value < 0 and "-" or ""),
                "".join([str(s) for s in _int]),
                "0" * (value.adjusted() - (len(_int) - 1)),
            )
        else:
            if (len(_int) - 1) > value.adjusted():
                result = "%s%s.%s" % (
                    (value < 0 and "-" or ""),
                    "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
                    "".join([str(s) for s in _int][value.adjusted() + 1 :]),
                )
            else:
                result = "%s%s" % (
                    (value < 0 and "-" or ""),
                    "".join([str(s) for s in _int][0 : value.adjusted() + 1]),
                )
        return result


class _MSNumeric_pyodbc(_ms_numeric_pyodbc, sqltypes.Numeric):
    pass


class _MSFloat_pyodbc(_ms_numeric_pyodbc, sqltypes.Float):
    pass


class _ms_binary_pyodbc:
    """Wraps binary values in dialect-specific Binary wrapper.

    If the value is null, return a pyodbc-specific BinaryNull
    object to prevent pyODBC [and FreeTDS] from defaulting binary
    NULL types to SQLWCHAR and causing implicit conversion errors.

    """

    def bind_processor(self, dialect):
        if dialect.dbapi is None:
            return None

        DBAPIBinary = dialect.dbapi.Binary

        def process(value):
            if value is not None:
                return DBAPIBinary(value)
            else:
                # pyodbc-specific
                return dialect.dbapi.BinaryNull

        return process


class _ODBCDateTimeBindProcessor:
    """Add bind processors to handle datetimeoffset behaviors"""

    has_tz = False

    def bind_processor(self, dialect):
        def process(value):
            if value is None:
                return None
            elif isinstance(value, str):
                # if a string was passed directly, allow it through
                return value
            elif not value.tzinfo or (not self.timezone and not self.has_tz):
                # for DateTime(timezone=False)
                return value
            else:
                # for DATETIMEOFFSET or DateTime(timezone=True)
                #
                # Convert to string format required by T-SQL
                dto_string = value.strftime("%Y-%m-%d %H:%M:%S.%f %z")
                # offset needs a colon, e.g., -0700 -> -07:00
                # "UTC offset in the form (+-)HHMM[SS[.ffffff]]"
                # backend currently rejects seconds / fractional seconds
                dto_string = re.sub(
                    r"([\+\-]\d{2})([\d\.]+)$", r"\1:\2", dto_string
                )
                return dto_string

        return process


class _ODBCDateTime(_ODBCDateTimeBindProcessor, _MSDateTime):
    pass


class _ODBCDATETIMEOFFSET(_ODBCDateTimeBindProcessor, DATETIMEOFFSET):
    has_tz = True


class _VARBINARY_pyodbc(_ms_binary_pyodbc, VARBINARY):
    pass


class _BINARY_pyodbc(_ms_binary_pyodbc, BINARY):
    pass


class _String_pyodbc(sqltypes.String):
    def get_dbapi_type(self, dbapi):
        if self.length in (None, "max") or self.length >= 2000:
            return (dbapi.SQL_VARCHAR, 0, 0)
        else:
            return dbapi.SQL_VARCHAR


class _Unicode_pyodbc(_MSUnicode):
    def get_dbapi_type(self, dbapi):
        if self.length in (None, "max") or self.length >= 2000:
            return (dbapi.SQL_WVARCHAR, 0, 0)
        else:
            return dbapi.SQL_WVARCHAR


class _UnicodeText_pyodbc(_MSUnicodeText):
    def get_dbapi_type(self, dbapi):
        if self.length in (None, "max") or self.length >= 2000:
            return (dbapi.SQL_WVARCHAR, 0, 0)
        else:
            return dbapi.SQL_WVARCHAR


class _JSON_pyodbc(_MSJson):
    def get_dbapi_type(self, dbapi):
        return (dbapi.SQL_WVARCHAR, 0, 0)


class _JSONIndexType_pyodbc(_MSJsonIndexType):
    def get_dbapi_type(self, dbapi):
        return dbapi.SQL_WVARCHAR


class _JSONPathType_pyodbc(_MSJsonPathType):
    def get_dbapi_type(self, dbapi):
        return dbapi.SQL_WVARCHAR


class MSExecutionContext_pyodbc(MSExecutionContext):
    _embedded_scope_identity = False

    def pre_exec(self):
        """where appropriate, issue "select scope_identity()" in the same
        statement.

        Background on why "scope_identity()" is preferable to "@@identity":
        https://msdn.microsoft.com/en-us/library/ms190315.aspx

        Background on why we attempt to embed "scope_identity()" into the same
        statement as the INSERT:
        https://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values?

        """

        super().pre_exec()

        # don't embed the scope_identity select into an
        # "INSERT .. DEFAULT VALUES"
        if (
            self._select_lastrowid
            and self.dialect.use_scope_identity
            and len(self.parameters[0])
        ):
            self._embedded_scope_identity = True

            self.statement += "; select scope_identity()"

    def post_exec(self):
        if self._embedded_scope_identity:
            # Fetch the last inserted id from the manipulated statement
            # We may have to skip over a number of result sets with
            # no data (due to triggers, etc.)
            while True:
                try:
                    # fetchall() ensures the cursor is consumed
                    # without closing it (FreeTDS particularly)
                    rows = self.cursor.fetchall()
                except self.dialect.dbapi.Error:
                    # no way around this - nextset() consumes the previous set
                    # so we need to just keep flipping
                    self.cursor.nextset()
                else:
                    if not rows:
                        # async adapter drivers just return None here
                        self.cursor.nextset()
                        continue
                    row = rows[0]
                    break

            self._lastrowid = int(row[0])

            self.cursor_fetch_strategy = _cursor._NO_CURSOR_DML
        else:
            super().post_exec()


class MSDialect_pyodbc(PyODBCConnector, MSDialect):
    supports_statement_cache = True

    # note this parameter is no longer used by the ORM or default dialect
    # see #9414
    supports_sane_rowcount_returning = False

    execution_ctx_cls = MSExecutionContext_pyodbc

    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.Numeric: _MSNumeric_pyodbc,
            sqltypes.Float: _MSFloat_pyodbc,
            BINARY: _BINARY_pyodbc,
            # support DateTime(timezone=True)
            sqltypes.DateTime: _ODBCDateTime,
            DATETIMEOFFSET: _ODBCDATETIMEOFFSET,
            # SQL Server dialect has a VARBINARY that is just to support
            # "deprecate_large_types" w/ VARBINARY(max), but also we must
            # handle the usual SQL standard VARBINARY
            VARBINARY: _VARBINARY_pyodbc,
            sqltypes.VARBINARY: _VARBINARY_pyodbc,
            sqltypes.LargeBinary: _VARBINARY_pyodbc,
            sqltypes.String: _String_pyodbc,
            sqltypes.Unicode: _Unicode_pyodbc,
            sqltypes.UnicodeText: _UnicodeText_pyodbc,
            sqltypes.JSON: _JSON_pyodbc,
            sqltypes.JSON.JSONIndexType: _JSONIndexType_pyodbc,
            sqltypes.JSON.JSONPathType: _JSONPathType_pyodbc,
            # this excludes Enum from the string/VARCHAR thing for now
            # it looks like Enum's adaptation doesn't really support the
            # String type itself having a dialect-level impl
            sqltypes.Enum: sqltypes.Enum,
        },
    )

    def __init__(
        self,
        fast_executemany=False,
        use_setinputsizes=True,
        **params,
    ):
        super().__init__(use_setinputsizes=use_setinputsizes, **params)
        self.use_scope_identity = (
            self.use_scope_identity
            and self.dbapi
            and hasattr(self.dbapi.Cursor, "nextset")
        )
        self._need_decimal_fix = self.dbapi and self._dbapi_version() < (
            2,
            1,
            8,
        )
        self.fast_executemany = fast_executemany
        if fast_executemany:
            self.use_insertmanyvalues_wo_returning = False

    def _get_server_version_info(self, connection):
        try:
            # "Version of the instance of SQL Server, in the form
            # of 'major.minor.build.revision'"
            raw = connection.exec_driver_sql(
                "SELECT CAST(SERVERPROPERTY('ProductVersion') AS VARCHAR)"
            ).scalar()
        except exc.DBAPIError:
            # SQL Server docs indicate this function isn't present prior to
            # 2008. Before we had the VARCHAR cast above, pyodbc would also
            # fail on this query.
            return super()._get_server_version_info(connection)
        else:
            version = []
            r = re.compile(r"[.\-]")
            for n in r.split(raw):
                try:
                    version.append(int(n))
                except ValueError:
                    pass
            return tuple(version)

    def on_connect(self):
        super_ = super().on_connect()

        def on_connect(conn):
            if super_ is not None:
                super_(conn)

            self._setup_timestampoffset_type(conn)

        return on_connect

    def _setup_timestampoffset_type(self, connection):
        # output converter function for datetimeoffset
        def _handle_datetimeoffset(dto_value):
            tup = struct.unpack("<6hI2h", dto_value)
            return datetime.datetime(
                tup[0],
                tup[1],
                tup[2],
                tup[3],
                tup[4],
                tup[5],
                tup[6] // 1000,
                datetime.timezone(
                    datetime.timedelta(hours=tup[7], minutes=tup[8])
                ),
            )

        odbc_SQL_SS_TIMESTAMPOFFSET = -155  # as defined in SQLNCLI.h
        connection.add_output_converter(
            odbc_SQL_SS_TIMESTAMPOFFSET, _handle_datetimeoffset
        )

    def do_executemany(self, cursor, statement, parameters, context=None):
|
||||
if self.fast_executemany:
|
||||
cursor.fast_executemany = True
|
||||
super().do_executemany(cursor, statement, parameters, context=context)
|
||||
|
||||
def is_disconnect(self, e, connection, cursor):
|
||||
if isinstance(e, self.dbapi.Error):
|
||||
code = e.args[0]
|
||||
if code in {
|
||||
"08S01",
|
||||
"01000",
|
||||
"01002",
|
||||
"08003",
|
||||
"08007",
|
||||
"08S02",
|
||||
"08001",
|
||||
"HYT00",
|
||||
"HY010",
|
||||
"10054",
|
||||
}:
|
||||
return True
|
||||
return super().is_disconnect(e, connection, cursor)
|
||||
|
||||
|
||||
dialect = MSDialect_pyodbc
|
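
# --- Editor's addition: a minimal standalone sketch, not part of the vendored
# file. It shows how the "<6hI2h" layout decoded by _handle_datetimeoffset
# above maps to SQL Server's DATETIMEOFFSET wire value: six signed shorts
# (year, month, day, hour, minute, second), an unsigned int of nanoseconds,
# and two signed shorts for the UTC offset (hours, minutes). The sample value
# is invented for illustration.
import datetime
import struct

raw = struct.pack("<6hI2h", 2025, 4, 24, 11, 44, 19, 500_000_000, 5, 30)
tup = struct.unpack("<6hI2h", raw)
dto = datetime.datetime(
    tup[0], tup[1], tup[2], tup[3], tup[4], tup[5],
    tup[6] // 1000,  # nanoseconds -> microseconds, as the converter above does
    datetime.timezone(datetime.timedelta(hours=tup[7], minutes=tup[8])),
)
assert dto.utcoffset() == datetime.timedelta(hours=5, minutes=30)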
@ -0,0 +1,104 @@
# dialects/mysql/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


from . import aiomysql  # noqa
from . import asyncmy  # noqa
from . import base  # noqa
from . import cymysql  # noqa
from . import mariadbconnector  # noqa
from . import mysqlconnector  # noqa
from . import mysqldb  # noqa
from . import pymysql  # noqa
from . import pyodbc  # noqa
from .base import BIGINT
from .base import BINARY
from .base import BIT
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import DOUBLE
from .base import ENUM
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import LONGBLOB
from .base import LONGTEXT
from .base import MEDIUMBLOB
from .base import MEDIUMINT
from .base import MEDIUMTEXT
from .base import NCHAR
from .base import NUMERIC
from .base import NVARCHAR
from .base import REAL
from .base import SET
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import TINYBLOB
from .base import TINYINT
from .base import TINYTEXT
from .base import VARBINARY
from .base import VARCHAR
from .base import YEAR
from .dml import Insert
from .dml import insert
from .expression import match
from .mariadb import INET4
from .mariadb import INET6

# default dialect
base.dialect = dialect = mysqldb.dialect

__all__ = (
    "BIGINT",
    "BINARY",
    "BIT",
    "BLOB",
    "BOOLEAN",
    "CHAR",
    "DATE",
    "DATETIME",
    "DECIMAL",
    "DOUBLE",
    "ENUM",
    "FLOAT",
    "INET4",
    "INET6",
    "INTEGER",
    "JSON",
    "LONGBLOB",
    "LONGTEXT",
    "MEDIUMBLOB",
    "MEDIUMINT",
    "MEDIUMTEXT",
    "NCHAR",
    "NVARCHAR",
    "NUMERIC",
    "SET",
    "SMALLINT",
    "REAL",
    "TEXT",
    "TIME",
    "TIMESTAMP",
    "TINYBLOB",
    "TINYINT",
    "TINYTEXT",
    "VARBINARY",
    "VARCHAR",
    "YEAR",
    "dialect",
    "insert",
    "Insert",
    "match",
)
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,335 @@
# dialects/mysql/aiomysql.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""
.. dialect:: mysql+aiomysql
    :name: aiomysql
    :dbapi: aiomysql
    :connectstring: mysql+aiomysql://user:password@host:port/dbname[?key=value&key=value...]
    :url: https://github.com/aio-libs/aiomysql

The aiomysql dialect is SQLAlchemy's second Python asyncio dialect.

Using a special asyncio mediation layer, the aiomysql dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.

This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::

    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine(
        "mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4"
    )

"""  # noqa
from collections import deque

from .pymysql import MySQLDialect_pymysql
from ... import pool
from ... import util
from ...engine import AdaptedConnection
from ...util.concurrency import asyncio
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only


class AsyncAdapt_aiomysql_cursor:
    # TODO: base on connectors/asyncio.py
    # see #10415
    server_side = False
    __slots__ = (
        "_adapt_connection",
        "_connection",
        "await_",
        "_cursor",
        "_rows",
    )

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        self.await_ = adapt_connection.await_

        cursor = self._connection.cursor(adapt_connection.dbapi.Cursor)

        # see https://github.com/aio-libs/aiomysql/issues/543
        self._cursor = self.await_(cursor.__aenter__())
        self._rows = deque()

    @property
    def description(self):
        return self._cursor.description

    @property
    def rowcount(self):
        return self._cursor.rowcount

    @property
    def arraysize(self):
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value):
        self._cursor.arraysize = value

    @property
    def lastrowid(self):
        return self._cursor.lastrowid

    def close(self):
        # note we aren't actually closing the cursor here,
        # we are just letting GC do it. to allow this to be async
        # we would need the Result to change how it does "Safe close cursor".
        # MySQL "cursors" don't actually have state to be "closed" besides
        # exhausting rows, which we already have done for sync cursor.
        # another option would be to emulate aiosqlite dialect and assign
        # cursor only if we are doing server side cursor operation.
        self._rows.clear()

    def execute(self, operation, parameters=None):
        return self.await_(self._execute_async(operation, parameters))

    def executemany(self, operation, seq_of_parameters):
        return self.await_(
            self._executemany_async(operation, seq_of_parameters)
        )

    async def _execute_async(self, operation, parameters):
        async with self._adapt_connection._execute_mutex:
            result = await self._cursor.execute(operation, parameters)

            if not self.server_side:
                # aiomysql has a "fake" async result, so we have to pull it out
                # of that here since our default result is not async.
                # we could just as easily grab "_rows" here and be done with it
                # but this is safer.
                self._rows = deque(await self._cursor.fetchall())
            return result

    async def _executemany_async(self, operation, seq_of_parameters):
        async with self._adapt_connection._execute_mutex:
            return await self._cursor.executemany(operation, seq_of_parameters)

    def setinputsizes(self, *inputsizes):
        pass

    def __iter__(self):
        while self._rows:
            yield self._rows.popleft()

    def fetchone(self):
        if self._rows:
            return self._rows.popleft()
        else:
            return None

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize

        rr = self._rows
        return [rr.popleft() for _ in range(min(size, len(rr)))]

    def fetchall(self):
        retval = list(self._rows)
        self._rows.clear()
        return retval


class AsyncAdapt_aiomysql_ss_cursor(AsyncAdapt_aiomysql_cursor):
    # TODO: base on connectors/asyncio.py
    # see #10415
    __slots__ = ()
    server_side = True

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        self.await_ = adapt_connection.await_

        cursor = self._connection.cursor(adapt_connection.dbapi.SSCursor)

        self._cursor = self.await_(cursor.__aenter__())

    def close(self):
        if self._cursor is not None:
            self.await_(self._cursor.close())
            self._cursor = None

    def fetchone(self):
        return self.await_(self._cursor.fetchone())

    def fetchmany(self, size=None):
        return self.await_(self._cursor.fetchmany(size=size))

    def fetchall(self):
        return self.await_(self._cursor.fetchall())


class AsyncAdapt_aiomysql_connection(AdaptedConnection):
    # TODO: base on connectors/asyncio.py
    # see #10415
    await_ = staticmethod(await_only)
    __slots__ = ("dbapi", "_execute_mutex")

    def __init__(self, dbapi, connection):
        self.dbapi = dbapi
        self._connection = connection
        self._execute_mutex = asyncio.Lock()

    def ping(self, reconnect):
        return self.await_(self._connection.ping(reconnect))

    def character_set_name(self):
        return self._connection.character_set_name()

    def autocommit(self, value):
        self.await_(self._connection.autocommit(value))

    def cursor(self, server_side=False):
        if server_side:
            return AsyncAdapt_aiomysql_ss_cursor(self)
        else:
            return AsyncAdapt_aiomysql_cursor(self)

    def rollback(self):
        self.await_(self._connection.rollback())

    def commit(self):
        self.await_(self._connection.commit())

    def terminate(self):
        # it's not awaitable.
        self._connection.close()

    def close(self) -> None:
        self.await_(self._connection.ensure_closed())


class AsyncAdaptFallback_aiomysql_connection(AsyncAdapt_aiomysql_connection):
    # TODO: base on connectors/asyncio.py
    # see #10415
    __slots__ = ()

    await_ = staticmethod(await_fallback)


class AsyncAdapt_aiomysql_dbapi:
    def __init__(self, aiomysql, pymysql):
        self.aiomysql = aiomysql
        self.pymysql = pymysql
        self.paramstyle = "format"
        self._init_dbapi_attributes()
        self.Cursor, self.SSCursor = self._init_cursors_subclasses()

    def _init_dbapi_attributes(self):
        for name in (
            "Warning",
            "Error",
            "InterfaceError",
            "DataError",
            "DatabaseError",
            "OperationalError",
            "IntegrityError",
            "ProgrammingError",
            "InternalError",
            "NotSupportedError",
        ):
            setattr(self, name, getattr(self.aiomysql, name))

        for name in (
            "NUMBER",
            "STRING",
            "DATETIME",
            "BINARY",
            "TIMESTAMP",
            "Binary",
        ):
            setattr(self, name, getattr(self.pymysql, name))

    def connect(self, *arg, **kw):
        async_fallback = kw.pop("async_fallback", False)
        creator_fn = kw.pop("async_creator_fn", self.aiomysql.connect)

        if util.asbool(async_fallback):
            return AsyncAdaptFallback_aiomysql_connection(
                self,
                await_fallback(creator_fn(*arg, **kw)),
            )
        else:
            return AsyncAdapt_aiomysql_connection(
                self,
                await_only(creator_fn(*arg, **kw)),
            )

    def _init_cursors_subclasses(self):
        # suppress unconditional warning emitted by aiomysql
        class Cursor(self.aiomysql.Cursor):
            async def _show_warnings(self, conn):
                pass

        class SSCursor(self.aiomysql.SSCursor):
            async def _show_warnings(self, conn):
                pass

        return Cursor, SSCursor


class MySQLDialect_aiomysql(MySQLDialect_pymysql):
    driver = "aiomysql"
    supports_statement_cache = True

    supports_server_side_cursors = True
    _sscursor = AsyncAdapt_aiomysql_ss_cursor

    is_async = True
    has_terminate = True

    @classmethod
    def import_dbapi(cls):
        return AsyncAdapt_aiomysql_dbapi(
            __import__("aiomysql"), __import__("pymysql")
        )

    @classmethod
    def get_pool_class(cls, url):
        async_fallback = url.query.get("async_fallback", False)

        if util.asbool(async_fallback):
            return pool.FallbackAsyncAdaptedQueuePool
        else:
            return pool.AsyncAdaptedQueuePool

    def do_terminate(self, dbapi_connection) -> None:
        dbapi_connection.terminate()

    def create_connect_args(self, url):
        return super().create_connect_args(
            url, _translate_args=dict(username="user", database="db")
        )

    def is_disconnect(self, e, connection, cursor):
        if super().is_disconnect(e, connection, cursor):
            return True
        else:
            str_e = str(e).lower()
            return "not connected" in str_e

    def _found_rows_client_flag(self):
        from pymysql.constants import CLIENT

        return CLIENT.FOUND_ROWS

    def get_driver_connection(self, connection):
        return connection._connection


dialect = MySQLDialect_aiomysql
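
# --- Editor's addition: a hedged usage sketch, not part of the vendored file.
# The server-side cursor class above (AsyncAdapt_aiomysql_ss_cursor) is what
# AsyncConnection.stream() runs through, avoiding the client-side fetchall()
# buffering done by AsyncAdapt_aiomysql_cursor. Credentials and database name
# are illustrative; running this requires a reachable MySQL server.
import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine


async def _demo():
    engine = create_async_engine("mysql+aiomysql://user:pass@localhost/test")
    async with engine.connect() as conn:
        # stream() requests a server-side cursor and yields rows incrementally
        result = await conn.stream(text("SELECT 1"))
        async for row in result:
            print(row)
    await engine.dispose()


# asyncio.run(_demo())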
@ -0,0 +1,339 @@
# dialects/mysql/asyncmy.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""
.. dialect:: mysql+asyncmy
    :name: asyncmy
    :dbapi: asyncmy
    :connectstring: mysql+asyncmy://user:password@host:port/dbname[?key=value&key=value...]
    :url: https://github.com/long2ice/asyncmy

Using a special asyncio mediation layer, the asyncmy dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.

This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::

    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine(
        "mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4"
    )

"""  # noqa
from collections import deque
from contextlib import asynccontextmanager

from .pymysql import MySQLDialect_pymysql
from ... import pool
from ... import util
from ...engine import AdaptedConnection
from ...util.concurrency import asyncio
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only


class AsyncAdapt_asyncmy_cursor:
    # TODO: base on connectors/asyncio.py
    # see #10415
    server_side = False
    __slots__ = (
        "_adapt_connection",
        "_connection",
        "await_",
        "_cursor",
        "_rows",
    )

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        self.await_ = adapt_connection.await_

        cursor = self._connection.cursor()

        self._cursor = self.await_(cursor.__aenter__())
        self._rows = deque()

    @property
    def description(self):
        return self._cursor.description

    @property
    def rowcount(self):
        return self._cursor.rowcount

    @property
    def arraysize(self):
        return self._cursor.arraysize

    @arraysize.setter
    def arraysize(self, value):
        self._cursor.arraysize = value

    @property
    def lastrowid(self):
        return self._cursor.lastrowid

    def close(self):
        # note we aren't actually closing the cursor here,
        # we are just letting GC do it. to allow this to be async
        # we would need the Result to change how it does "Safe close cursor".
        # MySQL "cursors" don't actually have state to be "closed" besides
        # exhausting rows, which we already have done for sync cursor.
        # another option would be to emulate aiosqlite dialect and assign
        # cursor only if we are doing server side cursor operation.
        self._rows.clear()

    def execute(self, operation, parameters=None):
        return self.await_(self._execute_async(operation, parameters))

    def executemany(self, operation, seq_of_parameters):
        return self.await_(
            self._executemany_async(operation, seq_of_parameters)
        )

    async def _execute_async(self, operation, parameters):
        async with self._adapt_connection._mutex_and_adapt_errors():
            if parameters is None:
                result = await self._cursor.execute(operation)
            else:
                result = await self._cursor.execute(operation, parameters)

            if not self.server_side:
                # asyncmy has a "fake" async result, so we have to pull it out
                # of that here since our default result is not async.
                # we could just as easily grab "_rows" here and be done with it
                # but this is safer.
                self._rows = deque(await self._cursor.fetchall())
            return result

    async def _executemany_async(self, operation, seq_of_parameters):
        async with self._adapt_connection._mutex_and_adapt_errors():
            return await self._cursor.executemany(operation, seq_of_parameters)

    def setinputsizes(self, *inputsizes):
        pass

    def __iter__(self):
        while self._rows:
            yield self._rows.popleft()

    def fetchone(self):
        if self._rows:
            return self._rows.popleft()
        else:
            return None

    def fetchmany(self, size=None):
        if size is None:
            size = self.arraysize

        rr = self._rows
        return [rr.popleft() for _ in range(min(size, len(rr)))]

    def fetchall(self):
        retval = list(self._rows)
        self._rows.clear()
        return retval


class AsyncAdapt_asyncmy_ss_cursor(AsyncAdapt_asyncmy_cursor):
    # TODO: base on connectors/asyncio.py
    # see #10415
    __slots__ = ()
    server_side = True

    def __init__(self, adapt_connection):
        self._adapt_connection = adapt_connection
        self._connection = adapt_connection._connection
        self.await_ = adapt_connection.await_

        cursor = self._connection.cursor(
            adapt_connection.dbapi.asyncmy.cursors.SSCursor
        )

        self._cursor = self.await_(cursor.__aenter__())

    def close(self):
        if self._cursor is not None:
            self.await_(self._cursor.close())
            self._cursor = None

    def fetchone(self):
        return self.await_(self._cursor.fetchone())

    def fetchmany(self, size=None):
        return self.await_(self._cursor.fetchmany(size=size))

    def fetchall(self):
        return self.await_(self._cursor.fetchall())


class AsyncAdapt_asyncmy_connection(AdaptedConnection):
    # TODO: base on connectors/asyncio.py
    # see #10415
    await_ = staticmethod(await_only)
    __slots__ = ("dbapi", "_execute_mutex")

    def __init__(self, dbapi, connection):
        self.dbapi = dbapi
        self._connection = connection
        self._execute_mutex = asyncio.Lock()

    @asynccontextmanager
    async def _mutex_and_adapt_errors(self):
        async with self._execute_mutex:
            try:
                yield
            except AttributeError:
                raise self.dbapi.InternalError(
                    "network operation failed due to asyncmy attribute error"
                )

    def ping(self, reconnect):
        assert not reconnect
        return self.await_(self._do_ping())

    async def _do_ping(self):
        async with self._mutex_and_adapt_errors():
            return await self._connection.ping(False)

    def character_set_name(self):
        return self._connection.character_set_name()

    def autocommit(self, value):
        self.await_(self._connection.autocommit(value))

    def cursor(self, server_side=False):
        if server_side:
            return AsyncAdapt_asyncmy_ss_cursor(self)
        else:
            return AsyncAdapt_asyncmy_cursor(self)

    def rollback(self):
        self.await_(self._connection.rollback())

    def commit(self):
        self.await_(self._connection.commit())

    def terminate(self):
        # it's not awaitable.
        self._connection.close()

    def close(self) -> None:
        self.await_(self._connection.ensure_closed())


class AsyncAdaptFallback_asyncmy_connection(AsyncAdapt_asyncmy_connection):
    __slots__ = ()

    await_ = staticmethod(await_fallback)


def _Binary(x):
    """Return x as a binary type."""
    return bytes(x)


class AsyncAdapt_asyncmy_dbapi:
    def __init__(self, asyncmy):
        self.asyncmy = asyncmy
        self.paramstyle = "format"
        self._init_dbapi_attributes()

    def _init_dbapi_attributes(self):
        for name in (
            "Warning",
            "Error",
            "InterfaceError",
            "DataError",
            "DatabaseError",
            "OperationalError",
            "IntegrityError",
            "ProgrammingError",
            "InternalError",
            "NotSupportedError",
        ):
            setattr(self, name, getattr(self.asyncmy.errors, name))

    STRING = util.symbol("STRING")
    NUMBER = util.symbol("NUMBER")
    BINARY = util.symbol("BINARY")
    DATETIME = util.symbol("DATETIME")
    TIMESTAMP = util.symbol("TIMESTAMP")
    Binary = staticmethod(_Binary)

    def connect(self, *arg, **kw):
        async_fallback = kw.pop("async_fallback", False)
        creator_fn = kw.pop("async_creator_fn", self.asyncmy.connect)

        if util.asbool(async_fallback):
            return AsyncAdaptFallback_asyncmy_connection(
                self,
                await_fallback(creator_fn(*arg, **kw)),
            )
        else:
            return AsyncAdapt_asyncmy_connection(
                self,
                await_only(creator_fn(*arg, **kw)),
            )


class MySQLDialect_asyncmy(MySQLDialect_pymysql):
    driver = "asyncmy"
    supports_statement_cache = True

    supports_server_side_cursors = True
    _sscursor = AsyncAdapt_asyncmy_ss_cursor

    is_async = True
    has_terminate = True

    @classmethod
    def import_dbapi(cls):
        return AsyncAdapt_asyncmy_dbapi(__import__("asyncmy"))

    @classmethod
    def get_pool_class(cls, url):
        async_fallback = url.query.get("async_fallback", False)

        if util.asbool(async_fallback):
            return pool.FallbackAsyncAdaptedQueuePool
        else:
            return pool.AsyncAdaptedQueuePool

    def do_terminate(self, dbapi_connection) -> None:
        dbapi_connection.terminate()

    def create_connect_args(self, url):
        return super().create_connect_args(
            url, _translate_args=dict(username="user", database="db")
        )

    def is_disconnect(self, e, connection, cursor):
        if super().is_disconnect(e, connection, cursor):
            return True
        else:
            str_e = str(e).lower()
            return (
                "not connected" in str_e or "network operation failed" in str_e
            )

    def _found_rows_client_flag(self):
        from asyncmy.constants import CLIENT

        return CLIENT.FOUND_ROWS

    def get_driver_connection(self, connection):
        return connection._connection


dialect = MySQLDialect_asyncmy
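
# --- Editor's addition: a standalone sketch, not part of the vendored file,
# of the error-adaptation pattern used by _mutex_and_adapt_errors above: an
# asynccontextmanager that serializes access behind a lock and re-raises
# AttributeError (seen from asyncmy when the socket is gone) as a DBAPI-style
# error. FakeInternalError stands in for the driver's InternalError.
import asyncio
from contextlib import asynccontextmanager


class FakeInternalError(Exception):
    pass


@asynccontextmanager
async def mutex_and_adapt_errors(lock):
    async with lock:
        try:
            yield
        except AttributeError:
            raise FakeInternalError(
                "network operation failed due to attribute error"
            )


async def _demo():
    lock = asyncio.Lock()
    try:
        async with mutex_and_adapt_errors(lock):
            raise AttributeError("socket gone")
    except FakeInternalError as err:
        print(err)


asyncio.run(_demo())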
3582 venv/lib/python3.11/site-packages/sqlalchemy/dialects/mysql/base.py Normal file
File diff suppressed because it is too large
@ -0,0 +1,84 @@
# dialects/mysql/cymysql.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r"""

.. dialect:: mysql+cymysql
    :name: CyMySQL
    :dbapi: cymysql
    :connectstring: mysql+cymysql://<username>:<password>@<host>/<dbname>[?<options>]
    :url: https://github.com/nakagami/CyMySQL

.. note::

    The CyMySQL dialect is **not tested as part of SQLAlchemy's continuous
    integration** and may have unresolved issues. The recommended MySQL
    dialects are mysqlclient and PyMySQL.

"""  # noqa

from .base import BIT
from .base import MySQLDialect
from .mysqldb import MySQLDialect_mysqldb
from ... import util


class _cymysqlBIT(BIT):
    def result_processor(self, dialect, coltype):
        """Convert MySQL's 64 bit, variable length binary string to an int."""

        def process(value):
            if value is not None:
                v = 0
                for i in iter(value):
                    v = v << 8 | i
                return v
            return value

        return process


class MySQLDialect_cymysql(MySQLDialect_mysqldb):
    driver = "cymysql"
    supports_statement_cache = True

    description_encoding = None
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = False
    supports_unicode_statements = True

    colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _cymysqlBIT})

    @classmethod
    def import_dbapi(cls):
        return __import__("cymysql")

    def _detect_charset(self, connection):
        return connection.connection.charset

    def _extract_error_code(self, exception):
        return exception.errno

    def is_disconnect(self, e, connection, cursor):
        if isinstance(e, self.dbapi.OperationalError):
            return self._extract_error_code(e) in (
                2006,
                2013,
                2014,
                2045,
                2055,
            )
        elif isinstance(e, self.dbapi.InterfaceError):
            # if underlying connection is closed,
            # this is the error you get
            return True
        else:
            return False


dialect = MySQLDialect_cymysql
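
# --- Editor's addition: a standalone sketch, not part of the vendored file,
# of the byte-folding loop _cymysqlBIT.result_processor uses: each byte of
# the big-endian BIT value is shifted into an accumulator with "v << 8 | i".
def bit_bytes_to_int(value: bytes) -> int:
    v = 0
    for i in value:
        v = v << 8 | i
    return v


assert bit_bytes_to_int(b"\x01\x00") == 256
assert bit_bytes_to_int(b"\xff") == 255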
@ -0,0 +1,225 @@
# dialects/mysql/dml.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations

from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
from typing import Tuple
from typing import Union

from ... import exc
from ... import util
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import KeyedColumnElement
from ...sql.expression import alias
from ...sql.selectable import NamedFromClause
from ...util.typing import Self


__all__ = ("Insert", "insert")


def insert(table: _DMLTableArgument) -> Insert:
    """Construct a MySQL/MariaDB-specific variant :class:`_mysql.Insert`
    construct.

    .. container:: inherited_member

        The :func:`sqlalchemy.dialects.mysql.insert` function creates
        a :class:`sqlalchemy.dialects.mysql.Insert`.  This class is based
        on the dialect-agnostic :class:`_sql.Insert` construct which may
        be constructed using the :func:`_sql.insert` function in
        SQLAlchemy Core.

    The :class:`_mysql.Insert` construct includes additional methods
    :meth:`_mysql.Insert.on_duplicate_key_update`.

    """
    return Insert(table)


class Insert(StandardInsert):
    """MySQL-specific implementation of INSERT.

    Adds methods for MySQL-specific syntaxes such as ON DUPLICATE KEY UPDATE.

    The :class:`~.mysql.Insert` object is created using the
    :func:`sqlalchemy.dialects.mysql.insert` function.

    .. versionadded:: 1.2

    """

    stringify_dialect = "mysql"
    inherit_cache = False

    @property
    def inserted(
        self,
    ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
        """Provide the "inserted" namespace for an ON DUPLICATE KEY UPDATE
        statement

        MySQL's ON DUPLICATE KEY UPDATE clause allows reference to the row
        that would be inserted, via a special function called ``VALUES()``.
        This attribute provides all columns in this row to be referenceable
        such that they will render within a ``VALUES()`` function inside the
        ON DUPLICATE KEY UPDATE clause.  The attribute is named ``.inserted``
        so as not to conflict with the existing
        :meth:`_expression.Insert.values` method.

        .. tip::  The :attr:`_mysql.Insert.inserted` attribute is an instance
           of :class:`_expression.ColumnCollection`, which provides an
           interface the same as that of the :attr:`_schema.Table.c`
           collection described at :ref:`metadata_tables_and_columns`.
           With this collection, ordinary names are accessible like attributes
           (e.g. ``stmt.inserted.some_column``), but special names and
           dictionary method names should be accessed using indexed access,
           such as ``stmt.inserted["column name"]`` or
           ``stmt.inserted["values"]``.  See the docstring for
           :class:`_expression.ColumnCollection` for further examples.

        .. seealso::

            :ref:`mysql_insert_on_duplicate_key_update` - example of how
            to use :attr:`_expression.Insert.inserted`

        """
        return self.inserted_alias.columns

    @util.memoized_property
    def inserted_alias(self) -> NamedFromClause:
        return alias(self.table, name="inserted")

    @_generative
    @_exclusive_against(
        "_post_values_clause",
        msgs={
            "_post_values_clause": "This Insert construct already "
            "has an ON DUPLICATE KEY clause present"
        },
    )
    def on_duplicate_key_update(self, *args: _UpdateArg, **kw: Any) -> Self:
        r"""
        Specifies the ON DUPLICATE KEY UPDATE clause.

        :param \**kw:  Column keys linked to UPDATE values.  The
         values may be any SQL expression or supported literal Python
         values.

         .. warning:: This dictionary does **not** take into account
            Python-specified default UPDATE values or generation functions,
            e.g. those specified using :paramref:`_schema.Column.onupdate`.
            These values will not be exercised for an ON DUPLICATE KEY UPDATE
            style of UPDATE, unless values are manually specified here.

        :param \*args: As an alternative to passing key/value parameters,
         a dictionary or list of 2-tuples can be passed as a single positional
         argument.

         Passing a single dictionary is equivalent to the keyword argument
         form::

            insert().on_duplicate_key_update({"name": "some name"})

         Passing a list of 2-tuples indicates that the parameter assignments
         in the UPDATE clause should be ordered as sent, in a manner similar
         to that described for the :class:`_expression.Update`
         construct overall
         in :ref:`tutorial_parameter_ordered_updates`::

            insert().on_duplicate_key_update(
                [
                    ("name", "some name"),
                    ("value", "some value"),
                ]
            )

        .. versionchanged:: 1.3 parameters can be specified as a dictionary
           or list of 2-tuples; the latter form provides for parameter
           ordering.


        .. versionadded:: 1.2

        .. seealso::

            :ref:`mysql_insert_on_duplicate_key_update`

        """
        if args and kw:
            raise exc.ArgumentError(
                "Can't pass kwargs and positional arguments simultaneously"
            )

        if args:
            if len(args) > 1:
                raise exc.ArgumentError(
                    "Only a single dictionary or list of tuples "
                    "is accepted positionally."
                )
            values = args[0]
        else:
            values = kw

        self._post_values_clause = OnDuplicateClause(
            self.inserted_alias, values
        )
        return self


class OnDuplicateClause(ClauseElement):
    __visit_name__ = "on_duplicate_key_update"

    _parameter_ordering: Optional[List[str]] = None

    update: Dict[str, Any]
    stringify_dialect = "mysql"

    def __init__(
        self, inserted_alias: NamedFromClause, update: _UpdateArg
    ) -> None:
        self.inserted_alias = inserted_alias

        # auto-detect that parameters should be ordered.  This is copied from
        # Update._process_colparams(), however we don't look for a special
        # flag in this case since we are not disambiguating from other use
        # cases as we are in Update.values().
        if isinstance(update, list) and (
            update and isinstance(update[0], tuple)
        ):
            self._parameter_ordering = [key for key, value in update]
            update = dict(update)

        if isinstance(update, dict):
            if not update:
                raise ValueError(
                    "update parameter dictionary must not be empty"
                )
        elif isinstance(update, ColumnCollection):
            update = dict(update)
        else:
            raise ValueError(
                "update parameter must be a non-empty dictionary "
                "or a ColumnCollection such as the `.c.` collection "
                "of a Table object"
            )
        self.update = update


_UpdateArg = Union[
    Mapping[Any, Any], List[Tuple[str, Any]], ColumnCollection[Any, Any]
]
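
# --- Editor's addition: a hedged usage sketch, not part of the vendored
# file, for the Insert.on_duplicate_key_update() API documented above; the
# table definition is illustrative.
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.dialects import mysql

users = Table(
    "users",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)

stmt = mysql.insert(users).values(id=1, name="spongebob")
# "inserted" names the row that would have been inserted; it renders as
# VALUES(name) inside the ON DUPLICATE KEY UPDATE clause
stmt = stmt.on_duplicate_key_update(name=stmt.inserted.name)
print(stmt.compile(dialect=mysql.dialect()))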
@ -0,0 +1,243 @@
# dialects/mysql/enumerated.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


import re

from .types import _StringType
from ... import exc
from ... import sql
from ... import util
from ...sql import sqltypes


class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _StringType):
    """MySQL ENUM type."""

    __visit_name__ = "ENUM"

    native_enum = True

    def __init__(self, *enums, **kw):
        """Construct an ENUM.

        E.g.::

            Column("myenum", ENUM("foo", "bar", "baz"))

        :param enums: The range of valid values for this ENUM.  Values in
          enums are not quoted, they will be escaped and surrounded by single
          quotes when generating the schema.  This object may also be a
          PEP-435-compliant enumerated type.

          .. versionadded:: 1.1 added support for PEP-435-compliant
             enumerated types.

        :param strict: This flag has no effect.

         .. versionchanged:: The MySQL ENUM type as well as the base Enum
            type now validates all Python data values.

        :param charset: Optional, a column-level character set for this string
          value.  Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value.  Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set.  Generates
          BINARY in schema.  This does not affect the type of data stored,
          only the collation of character data.

        """
        kw.pop("strict", None)
        self._enum_init(enums, kw)
        _StringType.__init__(self, length=self.length, **kw)

    @classmethod
    def adapt_emulated_to_native(cls, impl, **kw):
        """Produce a MySQL native :class:`.mysql.ENUM` from plain
        :class:`.Enum`.

        """
        kw.setdefault("validate_strings", impl.validate_strings)
        kw.setdefault("values_callable", impl.values_callable)
        kw.setdefault("omit_aliases", impl._omit_aliases)
        return cls(**kw)

    def _object_value_for_elem(self, elem):
        # mysql sends back a blank string for any value that
        # was persisted that was not in the enums; that is, it does no
        # validation on the incoming data, it "truncates" it to be
        # the blank string.  Return it straight.
        if elem == "":
            return elem
        else:
            return super()._object_value_for_elem(elem)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[ENUM, _StringType, sqltypes.Enum]
        )


class SET(_StringType):
    """MySQL SET type."""

    __visit_name__ = "SET"

    def __init__(self, *values, **kw):
        """Construct a SET.

        E.g.::

            Column("myset", SET("foo", "bar", "baz"))

        The list of potential values is required in the case that this
        set will be used to generate DDL for a table, or if the
        :paramref:`.SET.retrieve_as_bitwise` flag is set to True.

        :param values: The range of valid values for this SET.  The values
          are not quoted, they will be escaped and surrounded by single
          quotes when generating the schema.

        :param convert_unicode: Same flag as that of
         :paramref:`.String.convert_unicode`.

        :param collation: same as that of :paramref:`.String.collation`

        :param charset: same as that of :paramref:`.VARCHAR.charset`.

        :param ascii: same as that of :paramref:`.VARCHAR.ascii`.

        :param unicode: same as that of :paramref:`.VARCHAR.unicode`.

        :param binary: same as that of :paramref:`.VARCHAR.binary`.

        :param retrieve_as_bitwise: if True, the data for the set type will be
          persisted and selected using an integer value, where a set is
          coerced into a bitwise mask for persistence.  MySQL allows this mode
          which has the advantage of being able to store values unambiguously,
          such as the blank string ``''``.   The datatype will appear
          as the expression ``col + 0`` in a SELECT statement, so that the
          value is coerced into an integer value in result sets.
          This flag is required if one wishes
          to persist a set that can store the blank string ``''`` as a value.

          .. warning::

             When using :paramref:`.mysql.SET.retrieve_as_bitwise`, it is
             essential that the list of set values is expressed in the
             **exact same order** as exists on the MySQL database.

        """
        self.retrieve_as_bitwise = kw.pop("retrieve_as_bitwise", False)
        self.values = tuple(values)
        if not self.retrieve_as_bitwise and "" in values:
            raise exc.ArgumentError(
                "Can't use the blank value '' in a SET without "
                "setting retrieve_as_bitwise=True"
            )
        if self.retrieve_as_bitwise:
            self._bitmap = {
                value: 2**idx for idx, value in enumerate(self.values)
            }
            self._bitmap.update(
                (2**idx, value) for idx, value in enumerate(self.values)
            )
        length = max([len(v) for v in values] + [0])
        kw.setdefault("length", length)
        super().__init__(**kw)

    def column_expression(self, colexpr):
        if self.retrieve_as_bitwise:
            return sql.type_coerce(
                sql.type_coerce(colexpr, sqltypes.Integer) + 0, self
            )
        else:
            return colexpr

    def result_processor(self, dialect, coltype):
        if self.retrieve_as_bitwise:

            def process(value):
                if value is not None:
                    value = int(value)

                    return set(util.map_bits(self._bitmap.__getitem__, value))
                else:
                    return None

        else:
            super_convert = super().result_processor(dialect, coltype)

            def process(value):
                if isinstance(value, str):
                    # MySQLdb returns a string, let's parse
                    if super_convert:
                        value = super_convert(value)
                    return set(re.findall(r"[^,]+", value))
                else:
                    # mysql-connector-python does a naive
                    # split(",") which throws in an empty string
                    if value is not None:
                        value.discard("")
                    return value

        return process

    def bind_processor(self, dialect):
        super_convert = super().bind_processor(dialect)
        if self.retrieve_as_bitwise:

            def process(value):
                if value is None:
                    return None
                elif isinstance(value, (int, str)):
                    if super_convert:
                        return super_convert(value)
                    else:
                        return value
                else:
                    int_value = 0
                    for v in value:
                        int_value |= self._bitmap[v]
                    return int_value

        else:

            def process(value):
                # accept strings and int (actually bitflag) values directly
                if value is not None and not isinstance(value, (int, str)):
                    value = ",".join(value)

                if super_convert:
                    return super_convert(value)
                else:
                    return value

        return process

    def adapt(self, impltype, **kw):
        kw["retrieve_as_bitwise"] = self.retrieve_as_bitwise
        return util.constructor_copy(self, impltype, *self.values, **kw)

    def __repr__(self):
        return util.generic_repr(
            self,
            to_inspect=[SET, _StringType],
            additional_kw=[
                ("retrieve_as_bitwise", False),
            ],
        )
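
# --- Editor's addition: a standalone sketch, not part of the vendored file,
# of the retrieve_as_bitwise bookkeeping above: each SET value maps to
# 2**index, and the same dict is used in reverse to decode an integer mask
# back into a set (util.map_bits performs that bit walk in the real code).
values = ("foo", "bar", "baz")
bitmap = {value: 2**idx for idx, value in enumerate(values)}
bitmap.update((2**idx, value) for idx, value in enumerate(values))

# encode, as in bind_processor: a set of values -> integer bitmask
int_value = 0
for v in {"foo", "baz"}:
    int_value |= bitmap[v]
assert int_value == 0b101

# decode: walk the set bits back to their names
decoded = {bitmap[bit] for bit in (1, 2, 4) if int_value & bit}
assert decoded == {"foo", "baz"}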
@ -0,0 +1,143 @@
# dialects/mysql/expression.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


from ... import exc
from ... import util
from ...sql import coercions
from ...sql import elements
from ...sql import operators
from ...sql import roles
from ...sql.base import _generative
from ...sql.base import Generative
from ...util.typing import Self


class match(Generative, elements.BinaryExpression):
    """Produce a ``MATCH (X, Y) AGAINST ('TEXT')`` clause.

    E.g.::

        from sqlalchemy import desc
        from sqlalchemy.dialects.mysql import match

        match_expr = match(
            users_table.c.firstname,
            users_table.c.lastname,
            against="Firstname Lastname",
        )

        stmt = (
            select(users_table)
            .where(match_expr.in_boolean_mode())
            .order_by(desc(match_expr))
        )

    Would produce SQL resembling:

    .. sourcecode:: sql

        SELECT id, firstname, lastname
        FROM user
        WHERE MATCH(firstname, lastname) AGAINST (:param_1 IN BOOLEAN MODE)
        ORDER BY MATCH(firstname, lastname) AGAINST (:param_2) DESC

    The :func:`_mysql.match` function is a standalone version of the
    :meth:`_sql.ColumnElement.match` method available on all
    SQL expressions, as when :meth:`_expression.ColumnElement.match` is
    used, but allows passing multiple columns

    :param cols: column expressions to match against

    :param against: expression to be compared towards

    :param in_boolean_mode: boolean, set "boolean mode" to true

    :param in_natural_language_mode: boolean, set "natural language" to true

    :param with_query_expansion: boolean, set "query expansion" to true

    .. versionadded:: 1.4.19

    .. seealso::

        :meth:`_expression.ColumnElement.match`

    """

    __visit_name__ = "mysql_match"

    inherit_cache = True

    def __init__(self, *cols, **kw):
        if not cols:
            raise exc.ArgumentError("columns are required")

        against = kw.pop("against", None)

        if against is None:
            raise exc.ArgumentError("against is required")
        against = coercions.expect(
            roles.ExpressionElementRole,
            against,
        )

        left = elements.BooleanClauseList._construct_raw(
            operators.comma_op,
            clauses=cols,
        )
        left.group = False

        flags = util.immutabledict(
            {
                "mysql_boolean_mode": kw.pop("in_boolean_mode", False),
                "mysql_natural_language": kw.pop(
                    "in_natural_language_mode", False
                ),
                "mysql_query_expansion": kw.pop("with_query_expansion", False),
            }
        )

        if kw:
            raise exc.ArgumentError("unknown arguments: %s" % (", ".join(kw)))

        super().__init__(left, against, operators.match_op, modifiers=flags)

    @_generative
    def in_boolean_mode(self) -> Self:
        """Apply the "IN BOOLEAN MODE" modifier to the MATCH expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_boolean_mode": True})
        return self

    @_generative
    def in_natural_language_mode(self) -> Self:
        """Apply the "IN NATURAL LANGUAGE MODE" modifier to the MATCH
        expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_natural_language": True})
        return self

    @_generative
    def with_query_expansion(self) -> Self:
        """Apply the "WITH QUERY EXPANSION" modifier to the MATCH expression.

        :return: a new :class:`_mysql.match` instance with modifications
         applied.
        """

        self.modifiers = self.modifiers.union({"mysql_query_expansion": True})
        return self
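
# --- Editor's addition: a hedged sketch, not part of the vendored file,
# compiling the match() construct documented above; the table definition is
# illustrative.
from sqlalchemy import Column, Integer, MetaData, String, Table, select
from sqlalchemy.dialects import mysql

users = Table(
    "user",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("firstname", String(50)),
    Column("lastname", String(50)),
)

expr = match(
    users.c.firstname, users.c.lastname, against="Firstname Lastname"
)
stmt = select(users).where(expr.in_boolean_mode())
# renders MATCH (firstname, lastname) AGAINST (%s IN BOOLEAN MODE)
print(stmt.compile(dialect=mysql.dialect()))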
@ -0,0 +1,81 @@
# dialects/mysql/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from ... import types as sqltypes


class JSON(sqltypes.JSON):
    """MySQL JSON type.

    MySQL supports JSON as of version 5.7.
    MariaDB supports JSON (as an alias for LONGTEXT) as of version 10.2.

    :class:`_mysql.JSON` is used automatically whenever the base
    :class:`_types.JSON` datatype is used against a MySQL or MariaDB backend.

    .. seealso::

        :class:`_types.JSON` - main documentation for the generic
        cross-platform JSON datatype.

    The :class:`.mysql.JSON` type supports persistence of JSON values
    as well as the core index operations provided by :class:`_types.JSON`
    datatype, by adapting the operations to render the ``JSON_EXTRACT``
    function at the database level.

    """

    pass


class _FormatTypeMixin:
    def _format_value(self, value):
        raise NotImplementedError()

    def bind_processor(self, dialect):
        super_proc = self.string_bind_processor(dialect)

        def process(value):
            value = self._format_value(value)
            if super_proc:
                value = super_proc(value)
            return value

        return process

    def literal_processor(self, dialect):
        super_proc = self.string_literal_processor(dialect)

        def process(value):
            value = self._format_value(value)
            if super_proc:
                value = super_proc(value)
            return value

        return process


class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
    def _format_value(self, value):
        if isinstance(value, int):
            value = "$[%s]" % value
        else:
            value = '$."%s"' % value
        return value


class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
    def _format_value(self, value):
        return "$%s" % (
            "".join(
                [
                    "[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
                    for elem in value
                ]
            )
        )
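
# --- Editor's addition: a standalone sketch, not part of the vendored file,
# of the path rendering in JSONPathType._format_value above: integer elements
# become array indexes, strings become quoted member names.
def _format_json_path(elements):
    return "$%s" % "".join(
        "[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
        for elem in elements
    )


assert _format_json_path(["data", 0, "name"]) == '$."data"[0]."name"'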
@ -0,0 +1,67 @@
# dialects/mysql/mariadb.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from .base import MariaDBIdentifierPreparer
from .base import MySQLDialect
from .base import MySQLTypeCompiler
from ...sql import sqltypes


class INET4(sqltypes.TypeEngine[str]):
    """INET4 column type for MariaDB

    .. versionadded:: 2.0.37
    """

    __visit_name__ = "INET4"


class INET6(sqltypes.TypeEngine[str]):
    """INET6 column type for MariaDB

    .. versionadded:: 2.0.37
    """

    __visit_name__ = "INET6"


class MariaDBTypeCompiler(MySQLTypeCompiler):
    def visit_INET4(self, type_, **kwargs) -> str:
        return "INET4"

    def visit_INET6(self, type_, **kwargs) -> str:
        return "INET6"


class MariaDBDialect(MySQLDialect):
    is_mariadb = True
    supports_statement_cache = True
    name = "mariadb"
    preparer = MariaDBIdentifierPreparer
    type_compiler_cls = MariaDBTypeCompiler


def loader(driver):
    dialect_mod = __import__(
        "sqlalchemy.dialects.mysql.%s" % driver
    ).dialects.mysql

    driver_mod = getattr(dialect_mod, driver)
    if hasattr(driver_mod, "mariadb_dialect"):
        driver_cls = driver_mod.mariadb_dialect
        return driver_cls
    else:
        driver_cls = driver_mod.dialect

        return type(
            "MariaDBDialect_%s" % driver,
            (
                MariaDBDialect,
                driver_cls,
            ),
            {"supports_statement_cache": True},
        )
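
# --- Editor's addition: an illustrative sketch, not part of the vendored
# file, of what loader() above builds for a driver without a dedicated
# mariadb_dialect: a new class that puts MariaDBDialect ahead of the driver's
# dialect class in the MRO. The stub class here is an invented stand-in.
class _DriverDialectStub:  # stand-in for e.g. MySQLDialect_pymysql
    is_mariadb = False
    supports_statement_cache = True


composed = type(
    "MariaDBDialect_stubdriver",
    (MariaDBDialect, _DriverDialectStub),
    {"supports_statement_cache": True},
)
assert composed.is_mariadb  # MariaDBDialect wins in the MRO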
@ -0,0 +1,277 @@
# dialects/mysql/mariadbconnector.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


"""

.. dialect:: mysql+mariadbconnector
    :name: MariaDB Connector/Python
    :dbapi: mariadb
    :connectstring: mariadb+mariadbconnector://<user>:<password>@<host>[:<port>]/<dbname>
    :url: https://pypi.org/project/mariadb/

Driver Status
-------------

MariaDB Connector/Python enables Python programs to access MariaDB and MySQL
databases using an API which is compliant with the Python DB API 2.0 (PEP-249).
It is written in C and uses the MariaDB Connector/C client library for
client/server communication.

Note that the default driver for a ``mariadb://`` connection URI continues to
be ``mysqldb``. ``mariadb+mariadbconnector://`` is required to use this driver.
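
For example, a minimal sketch of creating an engine with this driver (the
host, credentials and database name below are placeholders)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mariadb+mariadbconnector://scott:tiger@127.0.0.1:3306/test"
    )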

.. _mariadb: https://github.com/mariadb-corporation/mariadb-connector-python

"""  # noqa
import re
from uuid import UUID as _python_UUID

from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from ... import sql
from ... import util
from ...sql import sqltypes


mariadb_cpy_minimum_version = (1, 0, 1)


class _MariaDBUUID(sqltypes.UUID[sqltypes._UUID_RETURN]):
    # work around JIRA issue
    # https://jira.mariadb.org/browse/CONPY-270. When that issue is fixed,
    # this type can be removed.
    def result_processor(self, dialect, coltype):
        if self.as_uuid:

            def process(value):
                if value is not None:
                    if hasattr(value, "decode"):
                        value = value.decode("ascii")
                    value = _python_UUID(value)
                return value

            return process
        else:

            def process(value):
                if value is not None:
                    if hasattr(value, "decode"):
                        value = value.decode("ascii")
                    value = str(_python_UUID(value))
                return value

            return process


class MySQLExecutionContext_mariadbconnector(MySQLExecutionContext):
    _lastrowid = None

    def create_server_side_cursor(self):
        return self._dbapi_connection.cursor(buffered=False)

    def create_default_cursor(self):
        return self._dbapi_connection.cursor(buffered=True)

    def post_exec(self):
        super().post_exec()

        self._rowcount = self.cursor.rowcount

        if self.isinsert and self.compiled.postfetch_lastrowid:
            self._lastrowid = self.cursor.lastrowid

    def get_lastrowid(self):
        return self._lastrowid


class MySQLCompiler_mariadbconnector(MySQLCompiler):
    pass


class MySQLDialect_mariadbconnector(MySQLDialect):
    driver = "mariadbconnector"
    supports_statement_cache = True

    # set this to True at the module level to prevent the driver from running
    # against a backend that is detected as MySQL. currently this appears to
    # be unnecessary, as MariaDB client libraries have always worked against
    # MySQL databases. However, if this changes at some point, this can be
    # adjusted, but PLEASE ADD A TEST in test/dialect/mysql/test_dialect.py if
    # this change is made at some point to ensure the correct exception
    # is raised at the correct point when running the driver against
    # a MySQL backend.
    # is_mariadb = True

    supports_unicode_statements = True
    encoding = "utf8mb4"
    convert_unicode = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    supports_native_decimal = True
    default_paramstyle = "qmark"
    execution_ctx_cls = MySQLExecutionContext_mariadbconnector
    statement_compiler = MySQLCompiler_mariadbconnector

    supports_server_side_cursors = True

    colspecs = util.update_copy(
        MySQLDialect.colspecs, {sqltypes.Uuid: _MariaDBUUID}
    )

    @util.memoized_property
    def _dbapi_version(self):
        if self.dbapi and hasattr(self.dbapi, "__version__"):
            return tuple(
                [
                    int(x)
                    for x in re.findall(
                        r"(\d+)(?:[-\.]?|$)", self.dbapi.__version__
                    )
                ]
            )
        else:
            return (99, 99, 99)

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.paramstyle = "qmark"
        if self.dbapi is not None:
            if self._dbapi_version < mariadb_cpy_minimum_version:
                raise NotImplementedError(
                    "The minimum required version for MariaDB "
                    "Connector/Python is %s"
                    % ".".join(str(x) for x in mariadb_cpy_minimum_version)
                )

    @classmethod
    def import_dbapi(cls):
        return __import__("mariadb")

    def is_disconnect(self, e, connection, cursor):
        if super().is_disconnect(e, connection, cursor):
            return True
        elif isinstance(e, self.dbapi.Error):
            str_e = str(e).lower()
            return "not connected" in str_e or "isn't valid" in str_e
        else:
            return False

    def create_connect_args(self, url):
        opts = url.translate_connect_args()
        opts.update(url.query)

        int_params = [
            "connect_timeout",
            "read_timeout",
            "write_timeout",
            "client_flag",
            "port",
            "pool_size",
        ]
        bool_params = [
            "local_infile",
            "ssl_verify_cert",
            "ssl",
            "pool_reset_connection",
            "compress",
        ]

        for key in int_params:
            util.coerce_kw_type(opts, key, int)
        for key in bool_params:
            util.coerce_kw_type(opts, key, bool)

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        client_flag = opts.get("client_flag", 0)
        if self.dbapi is not None:
            try:
                CLIENT_FLAGS = __import__(
                    self.dbapi.__name__ + ".constants.CLIENT"
                ).constants.CLIENT
                client_flag |= CLIENT_FLAGS.FOUND_ROWS
            except (AttributeError, ImportError):
                self.supports_sane_rowcount = False
        opts["client_flag"] = client_flag
        return [[], opts]

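    # Illustrative sketch (not in the original source): given a URL such as
    # "mariadb+mariadbconnector://scott:tiger@host/db?pool_size=5&ssl=true",
    # the coercion loops above turn the raw query strings into typed values,
    # e.g. opts["pool_size"] == 5 and opts["ssl"] is True, before [[], opts]
    # is then handed to mariadb.connect().
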
    def _extract_error_code(self, exception):
        try:
            rc = exception.errno
        except AttributeError:
            rc = -1
        return rc

    def _detect_charset(self, connection):
        return "utf8mb4"

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "SERIALIZABLE",
            "READ UNCOMMITTED",
            "READ COMMITTED",
            "REPEATABLE READ",
            "AUTOCOMMIT",
        )

    def set_isolation_level(self, connection, level):
        if level == "AUTOCOMMIT":
            connection.autocommit = True
        else:
            connection.autocommit = False
            super().set_isolation_level(connection, level)

    def do_begin_twophase(self, connection, xid):
        connection.execute(
            sql.text("XA BEGIN :xid").bindparams(
                sql.bindparam("xid", xid, literal_execute=True)
            )
        )

    def do_prepare_twophase(self, connection, xid):
        connection.execute(
            sql.text("XA END :xid").bindparams(
                sql.bindparam("xid", xid, literal_execute=True)
            )
        )
        connection.execute(
            sql.text("XA PREPARE :xid").bindparams(
                sql.bindparam("xid", xid, literal_execute=True)
            )
        )

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if not is_prepared:
            connection.execute(
                sql.text("XA END :xid").bindparams(
                    sql.bindparam("xid", xid, literal_execute=True)
                )
            )
        connection.execute(
            sql.text("XA ROLLBACK :xid").bindparams(
                sql.bindparam("xid", xid, literal_execute=True)
            )
        )

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if not is_prepared:
            self.do_prepare_twophase(connection, xid)
        connection.execute(
            sql.text("XA COMMIT :xid").bindparams(
                sql.bindparam("xid", xid, literal_execute=True)
            )
        )


dialect = MySQLDialect_mariadbconnector
@ -0,0 +1,245 @@
# dialects/mysql/mysqlconnector.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


r"""
.. dialect:: mysql+mysqlconnector
    :name: MySQL Connector/Python
    :dbapi: myconnpy
    :connectstring: mysql+mysqlconnector://<user>:<password>@<host>[:<port>]/<dbname>
    :url: https://pypi.org/project/mysql-connector-python/

Driver Status
-------------

MySQL Connector/Python is supported as of SQLAlchemy 2.0.39, to the degree
that the driver is functional. There are still ongoing issues with features
such as server side cursors, which remain disabled until the upstream issues
are repaired.

.. versionchanged:: 2.0.39

    The MySQL Connector/Python dialect has been updated to support the
    latest version of this DBAPI. Previously, MySQL Connector/Python
    was not fully supported.

Connecting to MariaDB with MySQL Connector/Python
--------------------------------------------------

MySQL Connector/Python may attempt to pass an incompatible collation to the
database when connecting to MariaDB. Experimentation has shown that using
``?charset=utf8mb4&collation=utf8mb4_general_ci`` or a similar
MariaDB-compatible charset/collation will allow connectivity.
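
For example, a minimal sketch of a MariaDB-compatible connection URI (the
host, credentials and database name are placeholders)::

    engine = create_engine(
        "mysql+mysqlconnector://scott:tiger@127.0.0.1/test"
        "?charset=utf8mb4&collation=utf8mb4_general_ci"
    )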

"""  # noqa

import re

from .base import BIT
from .base import MariaDBIdentifierPreparer
from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from .mariadb import MariaDBDialect
from ... import util


class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext):
    def create_server_side_cursor(self):
        return self._dbapi_connection.cursor(buffered=False)

    def create_default_cursor(self):
        return self._dbapi_connection.cursor(buffered=True)


class MySQLCompiler_mysqlconnector(MySQLCompiler):
    def visit_mod_binary(self, binary, operator, **kw):
        return (
            self.process(binary.left, **kw)
            + " % "
            + self.process(binary.right, **kw)
        )
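
    # Illustrative note (not in the original source): because this driver does
    # not require doubled percent signs, a modulo expression such as
    # column("a") % 5 compiles here to "a % 5" rather than "a %% 5".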


class IdentifierPreparerCommon_mysqlconnector:
    @property
    def _double_percents(self):
        return False

    @_double_percents.setter
    def _double_percents(self, value):
        pass

    def _escape_identifier(self, value):
        value = value.replace(self.escape_quote, self.escape_to_quote)
        return value


class MySQLIdentifierPreparer_mysqlconnector(
    IdentifierPreparerCommon_mysqlconnector, MySQLIdentifierPreparer
):
    pass


class MariaDBIdentifierPreparer_mysqlconnector(
    IdentifierPreparerCommon_mysqlconnector, MariaDBIdentifierPreparer
):
    pass


class _myconnpyBIT(BIT):
    def result_processor(self, dialect, coltype):
        """MySQL-connector already converts mysql bits, so no processing
        is needed."""

        return None


class MySQLDialect_mysqlconnector(MySQLDialect):
    driver = "mysqlconnector"
    supports_statement_cache = True

    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    supports_native_decimal = True

    supports_native_bit = True

    # not until https://bugs.mysql.com/bug.php?id=117548
    supports_server_side_cursors = False

    default_paramstyle = "format"
    statement_compiler = MySQLCompiler_mysqlconnector

    execution_ctx_cls = MySQLExecutionContext_mysqlconnector

    preparer = MySQLIdentifierPreparer_mysqlconnector

    colspecs = util.update_copy(MySQLDialect.colspecs, {BIT: _myconnpyBIT})

    @classmethod
    def import_dbapi(cls):
        from mysql import connector

        return connector

    def do_ping(self, dbapi_connection):
        dbapi_connection.ping(False)
        return True

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username="user")

        opts.update(url.query)

        util.coerce_kw_type(opts, "allow_local_infile", bool)
        util.coerce_kw_type(opts, "autocommit", bool)
        util.coerce_kw_type(opts, "buffered", bool)
        util.coerce_kw_type(opts, "client_flag", int)
        util.coerce_kw_type(opts, "compress", bool)
        util.coerce_kw_type(opts, "connection_timeout", int)
        util.coerce_kw_type(opts, "connect_timeout", int)
        util.coerce_kw_type(opts, "consume_results", bool)
        util.coerce_kw_type(opts, "force_ipv6", bool)
        util.coerce_kw_type(opts, "get_warnings", bool)
        util.coerce_kw_type(opts, "pool_reset_session", bool)
        util.coerce_kw_type(opts, "pool_size", int)
        util.coerce_kw_type(opts, "raise_on_warnings", bool)
        util.coerce_kw_type(opts, "raw", bool)
        util.coerce_kw_type(opts, "ssl_verify_cert", bool)
        util.coerce_kw_type(opts, "use_pure", bool)
        util.coerce_kw_type(opts, "use_unicode", bool)

        # note that "buffered" is set to False by default in MySQL
        # Connector/Python. If you set it to True, then there is no way to
        # get a server side cursor because the logic is written to disallow
        # that.

        # leaving this at True until
        # https://bugs.mysql.com/bug.php?id=117548 can be fixed
        opts["buffered"] = True

        # FOUND_ROWS must be set in ClientFlag to enable
        # supports_sane_rowcount.
        if self.dbapi is not None:
            try:
                from mysql.connector.constants import ClientFlag

                client_flags = opts.get(
                    "client_flags", ClientFlag.get_default()
                )
                client_flags |= ClientFlag.FOUND_ROWS
                opts["client_flags"] = client_flags
            except Exception:
                pass

        return [[], opts]

    @util.memoized_property
    def _mysqlconnector_version_info(self):
        if self.dbapi and hasattr(self.dbapi, "__version__"):
            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", self.dbapi.__version__)
            if m:
                return tuple(
                    int(x) for x in m.group(1, 2, 3) if x is not None
                )

    def _detect_charset(self, connection):
        return connection.connection.charset

    def _extract_error_code(self, exception):
        return exception.errno

    def is_disconnect(self, e, connection, cursor):
        errnos = (2006, 2013, 2014, 2045, 2055, 2048)
        exceptions = (
            self.dbapi.OperationalError,
            self.dbapi.InterfaceError,
            self.dbapi.ProgrammingError,
        )
        if isinstance(e, exceptions):
            return (
                e.errno in errnos
                or "MySQL Connection not available." in str(e)
                or "Connection to MySQL is not available" in str(e)
            )
        else:
            return False

    def _compat_fetchall(self, rp, charset=None):
        return rp.fetchall()

    def _compat_fetchone(self, rp, charset=None):
        return rp.fetchone()

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "SERIALIZABLE",
            "READ UNCOMMITTED",
            "READ COMMITTED",
            "REPEATABLE READ",
            "AUTOCOMMIT",
        )

    def set_isolation_level(self, connection, level):
        if level == "AUTOCOMMIT":
            connection.autocommit = True
        else:
            connection.autocommit = False
            super().set_isolation_level(connection, level)


class MariaDBDialect_mysqlconnector(
    MariaDBDialect, MySQLDialect_mysqlconnector
):
    supports_statement_cache = True
    _allows_uuid_binds = False
    preparer = MariaDBIdentifierPreparer_mysqlconnector


dialect = MySQLDialect_mysqlconnector
mariadb_dialect = MariaDBDialect_mysqlconnector
@ -0,0 +1,305 @@
# dialects/mysql/mysqldb.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


"""

.. dialect:: mysql+mysqldb
    :name: mysqlclient (maintained fork of MySQL-Python)
    :dbapi: mysqldb
    :connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
    :url: https://pypi.org/project/mysqlclient/

Driver Status
-------------

The mysqlclient DBAPI is a maintained fork of the no-longer-maintained
`MySQL-Python <https://sourceforge.net/projects/mysql-python>`_ DBAPI.
`mysqlclient`_ supports Python 2 and Python 3 and is very stable.

.. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python

.. _mysqldb_unicode:

Unicode
-------

Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.

.. _mysqldb_ssl:

SSL Connections
----------------

The mysqlclient and PyMySQL DBAPIs accept an additional dictionary under the
key "ssl", which may be specified using the
:paramref:`_sa.create_engine.connect_args` dictionary::

    engine = create_engine(
        "mysql+mysqldb://scott:tiger@192.168.0.134/test",
        connect_args={
            "ssl": {
                "ca": "/home/gord/client-ssl/ca.pem",
                "cert": "/home/gord/client-ssl/client-cert.pem",
                "key": "/home/gord/client-ssl/client-key.pem",
            }
        },
    )

For convenience, the following keys may also be specified inline within the
URL, where they will be interpreted into the "ssl" dictionary automatically:
"ssl_ca", "ssl_cert", "ssl_key", "ssl_capath", "ssl_cipher",
"ssl_check_hostname". An example is as follows::

    connection_uri = (
        "mysql+mysqldb://scott:tiger@192.168.0.134/test"
        "?ssl_ca=/home/gord/client-ssl/ca.pem"
        "&ssl_cert=/home/gord/client-ssl/client-cert.pem"
        "&ssl_key=/home/gord/client-ssl/client-key.pem"
    )

.. seealso::

    :ref:`pymysql_ssl` in the PyMySQL dialect


Using MySQLdb with Google Cloud SQL
-----------------------------------

Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following:

.. sourcecode:: text

    mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>

Server Side Cursors
-------------------

The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`.

"""

import re

from .base import MySQLCompiler
from .base import MySQLDialect
from .base import MySQLExecutionContext
from .base import MySQLIdentifierPreparer
from .base import TEXT
from ... import sql
from ... import util


class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
    pass


class MySQLCompiler_mysqldb(MySQLCompiler):
    pass


class MySQLDialect_mysqldb(MySQLDialect):
    driver = "mysqldb"
    supports_statement_cache = True
    supports_unicode_statements = True
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True

    supports_native_decimal = True

    default_paramstyle = "format"
    execution_ctx_cls = MySQLExecutionContext_mysqldb
    statement_compiler = MySQLCompiler_mysqldb
    preparer = MySQLIdentifierPreparer

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self._mysql_dbapi_version = (
            self._parse_dbapi_version(self.dbapi.__version__)
            if self.dbapi is not None and hasattr(self.dbapi, "__version__")
            else (0, 0, 0)
        )

    def _parse_dbapi_version(self, version):
        m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version)
        if m:
            return tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
        else:
            return (0, 0, 0)
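
    # Illustrative sketch (not in the original source): the regex above
    # tolerates a missing patch number and trailing suffixes, e.g.
    #     _parse_dbapi_version("2.2.4")    -> (2, 2, 4)
    #     _parse_dbapi_version("1.4")      -> (1, 4)
    #     _parse_dbapi_version("2.0.3b1")  -> (2, 0, 3)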

    @util.langhelpers.memoized_property
    def supports_server_side_cursors(self):
        try:
            cursors = __import__("MySQLdb.cursors").cursors
            self._sscursor = cursors.SSCursor
            return True
        except (ImportError, AttributeError):
            return False

    @classmethod
    def import_dbapi(cls):
        return __import__("MySQLdb")

    def on_connect(self):
        super_ = super().on_connect()

        def on_connect(conn):
            if super_ is not None:
                super_(conn)

            charset_name = conn.character_set_name()

            if charset_name is not None:
                cursor = conn.cursor()
                cursor.execute("SET NAMES %s" % charset_name)
                cursor.close()

        return on_connect

    def do_ping(self, dbapi_connection):
        dbapi_connection.ping()
        return True

    def do_executemany(self, cursor, statement, parameters, context=None):
        rowcount = cursor.executemany(statement, parameters)
        if context is not None:
            context._rowcount = rowcount

    def _check_unicode_returns(self, connection):
        # work around issue fixed in
        # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8
        # specific issue w/ the utf8mb4_bin collation and unicode returns

        collation = connection.exec_driver_sql(
            "show collation where %s = 'utf8mb4' and %s = 'utf8mb4_bin'"
            % (
                self.identifier_preparer.quote("Charset"),
                self.identifier_preparer.quote("Collation"),
            )
        ).scalar()
        has_utf8mb4_bin = self.server_version_info > (5,) and collation
        if has_utf8mb4_bin:
            additional_tests = [
                sql.collate(
                    sql.cast(
                        sql.literal_column("'test collated returns'"),
                        TEXT(charset="utf8mb4"),
                    ),
                    "utf8mb4_bin",
                )
            ]
        else:
            additional_tests = []
        return super()._check_unicode_returns(connection, additional_tests)

    def create_connect_args(self, url, _translate_args=None):
        if _translate_args is None:
            _translate_args = dict(
                database="db", username="user", password="passwd"
            )

        opts = url.translate_connect_args(**_translate_args)
        opts.update(url.query)

        util.coerce_kw_type(opts, "compress", bool)
        util.coerce_kw_type(opts, "connect_timeout", int)
        util.coerce_kw_type(opts, "read_timeout", int)
        util.coerce_kw_type(opts, "write_timeout", int)
        util.coerce_kw_type(opts, "client_flag", int)
        util.coerce_kw_type(opts, "local_infile", bool)
        # Note: using either of the below will cause all strings to be
        # returned as Unicode, both in raw SQL operations and with column
        # types like String and MSString.
        util.coerce_kw_type(opts, "use_unicode", bool)
        util.coerce_kw_type(opts, "charset", str)

        # Rich values 'cursorclass' and 'conv' are not supported via
        # query string.

        ssl = {}
        keys = [
            ("ssl_ca", str),
            ("ssl_key", str),
            ("ssl_cert", str),
            ("ssl_capath", str),
            ("ssl_cipher", str),
            ("ssl_check_hostname", bool),
        ]
        for key, kw_type in keys:
            if key in opts:
                ssl[key[4:]] = opts[key]
                util.coerce_kw_type(ssl, key[4:], kw_type)
                del opts[key]
        if ssl:
            opts["ssl"] = ssl

        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
        # supports_sane_rowcount.
        client_flag = opts.get("client_flag", 0)

        client_flag_found_rows = self._found_rows_client_flag()
        if client_flag_found_rows is not None:
            client_flag |= client_flag_found_rows
            opts["client_flag"] = client_flag
        return [[], opts]

    def _found_rows_client_flag(self):
        if self.dbapi is not None:
            try:
                CLIENT_FLAGS = __import__(
                    self.dbapi.__name__ + ".constants.CLIENT"
                ).constants.CLIENT
            except (AttributeError, ImportError):
                return None
            else:
                return CLIENT_FLAGS.FOUND_ROWS
        else:
            return None

    def _extract_error_code(self, exception):
        return exception.args[0]

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        try:
            # note: the SQL here would be
            # "SHOW VARIABLES LIKE 'character_set%%'"
            cset_name = connection.connection.character_set_name
        except AttributeError:
            util.warn(
                "No 'character_set_name' can be detected with "
                "this MySQL-Python version; "
                "please upgrade to a recent version of MySQL-Python. "
                "Assuming latin1."
            )
            return "latin1"
        else:
            return cset_name()

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "SERIALIZABLE",
            "READ UNCOMMITTED",
            "READ COMMITTED",
            "REPEATABLE READ",
            "AUTOCOMMIT",
        )

    def set_isolation_level(self, dbapi_connection, level):
        if level == "AUTOCOMMIT":
            dbapi_connection.autocommit(True)
        else:
            dbapi_connection.autocommit(False)
            super().set_isolation_level(dbapi_connection, level)


dialect = MySQLDialect_mysqldb
@ -0,0 +1,114 @@
# dialects/mysql/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from ... import exc
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import generate_driver_url
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import upsert


@generate_driver_url.for_db("mysql", "mariadb")
def generate_driver_url(url, driver, query_str):
    backend = url.get_backend_name()

    # NOTE: at the moment, tests are running mariadbconnector
    # against both mariadb and mysql backends. if we want this to be
    # limited, do the decision making here to reject a "mysql+mariadbconnector"
    # URL. Optionally also re-enable the module level
    # MySQLDialect_mariadbconnector.is_mysql flag as well, which must include
    # a unit and/or functional test.

    # all the Jenkins tests have been running the mysqlclient Python library
    # built against mariadb client drivers for years against all MySQL /
    # MariaDB versions going back to MySQL 5.6; currently they can talk
    # to MySQL databases without problems.

    if backend == "mysql":
        dialect_cls = url.get_dialect()
        if dialect_cls._is_mariadb_from_url(url):
            backend = "mariadb"

    new_url = url.set(
        drivername="%s+%s" % (backend, driver)
    ).update_query_string(query_str)

    if driver == "mariadbconnector":
        new_url = new_url.difference_update_query(["charset"])
    elif driver == "mysqlconnector":
        new_url = new_url.update_query_pairs(
            [("collation", "utf8mb4_general_ci")]
        )

    try:
        new_url.get_dialect()
    except exc.NoSuchModuleError:
        return None
    else:
        return new_url

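# Illustrative sketch (not in the original source): starting from a test URL
# of "mysql://scott:tiger@host/test" whose URL is detected as MariaDB,
# generate_driver_url(url, "mariadbconnector", "") would yield roughly
# "mariadb+mariadbconnector://scott:tiger@host/test", with any "charset"
# query argument stripped for that driver.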


@create_db.for_db("mysql", "mariadb")
def _mysql_create_db(cfg, eng, ident):
    with eng.begin() as conn:
        try:
            _mysql_drop_db(cfg, conn, ident)
        except Exception:
            pass

    with eng.begin() as conn:
        conn.exec_driver_sql(
            "CREATE DATABASE %s CHARACTER SET utf8mb4" % ident
        )
        conn.exec_driver_sql(
            "CREATE DATABASE %s_test_schema CHARACTER SET utf8mb4" % ident
        )
        conn.exec_driver_sql(
            "CREATE DATABASE %s_test_schema_2 CHARACTER SET utf8mb4" % ident
        )


@configure_follower.for_db("mysql", "mariadb")
def _mysql_configure_follower(config, ident):
    config.test_schema = "%s_test_schema" % ident
    config.test_schema_2 = "%s_test_schema_2" % ident


@drop_db.for_db("mysql", "mariadb")
def _mysql_drop_db(cfg, eng, ident):
    with eng.begin() as conn:
        conn.exec_driver_sql("DROP DATABASE %s_test_schema" % ident)
        conn.exec_driver_sql("DROP DATABASE %s_test_schema_2" % ident)
        conn.exec_driver_sql("DROP DATABASE %s" % ident)


@temp_table_keyword_args.for_db("mysql", "mariadb")
def _mysql_temp_table_keyword_args(cfg, eng):
    return {"prefixes": ["TEMPORARY"]}


@upsert.for_db("mariadb")
def _upsert(
    cfg, table, returning, *, set_lambda=None, sort_by_parameter_order=False
):
    from sqlalchemy.dialects.mysql import insert

    stmt = insert(table)

    if set_lambda:
        stmt = stmt.on_duplicate_key_update(**set_lambda(stmt.inserted))
    else:
        pk1 = table.primary_key.c[0]
        stmt = stmt.on_duplicate_key_update({pk1.key: pk1})

    stmt = stmt.returning(
        *returning, sort_by_parameter_order=sort_by_parameter_order
    )
    return stmt
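

# Illustrative sketch (not in the original source): with no set_lambda, the
# helper above emits an "INSERT ... ON DUPLICATE KEY UPDATE <pk> = <pk>
# ... RETURNING ..." statement, relying on MariaDB 10.5+ support for
# INSERT ... RETURNING.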
@ -0,0 +1,136 @@
# dialects/mysql/pymysql.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


r"""

.. dialect:: mysql+pymysql
    :name: PyMySQL
    :dbapi: pymysql
    :connectstring: mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
    :url: https://pymysql.readthedocs.io/

Unicode
-------

Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.

.. _pymysql_ssl:

SSL Connections
---------------

The PyMySQL DBAPI accepts the same SSL arguments as those of MySQLdb,
described at :ref:`mysqldb_ssl`. See that section for additional examples.

If the server uses an automatically-generated certificate that is self-signed
or does not match the host name (as seen from the client), it may also be
necessary to indicate ``ssl_check_hostname=false`` in PyMySQL::

    connection_uri = (
        "mysql+pymysql://scott:tiger@192.168.0.134/test"
        "?ssl_ca=/home/gord/client-ssl/ca.pem"
        "&ssl_cert=/home/gord/client-ssl/client-cert.pem"
        "&ssl_key=/home/gord/client-ssl/client-key.pem"
        "&ssl_check_hostname=false"
    )

MySQL-Python Compatibility
--------------------------

The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply
to the pymysql driver as well.

"""  # noqa

from .mysqldb import MySQLDialect_mysqldb
from ...util import langhelpers


class MySQLDialect_pymysql(MySQLDialect_mysqldb):
    driver = "pymysql"
    supports_statement_cache = True

    description_encoding = None

    @langhelpers.memoized_property
    def supports_server_side_cursors(self):
        try:
            cursors = __import__("pymysql.cursors").cursors
            self._sscursor = cursors.SSCursor
            return True
        except (ImportError, AttributeError):
            return False

    @classmethod
    def import_dbapi(cls):
        return __import__("pymysql")

    @langhelpers.memoized_property
    def _send_false_to_ping(self):
        """determine if pymysql has deprecated, changed the default of,
        or removed the 'reconnect' argument of connection.ping().

        See #10492 and
        https://github.com/PyMySQL/mysqlclient/discussions/651#discussioncomment-7308971
        for background.

        """  # noqa: E501

        try:
            Connection = __import__(
                "pymysql.connections"
            ).connections.Connection
        except (ImportError, AttributeError):
            return True
        else:
            insp = langhelpers.get_callable_argspec(Connection.ping)
            try:
                reconnect_arg = insp.args[1]
            except IndexError:
                return False
            else:
                return reconnect_arg == "reconnect" and (
                    not insp.defaults or insp.defaults[0] is not False
                )

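    # Illustrative note (not in the original source): with a pymysql whose
    # signature is Connection.ping(self, reconnect=True), the inspection
    # above returns True and do_ping() below calls ping(False); if the
    # reconnect argument disappears upstream, a plain ping() is used instead.
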
    def do_ping(self, dbapi_connection):
        if self._send_false_to_ping:
            dbapi_connection.ping(False)
        else:
            dbapi_connection.ping()

        return True

    def create_connect_args(self, url, _translate_args=None):
        if _translate_args is None:
            _translate_args = dict(username="user")
        return super().create_connect_args(
            url, _translate_args=_translate_args
        )

    def is_disconnect(self, e, connection, cursor):
        if super().is_disconnect(e, connection, cursor):
            return True
        elif isinstance(e, self.dbapi.Error):
            str_e = str(e).lower()
            return (
                "already closed" in str_e or "connection was killed" in str_e
            )
        else:
            return False

    def _extract_error_code(self, exception):
        if isinstance(exception.args[0], Exception):
            exception = exception.args[0]
        return exception.args[0]


dialect = MySQLDialect_pymysql
@ -0,0 +1,139 @@
# dialects/mysql/pyodbc.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


r"""


.. dialect:: mysql+pyodbc
    :name: PyODBC
    :dbapi: pyodbc
    :connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
    :url: https://pypi.org/project/pyodbc/

.. note::

    The PyODBC for MySQL dialect is **not tested as part of
    SQLAlchemy's continuous integration**.
    The recommended MySQL dialects are mysqlclient and PyMySQL.
    However, if you want to use the mysql+pyodbc dialect and require
    full support for ``utf8mb4`` characters (including supplementary
    characters like emoji), be sure to use a current release of
    MySQL Connector/ODBC and specify the "ANSI" (**not** "Unicode")
    version of the driver in your DSN or connection string.

Pass through exact pyodbc connection string::

    import urllib

    connection_string = (
        "DRIVER=MySQL ODBC 8.0 ANSI Driver;"
        "SERVER=localhost;"
        "PORT=3307;"
        "DATABASE=mydb;"
        "UID=root;"
        "PWD=(whatever);"
        "charset=utf8mb4;"
    )
    params = urllib.parse.quote_plus(connection_string)
    connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params

"""  # noqa

import re

from .base import MySQLDialect
from .base import MySQLExecutionContext
from .types import TIME
from ... import exc
from ... import util
from ...connectors.pyodbc import PyODBCConnector
from ...sql.sqltypes import Time


class _pyodbcTIME(TIME):
    def result_processor(self, dialect, coltype):
        def process(value):
            # pyodbc returns a datetime.time object; no need to convert
            return value

        return process


class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
    def get_lastrowid(self):
        cursor = self.create_cursor()
        cursor.execute("SELECT LAST_INSERT_ID()")
        lastrowid = cursor.fetchone()[0]
        cursor.close()
        return lastrowid


class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    supports_statement_cache = True
    colspecs = util.update_copy(MySQLDialect.colspecs, {Time: _pyodbcTIME})
    supports_unicode_statements = True
    execution_ctx_cls = MySQLExecutionContext_pyodbc

    pyodbc_driver_name = "MySQL"

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""

        # Prefer 'character_set_results' for the current connection over the
        # value in the driver. SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.

        # set this to None as _fetch_setting attempts to use it (None is OK)
        self._connection_charset = None
        try:
            value = self._fetch_setting(connection, "character_set_client")
            if value:
                return value
        except exc.DBAPIError:
            pass

        util.warn(
            "Could not detect the connection character set. "
            "Assuming latin1."
        )
        return "latin1"

    def _get_server_version_info(self, connection):
        return MySQLDialect._get_server_version_info(self, connection)

    def _extract_error_code(self, exception):
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        if m is None:
            # no parenthesized numeric code present in the exception text
            return None
        return int(m.group(1))

    def on_connect(self):
        super_ = super().on_connect()

        def on_connect(conn):
            if super_ is not None:
                super_(conn)

            # declare Unicode encoding for pyodbc as per
            # https://github.com/mkleehammer/pyodbc/wiki/Unicode
            pyodbc_SQL_CHAR = 1  # pyodbc.SQL_CHAR
            pyodbc_SQL_WCHAR = -8  # pyodbc.SQL_WCHAR
            conn.setdecoding(pyodbc_SQL_CHAR, encoding="utf-8")
            conn.setdecoding(pyodbc_SQL_WCHAR, encoding="utf-8")
            conn.setencoding(encoding="utf-8")

        return on_connect


dialect = MySQLDialect_pyodbc
@ -0,0 +1,677 @@
# dialects/mysql/reflection.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


import re

from .enumerated import ENUM
from .enumerated import SET
from .types import DATETIME
from .types import TIME
from .types import TIMESTAMP
from ... import log
from ... import types as sqltypes
from ... import util


class ReflectedState:
    """Stores raw information about a SHOW CREATE TABLE statement."""

    def __init__(self):
        self.columns = []
        self.table_options = {}
        self.table_name = None
        self.keys = []
        self.fk_constraints = []
        self.ck_constraints = []


@log.class_logger
class MySQLTableDefinitionParser:
    """Parses the results of a SHOW CREATE TABLE statement."""

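    # Illustrative sketch (not in the original source): parse() below consumes
    # text of roughly this shape, one line at a time::
    #
    #     CREATE TABLE `t` (
    #       `id` int(11) NOT NULL AUTO_INCREMENT,
    #       `name` varchar(30) DEFAULT NULL,
    #       PRIMARY KEY (`id`)
    #     ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
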
    def __init__(self, dialect, preparer):
        self.dialect = dialect
        self.preparer = preparer
        self._prep_regexes()

    def parse(self, show_create, charset):
        state = ReflectedState()
        state.charset = charset
        for line in re.split(r"\r?\n", show_create):
            if line.startswith("  " + self.preparer.initial_quote):
                self._parse_column(line, state)
            # a regular table options line
            elif line.startswith(") "):
                self._parse_table_options(line, state)
            # an ANSI-mode table options line
            elif line == ")":
                pass
            elif line.startswith("CREATE "):
                self._parse_table_name(line, state)
            elif "PARTITION" in line:
                self._parse_partition_options(line, state)
            # Not present in real reflection, but may be if
            # loading from a file.
            elif not line:
                pass
            else:
                type_, spec = self._parse_constraints(line)
                if type_ is None:
                    util.warn("Unknown schema content: %r" % line)
                elif type_ == "key":
                    state.keys.append(spec)
                elif type_ == "fk_constraint":
                    state.fk_constraints.append(spec)
                elif type_ == "ck_constraint":
                    state.ck_constraints.append(spec)
                else:
                    pass
        return state

    def _check_view(self, sql: str) -> bool:
        return bool(self._re_is_view.match(sql))

    def _parse_constraints(self, line):
        """Parse a KEY or CONSTRAINT line.

        :param line: A line of SHOW CREATE TABLE output
        """

        # KEY
        m = self._re_key.match(line)
        if m:
            spec = m.groupdict()
            # convert columns into name, length pairs
            # NOTE: we may want to consider SHOW INDEX as the
            # format of indexes in MySQL becomes more complex
            spec["columns"] = self._parse_keyexprs(spec["columns"])
            if spec["version_sql"]:
                m2 = self._re_key_version_sql.match(spec["version_sql"])
                if m2 and m2.groupdict()["parser"]:
                    spec["parser"] = m2.groupdict()["parser"]
            if spec["parser"]:
                spec["parser"] = self.preparer.unformat_identifiers(
                    spec["parser"]
                )[0]
            return "key", spec

        # FOREIGN KEY CONSTRAINT
        m = self._re_fk_constraint.match(line)
        if m:
            spec = m.groupdict()
            spec["table"] = self.preparer.unformat_identifiers(spec["table"])
            spec["local"] = [c[0] for c in self._parse_keyexprs(spec["local"])]
            spec["foreign"] = [
                c[0] for c in self._parse_keyexprs(spec["foreign"])
            ]
            return "fk_constraint", spec

        # CHECK constraint
        m = self._re_ck_constraint.match(line)
        if m:
            spec = m.groupdict()
            return "ck_constraint", spec

        # PARTITION and SUBPARTITION
        m = self._re_partition.match(line)
        if m:
            # Punt!
            return "partition", line

        # No match.
        return (None, line)

    def _parse_table_name(self, line, state):
        """Extract the table name.

        :param line: The first line of SHOW CREATE TABLE
        """

        regex, cleanup = self._pr_name
        m = regex.match(line)
        if m:
            state.table_name = cleanup(m.group("name"))

    def _parse_table_options(self, line, state):
        """Build a dictionary of all reflected table-level options.

        :param line: The final line of SHOW CREATE TABLE output.
        """

        options = {}

        if line and line != ")":
            rest_of_line = line
            for regex, cleanup in self._pr_options:
                m = regex.search(rest_of_line)
                if not m:
                    continue
                directive, value = m.group("directive"), m.group("val")
                if cleanup:
                    value = cleanup(value)
                options[directive.lower()] = value
                rest_of_line = regex.sub("", rest_of_line)

        for nope in ("auto_increment", "data directory", "index directory"):
            options.pop(nope, None)

        for opt, val in options.items():
            state.table_options["%s_%s" % (self.dialect.name, opt)] = val

    def _parse_partition_options(self, line, state):
        options = {}
        new_line = line[:]

        while new_line.startswith("(") or new_line.startswith(" "):
            new_line = new_line[1:]

        for regex, cleanup in self._pr_options:
            m = regex.search(new_line)
            if not m or "PARTITION" not in regex.pattern:
                continue

            directive = m.group("directive")
            directive = directive.lower()
            is_subpartition = directive == "subpartition"

            if directive == "partition" or is_subpartition:
                new_line = new_line.replace(") */", "")
                new_line = new_line.replace(",", "")
                if is_subpartition and new_line.endswith(")"):
                    new_line = new_line[:-1]
                if self.dialect.name == "mariadb" and new_line.endswith(")"):
                    if (
                        "MAXVALUE" in new_line
                        or "MINVALUE" in new_line
                        or "ENGINE" in new_line
                    ):
                        # final line of MariaDB partition endswith ")"
                        new_line = new_line[:-1]

                defs = "%s_%s_definitions" % (self.dialect.name, directive)
                options[defs] = new_line

            else:
                directive = directive.replace(" ", "_")
                value = m.group("val")
                if cleanup:
                    value = cleanup(value)
                options[directive] = value
            break

        for opt, val in options.items():
            part_def = "%s_partition_definitions" % (self.dialect.name)
            subpart_def = "%s_subpartition_definitions" % (self.dialect.name)
            if opt == part_def or opt == subpart_def:
                # builds a string of definitions
                if opt not in state.table_options:
                    state.table_options[opt] = val
                else:
                    state.table_options[opt] = "%s, %s" % (
                        state.table_options[opt],
                        val,
                    )
            else:
                state.table_options["%s_%s" % (self.dialect.name, opt)] = val

    def _parse_column(self, line, state):
        """Extract column details.

        Falls back to a 'minimal support' variant if full parse fails.

        :param line: Any column-bearing line from SHOW CREATE TABLE
        """

        spec = None
        m = self._re_column.match(line)
        if m:
            spec = m.groupdict()
            spec["full"] = True
        else:
            m = self._re_column_loose.match(line)
            if m:
                spec = m.groupdict()
                spec["full"] = False
        if not spec:
            util.warn("Unknown column definition %r" % line)
            return
        if not spec["full"]:
            util.warn("Incomplete reflection of column definition %r" % line)

        name, type_, args = spec["name"], spec["coltype"], spec["arg"]

        try:
            col_type = self.dialect.ischema_names[type_]
        except KeyError:
            util.warn(
                "Did not recognize type '%s' of column '%s'" % (type_, name)
            )
            col_type = sqltypes.NullType

        # Column type positional arguments eg. varchar(32)
        if args is None or args == "":
            type_args = []
        elif args[0] == "'" and args[-1] == "'":
            type_args = self._re_csv_str.findall(args)
        else:
            type_args = [int(v) for v in self._re_csv_int.findall(args)]

        # Column type keyword options
        type_kw = {}

        if issubclass(col_type, (DATETIME, TIME, TIMESTAMP)):
            if type_args:
                type_kw["fsp"] = type_args.pop(0)

        for kw in ("unsigned", "zerofill"):
            if spec.get(kw, False):
                type_kw[kw] = True
        for kw in ("charset", "collate"):
            if spec.get(kw, False):
                type_kw[kw] = spec[kw]
        if issubclass(col_type, (ENUM, SET)):
            type_args = _strip_values(type_args)

            if issubclass(col_type, SET) and "" in type_args:
                type_kw["retrieve_as_bitwise"] = True

        type_instance = col_type(*type_args, **type_kw)

        col_kw = {}

        # NOT NULL
        col_kw["nullable"] = True
        # this can be "NULL" in the case of TIMESTAMP
        if spec.get("notnull", False) == "NOT NULL":
            col_kw["nullable"] = False
        # For generated columns, the nullability is marked in a different place
        if spec.get("notnull_generated", False) == "NOT NULL":
            col_kw["nullable"] = False

        # AUTO_INCREMENT
        if spec.get("autoincr", False):
            col_kw["autoincrement"] = True
        elif issubclass(col_type, sqltypes.Integer):
            col_kw["autoincrement"] = False

        # DEFAULT
        default = spec.get("default", None)

        if default == "NULL":
            # eliminates the need to deal with this later.
            default = None

        comment = spec.get("comment", None)

        if comment is not None:
            comment = cleanup_text(comment)

        sqltext = spec.get("generated")
        if sqltext is not None:
            computed = dict(sqltext=sqltext)
            persisted = spec.get("persistence")
            if persisted is not None:
                computed["persisted"] = persisted == "STORED"
            col_kw["computed"] = computed

        col_d = dict(
            name=name, type=type_instance, default=default, comment=comment
        )
        col_d.update(col_kw)
        state.columns.append(col_d)

    def _describe_to_create(self, table_name, columns):
        """Re-format DESCRIBE output as a SHOW CREATE TABLE string.

        DESCRIBE is a much simpler reflection and is sufficient for
        reflecting views for runtime use. This method formats DDL
        for columns only; keys are omitted.

        :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
          SHOW FULL COLUMNS FROM rows must be rearranged for use with
          this function.
        """

        buffer = []
        for row in columns:
            (name, col_type, nullable, default, extra) = (
                row[i] for i in (0, 1, 2, 4, 5)
            )

            line = [" "]
            line.append(self.preparer.quote_identifier(name))
            line.append(col_type)
            if not nullable:
                line.append("NOT NULL")
            if default:
                if "auto_increment" in default:
                    pass
                elif col_type.startswith("timestamp") and default.startswith(
                    "C"
                ):
                    line.append("DEFAULT")
                    line.append(default)
                elif default == "NULL":
                    line.append("DEFAULT")
                    line.append(default)
                else:
                    line.append("DEFAULT")
                    line.append("'%s'" % default.replace("'", "''"))
            if extra:
                line.append(extra)

            buffer.append(" ".join(line))

        return "".join(
            [
                (
                    "CREATE TABLE %s (\n"
                    % self.preparer.quote_identifier(table_name)
                ),
                ",\n".join(buffer),
                "\n) ",
            ]
        )

    def _parse_keyexprs(self, identifiers):
        """Unpack '"col"(2),"col" ASC'-ish strings into components."""

        return [
            (colname, int(length) if length else None, modifiers)
            for colname, length, modifiers in self._re_keyexprs.findall(
                identifiers
            )
        ]

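    # Illustrative sketch (not in the original source): for the string
    # '`col`,`col2`(32),`col3` DESC', _parse_keyexprs would yield roughly
    # [("col", None, ""), ("col2", 32, ""), ("col3", None, "DESC")].
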
def _prep_regexes(self):
|
||||
"""Pre-compile regular expressions."""
|
||||
|
||||
self._re_columns = []
|
||||
self._pr_options = []
|
||||
|
||||
_final = self.preparer.final_quote
|
||||
|
||||
quotes = dict(
|
||||
zip(
|
||||
("iq", "fq", "esc_fq"),
|
||||
[
|
||||
re.escape(s)
|
||||
for s in (
|
||||
self.preparer.initial_quote,
|
||||
_final,
|
||||
self.preparer._escape_identifier(_final),
|
||||
)
|
||||
],
|
||||
)
|
||||
)
|
||||
|
||||
self._pr_name = _pr_compile(
|
||||
r"^CREATE (?:\w+ +)?TABLE +"
|
||||
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($" % quotes,
|
||||
self.preparer._unescape_identifier,
|
||||
)
|
||||
|
||||
self._re_is_view = _re_compile(r"^CREATE(?! TABLE)(\s.*)?\sVIEW")
|
||||
|
||||
# `col`,`col2`(32),`col3`(15) DESC
|
||||
#
|
||||
self._re_keyexprs = _re_compile(
|
||||
r"(?:"
|
||||
r"(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)"
|
||||
r"(?:\((\d+)\))?(?: +(ASC|DESC))?(?=\,|$))+" % quotes
|
||||
)
|
||||
|
||||
# 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
|
||||
self._re_csv_str = _re_compile(r"\x27(?:\x27\x27|[^\x27])*\x27")
|
||||
|
||||
# 123 or 123,456
|
||||
self._re_csv_int = _re_compile(r"\d+")
|
||||
|
||||
# `colname` <type> [type opts]
|
||||
# (NOT NULL | NULL)
|
||||
# DEFAULT ('value' | CURRENT_TIMESTAMP...)
|
||||
# COMMENT 'comment'
|
||||
# COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
|
||||
# STORAGE (DISK|MEMORY)
|
||||
self._re_column = _re_compile(
|
||||
r" "
|
||||
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
|
||||
r"(?P<coltype>\w+)"
|
||||
r"(?:\((?P<arg>(?:\d+|\d+,\d+|"
|
||||
r"(?:'(?:''|[^'])*',?)+))\))?"
|
||||
r"(?: +(?P<unsigned>UNSIGNED))?"
|
||||
r"(?: +(?P<zerofill>ZEROFILL))?"
|
||||
r"(?: +CHARACTER SET +(?P<charset>[\w_]+))?"
|
||||
r"(?: +COLLATE +(?P<collate>[\w_]+))?"
|
||||
r"(?: +(?P<notnull>(?:NOT )?NULL))?"
|
||||
r"(?: +DEFAULT +(?P<default>"
|
||||
r"(?:NULL|'(?:''|[^'])*'|\(.+?\)|[\-\w\.\(\)]+"
|
||||
r"(?: +ON UPDATE [\-\w\.\(\)]+)?)"
|
||||
r"))?"
|
||||
r"(?: +(?:GENERATED ALWAYS)? ?AS +(?P<generated>\("
|
||||
r".*\))? ?(?P<persistence>VIRTUAL|STORED)?"
|
||||
r"(?: +(?P<notnull_generated>(?:NOT )?NULL))?"
|
||||
r")?"
|
||||
r"(?: +(?P<autoincr>AUTO_INCREMENT))?"
|
||||
r"(?: +COMMENT +'(?P<comment>(?:''|[^'])*)')?"
|
||||
r"(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?"
|
||||
r"(?: +STORAGE +(?P<storage>\w+))?"
|
||||
r"(?: +(?P<extra>.*))?"
|
||||
r",?$" % quotes
|
||||
)
|
||||
|
||||
# Fallback, try to parse as little as possible
|
||||
self._re_column_loose = _re_compile(
|
||||
r" "
|
||||
r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
|
||||
r"(?P<coltype>\w+)"
|
||||
r"(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?"
|
||||
r".*?(?P<notnull>(?:NOT )NULL)?" % quotes
|
||||
)
|
||||
|
||||
# (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
|
||||
# (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
|
||||
# KEY_BLOCK_SIZE size | WITH PARSER name /*!50100 WITH PARSER name */
|
||||
self._re_key = _re_compile(
|
||||
r" "
|
||||
r"(?:(?P<type>\S+) )?KEY"
|
||||
r"(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?"
|
||||
r"(?: +USING +(?P<using_pre>\S+))?"
|
||||
r" +\((?P<columns>.+?)\)"
|
||||
r"(?: +USING +(?P<using_post>\S+))?"
|
||||
r"(?: +KEY_BLOCK_SIZE *[ =]? *(?P<keyblock>\S+))?"
|
||||
r"(?: +WITH PARSER +(?P<parser>\S+))?"
|
||||
r"(?: +COMMENT +(?P<comment>(\x27\x27|\x27([^\x27])*?\x27)+))?"
|
||||
r"(?: +/\*(?P<version_sql>.+)\*/ *)?"
|
||||
r",?$" % quotes
|
||||
)

        # https://forums.mysql.com/read.php?20,567102,567111#msg-567111
        # It means if the MySQL version >= \d+, execute what's in the comment
        self._re_key_version_sql = _re_compile(
            r"\!\d+ " r"(?: *WITH PARSER +(?P<parser>\S+) *)?"
        )

        # CONSTRAINT `name` FOREIGN KEY (`local_col`)
        # REFERENCES `remote` (`remote_col`)
        # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
        # ON DELETE CASCADE ON UPDATE RESTRICT
        #
        # unique constraints come back as KEYs
        kw = quotes.copy()
        kw["on"] = "RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT"
        self._re_fk_constraint = _re_compile(
            r"  "
            r"CONSTRAINT +"
            r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
            r"FOREIGN KEY +"
            r"\((?P<local>[^\)]+?)\) REFERENCES +"
            r"(?P<table>%(iq)s[^%(fq)s]+%(fq)s"
            r"(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +"
            r"\((?P<foreign>(?:%(iq)s[^%(fq)s]+%(fq)s(?: *, *)?)+)\)"
            r"(?: +(?P<match>MATCH \w+))?"
            r"(?: +ON DELETE (?P<ondelete>%(on)s))?"
            r"(?: +ON UPDATE (?P<onupdate>%(on)s))?" % kw
        )

        # CONSTRAINT `CONSTRAINT_1` CHECK (`x` > 5)'
        # testing on MariaDB 10.2 shows that the CHECK constraint
        # is returned on a line by itself, so to match without worrying
        # about parenthesis in the expression we go to the end of the line
        self._re_ck_constraint = _re_compile(
            r"  "
            r"CONSTRAINT +"
            r"%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +"
            r"CHECK +"
            r"\((?P<sqltext>.+)\),?" % kw
        )

        # PARTITION
        #
        # punt!
        self._re_partition = _re_compile(r"(?:.*)(?:SUB)?PARTITION(?:.*)")

        # Table-level options (COLLATE, ENGINE, etc.)
        # Do the string options first, since they have quoted
        # strings we need to get rid of.
        for option in _options_of_type_string:
            self._add_option_string(option)

        for option in (
            "ENGINE",
            "TYPE",
            "AUTO_INCREMENT",
            "AVG_ROW_LENGTH",
            "CHARACTER SET",
            "DEFAULT CHARSET",
            "CHECKSUM",
            "COLLATE",
            "DELAY_KEY_WRITE",
            "INSERT_METHOD",
            "MAX_ROWS",
            "MIN_ROWS",
            "PACK_KEYS",
            "ROW_FORMAT",
            "KEY_BLOCK_SIZE",
            "STATS_SAMPLE_PAGES",
        ):
            self._add_option_word(option)

        for option in (
            "PARTITION BY",
            "SUBPARTITION BY",
            "PARTITIONS",
            "SUBPARTITIONS",
            "PARTITION",
            "SUBPARTITION",
        ):
            self._add_partition_option_word(option)

        self._add_option_regex("UNION", r"\([^\)]+\)")
        self._add_option_regex("TABLESPACE", r".*? STORAGE DISK")
        self._add_option_regex(
            "RAID_TYPE",
            r"\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+",
        )

    _optional_equals = r"(?:\s*(?:=\s*)|\s+)"

    def _add_option_string(self, directive):
        regex = r"(?P<directive>%s)%s" r"'(?P<val>(?:[^']|'')*?)'(?!')" % (
            re.escape(directive),
            self._optional_equals,
        )
        self._pr_options.append(_pr_compile(regex, cleanup_text))

    def _add_option_word(self, directive):
        regex = r"(?P<directive>%s)%s" r"(?P<val>\w+)" % (
            re.escape(directive),
            self._optional_equals,
        )
        self._pr_options.append(_pr_compile(regex))

    def _add_partition_option_word(self, directive):
        if directive == "PARTITION BY" or directive == "SUBPARTITION BY":
            regex = r"(?<!\S)(?P<directive>%s)%s" r"(?P<val>\w+.*)" % (
                re.escape(directive),
                self._optional_equals,
            )
        elif directive == "SUBPARTITIONS" or directive == "PARTITIONS":
            regex = r"(?<!\S)(?P<directive>%s)%s" r"(?P<val>\d+)" % (
                re.escape(directive),
                self._optional_equals,
            )
        else:
            regex = r"(?<!\S)(?P<directive>%s)(?!\S)" % (re.escape(directive),)
        self._pr_options.append(_pr_compile(regex))

    def _add_option_regex(self, directive, regex):
        regex = r"(?P<directive>%s)%s" r"(?P<val>%s)" % (
            re.escape(directive),
            self._optional_equals,
            regex,
        )
        self._pr_options.append(_pr_compile(regex))


_options_of_type_string = (
    "COMMENT",
    "DATA DIRECTORY",
    "INDEX DIRECTORY",
    "PASSWORD",
    "CONNECTION",
)


def _pr_compile(regex, cleanup=None):
    """Prepare a 2-tuple of compiled regex and callable."""

    return (_re_compile(regex), cleanup)


def _re_compile(regex):
    """Compile a string to regex, I and UNICODE."""

    return re.compile(regex, re.I | re.UNICODE)


def _strip_values(values):
    "Strip quotes from reflected values."
    strip_values = []
    for a in values:
        if a[0:1] == '"' or a[0:1] == "'":
            # strip enclosing quotes and unquote interior
            a = a[1:-1].replace(a[0] * 2, a[0])
        strip_values.append(a)
    return strip_values
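
# Illustrative sketch (editor's note, not part of the original module):
# _strip_values undoes the quoting applied to reflected ENUM/SET values,
# collapsing doubled quote characters on the way, e.g.
#
#     _strip_values(["'a'", "'it''s'", "plain"])  # -> ["a", "it's", "plain"]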


def cleanup_text(raw_text: str) -> str:
    if "\\" in raw_text:
        raw_text = re.sub(
            _control_char_regexp, lambda s: _control_char_map[s[0]], raw_text
        )
    return raw_text.replace("''", "'")


_control_char_map = {
    "\\\\": "\\",
    "\\0": "\0",
    "\\a": "\a",
    "\\b": "\b",
    "\\t": "\t",
    "\\n": "\n",
    "\\v": "\v",
    "\\f": "\f",
    "\\r": "\r",
    # '\\e':'\e',
}
_control_char_regexp = re.compile(
    "|".join(re.escape(k) for k in _control_char_map)
)
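
# Illustrative sketch (editor's note, not part of the original module):
# cleanup_text first maps backslash escapes through _control_char_map,
# then collapses doubled single quotes, e.g.
#
#     cleanup_text(r"it''s a\ttab")  # -> "it's a\ttab" (with a real tab)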
|
@ -0,0 +1,571 @@
|
||||
# dialects/mysql/reserved_words.py
|
||||
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
|
||||
# <see AUTHORS file>
|
||||
#
|
||||
# This module is part of SQLAlchemy and is released under
|
||||
# the MIT License: https://www.opensource.org/licenses/mit-license.php
|
||||
|
||||
# generated using:
|
||||
# https://gist.github.com/kkirsche/4f31f2153ed7a3248be1ec44ca6ddbc9
|
||||
#
|
||||
# https://mariadb.com/kb/en/reserved-words/
|
||||
# includes: Reserved Words, Oracle Mode (separate set unioned)
|
||||
# excludes: Exceptions, Function Names
|
||||
# mypy: ignore-errors
|
||||
|
||||
RESERVED_WORDS_MARIADB = {
|
||||
"accessible",
|
||||
"add",
|
||||
"all",
|
||||
"alter",
|
||||
"analyze",
|
||||
"and",
|
||||
"as",
|
||||
"asc",
|
||||
"asensitive",
|
||||
"before",
|
||||
"between",
|
||||
"bigint",
|
||||
"binary",
|
||||
"blob",
|
||||
"both",
|
||||
"by",
|
||||
"call",
|
||||
"cascade",
|
||||
"case",
|
||||
"change",
|
||||
"char",
|
||||
"character",
|
||||
"check",
|
||||
"collate",
|
||||
"column",
|
||||
"condition",
|
||||
"constraint",
|
||||
"continue",
|
||||
"convert",
|
||||
"create",
|
||||
"cross",
|
||||
"current_date",
|
||||
"current_role",
|
||||
"current_time",
|
||||
"current_timestamp",
|
||||
"current_user",
|
||||
"cursor",
|
||||
"database",
|
||||
"databases",
|
||||
"day_hour",
|
||||
"day_microsecond",
|
||||
"day_minute",
|
||||
"day_second",
|
||||
"dec",
|
||||
"decimal",
|
||||
"declare",
|
||||
"default",
|
||||
"delayed",
|
||||
"delete",
|
||||
"desc",
|
||||
"describe",
|
||||
"deterministic",
|
||||
"distinct",
|
||||
"distinctrow",
|
||||
"div",
|
||||
"do_domain_ids",
|
||||
"double",
|
||||
"drop",
|
||||
"dual",
|
||||
"each",
|
||||
"else",
|
||||
"elseif",
|
||||
"enclosed",
|
||||
"escaped",
|
||||
"except",
|
||||
"exists",
|
||||
"exit",
|
||||
"explain",
|
||||
"false",
|
||||
"fetch",
|
||||
"float",
|
||||
"float4",
|
||||
"float8",
|
||||
"for",
|
||||
"force",
|
||||
"foreign",
|
||||
"from",
|
||||
"fulltext",
|
||||
"general",
|
||||
"grant",
|
||||
"group",
|
||||
"having",
|
||||
"high_priority",
|
||||
"hour_microsecond",
|
||||
"hour_minute",
|
||||
"hour_second",
|
||||
"if",
|
||||
"ignore",
|
||||
"ignore_domain_ids",
|
||||
"ignore_server_ids",
|
||||
"in",
|
||||
"index",
|
||||
"infile",
|
||||
"inner",
|
||||
"inout",
|
||||
"insensitive",
|
||||
"insert",
|
||||
"int",
|
||||
"int1",
|
||||
"int2",
|
||||
"int3",
|
||||
"int4",
|
||||
"int8",
|
||||
"integer",
|
||||
"intersect",
|
||||
"interval",
|
||||
"into",
|
||||
"is",
|
||||
"iterate",
|
||||
"join",
|
||||
"key",
|
||||
"keys",
|
||||
"kill",
|
||||
"leading",
|
||||
"leave",
|
||||
"left",
|
||||
"like",
|
||||
"limit",
|
||||
"linear",
|
||||
"lines",
|
||||
"load",
|
||||
"localtime",
|
||||
"localtimestamp",
|
||||
"lock",
|
||||
"long",
|
||||
"longblob",
|
||||
"longtext",
|
||||
"loop",
|
||||
"low_priority",
|
||||
"master_heartbeat_period",
|
||||
"master_ssl_verify_server_cert",
|
||||
"match",
|
||||
"maxvalue",
|
||||
"mediumblob",
|
||||
"mediumint",
|
||||
"mediumtext",
|
||||
"middleint",
|
||||
"minute_microsecond",
|
||||
"minute_second",
|
||||
"mod",
|
||||
"modifies",
|
||||
"natural",
|
||||
"no_write_to_binlog",
|
||||
"not",
|
||||
"null",
|
||||
"numeric",
|
||||
"offset",
|
||||
"on",
|
||||
"optimize",
|
||||
"option",
|
||||
"optionally",
|
||||
"or",
|
||||
"order",
|
||||
"out",
|
||||
"outer",
|
||||
"outfile",
|
||||
"over",
|
||||
"page_checksum",
|
||||
"parse_vcol_expr",
|
||||
"partition",
|
||||
"position",
|
||||
"precision",
|
||||
"primary",
|
||||
"procedure",
|
||||
"purge",
|
||||
"range",
|
||||
"read",
|
||||
"read_write",
|
||||
"reads",
|
||||
"real",
|
||||
"recursive",
|
||||
"ref_system_id",
|
||||
"references",
|
||||
"regexp",
|
||||
"release",
|
||||
"rename",
|
||||
"repeat",
|
||||
"replace",
|
||||
"require",
|
||||
"resignal",
|
||||
"restrict",
|
||||
"return",
|
||||
"returning",
|
||||
"revoke",
|
||||
"right",
|
||||
"rlike",
|
||||
"rows",
|
||||
"row_number",
|
||||
"schema",
|
||||
"schemas",
|
||||
"second_microsecond",
|
||||
"select",
|
||||
"sensitive",
|
||||
"separator",
|
||||
"set",
|
||||
"show",
|
||||
"signal",
|
||||
"slow",
|
||||
"smallint",
|
||||
"spatial",
|
||||
"specific",
|
||||
"sql",
|
||||
"sql_big_result",
|
||||
"sql_calc_found_rows",
|
||||
"sql_small_result",
|
||||
"sqlexception",
|
||||
"sqlstate",
|
||||
"sqlwarning",
|
||||
"ssl",
|
||||
"starting",
|
||||
"stats_auto_recalc",
|
||||
"stats_persistent",
|
||||
"stats_sample_pages",
|
||||
"straight_join",
|
||||
"table",
|
||||
"terminated",
|
||||
"then",
|
||||
"tinyblob",
|
||||
"tinyint",
|
||||
"tinytext",
|
||||
"to",
|
||||
"trailing",
|
||||
"trigger",
|
||||
"true",
|
||||
"undo",
|
||||
"union",
|
||||
"unique",
|
||||
"unlock",
|
||||
"unsigned",
|
||||
"update",
|
||||
"usage",
|
||||
"use",
|
||||
"using",
|
||||
"utc_date",
|
||||
"utc_time",
|
||||
"utc_timestamp",
|
||||
"values",
|
||||
"varbinary",
|
||||
"varchar",
|
||||
"varcharacter",
|
||||
"varying",
|
||||
"when",
|
||||
"where",
|
||||
"while",
|
||||
"window",
|
||||
"with",
|
||||
"write",
|
||||
"xor",
|
||||
"year_month",
|
||||
"zerofill",
|
||||
}.union(
|
||||
{
|
||||
"body",
|
||||
"elsif",
|
||||
"goto",
|
||||
"history",
|
||||
"others",
|
||||
"package",
|
||||
"period",
|
||||
"raise",
|
||||
"rowtype",
|
||||
"system",
|
||||
"system_time",
|
||||
"versioning",
|
||||
"without",
|
||||
}
|
||||
)
|
||||
|
||||
# https://dev.mysql.com/doc/refman/8.3/en/keywords.html
|
||||
# https://dev.mysql.com/doc/refman/8.0/en/keywords.html
|
||||
# https://dev.mysql.com/doc/refman/5.7/en/keywords.html
|
||||
# https://dev.mysql.com/doc/refman/5.6/en/keywords.html
|
||||
# includes: MySQL x.0 Keywords and Reserved Words
|
||||
# excludes: MySQL x.0 New Keywords and Reserved Words,
|
||||
# MySQL x.0 Removed Keywords and Reserved Words
|
||||
RESERVED_WORDS_MYSQL = {
|
||||
"accessible",
|
||||
"add",
|
||||
"admin",
|
||||
"all",
|
||||
"alter",
|
||||
"analyze",
|
||||
"and",
|
||||
"array",
|
||||
"as",
|
||||
"asc",
|
||||
"asensitive",
|
||||
"before",
|
||||
"between",
|
||||
"bigint",
|
||||
"binary",
|
||||
"blob",
|
||||
"both",
|
||||
"by",
|
||||
"call",
|
||||
"cascade",
|
||||
"case",
|
||||
"change",
|
||||
"char",
|
||||
"character",
|
||||
"check",
|
||||
"collate",
|
||||
"column",
|
||||
"condition",
|
||||
"constraint",
|
||||
"continue",
|
||||
"convert",
|
||||
"create",
|
||||
"cross",
|
||||
"cube",
|
||||
"cume_dist",
|
||||
"current_date",
|
||||
"current_time",
|
||||
"current_timestamp",
|
||||
"current_user",
|
||||
"cursor",
|
||||
"database",
|
||||
"databases",
|
||||
"day_hour",
|
||||
"day_microsecond",
|
||||
"day_minute",
|
||||
"day_second",
|
||||
"dec",
|
||||
"decimal",
|
||||
"declare",
|
||||
"default",
|
||||
"delayed",
|
||||
"delete",
|
||||
"dense_rank",
|
||||
"desc",
|
||||
"describe",
|
||||
"deterministic",
|
||||
"distinct",
|
||||
"distinctrow",
|
||||
"div",
|
||||
"double",
|
||||
"drop",
|
||||
"dual",
|
||||
"each",
|
||||
"else",
|
||||
"elseif",
|
||||
"empty",
|
||||
"enclosed",
|
||||
"escaped",
|
||||
"except",
|
||||
"exists",
|
||||
"exit",
|
||||
"explain",
|
||||
"false",
|
||||
"fetch",
|
||||
"first_value",
|
||||
"float",
|
||||
"float4",
|
||||
"float8",
|
||||
"for",
|
||||
"force",
|
||||
"foreign",
|
||||
"from",
|
||||
"fulltext",
|
||||
"function",
|
||||
"general",
|
||||
"generated",
|
||||
"get",
|
||||
"get_master_public_key",
|
||||
"grant",
|
||||
"group",
|
||||
"grouping",
|
||||
"groups",
|
||||
"having",
|
||||
"high_priority",
|
||||
"hour_microsecond",
|
||||
"hour_minute",
|
||||
"hour_second",
|
||||
"if",
|
||||
"ignore",
|
||||
"ignore_server_ids",
|
||||
"in",
|
||||
"index",
|
||||
"infile",
|
||||
"inner",
|
||||
"inout",
|
||||
"insensitive",
|
||||
"insert",
|
||||
"int",
|
||||
"int1",
|
||||
"int2",
|
||||
"int3",
|
||||
"int4",
|
||||
"int8",
|
||||
"integer",
|
||||
"intersect",
|
||||
"interval",
|
||||
"into",
|
||||
"io_after_gtids",
|
||||
"io_before_gtids",
|
||||
"is",
|
||||
"iterate",
|
||||
"join",
|
||||
"json_table",
|
||||
"key",
|
||||
"keys",
|
||||
"kill",
|
||||
"lag",
|
||||
"last_value",
|
||||
"lateral",
|
||||
"lead",
|
||||
"leading",
|
||||
"leave",
|
||||
"left",
|
||||
"like",
|
||||
"limit",
|
||||
"linear",
|
||||
"lines",
|
||||
"load",
|
||||
"localtime",
|
||||
"localtimestamp",
|
||||
"lock",
|
||||
"long",
|
||||
"longblob",
|
||||
"longtext",
|
||||
"loop",
|
||||
"low_priority",
|
||||
"master_bind",
|
||||
"master_heartbeat_period",
|
||||
"master_ssl_verify_server_cert",
|
||||
"match",
|
||||
"maxvalue",
|
||||
"mediumblob",
|
||||
"mediumint",
|
||||
"mediumtext",
|
||||
"member",
|
||||
"middleint",
|
||||
"minute_microsecond",
|
||||
"minute_second",
|
||||
"mod",
|
||||
"modifies",
|
||||
"natural",
|
||||
"no_write_to_binlog",
|
||||
"not",
|
||||
"nth_value",
|
||||
"ntile",
|
||||
"null",
|
||||
"numeric",
|
||||
"of",
|
||||
"on",
|
||||
"optimize",
|
||||
"optimizer_costs",
|
||||
"option",
|
||||
"optionally",
|
||||
"or",
|
||||
"order",
|
||||
"out",
|
||||
"outer",
|
||||
"outfile",
|
||||
"over",
|
||||
"parse_gcol_expr",
|
||||
"parallel",
|
||||
"partition",
|
||||
"percent_rank",
|
||||
"persist",
|
||||
"persist_only",
|
||||
"precision",
|
||||
"primary",
|
||||
"procedure",
|
||||
"purge",
|
||||
"qualify",
|
||||
"range",
|
||||
"rank",
|
||||
"read",
|
||||
"read_write",
|
||||
"reads",
|
||||
"real",
|
||||
"recursive",
|
||||
"references",
|
||||
"regexp",
|
||||
"release",
|
||||
"rename",
|
||||
"repeat",
|
||||
"replace",
|
||||
"require",
|
||||
"resignal",
|
||||
"restrict",
|
||||
"return",
|
||||
"revoke",
|
||||
"right",
|
||||
"rlike",
|
||||
"role",
|
||||
"row",
|
||||
"row_number",
|
||||
"rows",
|
||||
"schema",
|
||||
"schemas",
|
||||
"second_microsecond",
|
||||
"select",
|
||||
"sensitive",
|
||||
"separator",
|
||||
"set",
|
||||
"show",
|
||||
"signal",
|
||||
"slow",
|
||||
"smallint",
|
||||
"spatial",
|
||||
"specific",
|
||||
"sql",
|
||||
"sql_after_gtids",
|
||||
"sql_before_gtids",
|
||||
"sql_big_result",
|
||||
"sql_calc_found_rows",
|
||||
"sql_small_result",
|
||||
"sqlexception",
|
||||
"sqlstate",
|
||||
"sqlwarning",
|
||||
"ssl",
|
||||
"starting",
|
||||
"stored",
|
||||
"straight_join",
|
||||
"system",
|
||||
"table",
|
||||
"terminated",
|
||||
"then",
|
||||
"tinyblob",
|
||||
"tinyint",
|
||||
"tinytext",
|
||||
"to",
|
||||
"trailing",
|
||||
"trigger",
|
||||
"true",
|
||||
"undo",
|
||||
"union",
|
||||
"unique",
|
||||
"unlock",
|
||||
"unsigned",
|
||||
"update",
|
||||
"usage",
|
||||
"use",
|
||||
"using",
|
||||
"utc_date",
|
||||
"utc_time",
|
||||
"utc_timestamp",
|
||||
"values",
|
||||
"varbinary",
|
||||
"varchar",
|
||||
"varcharacter",
|
||||
"varying",
|
||||
"virtual",
|
||||
"when",
|
||||
"where",
|
||||
"while",
|
||||
"window",
|
||||
"with",
|
||||
"write",
|
||||
"xor",
|
||||
"year_month",
|
||||
"zerofill",
|
||||
}
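
# Illustrative sketch (editor's note, not part of the original module):
# the MySQL/MariaDB identifier preparers consult these sets to decide
# whether an identifier must be backtick-quoted in emitted SQL, e.g.
#
#     "select" in RESERVED_WORDS_MYSQL       # True  -> quoted as `select`
#     "returning" in RESERVED_WORDS_MARIADB  # True (MariaDB only)
#     "returning" in RESERVED_WORDS_MYSQL    # False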
@ -0,0 +1,773 @@
# dialects/mysql/types.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


import datetime

from ... import exc
from ... import util
from ...sql import sqltypes


class _NumericType:
    """Base for MySQL numeric types.

    This is the base both for NUMERIC as well as INTEGER, hence
    it's a mixin.

    """

    def __init__(self, unsigned=False, zerofill=False, **kw):
        self.unsigned = unsigned
        self.zerofill = zerofill
        super().__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_NumericType, sqltypes.Numeric]
        )


class _FloatType(_NumericType, sqltypes.Float):
    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        if isinstance(self, (REAL, DOUBLE)) and (
            (precision is None and scale is not None)
            or (precision is not None and scale is None)
        ):
            raise exc.ArgumentError(
                "You must specify both precision and scale or omit "
                "both altogether."
            )
        super().__init__(precision=precision, asdecimal=asdecimal, **kw)
        self.scale = scale

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_FloatType, _NumericType, sqltypes.Float]
        )
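
# Illustrative sketch (editor's note, not part of the original module):
# the precision/scale check above only guards REAL and DOUBLE, e.g.
#
#     REAL(precision=10)           # raises ArgumentError
#     REAL(precision=10, scale=2)  # OK
#     FLOAT(precision=10)          # OK; FLOAT is exempt from the check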


class _IntegerType(_NumericType, sqltypes.Integer):
    def __init__(self, display_width=None, **kw):
        self.display_width = display_width
        super().__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_IntegerType, _NumericType, sqltypes.Integer]
        )


class _StringType(sqltypes.String):
    """Base for MySQL string types."""

    def __init__(
        self,
        charset=None,
        collation=None,
        ascii=False,  # noqa
        binary=False,
        unicode=False,
        national=False,
        **kw,
    ):
        self.charset = charset

        # allow collate= or collation=
        kw.setdefault("collation", kw.pop("collate", collation))

        self.ascii = ascii
        self.unicode = unicode
        self.binary = binary
        self.national = national
        super().__init__(**kw)

    def __repr__(self):
        return util.generic_repr(
            self, to_inspect=[_StringType, sqltypes.String]
        )


class _MatchType(sqltypes.Float, sqltypes.MatchType):
    def __init__(self, **kw):
        # TODO: float arguments?
        sqltypes.Float.__init__(self)
        sqltypes.MatchType.__init__(self)


class NUMERIC(_NumericType, sqltypes.NUMERIC):
    """MySQL NUMERIC type."""

    __visit_name__ = "NUMERIC"

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a NUMERIC.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw
        )
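
# Illustrative sketch (editor's note, not part of the original module):
# a hypothetical column using the MySQL-specific arguments, e.g.
#
#     Column("price", NUMERIC(10, 2, unsigned=True))
#     # would typically render: price NUMERIC(10, 2) UNSIGNED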


class DECIMAL(_NumericType, sqltypes.DECIMAL):
    """MySQL DECIMAL type."""

    __visit_name__ = "DECIMAL"

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DECIMAL.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw
        )


class DOUBLE(_FloatType, sqltypes.DOUBLE):
    """MySQL DOUBLE type."""

    __visit_name__ = "DOUBLE"

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a DOUBLE.

        .. note::

            The :class:`.DOUBLE` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw
        )


class REAL(_FloatType, sqltypes.REAL):
    """MySQL REAL type."""

    __visit_name__ = "REAL"

    def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
        """Construct a REAL.

        .. note::

            The :class:`.REAL` type by default converts from float
            to Decimal, using a truncation that defaults to 10 digits.
            Specify either ``scale=n`` or ``decimal_return_scale=n`` in order
            to change this scale, or ``asdecimal=False`` to return values
            directly as Python floating points.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw
        )


class FLOAT(_FloatType, sqltypes.FLOAT):
    """MySQL FLOAT type."""

    __visit_name__ = "FLOAT"

    def __init__(self, precision=None, scale=None, asdecimal=False, **kw):
        """Construct a FLOAT.

        :param precision: Total digits in this number. If scale and precision
          are both None, values are stored to limits allowed by the server.

        :param scale: The number of digits after the decimal point.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(
            precision=precision, scale=scale, asdecimal=asdecimal, **kw
        )

    def bind_processor(self, dialect):
        return None


class INTEGER(_IntegerType, sqltypes.INTEGER):
    """MySQL INTEGER type."""

    __visit_name__ = "INTEGER"

    def __init__(self, display_width=None, **kw):
        """Construct an INTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(display_width=display_width, **kw)


class BIGINT(_IntegerType, sqltypes.BIGINT):
    """MySQL BIGINTEGER type."""

    __visit_name__ = "BIGINT"

    def __init__(self, display_width=None, **kw):
        """Construct a BIGINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(display_width=display_width, **kw)


class MEDIUMINT(_IntegerType):
    """MySQL MEDIUMINTEGER type."""

    __visit_name__ = "MEDIUMINT"

    def __init__(self, display_width=None, **kw):
        """Construct a MEDIUMINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(display_width=display_width, **kw)


class TINYINT(_IntegerType):
    """MySQL TINYINT type."""

    __visit_name__ = "TINYINT"

    def __init__(self, display_width=None, **kw):
        """Construct a TINYINT.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(display_width=display_width, **kw)


class SMALLINT(_IntegerType, sqltypes.SMALLINT):
    """MySQL SMALLINTEGER type."""

    __visit_name__ = "SMALLINT"

    def __init__(self, display_width=None, **kw):
        """Construct a SMALLINTEGER.

        :param display_width: Optional, maximum display width for this number.

        :param unsigned: a boolean, optional.

        :param zerofill: Optional. If true, values will be stored as strings
          left-padded with zeros. Note that this does not affect the values
          returned by the underlying database API, which continue to be
          numeric.

        """
        super().__init__(display_width=display_width, **kw)


class BIT(sqltypes.TypeEngine):
    """MySQL BIT type.

    This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater
    for MyISAM, MEMORY, InnoDB and BDB. For older versions, use a
    MSTinyInteger() type.

    """

    __visit_name__ = "BIT"

    def __init__(self, length=None):
        """Construct a BIT.

        :param length: Optional, number of bits.

        """
        self.length = length

    def result_processor(self, dialect, coltype):
        """Convert MySQL's 64 bit, variable length binary string to an
        int."""

        if dialect.supports_native_bit:
            return None

        def process(value):
            if value is not None:
                v = 0
                for i in value:
                    if not isinstance(i, int):
                        i = ord(i)  # convert byte to int on Python 2
                    v = v << 8 | i
                return v
            return value

        return process
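
        # Illustrative sketch (editor's note, not part of the original
        # source): for a driver returning raw bytes,
        #
        #     process(b"\x05\xff")  # -> (5 << 8) | 0xFF == 1535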


class TIME(sqltypes.TIME):
    """MySQL TIME type."""

    __visit_name__ = "TIME"

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIME type.

        :param timezone: not used by the MySQL dialect.
        :param fsp: fractional seconds precision value.
          MySQL 5.6 supports storage of fractional seconds;
          this parameter will be used when emitting DDL
          for the TIME type.

          .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        """
        super().__init__(timezone=timezone)
        self.fsp = fsp

    def result_processor(self, dialect, coltype):
        time = datetime.time

        def process(value):
            # convert from a timedelta value
            if value is not None:
                microseconds = value.microseconds
                seconds = value.seconds
                minutes = seconds // 60
                return time(
                    minutes // 60,
                    minutes % 60,
                    seconds - minutes * 60,
                    microsecond=microseconds,
                )
            else:
                return None

        return process
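
        # Illustrative sketch (editor's note, not part of the original
        # source): drivers typically return TIME values as timedeltas,
        # which the processor converts, e.g.
        #
        #     process(datetime.timedelta(minutes=90, seconds=5,
        #                                microseconds=7))
        #     # -> datetime.time(1, 30, 5, microsecond=7)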


class TIMESTAMP(sqltypes.TIMESTAMP):
    """MySQL TIMESTAMP type."""

    __visit_name__ = "TIMESTAMP"

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL TIMESTAMP type.

        :param timezone: not used by the MySQL dialect.
        :param fsp: fractional seconds precision value.
          MySQL 5.6.4 supports storage of fractional seconds;
          this parameter will be used when emitting DDL
          for the TIMESTAMP type.

          .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        """
        super().__init__(timezone=timezone)
        self.fsp = fsp


class DATETIME(sqltypes.DATETIME):
    """MySQL DATETIME type."""

    __visit_name__ = "DATETIME"

    def __init__(self, timezone=False, fsp=None):
        """Construct a MySQL DATETIME type.

        :param timezone: not used by the MySQL dialect.
        :param fsp: fractional seconds precision value.
          MySQL 5.6.4 supports storage of fractional seconds;
          this parameter will be used when emitting DDL
          for the DATETIME type.

          .. note::

            DBAPI driver support for fractional seconds may
            be limited; current support includes
            MySQL Connector/Python.

        """
        super().__init__(timezone=timezone)
        self.fsp = fsp


class YEAR(sqltypes.TypeEngine):
    """MySQL YEAR type, for single byte storage of years 1901-2155."""

    __visit_name__ = "YEAR"

    def __init__(self, display_width=None):
        self.display_width = display_width


class TEXT(_StringType, sqltypes.TEXT):
    """MySQL TEXT type, for character storage encoded up to 2^16 bytes."""

    __visit_name__ = "TEXT"

    def __init__(self, length=None, **kw):
        """Construct a TEXT.

        :param length: Optional, if provided the server may optimize storage
          by substituting the smallest TEXT type sufficient to store
          ``length`` bytes of characters.

        :param charset: Optional, a column-level character set for this string
          value. Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set. Generates
          BINARY in schema. This does not affect the type of data stored,
          only the collation of character data.

        """
        super().__init__(length=length, **kw)
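
# Illustrative sketch (editor's note, not part of the original module):
# a hypothetical column using the charset/collation arguments, e.g.
#
#     Column("body", TEXT(charset="utf8mb4", collation="utf8mb4_bin"))
#     # would typically render:
#     # body TEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_bin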


class TINYTEXT(_StringType):
    """MySQL TINYTEXT type, for character storage encoded up to 2^8 bytes."""

    __visit_name__ = "TINYTEXT"

    def __init__(self, **kwargs):
        """Construct a TINYTEXT.

        :param charset: Optional, a column-level character set for this string
          value. Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set. Generates
          BINARY in schema. This does not affect the type of data stored,
          only the collation of character data.

        """
        super().__init__(**kwargs)


class MEDIUMTEXT(_StringType):
    """MySQL MEDIUMTEXT type, for character storage encoded up
    to 2^24 bytes."""

    __visit_name__ = "MEDIUMTEXT"

    def __init__(self, **kwargs):
        """Construct a MEDIUMTEXT.

        :param charset: Optional, a column-level character set for this string
          value. Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set. Generates
          BINARY in schema. This does not affect the type of data stored,
          only the collation of character data.

        """
        super().__init__(**kwargs)


class LONGTEXT(_StringType):
    """MySQL LONGTEXT type, for character storage encoded up to 2^32 bytes."""

    __visit_name__ = "LONGTEXT"

    def __init__(self, **kwargs):
        """Construct a LONGTEXT.

        :param charset: Optional, a column-level character set for this string
          value. Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set. Generates
          BINARY in schema. This does not affect the type of data stored,
          only the collation of character data.

        """
        super().__init__(**kwargs)


class VARCHAR(_StringType, sqltypes.VARCHAR):
    """MySQL VARCHAR type, for variable-length character data."""

    __visit_name__ = "VARCHAR"

    def __init__(self, length=None, **kwargs):
        """Construct a VARCHAR.

        :param charset: Optional, a column-level character set for this string
          value. Takes precedence over 'ascii' or 'unicode' short-hand.

        :param collation: Optional, a column-level collation for this string
          value. Takes precedence over 'binary' short-hand.

        :param ascii: Defaults to False: short-hand for the ``latin1``
          character set, generates ASCII in schema.

        :param unicode: Defaults to False: short-hand for the ``ucs2``
          character set, generates UNICODE in schema.

        :param national: Optional. If true, use the server's configured
          national character set.

        :param binary: Defaults to False: short-hand, pick the binary
          collation type that matches the column's character set. Generates
          BINARY in schema. This does not affect the type of data stored,
          only the collation of character data.

        """
        super().__init__(length=length, **kwargs)


class CHAR(_StringType, sqltypes.CHAR):
    """MySQL CHAR type, for fixed-length character data."""

    __visit_name__ = "CHAR"

    def __init__(self, length=None, **kwargs):
        """Construct a CHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set. This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation. Must be
          compatible with the national character set.

        """
        super().__init__(length=length, **kwargs)

    @classmethod
    def _adapt_string_for_cast(cls, type_):
        # copy the given string type into a CHAR
        # for the purposes of rendering a CAST expression
        type_ = sqltypes.to_instance(type_)
        if isinstance(type_, sqltypes.CHAR):
            return type_
        elif isinstance(type_, _StringType):
            return CHAR(
                length=type_.length,
                charset=type_.charset,
                collation=type_.collation,
                ascii=type_.ascii,
                binary=type_.binary,
                unicode=type_.unicode,
                national=False,  # not supported in CAST
            )
        else:
            return CHAR(length=type_.length)
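
        # Illustrative sketch (editor's note, not part of the original
        # source): adapting a dialect-specific string type for CAST, e.g.
        #
        #     CHAR._adapt_string_for_cast(VARCHAR(30, charset="utf8mb4"))
        #     # -> CHAR(length=30, charset="utf8mb4", ...), as used when
        #     # rendering CAST(expr AS CHAR(30))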


class NVARCHAR(_StringType, sqltypes.NVARCHAR):
    """MySQL NVARCHAR type.

    For variable-length character data in the server's configured national
    character set.
    """

    __visit_name__ = "NVARCHAR"

    def __init__(self, length=None, **kwargs):
        """Construct an NVARCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set. This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation. Must be
          compatible with the national character set.

        """
        kwargs["national"] = True
        super().__init__(length=length, **kwargs)


class NCHAR(_StringType, sqltypes.NCHAR):
    """MySQL NCHAR type.

    For fixed-length character data in the server's configured national
    character set.
    """

    __visit_name__ = "NCHAR"

    def __init__(self, length=None, **kwargs):
        """Construct an NCHAR.

        :param length: Maximum data length, in characters.

        :param binary: Optional, use the default binary collation for the
          national character set. This does not affect the type of data
          stored, use a BINARY type for binary data.

        :param collation: Optional, request a particular collation. Must be
          compatible with the national character set.

        """
        kwargs["national"] = True
        super().__init__(length=length, **kwargs)


class TINYBLOB(sqltypes._Binary):
    """MySQL TINYBLOB type, for binary data up to 2^8 bytes."""

    __visit_name__ = "TINYBLOB"


class MEDIUMBLOB(sqltypes._Binary):
    """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes."""

    __visit_name__ = "MEDIUMBLOB"


class LONGBLOB(sqltypes._Binary):
    """MySQL LONGBLOB type, for binary data up to 2^32 bytes."""

    __visit_name__ = "LONGBLOB"
@ -0,0 +1,67 @@
# dialects/oracle/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from types import ModuleType

from . import base  # noqa
from . import cx_oracle  # noqa
from . import oracledb  # noqa
from .base import BFILE
from .base import BINARY_DOUBLE
from .base import BINARY_FLOAT
from .base import BLOB
from .base import CHAR
from .base import CLOB
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTERVAL
from .base import LONG
from .base import NCHAR
from .base import NCLOB
from .base import NUMBER
from .base import NVARCHAR
from .base import NVARCHAR2
from .base import RAW
from .base import REAL
from .base import ROWID
from .base import TIMESTAMP
from .base import VARCHAR
from .base import VARCHAR2

# Alias oracledb also as oracledb_async
oracledb_async = type(
    "oracledb_async", (ModuleType,), {"dialect": oracledb.dialect_async}
)
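
# Illustrative sketch (editor's note, not part of the original module):
# the synthesized module above exposes ``.dialect`` so the dialect
# registry can resolve the async driver name, e.g. (assuming the
# ``oracledb`` driver and an asyncio-capable environment)
#
#     from sqlalchemy.ext.asyncio import create_async_engine
#     engine = create_async_engine("oracle+oracledb_async://scott:tiger@dsn")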

base.dialect = dialect = cx_oracle.dialect

__all__ = (
    "VARCHAR",
    "NVARCHAR",
    "CHAR",
    "NCHAR",
    "DATE",
    "NUMBER",
    "BLOB",
    "BFILE",
    "CLOB",
    "NCLOB",
    "TIMESTAMP",
    "RAW",
    "FLOAT",
    "DOUBLE_PRECISION",
    "BINARY_DOUBLE",
    "BINARY_FLOAT",
    "LONG",
    "dialect",
    "INTERVAL",
    "VARCHAR2",
    "NVARCHAR2",
    "ROWID",
    "REAL",
)
3484 venv/lib/python3.11/site-packages/sqlalchemy/dialects/oracle/base.py (new file)
File diff suppressed because it is too large
@ -0,0 +1,507 @@
# dialects/oracle/dictionary.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from .types import DATE
from .types import LONG
from .types import NUMBER
from .types import RAW
from .types import VARCHAR2
from ... import Column
from ... import MetaData
from ... import Table
from ... import table
from ...sql.sqltypes import CHAR

# constants
DB_LINK_PLACEHOLDER = "__$sa_dblink$__"
# tables
dual = table("dual")
dictionary_meta = MetaData()

# NOTE: all the dictionary_meta are aliases because oracle does not like
# using the full table@dblink for every column in query, and complains with
# ORA-00960: ambiguous column naming in select list
all_tables = Table(
    "all_tables" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("tablespace_name", VARCHAR2(30)),
    Column("cluster_name", VARCHAR2(128)),
    Column("iot_name", VARCHAR2(128)),
    Column("status", VARCHAR2(8)),
    Column("pct_free", NUMBER),
    Column("pct_used", NUMBER),
    Column("ini_trans", NUMBER),
    Column("max_trans", NUMBER),
    Column("initial_extent", NUMBER),
    Column("next_extent", NUMBER),
    Column("min_extents", NUMBER),
    Column("max_extents", NUMBER),
    Column("pct_increase", NUMBER),
    Column("freelists", NUMBER),
    Column("freelist_groups", NUMBER),
    Column("logging", VARCHAR2(3)),
    Column("backed_up", VARCHAR2(1)),
    Column("num_rows", NUMBER),
    Column("blocks", NUMBER),
    Column("empty_blocks", NUMBER),
    Column("avg_space", NUMBER),
    Column("chain_cnt", NUMBER),
    Column("avg_row_len", NUMBER),
    Column("avg_space_freelist_blocks", NUMBER),
    Column("num_freelist_blocks", NUMBER),
    Column("degree", VARCHAR2(10)),
    Column("instances", VARCHAR2(10)),
    Column("cache", VARCHAR2(5)),
    Column("table_lock", VARCHAR2(8)),
    Column("sample_size", NUMBER),
    Column("last_analyzed", DATE),
    Column("partitioned", VARCHAR2(3)),
    Column("iot_type", VARCHAR2(12)),
    Column("temporary", VARCHAR2(1)),
    Column("secondary", VARCHAR2(1)),
    Column("nested", VARCHAR2(3)),
    Column("buffer_pool", VARCHAR2(7)),
    Column("flash_cache", VARCHAR2(7)),
    Column("cell_flash_cache", VARCHAR2(7)),
    Column("row_movement", VARCHAR2(8)),
    Column("global_stats", VARCHAR2(3)),
    Column("user_stats", VARCHAR2(3)),
    Column("duration", VARCHAR2(15)),
    Column("skip_corrupt", VARCHAR2(8)),
    Column("monitoring", VARCHAR2(3)),
    Column("cluster_owner", VARCHAR2(128)),
    Column("dependencies", VARCHAR2(8)),
    Column("compression", VARCHAR2(8)),
    Column("compress_for", VARCHAR2(30)),
    Column("dropped", VARCHAR2(3)),
    Column("read_only", VARCHAR2(3)),
    Column("segment_created", VARCHAR2(3)),
    Column("result_cache", VARCHAR2(7)),
    Column("clustering", VARCHAR2(3)),
    Column("activity_tracking", VARCHAR2(23)),
    Column("dml_timestamp", VARCHAR2(25)),
    Column("has_identity", VARCHAR2(3)),
    Column("container_data", VARCHAR2(3)),
    Column("inmemory", VARCHAR2(8)),
    Column("inmemory_priority", VARCHAR2(8)),
    Column("inmemory_distribute", VARCHAR2(15)),
    Column("inmemory_compression", VARCHAR2(17)),
    Column("inmemory_duplicate", VARCHAR2(13)),
    Column("default_collation", VARCHAR2(100)),
    Column("duplicated", VARCHAR2(1)),
    Column("sharded", VARCHAR2(1)),
    Column("externally_sharded", VARCHAR2(1)),
    Column("externally_duplicated", VARCHAR2(1)),
    Column("external", VARCHAR2(3)),
    Column("hybrid", VARCHAR2(3)),
    Column("cellmemory", VARCHAR2(24)),
    Column("containers_default", VARCHAR2(3)),
    Column("container_map", VARCHAR2(3)),
    Column("extended_data_link", VARCHAR2(3)),
    Column("extended_data_link_map", VARCHAR2(3)),
    Column("inmemory_service", VARCHAR2(12)),
    Column("inmemory_service_name", VARCHAR2(1000)),
    Column("container_map_object", VARCHAR2(3)),
    Column("memoptimize_read", VARCHAR2(8)),
    Column("memoptimize_write", VARCHAR2(8)),
    Column("has_sensitive_column", VARCHAR2(3)),
    Column("admit_null", VARCHAR2(3)),
    Column("data_link_dml_enabled", VARCHAR2(3)),
    Column("logical_replication", VARCHAR2(8)),
).alias("a_tables")
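
# Illustrative sketch (editor's note, not part of the original module):
# because each dictionary Table is aliased, queries compile against the
# short alias name, keeping Oracle happy once the placeholder is swapped
# for a real "@dblink" suffix (or stripped), e.g.
#
#     select(all_tables.c.table_name).where(all_tables.c.owner == "SCOTT")
#     # ... FROM all_tables__$sa_dblink$__ a_tables WHERE ...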

all_views = Table(
    "all_views" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("view_name", VARCHAR2(128), nullable=False),
    Column("text_length", NUMBER),
    Column("text", LONG),
    Column("text_vc", VARCHAR2(4000)),
    Column("type_text_length", NUMBER),
    Column("type_text", VARCHAR2(4000)),
    Column("oid_text_length", NUMBER),
    Column("oid_text", VARCHAR2(4000)),
    Column("view_type_owner", VARCHAR2(128)),
    Column("view_type", VARCHAR2(128)),
    Column("superview_name", VARCHAR2(128)),
    Column("editioning_view", VARCHAR2(1)),
    Column("read_only", VARCHAR2(1)),
    Column("container_data", VARCHAR2(1)),
    Column("bequeath", VARCHAR2(12)),
    Column("origin_con_id", VARCHAR2(256)),
    Column("default_collation", VARCHAR2(100)),
    Column("containers_default", VARCHAR2(3)),
    Column("container_map", VARCHAR2(3)),
    Column("extended_data_link", VARCHAR2(3)),
    Column("extended_data_link_map", VARCHAR2(3)),
    Column("has_sensitive_column", VARCHAR2(3)),
    Column("admit_null", VARCHAR2(3)),
    Column("pdb_local_only", VARCHAR2(3)),
).alias("a_views")

all_sequences = Table(
    "all_sequences" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("sequence_owner", VARCHAR2(128), nullable=False),
    Column("sequence_name", VARCHAR2(128), nullable=False),
    Column("min_value", NUMBER),
    Column("max_value", NUMBER),
    Column("increment_by", NUMBER, nullable=False),
    Column("cycle_flag", VARCHAR2(1)),
    Column("order_flag", VARCHAR2(1)),
    Column("cache_size", NUMBER, nullable=False),
    Column("last_number", NUMBER, nullable=False),
    Column("scale_flag", VARCHAR2(1)),
    Column("extend_flag", VARCHAR2(1)),
    Column("sharded_flag", VARCHAR2(1)),
    Column("session_flag", VARCHAR2(1)),
    Column("keep_value", VARCHAR2(1)),
).alias("a_sequences")

all_users = Table(
    "all_users" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("username", VARCHAR2(128), nullable=False),
    Column("user_id", NUMBER, nullable=False),
    Column("created", DATE, nullable=False),
    Column("common", VARCHAR2(3)),
    Column("oracle_maintained", VARCHAR2(1)),
    Column("inherited", VARCHAR2(3)),
    Column("default_collation", VARCHAR2(100)),
    Column("implicit", VARCHAR2(3)),
    Column("all_shard", VARCHAR2(3)),
    Column("external_shard", VARCHAR2(3)),
).alias("a_users")

all_mviews = Table(
    "all_mviews" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("mview_name", VARCHAR2(128), nullable=False),
    Column("container_name", VARCHAR2(128), nullable=False),
    Column("query", LONG),
    Column("query_len", NUMBER(38)),
    Column("updatable", VARCHAR2(1)),
    Column("update_log", VARCHAR2(128)),
    Column("master_rollback_seg", VARCHAR2(128)),
    Column("master_link", VARCHAR2(128)),
    Column("rewrite_enabled", VARCHAR2(1)),
    Column("rewrite_capability", VARCHAR2(9)),
    Column("refresh_mode", VARCHAR2(6)),
    Column("refresh_method", VARCHAR2(8)),
    Column("build_mode", VARCHAR2(9)),
    Column("fast_refreshable", VARCHAR2(18)),
    Column("last_refresh_type", VARCHAR2(8)),
    Column("last_refresh_date", DATE),
    Column("last_refresh_end_time", DATE),
    Column("staleness", VARCHAR2(19)),
    Column("after_fast_refresh", VARCHAR2(19)),
    Column("unknown_prebuilt", VARCHAR2(1)),
    Column("unknown_plsql_func", VARCHAR2(1)),
    Column("unknown_external_table", VARCHAR2(1)),
    Column("unknown_consider_fresh", VARCHAR2(1)),
    Column("unknown_import", VARCHAR2(1)),
    Column("unknown_trusted_fd", VARCHAR2(1)),
    Column("compile_state", VARCHAR2(19)),
    Column("use_no_index", VARCHAR2(1)),
    Column("stale_since", DATE),
    Column("num_pct_tables", NUMBER),
    Column("num_fresh_pct_regions", NUMBER),
    Column("num_stale_pct_regions", NUMBER),
    Column("segment_created", VARCHAR2(3)),
    Column("evaluation_edition", VARCHAR2(128)),
    Column("unusable_before", VARCHAR2(128)),
    Column("unusable_beginning", VARCHAR2(128)),
    Column("default_collation", VARCHAR2(100)),
    Column("on_query_computation", VARCHAR2(1)),
    Column("auto", VARCHAR2(3)),
).alias("a_mviews")

all_tab_identity_cols = Table(
    "all_tab_identity_cols" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("generation_type", VARCHAR2(10)),
    Column("sequence_name", VARCHAR2(128), nullable=False),
    Column("identity_options", VARCHAR2(298)),
).alias("a_tab_identity_cols")

all_tab_cols = Table(
    "all_tab_cols" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("data_type", VARCHAR2(128)),
    Column("data_type_mod", VARCHAR2(3)),
    Column("data_type_owner", VARCHAR2(128)),
    Column("data_length", NUMBER, nullable=False),
    Column("data_precision", NUMBER),
    Column("data_scale", NUMBER),
    Column("nullable", VARCHAR2(1)),
    Column("column_id", NUMBER),
    Column("default_length", NUMBER),
    Column("data_default", LONG),
    Column("num_distinct", NUMBER),
    Column("low_value", RAW(1000)),
    Column("high_value", RAW(1000)),
    Column("density", NUMBER),
    Column("num_nulls", NUMBER),
    Column("num_buckets", NUMBER),
    Column("last_analyzed", DATE),
    Column("sample_size", NUMBER),
    Column("character_set_name", VARCHAR2(44)),
    Column("char_col_decl_length", NUMBER),
    Column("global_stats", VARCHAR2(3)),
    Column("user_stats", VARCHAR2(3)),
    Column("avg_col_len", NUMBER),
    Column("char_length", NUMBER),
    Column("char_used", VARCHAR2(1)),
    Column("v80_fmt_image", VARCHAR2(3)),
    Column("data_upgraded", VARCHAR2(3)),
    Column("hidden_column", VARCHAR2(3)),
    Column("virtual_column", VARCHAR2(3)),
    Column("segment_column_id", NUMBER),
    Column("internal_column_id", NUMBER, nullable=False),
    Column("histogram", VARCHAR2(15)),
    Column("qualified_col_name", VARCHAR2(4000)),
    Column("user_generated", VARCHAR2(3)),
    Column("default_on_null", VARCHAR2(3)),
    Column("identity_column", VARCHAR2(3)),
    Column("evaluation_edition", VARCHAR2(128)),
    Column("unusable_before", VARCHAR2(128)),
    Column("unusable_beginning", VARCHAR2(128)),
    Column("collation", VARCHAR2(100)),
    Column("collated_column_id", NUMBER),
).alias("a_tab_cols")

all_tab_comments = Table(
    "all_tab_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("table_type", VARCHAR2(11)),
    Column("comments", VARCHAR2(4000)),
    Column("origin_con_id", NUMBER),
).alias("a_tab_comments")

all_col_comments = Table(
    "all_col_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(128), nullable=False),
    Column("comments", VARCHAR2(4000)),
    Column("origin_con_id", NUMBER),
).alias("a_col_comments")

all_mview_comments = Table(
    "all_mview_comments" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("mview_name", VARCHAR2(128), nullable=False),
    Column("comments", VARCHAR2(4000)),
).alias("a_mview_comments")

all_ind_columns = Table(
    "all_ind_columns" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("index_owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_name", VARCHAR2(4000)),
    Column("column_position", NUMBER, nullable=False),
    Column("column_length", NUMBER, nullable=False),
    Column("char_length", NUMBER),
    Column("descend", VARCHAR2(4)),
    Column("collated_column_id", NUMBER),
).alias("a_ind_columns")

all_indexes = Table(
    "all_indexes" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("index_type", VARCHAR2(27)),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("table_type", CHAR(11)),
    Column("uniqueness", VARCHAR2(9)),
    Column("compression", VARCHAR2(13)),
    Column("prefix_length", NUMBER),
    Column("tablespace_name", VARCHAR2(30)),
    Column("ini_trans", NUMBER),
    Column("max_trans", NUMBER),
    Column("initial_extent", NUMBER),
    Column("next_extent", NUMBER),
    Column("min_extents", NUMBER),
    Column("max_extents", NUMBER),
    Column("pct_increase", NUMBER),
    Column("pct_threshold", NUMBER),
    Column("include_column", NUMBER),
    Column("freelists", NUMBER),
    Column("freelist_groups", NUMBER),
    Column("pct_free", NUMBER),
    Column("logging", VARCHAR2(3)),
    Column("blevel", NUMBER),
    Column("leaf_blocks", NUMBER),
    Column("distinct_keys", NUMBER),
    Column("avg_leaf_blocks_per_key", NUMBER),
    Column("avg_data_blocks_per_key", NUMBER),
    Column("clustering_factor", NUMBER),
    Column("status", VARCHAR2(8)),
    Column("num_rows", NUMBER),
    Column("sample_size", NUMBER),
    Column("last_analyzed", DATE),
    Column("degree", VARCHAR2(40)),
    Column("instances", VARCHAR2(40)),
    Column("partitioned", VARCHAR2(3)),
    Column("temporary", VARCHAR2(1)),
    Column("generated", VARCHAR2(1)),
    Column("secondary", VARCHAR2(1)),
    Column("buffer_pool", VARCHAR2(7)),
    Column("flash_cache", VARCHAR2(7)),
    Column("cell_flash_cache", VARCHAR2(7)),
    Column("user_stats", VARCHAR2(3)),
    Column("duration", VARCHAR2(15)),
    Column("pct_direct_access", NUMBER),
    Column("ityp_owner", VARCHAR2(128)),
    Column("ityp_name", VARCHAR2(128)),
    Column("parameters", VARCHAR2(1000)),
    Column("global_stats", VARCHAR2(3)),
    Column("domidx_status", VARCHAR2(12)),
    Column("domidx_opstatus", VARCHAR2(6)),
    Column("funcidx_status", VARCHAR2(8)),
    Column("join_index", VARCHAR2(3)),
    Column("iot_redundant_pkey_elim", VARCHAR2(3)),
    Column("dropped", VARCHAR2(3)),
    Column("visibility", VARCHAR2(9)),
    Column("domidx_management", VARCHAR2(14)),
    Column("segment_created", VARCHAR2(3)),
    Column("orphaned_entries", VARCHAR2(3)),
    Column("indexing", VARCHAR2(7)),
    Column("auto", VARCHAR2(3)),
).alias("a_indexes")

all_ind_expressions = Table(
    "all_ind_expressions" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("index_owner", VARCHAR2(128), nullable=False),
    Column("index_name", VARCHAR2(128), nullable=False),
    Column("table_owner", VARCHAR2(128), nullable=False),
    Column("table_name", VARCHAR2(128), nullable=False),
    Column("column_expression", LONG),
    Column("column_position", NUMBER, nullable=False),
).alias("a_ind_expressions")

all_constraints = Table(
    "all_constraints" + DB_LINK_PLACEHOLDER,
    dictionary_meta,
    Column("owner", VARCHAR2(128)),
    Column("constraint_name", VARCHAR2(128)),
    Column("constraint_type", VARCHAR2(1)),
    Column("table_name", VARCHAR2(128)),
    Column("search_condition", LONG),
    Column("search_condition_vc", VARCHAR2(4000)),
    Column("r_owner", VARCHAR2(128)),
    Column("r_constraint_name", VARCHAR2(128)),
    Column("delete_rule", VARCHAR2(9)),
    Column("status", VARCHAR2(8)),
    Column("deferrable", VARCHAR2(14)),
    Column("deferred", VARCHAR2(9)),
    Column("validated", VARCHAR2(13)),
    Column("generated", VARCHAR2(14)),
    Column("bad", VARCHAR2(3)),
    Column("rely", VARCHAR2(4)),
    Column("last_change", DATE),
    Column("index_owner", VARCHAR2(128)),
    Column("index_name", VARCHAR2(128)),
    Column("invalid", VARCHAR2(7)),
    Column("view_related", VARCHAR2(14)),
|
||||
Column("origin_con_id", VARCHAR2(256)),
|
||||
).alias("a_constraints")
|
||||
|
||||
all_cons_columns = Table(
|
||||
"all_cons_columns" + DB_LINK_PLACEHOLDER,
|
||||
dictionary_meta,
|
||||
Column("owner", VARCHAR2(128), nullable=False),
|
||||
Column("constraint_name", VARCHAR2(128), nullable=False),
|
||||
Column("table_name", VARCHAR2(128), nullable=False),
|
||||
Column("column_name", VARCHAR2(4000)),
|
||||
Column("position", NUMBER),
|
||||
).alias("a_cons_columns")
|
||||
|
||||
# TODO figure out if it's still relevant, since there is no mention from here
|
||||
# https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/ALL_DB_LINKS.html
|
||||
# original note:
|
||||
# using user_db_links here since all_db_links appears
|
||||
# to have more restricted permissions.
|
||||
# https://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
|
||||
# will need to hear from more users if we are doing
|
||||
# the right thing here. See [ticket:2619]
|
||||
all_db_links = Table(
|
||||
"all_db_links" + DB_LINK_PLACEHOLDER,
|
||||
dictionary_meta,
|
||||
Column("owner", VARCHAR2(128), nullable=False),
|
||||
Column("db_link", VARCHAR2(128), nullable=False),
|
||||
Column("username", VARCHAR2(128)),
|
||||
Column("host", VARCHAR2(2000)),
|
||||
Column("created", DATE, nullable=False),
|
||||
Column("hidden", VARCHAR2(3)),
|
||||
Column("shard_internal", VARCHAR2(3)),
|
||||
Column("valid", VARCHAR2(3)),
|
||||
Column("intra_cdb", VARCHAR2(3)),
|
||||
).alias("a_db_links")
|
||||
|
||||
all_synonyms = Table(
|
||||
"all_synonyms" + DB_LINK_PLACEHOLDER,
|
||||
dictionary_meta,
|
||||
Column("owner", VARCHAR2(128)),
|
||||
Column("synonym_name", VARCHAR2(128)),
|
||||
Column("table_owner", VARCHAR2(128)),
|
||||
Column("table_name", VARCHAR2(128)),
|
||||
Column("db_link", VARCHAR2(128)),
|
||||
Column("origin_con_id", VARCHAR2(256)),
|
||||
).alias("a_synonyms")
|
||||
|
||||
all_objects = Table(
|
||||
"all_objects" + DB_LINK_PLACEHOLDER,
|
||||
dictionary_meta,
|
||||
Column("owner", VARCHAR2(128), nullable=False),
|
||||
Column("object_name", VARCHAR2(128), nullable=False),
|
||||
Column("subobject_name", VARCHAR2(128)),
|
||||
Column("object_id", NUMBER, nullable=False),
|
||||
Column("data_object_id", NUMBER),
|
||||
Column("object_type", VARCHAR2(23)),
|
||||
Column("created", DATE, nullable=False),
|
||||
Column("last_ddl_time", DATE, nullable=False),
|
||||
Column("timestamp", VARCHAR2(19)),
|
||||
Column("status", VARCHAR2(7)),
|
||||
Column("temporary", VARCHAR2(1)),
|
||||
Column("generated", VARCHAR2(1)),
|
||||
Column("secondary", VARCHAR2(1)),
|
||||
Column("namespace", NUMBER, nullable=False),
|
||||
Column("edition_name", VARCHAR2(128)),
|
||||
Column("sharing", VARCHAR2(13)),
|
||||
Column("editionable", VARCHAR2(1)),
|
||||
Column("oracle_maintained", VARCHAR2(1)),
|
||||
Column("application", VARCHAR2(1)),
|
||||
Column("default_collation", VARCHAR2(100)),
|
||||
Column("duplicated", VARCHAR2(1)),
|
||||
Column("sharded", VARCHAR2(1)),
|
||||
Column("created_appid", NUMBER),
|
||||
Column("created_vsnid", NUMBER),
|
||||
Column("modified_appid", NUMBER),
|
||||
Column("modified_vsnid", NUMBER),
|
||||
).alias("a_objects")
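
# Illustrative note (not part of the original module): these dictionary
# ``Table`` objects are ordinary SQLAlchemy Core tables and can be selected
# from directly.  A minimal sketch, assuming an ``engine`` connected to an
# Oracle database (the ``engine`` name and the five-row limit are
# assumptions for illustration):
#
#     from sqlalchemy import select
#
#     with engine.connect() as conn:
#         for owner, object_name in conn.execute(
#             select(all_objects.c.owner, all_objects.c.object_name).limit(5)
#         ):
#             print(owner, object_name)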
@ -0,0 +1,947 @@
# dialects/oracle/oracledb.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

r""".. dialect:: oracle+oracledb
    :name: python-oracledb
    :dbapi: oracledb
    :connectstring: oracle+oracledb://user:pass@hostname:port[/dbname][?service_name=<service>[&key=value&key=value...]]
    :url: https://oracle.github.io/python-oracledb/

Description
-----------

Python-oracledb is the Oracle Database driver for Python. It features a default
"thin" client mode that requires no dependencies, and an optional "thick" mode
that uses Oracle Client libraries. It supports SQLAlchemy features including
two phase transactions and Asyncio.

Python-oracledb is the renamed, updated cx_Oracle driver. Oracle is no longer
doing any releases in the cx_Oracle namespace.

The SQLAlchemy ``oracledb`` dialect provides both a sync and an async
implementation under the same dialect name. The proper version is
selected depending on how the engine is created:

* calling :func:`_sa.create_engine` with ``oracle+oracledb://...`` will
  automatically select the sync version::

      from sqlalchemy import create_engine

      sync_engine = create_engine(
          "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
      )

* calling :func:`_asyncio.create_async_engine` with ``oracle+oracledb://...``
  will automatically select the async version::

      from sqlalchemy.ext.asyncio import create_async_engine

      asyncio_engine = create_async_engine(
          "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
      )

The asyncio version of the dialect may also be specified explicitly using the
``oracledb_async`` suffix::

    from sqlalchemy.ext.asyncio import create_async_engine

    asyncio_engine = create_async_engine(
        "oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1"
    )

.. versionadded:: 2.0.25 added support for the async version of oracledb.

Thick mode support
------------------

By default, the python-oracledb driver runs in a "thin" mode that does not
require Oracle Client libraries to be installed. The driver also supports a
"thick" mode that uses Oracle Client libraries to get functionality such as
Oracle Application Continuity.

To enable thick mode, call `oracledb.init_oracle_client()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.init_oracle_client>`_
explicitly, or pass the parameter ``thick_mode=True`` to
:func:`_sa.create_engine`. To pass custom arguments to
``init_oracle_client()``, like the ``lib_dir`` path, a dict may be passed, for
example::

    engine = sa.create_engine(
        "oracle+oracledb://...",
        thick_mode={
            "lib_dir": "/path/to/oracle/client/lib",
            "config_dir": "/path/to/network_config_file_directory",
            "driver_name": "my-app : 1.0.0",
        },
    )

Note that passing a ``lib_dir`` path should only be done on macOS or
Windows. On Linux it does not behave as you might expect.

.. seealso::

    python-oracledb documentation `Enabling python-oracledb Thick mode
    <https://python-oracledb.readthedocs.io/en/latest/user_guide/initialization.html#enabling-python-oracledb-thick-mode>`_

Connecting to Oracle Database
-----------------------------

python-oracledb provides several methods of indicating the target database.
The dialect translates from a series of different URL forms.

Given the hostname, port and service name of the target database, you can
connect in SQLAlchemy using the ``service_name`` query string parameter::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@hostname:port?service_name=myservice"
    )

Connecting with Easy Connect strings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

You can pass any valid python-oracledb connection string as the ``dsn`` key
value in a :paramref:`_sa.create_engine.connect_args` dictionary. See
python-oracledb documentation `Oracle Net Services Connection Strings
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#oracle-net-services-connection-strings>`_.

For example to use an `Easy Connect string
<https://download.oracle.com/ocomdocs/global/Oracle-Net-Easy-Connect-Plus.pdf>`_
with a timeout to prevent connection establishment from hanging if the network
transport to the database cannot be established in 30 seconds, and also setting
a keep-alive time of 60 seconds to stop idle network connections from being
terminated by a firewall::

    e = create_engine(
        "oracle+oracledb://@",
        connect_args={
            "user": "scott",
            "password": "tiger",
            "dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
        },
    )

The Easy Connect syntax has been enhanced during the life of Oracle Database.
Review the documentation for your database version. The current documentation
is at `Understanding the Easy Connect Naming Method
<https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B0437826-43C1-49EC-A94D-B650B6A4A6EE>`_.

The general syntax is similar to:

.. sourcecode:: text

    [[protocol:]//]host[:port][/[service_name]][?parameter_name=value{&parameter_name=value}]

Note that although the SQLAlchemy URL syntax ``hostname:port/dbname`` looks
like Oracle's Easy Connect syntax, it is different. SQLAlchemy's URL requires a
system identifier (SID) for the ``dbname`` component::

    engine = create_engine("oracle+oracledb://scott:tiger@hostname:port/sid")

Easy Connect syntax does not support SIDs. It uses service names, which are
the preferred choice for connecting to Oracle Database.

Passing python-oracledb connect arguments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Other python-oracledb driver `connection options
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.connect>`_
can be passed in ``connect_args``. For example::

    e = create_engine(
        "oracle+oracledb://@",
        connect_args={
            "user": "scott",
            "password": "tiger",
            "dsn": "hostname:port/myservice",
            "events": True,
            "mode": oracledb.AUTH_MODE_SYSDBA,
        },
    )

Connecting with tnsnames.ora TNS aliases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

If no port, database name, or service name is provided, the dialect will use an
Oracle Database DSN "connection string". This takes the "hostname" portion of
the URL as the data source name. For example, if the ``tnsnames.ora`` file
contains a `TNS Alias
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#tns-aliases-for-connection-strings>`_
of ``myalias`` as below:

.. sourcecode:: text

    myalias =
      (DESCRIPTION =
        (ADDRESS = (PROTOCOL = TCP)(HOST = mymachine.example.com)(PORT = 1521))
        (CONNECT_DATA =
          (SERVER = DEDICATED)
          (SERVICE_NAME = orclpdb1)
        )
      )

The python-oracledb dialect connects to this database service when ``myalias`` is the
hostname portion of the URL, without specifying a port, database name or
``service_name``::

    engine = create_engine("oracle+oracledb://scott:tiger@myalias")

Connecting to Oracle Autonomous Database
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

Users of Oracle Autonomous Database should either use the TNS Alias URL
shown above, or pass the TNS Alias as the ``dsn`` key value in a
:paramref:`_sa.create_engine.connect_args` dictionary.

If Oracle Autonomous Database is configured for mutual TLS ("mTLS")
connections, then additional configuration is required as shown in `Connecting
to Oracle Cloud Autonomous Databases
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#connecting-to-oracle-cloud-autonomous-databases>`_. In
summary, Thick mode users should configure file locations and set the wallet
path in ``sqlnet.ora`` appropriately::

    e = create_engine(
        "oracle+oracledb://@",
        thick_mode={
            # directory containing tnsnames.ora and cwallet.so
            "config_dir": "/opt/oracle/wallet_dir",
        },
        connect_args={
            "user": "scott",
            "password": "tiger",
            "dsn": "mydb_high",
        },
    )

Thin mode users of mTLS should pass the appropriate directories and PEM wallet
password when creating the engine, similar to::

    e = create_engine(
        "oracle+oracledb://@",
        connect_args={
            "user": "scott",
            "password": "tiger",
            "dsn": "mydb_high",
            "config_dir": "/opt/oracle/wallet_dir",  # directory containing tnsnames.ora
            "wallet_location": "/opt/oracle/wallet_dir",  # directory containing ewallet.pem
            "wallet_password": "top secret",  # password for the PEM file
        },
    )

Typically ``config_dir`` and ``wallet_location`` are the same directory, which
is where the Oracle Autonomous Database wallet zip file was extracted. Note
this directory should be protected.

Connection Pooling
------------------

Applications with multiple concurrent users should use connection pooling. A
minimal sized connection pool is also beneficial for long-running, single-user
applications that do not frequently use a connection.

The python-oracledb driver provides its own connection pool implementation that
may be used in place of SQLAlchemy's pooling functionality. The driver pool
gives support for high availability features such as dead connection detection,
connection draining for planned database downtime, support for Oracle
Application Continuity and Transparent Application Continuity, and gives
support for `Database Resident Connection Pooling (DRCP)
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.

To take advantage of python-oracledb's pool, use the
:paramref:`_sa.create_engine.creator` parameter to provide a function that
returns a new connection, along with setting
:paramref:`_sa.create_engine.pool_class` to ``NullPool`` to disable
SQLAlchemy's pooling::

    import oracledb
    from sqlalchemy import create_engine
    from sqlalchemy import text
    from sqlalchemy.pool import NullPool

    # Uncomment to use the optional python-oracledb Thick mode.
    # Review the python-oracledb doc for the appropriate parameters
    # oracledb.init_oracle_client(<your parameters>)

    pool = oracledb.create_pool(
        user="scott",
        password="tiger",
        dsn="localhost:1521/freepdb1",
        min=1,
        max=4,
        increment=1,
    )
    engine = create_engine(
        "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
    )

The above engine may then be used normally. Internally, python-oracledb handles
connection pooling::

    with engine.connect() as conn:
        print(conn.scalar(text("select 1 from dual")))

Refer to the python-oracledb documentation for `oracledb.create_pool()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.create_pool>`_
for the arguments that can be used when creating a connection pool.

.. _drcp:

Using Oracle Database Resident Connection Pooling (DRCP)
--------------------------------------------------------

When using Oracle Database's Database Resident Connection Pooling (DRCP), the
best practice is to specify a connection class and "purity". Refer to the
`python-oracledb documentation on DRCP
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
For example::

    import oracledb
    from sqlalchemy import create_engine
    from sqlalchemy import text
    from sqlalchemy.pool import NullPool

    # Uncomment to use the optional python-oracledb Thick mode.
    # Review the python-oracledb doc for the appropriate parameters
    # oracledb.init_oracle_client(<your parameters>)

    pool = oracledb.create_pool(
        user="scott",
        password="tiger",
        dsn="localhost:1521/freepdb1",
        min=1,
        max=4,
        increment=1,
        cclass="MYCLASS",
        purity=oracledb.PURITY_SELF,
    )
    engine = create_engine(
        "oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
    )

The above engine may then be used normally where python-oracledb handles
application connection pooling and Oracle Database additionally uses DRCP::

    with engine.connect() as conn:
        print(conn.scalar(text("select 1 from dual")))

If you wish to use different connection classes or purities for different
connections, then wrap ``pool.acquire()``::

    import oracledb
    from sqlalchemy import create_engine
    from sqlalchemy import text
    from sqlalchemy.pool import NullPool

    # Uncomment to use python-oracledb Thick mode.
    # Review the python-oracledb doc for the appropriate parameters
    # oracledb.init_oracle_client(<your parameters>)

    pool = oracledb.create_pool(
        user="scott",
        password="tiger",
        dsn="localhost:1521/freepdb1",
        min=1,
        max=4,
        increment=1,
        cclass="MYCLASS",
        purity=oracledb.PURITY_SELF,
    )


    def creator():
        return pool.acquire(cclass="MYOTHERCLASS", purity=oracledb.PURITY_NEW)


    engine = create_engine(
        "oracle+oracledb://", creator=creator, poolclass=NullPool
    )

Engine Options consumed by the SQLAlchemy oracledb dialect outside of the driver
--------------------------------------------------------------------------------

There are also options that are consumed by the SQLAlchemy oracledb dialect
itself. These options are always passed directly to :func:`_sa.create_engine`,
such as::

    e = create_engine("oracle+oracledb://user:pass@tnsalias", arraysize=500)

The parameters accepted by the oracledb dialect are as follows:

* ``arraysize`` - set the driver cursor.arraysize value. It defaults to
  ``None``, indicating that the driver default value of 100 should be used.
  This setting controls how many rows are buffered when fetching rows, and can
  have a significant effect on performance if increased for queries that return
  large numbers of rows.

  .. versionchanged:: 2.0.26 - changed the default value from 50 to None,
     to use the default value of the driver itself.

* ``auto_convert_lobs`` - defaults to True; See :ref:`oracledb_lob`.

* ``coerce_to_decimal`` - see :ref:`oracledb_numeric` for detail.

* ``encoding_errors`` - see :ref:`oracledb_unicode_encoding_errors` for detail.

.. _oracledb_unicode:

Unicode
-------

As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings.

Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

In python-oracledb, the encoding used for all character data is "UTF-8".

Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The Core expression language handles unicode data by use of the
:class:`.Unicode` and :class:`.UnicodeText` datatypes. These types correspond
to the VARCHAR2 and CLOB Oracle Database datatypes by default. When using
these datatypes with Unicode data, it is expected that the database is
configured with a Unicode-aware character set so that the VARCHAR2 and CLOB
datatypes can accommodate the data.
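
For example, a minimal sketch of a table using these datatypes (the table and
``metadata`` object here are illustrative assumptions)::

    from sqlalchemy import Column, Integer, MetaData, Table, Unicode, UnicodeText

    metadata = MetaData()

    message = Table(
        "message",
        metadata,
        Column("id", Integer, primary_key=True),
        # rendered as VARCHAR2(200) and CLOB by default on Oracle Database
        Column("subject", Unicode(200)),
        Column("body", UnicodeText),
    )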

In the case that Oracle Database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`, which will cause
the SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
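
A sketch of such an engine configuration (the URL is illustrative)::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias",
        use_nchar_for_unicode=True,
    )

With this flag in place, the ``message`` table above would render its string
columns using the N-variant datatypes instead.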

.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
   datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle Database
   datatypes unless the ``use_nchar_for_unicode=True`` is passed to the dialect
   when :func:`_sa.create_engine` is called.


.. _oracledb_unicode_encoding_errors:

Encoding Errors
^^^^^^^^^^^^^^^

For the unusual case that data in Oracle Database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`_ function, and
is passed both via python-oracledb's ``encodingErrors`` parameter consumed by
``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the
python-oracledb dialect makes use of both under different circumstances.
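
For example, a sketch that substitutes the Unicode replacement character for
undecodable bytes; accepted values are those of Python's ``bytes.decode()``
``errors`` parameter, such as ``"ignore"`` or ``"replace"``::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias",
        encoding_errors="replace",
    )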

.. versionadded:: 1.3.11


.. _oracledb_setinputsizes:

Fine grained control over python-oracledb data binding with setinputsizes
-------------------------------------------------------------------------

The python-oracledb DBAPI has a deep and fundamental reliance upon the usage of
the DBAPI ``setinputsizes()`` call. The purpose of this call is to establish
the datatypes that are bound to a SQL statement for Python values being passed
as parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the python-oracledb DBAPI relies upon it heavily in
its interactions with the Oracle Database, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.

Users of the oracledb dialect are **strongly encouraged** to read through
python-oracledb's list of built-in datatype symbols at `Database Types
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#database-types>`_.
Note that in some cases, significant performance degradation can occur when
using these types vs. not.

On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can
be used both for runtime visibility (e.g. logging) of the setinputsizes step as
well as to fully control how ``setinputsizes()`` is used on a per-statement
basis.

.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.do_setinputsizes`


Example 1 - logging all setinputsizes calls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The following example illustrates how to log the intermediary values from a
SQLAlchemy perspective before they are converted to the raw ``setinputsizes()``
parameter dictionary. The keys of the dictionary are :class:`.BindParameter`
objects which have a ``.key`` and a ``.type`` attribute::

    from sqlalchemy import create_engine, event

    engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
    )


    @event.listens_for(engine, "do_setinputsizes")
    def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
        for bindparam, dbapitype in inputsizes.items():
            log.info(
                "Bound parameter name: %s SQLAlchemy type: %r DBAPI object: %s",
                bindparam.key,
                bindparam.type,
                dbapitype,
            )

Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

For performance, fetching LOB datatypes from Oracle Database is set by default
for the ``Text`` type within SQLAlchemy. This setting can be modified as
follows::

    from sqlalchemy import create_engine, event
    from oracledb import CLOB

    engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
    )


    @event.listens_for(engine, "do_setinputsizes")
    def _remove_clob(inputsizes, cursor, statement, parameters, context):
        for bindparam, dbapitype in list(inputsizes.items()):
            if dbapitype is CLOB:
                del inputsizes[bindparam]

.. _oracledb_lob:

LOB Datatypes
--------------

LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and
BLOB. Oracle Database can efficiently return these datatypes as a single
buffer. SQLAlchemy makes use of type handlers to do this by default.

To disable the use of the type handlers and deliver LOB objects as classic
buffered objects with a ``read()`` method, the parameter
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`.
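
A minimal sketch, assuming a hypothetical table with a CLOB column; with the
flag disabled, the fetched value is a LOB object whose contents are retrieved
with ``read()``::

    from sqlalchemy import create_engine, text

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias", auto_convert_lobs=False
    )

    with engine.connect() as conn:
        lob = conn.scalar(text("select my_clob from my_table"))
        print(lob.read())  # table and column names are illustrative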

.. _oracledb_returning:

RETURNING Support
-----------------

The oracledb dialect implements RETURNING using OUT parameters. The dialect
supports RETURNING fully.
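
No special syntax is required; a sketch using a hypothetical ``my_table``::

    from sqlalchemy import insert

    stmt = insert(my_table).values(name="some name").returning(my_table.c.id)
    with engine.begin() as conn:
        new_id = conn.scalar(stmt)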

Two Phase Transaction Support
-----------------------------

Two phase transactions are fully supported with python-oracledb. (Thin mode
requires python-oracledb 2.3). APIs for two phase transactions are provided at
the Core level via :meth:`_engine.Connection.begin_twophase` and
:paramref:`_orm.Session.twophase` for transparent ORM use.
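
A minimal Core-level sketch (the table is illustrative)::

    with engine.connect() as conn:
        xact = conn.begin_twophase()
        conn.execute(my_table.insert().values(x=1))
        xact.prepare()
        xact.commit()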

.. versionchanged:: 2.0.32 added support for two phase transactions

.. _oracledb_numeric:

Precision Numerics
------------------

SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle Database, the ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle
Database-specific :class:`_oracle.NUMBER` type takes this into account as well.

The oracledb dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the specific flavor of :class:`.Numeric` in
use, as well as if no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle Database may send incomplete or ambiguous
information about the numeric types being returned, such as a query where the
numeric types are buried under multiple levels of subquery. The type handlers
do their best to make the right decision in all cases, deferring to the
underlying python-oracledb DBAPI for all those cases where the driver can make
the best decision.

When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
values which specify precision and scale as Python ``Decimal`` objects. To
disable this coercion to decimal for performance reasons, pass the flag
``coerce_to_decimal=False`` to :func:`_sa.create_engine`::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False
    )

The ``coerce_to_decimal`` flag only impacts the results of plain string
SQL statements that are not otherwise associated with a :class:`.Numeric`
SQLAlchemy type (or a subclass of such).

.. versionchanged:: 1.2 The numeric handling system for the oracle dialects has
   been reworked to take advantage of newer driver features as well as better
   integration of outputtypehandlers.

.. versionadded:: 2.0.0 added support for the python-oracledb driver.

"""  # noqa
from __future__ import annotations

import collections
import re
from typing import Any
from typing import TYPE_CHECKING

from . import cx_oracle as _cx_oracle
from ... import exc
from ... import pool
from ...connectors.asyncio import AsyncAdapt_dbapi_connection
from ...connectors.asyncio import AsyncAdapt_dbapi_cursor
from ...connectors.asyncio import AsyncAdapt_dbapi_ss_cursor
from ...connectors.asyncio import AsyncAdaptFallback_dbapi_connection
from ...engine import default
from ...util import asbool
from ...util import await_fallback
from ...util import await_only

if TYPE_CHECKING:
    from oracledb import AsyncConnection
    from oracledb import AsyncCursor


class OracleExecutionContext_oracledb(
    _cx_oracle.OracleExecutionContext_cx_oracle
):
    pass


class OracleDialect_oracledb(_cx_oracle.OracleDialect_cx_oracle):
    supports_statement_cache = True
    execution_ctx_cls = OracleExecutionContext_oracledb

    driver = "oracledb"
    _min_version = (1,)

    def __init__(
        self,
        auto_convert_lobs=True,
        coerce_to_decimal=True,
        arraysize=None,
        encoding_errors=None,
        thick_mode=None,
        **kwargs,
    ):
        super().__init__(
            auto_convert_lobs,
            coerce_to_decimal,
            arraysize,
            encoding_errors,
            **kwargs,
        )

        if self.dbapi is not None and (
            thick_mode or isinstance(thick_mode, dict)
        ):
            kw = thick_mode if isinstance(thick_mode, dict) else {}
            self.dbapi.init_oracle_client(**kw)

    @classmethod
    def import_dbapi(cls):
        import oracledb

        return oracledb

    @classmethod
    def is_thin_mode(cls, connection):
        return connection.connection.dbapi_connection.thin

    @classmethod
    def get_async_dialect_cls(cls, url):
        return OracleDialectAsync_oracledb

    def _load_version(self, dbapi_module):
        version = (0, 0, 0)
        if dbapi_module is not None:
            m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", dbapi_module.version)
            if m:
                version = tuple(
                    int(x) for x in m.group(1, 2, 3) if x is not None
                )
        self.oracledb_ver = version
        if (
            self.oracledb_ver > (0, 0, 0)
            and self.oracledb_ver < self._min_version
        ):
            raise exc.InvalidRequestError(
                f"oracledb version {self._min_version} and above are supported"
            )

    def do_begin_twophase(self, connection, xid):
        conn_xis = connection.connection.xid(*xid)
        connection.connection.tpc_begin(conn_xis)
        connection.connection.info["oracledb_xid"] = conn_xis

    def do_prepare_twophase(self, connection, xid):
        should_commit = connection.connection.tpc_prepare()
        connection.info["oracledb_should_commit"] = should_commit

    def do_rollback_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        if recover:
            conn_xid = connection.connection.xid(*xid)
        else:
            conn_xid = None
        connection.connection.tpc_rollback(conn_xid)

    def do_commit_twophase(
        self, connection, xid, is_prepared=True, recover=False
    ):
        conn_xid = None
        if not is_prepared:
            should_commit = connection.connection.tpc_prepare()
        elif recover:
            conn_xid = connection.connection.xid(*xid)
            should_commit = True
        else:
            should_commit = connection.info["oracledb_should_commit"]
        if should_commit:
            connection.connection.tpc_commit(conn_xid)

    def do_recover_twophase(self, connection):
        return [
            # oracledb seems to return bytes
            (
                fi,
                gti.decode() if isinstance(gti, bytes) else gti,
                bq.decode() if isinstance(bq, bytes) else bq,
            )
            for fi, gti, bq in connection.connection.tpc_recover()
        ]

    def _check_max_identifier_length(self, connection):
        if self.oracledb_ver >= (2, 5):
            max_len = connection.connection.max_identifier_length
            if max_len is not None:
                return max_len
        return super()._check_max_identifier_length(connection)


class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
    _cursor: AsyncCursor
    __slots__ = ()

    @property
    def outputtypehandler(self):
        return self._cursor.outputtypehandler

    @outputtypehandler.setter
    def outputtypehandler(self, value):
        self._cursor.outputtypehandler = value

    def var(self, *args, **kwargs):
        return self._cursor.var(*args, **kwargs)

    def close(self):
        self._rows.clear()
        self._cursor.close()

    def setinputsizes(self, *args: Any, **kwargs: Any) -> Any:
        return self._cursor.setinputsizes(*args, **kwargs)

    def _aenter_cursor(self, cursor: AsyncCursor) -> AsyncCursor:
        try:
            return cursor.__enter__()
        except Exception as error:
            self._adapt_connection._handle_exception(error)

    async def _execute_async(self, operation, parameters):
        # override to not use mutex, oracledb already has a mutex

        if parameters is None:
            result = await self._cursor.execute(operation)
        else:
            result = await self._cursor.execute(operation, parameters)

        if self._cursor.description and not self.server_side:
            self._rows = collections.deque(await self._cursor.fetchall())
        return result

    async def _executemany_async(
        self,
        operation,
        seq_of_parameters,
    ):
        # override to not use mutex, oracledb already has a mutex
        return await self._cursor.executemany(operation, seq_of_parameters)

    def __enter__(self):
        return self

    def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
        self.close()


class AsyncAdapt_oracledb_ss_cursor(
    AsyncAdapt_dbapi_ss_cursor, AsyncAdapt_oracledb_cursor
):
    __slots__ = ()

    def close(self) -> None:
        if self._cursor is not None:
            self._cursor.close()
            self._cursor = None  # type: ignore


class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection):
    _connection: AsyncConnection
    __slots__ = ()

    thin = True

    _cursor_cls = AsyncAdapt_oracledb_cursor
    _ss_cursor_cls = None

    @property
    def autocommit(self):
        return self._connection.autocommit

    @autocommit.setter
    def autocommit(self, value):
        self._connection.autocommit = value

    @property
    def outputtypehandler(self):
        return self._connection.outputtypehandler

    @outputtypehandler.setter
    def outputtypehandler(self, value):
        self._connection.outputtypehandler = value

    @property
    def version(self):
        return self._connection.version

    @property
    def stmtcachesize(self):
        return self._connection.stmtcachesize

    @stmtcachesize.setter
    def stmtcachesize(self, value):
        self._connection.stmtcachesize = value

    @property
    def max_identifier_length(self):
        return self._connection.max_identifier_length

    def cursor(self):
        return AsyncAdapt_oracledb_cursor(self)

    def ss_cursor(self):
        return AsyncAdapt_oracledb_ss_cursor(self)

    def xid(self, *args: Any, **kwargs: Any) -> Any:
        return self._connection.xid(*args, **kwargs)

    def tpc_begin(self, *args: Any, **kwargs: Any) -> Any:
        return self.await_(self._connection.tpc_begin(*args, **kwargs))

    def tpc_commit(self, *args: Any, **kwargs: Any) -> Any:
        return self.await_(self._connection.tpc_commit(*args, **kwargs))

    def tpc_prepare(self, *args: Any, **kwargs: Any) -> Any:
        return self.await_(self._connection.tpc_prepare(*args, **kwargs))

    def tpc_recover(self, *args: Any, **kwargs: Any) -> Any:
        return self.await_(self._connection.tpc_recover(*args, **kwargs))

    def tpc_rollback(self, *args: Any, **kwargs: Any) -> Any:
        return self.await_(self._connection.tpc_rollback(*args, **kwargs))


class AsyncAdaptFallback_oracledb_connection(
    AsyncAdaptFallback_dbapi_connection, AsyncAdapt_oracledb_connection
):
    __slots__ = ()


class OracledbAdaptDBAPI:
    def __init__(self, oracledb) -> None:
        self.oracledb = oracledb

        for k, v in self.oracledb.__dict__.items():
            if k != "connect":
                self.__dict__[k] = v

    def connect(self, *arg, **kw):
        async_fallback = kw.pop("async_fallback", False)
        creator_fn = kw.pop("async_creator_fn", self.oracledb.connect_async)

        if asbool(async_fallback):
            return AsyncAdaptFallback_oracledb_connection(
                self, await_fallback(creator_fn(*arg, **kw))
            )

        else:
            return AsyncAdapt_oracledb_connection(
                self, await_only(creator_fn(*arg, **kw))
            )


class OracleExecutionContextAsync_oracledb(OracleExecutionContext_oracledb):
    # restore default create cursor
    create_cursor = default.DefaultExecutionContext.create_cursor

    def create_default_cursor(self):
        # copy of OracleExecutionContext_cx_oracle.create_cursor
        c = self._dbapi_connection.cursor()
        if self.dialect.arraysize:
            c.arraysize = self.dialect.arraysize

        return c

    def create_server_side_cursor(self):
        c = self._dbapi_connection.ss_cursor()
        if self.dialect.arraysize:
            c.arraysize = self.dialect.arraysize

        return c


class OracleDialectAsync_oracledb(OracleDialect_oracledb):
    is_async = True
    supports_server_side_cursors = True
    supports_statement_cache = True
    execution_ctx_cls = OracleExecutionContextAsync_oracledb

    _min_version = (2,)

    # thick_mode is not supported by asyncio, oracledb will raise
    @classmethod
    def import_dbapi(cls):
        import oracledb

        return OracledbAdaptDBAPI(oracledb)

    @classmethod
    def get_pool_class(cls, url):
        async_fallback = url.query.get("async_fallback", False)

        if asbool(async_fallback):
            return pool.FallbackAsyncAdaptedQueuePool
        else:
            return pool.AsyncAdaptedQueuePool

    def get_driver_connection(self, connection):
        return connection._connection


dialect = OracleDialect_oracledb
dialect_async = OracleDialectAsync_oracledb
@ -0,0 +1,220 @@
# dialects/oracle/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from ... import create_engine
from ... import exc
from ... import inspect
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_all_schema_objects_post_tables
from ...testing.provision import drop_all_schema_objects_pre_tables
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import set_default_schema_on_connection
from ...testing.provision import stop_test_class_outside_fixtures
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts


@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
    # NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
    # similar, so that the default tablespace is not "system"; reflection will
    # fail otherwise
    with eng.begin() as conn:
        conn.exec_driver_sql("create user %s identified by xe" % ident)
        conn.exec_driver_sql("create user %s_ts1 identified by xe" % ident)
        conn.exec_driver_sql("create user %s_ts2 identified by xe" % ident)
        conn.exec_driver_sql("grant dba to %s" % (ident,))
        conn.exec_driver_sql("grant unlimited tablespace to %s" % ident)
        conn.exec_driver_sql("grant unlimited tablespace to %s_ts1" % ident)
        conn.exec_driver_sql("grant unlimited tablespace to %s_ts2" % ident)
        # these are needed to create materialized views
        conn.exec_driver_sql("grant create table to %s" % ident)
        conn.exec_driver_sql("grant create table to %s_ts1" % ident)
        conn.exec_driver_sql("grant create table to %s_ts2" % ident)


@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
    config.test_schema = "%s_ts1" % ident
    config.test_schema_2 = "%s_ts2" % ident


def _ora_drop_ignore(conn, dbname):
    try:
        conn.exec_driver_sql("drop user %s cascade" % dbname)
        log.info("Reaped db: %s", dbname)
        return True
    except exc.DatabaseError as err:
        log.warning("couldn't drop db: %s", err)
        return False


@drop_all_schema_objects_pre_tables.for_db("oracle")
def _ora_drop_all_schema_objects_pre_tables(cfg, eng):
    _purge_recyclebin(eng)
    _purge_recyclebin(eng, cfg.test_schema)


@drop_all_schema_objects_post_tables.for_db("oracle")
def _ora_drop_all_schema_objects_post_tables(cfg, eng):
    with eng.begin() as conn:
        for syn in conn.dialect._get_synonyms(conn, None, None, None):
            conn.exec_driver_sql(f"drop synonym {syn['synonym_name']}")

        for syn in conn.dialect._get_synonyms(
            conn, cfg.test_schema, None, None
        ):
            conn.exec_driver_sql(
                f"drop synonym {cfg.test_schema}.{syn['synonym_name']}"
            )

        for tmp_table in inspect(conn).get_temp_table_names():
            conn.exec_driver_sql(f"drop table {tmp_table}")


@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
    with eng.begin() as conn:
        # cx_Oracle seems to occasionally leak open connections when a large
        # suite is run, even if we confirm we have zero references to
        # connection objects.
        # while there is a "kill session" command in Oracle Database,
        # it unfortunately does not release the connection sufficiently.
        _ora_drop_ignore(conn, ident)
        _ora_drop_ignore(conn, "%s_ts1" % ident)
        _ora_drop_ignore(conn, "%s_ts2" % ident)


@stop_test_class_outside_fixtures.for_db("oracle")
def _ora_stop_test_class_outside_fixtures(config, db, cls):
    try:
        _purge_recyclebin(db)
    except exc.DatabaseError as err:
        log.warning("purge recyclebin command failed: %s", err)

    # clear statement cache on all connections that were used
    # https://github.com/oracle/python-cx_Oracle/issues/519

    for cx_oracle_conn in _all_conns:
        try:
            sc = cx_oracle_conn.stmtcachesize
        except db.dialect.dbapi.InterfaceError:
            # connection closed
            pass
        else:
            cx_oracle_conn.stmtcachesize = 0
            cx_oracle_conn.stmtcachesize = sc
    _all_conns.clear()


def _purge_recyclebin(eng, schema=None):
    with eng.begin() as conn:
        if schema is None:
            # run magic command to get rid of identity sequences
            # https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/  # noqa: E501
            conn.exec_driver_sql("purge recyclebin")
        else:
            # per user: https://community.oracle.com/tech/developers/discussion/2255402/how-to-clear-dba-recyclebin-for-a-particular-user  # noqa: E501
            for owner, object_name, type_ in conn.exec_driver_sql(
                "select owner, object_name,type from "
                "dba_recyclebin where owner=:schema and type='TABLE'",
                {"schema": conn.dialect.denormalize_name(schema)},
            ).all():
                conn.exec_driver_sql(f'purge {type_} {owner}."{object_name}"')


_all_conns = set()


@post_configure_engine.for_db("oracle")
def _oracle_post_configure_engine(url, engine, follower_ident):
    from sqlalchemy import event

    @event.listens_for(engine, "checkout")
    def checkout(dbapi_con, con_record, con_proxy):
        _all_conns.add(dbapi_con)

    @event.listens_for(engine, "checkin")
    def checkin(dbapi_connection, connection_record):
        # work around cx_Oracle issue:
        # https://github.com/oracle/python-cx_Oracle/issues/530
        # invalidate oracle connections that had 2pc set up
        if "cx_oracle_xid" in connection_record.info:
            connection_record.invalidate()


@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
    log.info("db reaper connecting to %r", url)
    eng = create_engine(url)
    with eng.begin() as conn:
        log.info("identifiers in file: %s", ", ".join(idents))

        to_reap = conn.exec_driver_sql(
            "select u.username from all_users u where username "
            "like 'TEST_%' and not exists (select username "
            "from v$session where username=u.username)"
        )
        all_names = {username.lower() for (username,) in to_reap}
        to_drop = set()
        for name in all_names:
            if name.endswith("_ts1") or name.endswith("_ts2"):
                continue
            elif name in idents:
                to_drop.add(name)
                if "%s_ts1" % name in all_names:
                    to_drop.add("%s_ts1" % name)
                if "%s_ts2" % name in all_names:
                    to_drop.add("%s_ts2" % name)

        dropped = total = 0
        for total, username in enumerate(to_drop, 1):
            if _ora_drop_ignore(conn, username):
                dropped += 1
        log.info(
            "Dropped %d out of %d stale databases detected", dropped, total
        )


@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
    url = sa_url.make_url(url)
    return url.set(username=ident, password="xe")


@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
    return {
        "prefixes": ["GLOBAL TEMPORARY"],
        "oracle_on_commit": "PRESERVE ROWS",
    }


@set_default_schema_on_connection.for_db("oracle")
def _oracle_set_default_schema_on_connection(
    cfg, dbapi_connection, schema_name
):
    cursor = dbapi_connection.cursor()
    cursor.execute("ALTER SESSION SET CURRENT_SCHEMA=%s" % schema_name)
    cursor.close()


@update_db_opts.for_db("oracle")
def _update_db_opts(db_url, db_opts, options):
    """Set database options (db_opts) for a test database that we created."""
    if (
        options.oracledb_thick_mode
        and sa_url.make_url(db_url).get_driver_name() == "oracledb"
    ):
        db_opts["thick_mode"] = True
@ -0,0 +1,316 @@
# dialects/oracle/types.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations

import datetime as dt
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING

from ... import exc
from ...sql import sqltypes
from ...types import NVARCHAR
from ...types import VARCHAR

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql.type_api import _LiteralProcessorType


class RAW(sqltypes._Binary):
    __visit_name__ = "RAW"


OracleRaw = RAW


class NCLOB(sqltypes.Text):
    __visit_name__ = "NCLOB"


class VARCHAR2(VARCHAR):
    __visit_name__ = "VARCHAR2"


NVARCHAR2 = NVARCHAR


class NUMBER(sqltypes.Numeric, sqltypes.Integer):
    __visit_name__ = "NUMBER"

    def __init__(self, precision=None, scale=None, asdecimal=None):
        if asdecimal is None:
            asdecimal = bool(scale and scale > 0)

        super().__init__(precision=precision, scale=scale, asdecimal=asdecimal)

    def adapt(self, impltype):
        ret = super().adapt(impltype)
        # leave a hint for the DBAPI handler
        ret._is_oracle_number = True
        return ret

    @property
    def _type_affinity(self):
        if bool(self.scale and self.scale > 0):
            return sqltypes.Numeric
        else:
            return sqltypes.Integer


class FLOAT(sqltypes.FLOAT):
    """Oracle Database FLOAT.

    This is the same as :class:`_sqltypes.FLOAT` except that
    an Oracle Database-specific :paramref:`_oracle.FLOAT.binary_precision`
    parameter is accepted, and
    the :paramref:`_sqltypes.Float.precision` parameter is not accepted.

    Oracle Database FLOAT types indicate precision in terms of "binary
    precision", which defaults to 126. For a REAL type, the value is 63. This
    parameter does not cleanly map to a specific number of decimal places but
    is roughly equivalent to the desired number of decimal places divided by
    0.30103.
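
    For example, a sketch of DDL requesting roughly five decimal digits of
    precision, since ``16 * 0.30103`` is approximately ``4.8`` (the table and
    ``metadata`` object are illustrative)::

        from sqlalchemy import Column, Table
        from sqlalchemy.dialects import oracle

        t = Table(
            "float_tab",
            metadata,  # assumes an existing MetaData object
            Column("value", oracle.FLOAT(binary_precision=16)),
        )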

    .. versionadded:: 2.0

    """

    __visit_name__ = "FLOAT"

    def __init__(
        self,
        binary_precision=None,
        asdecimal=False,
        decimal_return_scale=None,
    ):
        r"""
        Construct a FLOAT

        :param binary_precision: Oracle Database binary precision value to be
         rendered in DDL. This may be approximated to the number of decimal
         characters using the formula "decimal precision = 0.30103 * binary
         precision". The default value used by Oracle Database for FLOAT /
         DOUBLE PRECISION is 126.

        :param asdecimal: See :paramref:`_sqltypes.Float.asdecimal`

        :param decimal_return_scale: See
         :paramref:`_sqltypes.Float.decimal_return_scale`

        """
        super().__init__(
            asdecimal=asdecimal, decimal_return_scale=decimal_return_scale
        )
        self.binary_precision = binary_precision


class BINARY_DOUBLE(sqltypes.Double):
    """Implement the Oracle ``BINARY_DOUBLE`` datatype.

    This datatype differs from the Oracle ``DOUBLE`` datatype in that it
    delivers a true 8-byte FP value. The datatype may be combined with a
    generic :class:`.Double` datatype using :meth:`.TypeEngine.with_variant`.

    .. seealso::

        :ref:`oracle_float_support`


    """

    __visit_name__ = "BINARY_DOUBLE"


class BINARY_FLOAT(sqltypes.Float):
    """Implement the Oracle ``BINARY_FLOAT`` datatype.

    This datatype differs from the Oracle ``FLOAT`` datatype in that it
    delivers a true 4-byte FP value. The datatype may be combined with a
    generic :class:`.Float` datatype using :meth:`.TypeEngine.with_variant`.

    .. seealso::

        :ref:`oracle_float_support`


    """

    __visit_name__ = "BINARY_FLOAT"


class BFILE(sqltypes.LargeBinary):
    __visit_name__ = "BFILE"


class LONG(sqltypes.Text):
    __visit_name__ = "LONG"


class _OracleDateLiteralRender:
    def _literal_processor_datetime(self, dialect):
        def process(value):
            if getattr(value, "microsecond", None):
                value = (
                    f"""TO_TIMESTAMP"""
                    f"""('{value.isoformat().replace("T", " ")}', """
                    """'YYYY-MM-DD HH24:MI:SS.FF')"""
                )
            else:
                value = (
                    f"""TO_DATE"""
                    f"""('{value.isoformat().replace("T", " ")}', """
                    """'YYYY-MM-DD HH24:MI:SS')"""
                )
            return value

        return process

    def _literal_processor_date(self, dialect):
        def process(value):
            if getattr(value, "microsecond", None):
                value = (
                    f"""TO_TIMESTAMP"""
                    f"""('{value.isoformat().split("T")[0]}', """
                    """'YYYY-MM-DD')"""
                )
            else:
                value = (
                    f"""TO_DATE"""
                    f"""('{value.isoformat().split("T")[0]}', """
                    """'YYYY-MM-DD')"""
                )
            return value

        return process


class DATE(_OracleDateLiteralRender, sqltypes.DateTime):
    """Provide the Oracle Database DATE type.

    This type has no special Python behavior, except that it subclasses
    :class:`_types.DateTime`; this is to suit the fact that the Oracle Database
    ``DATE`` type supports a time value.

    """

    __visit_name__ = "DATE"

    def literal_processor(self, dialect):
        return self._literal_processor_datetime(dialect)

    def _compare_type_affinity(self, other):
        return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)


class _OracleDate(_OracleDateLiteralRender, sqltypes.Date):
    def literal_processor(self, dialect):
        return self._literal_processor_date(dialect)


class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
    __visit_name__ = "INTERVAL"

    def __init__(self, day_precision=None, second_precision=None):
        """Construct an INTERVAL.

        Note that only DAY TO SECOND intervals are currently supported.
        This is due to a lack of support for YEAR TO MONTH intervals
        within available DBAPIs.

        :param day_precision: the day precision value. This is the number of
         digits to store for the day field. Defaults to "2".
        :param second_precision: the second precision value. This is the
         number of digits to store for the fractional seconds field.
         Defaults to "6".
|
||||
|
||||
"""
|
||||
self.day_precision = day_precision
|
||||
self.second_precision = second_precision
|
||||
|
||||
@classmethod
|
||||
def _adapt_from_generic_interval(cls, interval):
|
||||
return INTERVAL(
|
||||
day_precision=interval.day_precision,
|
||||
second_precision=interval.second_precision,
|
||||
)
|
||||
|
||||
@classmethod
|
||||
def adapt_emulated_to_native(
|
||||
cls, interval: sqltypes.Interval, **kw # type: ignore[override]
|
||||
):
|
||||
return INTERVAL(
|
||||
day_precision=interval.day_precision,
|
||||
second_precision=interval.second_precision,
|
||||
)
|
||||
|
||||
@property
|
||||
def _type_affinity(self):
|
||||
return sqltypes.Interval
|
||||
|
||||
def as_generic(self, allow_nulltype=False):
|
||||
return sqltypes.Interval(
|
||||
native=True,
|
||||
second_precision=self.second_precision,
|
||||
day_precision=self.day_precision,
|
||||
)
|
||||
|
||||
@property
|
||||
def python_type(self) -> Type[dt.timedelta]:
|
||||
return dt.timedelta
|
||||
|
||||
def literal_processor(
|
||||
self, dialect: Dialect
|
||||
) -> Optional[_LiteralProcessorType[dt.timedelta]]:
|
||||
def process(value: dt.timedelta) -> str:
|
||||
return f"NUMTODSINTERVAL({value.total_seconds()}, 'SECOND')"
|
||||
|
||||
return process
|
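# Illustrative sketch (editor's example, not part of this module): the
# literal processor above renders a timedelta through NUMTODSINTERVAL.
import datetime

from sqlalchemy import literal
from sqlalchemy.dialects import oracle
from sqlalchemy.dialects.oracle import INTERVAL

expr = literal(datetime.timedelta(days=1, seconds=30), INTERVAL())
print(
    expr.compile(
        dialect=oracle.dialect(), compile_kwargs={"literal_binds": True}
    )
)
# expected output: NUMTODSINTERVAL(86430.0, 'SECOND')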
class TIMESTAMP(sqltypes.TIMESTAMP):
    """Oracle Database implementation of ``TIMESTAMP``, which supports
    additional Oracle Database-specific modes.

    .. versionadded:: 2.0

    """

    def __init__(self, timezone: bool = False, local_timezone: bool = False):
        """Construct a new :class:`_oracle.TIMESTAMP`.

        :param timezone: boolean. Indicates that the TIMESTAMP type should
         use Oracle Database's ``TIMESTAMP WITH TIME ZONE`` datatype.

        :param local_timezone: boolean. Indicates that the TIMESTAMP type
         should use Oracle Database's ``TIMESTAMP WITH LOCAL TIME ZONE``
         datatype.

        """
        if timezone and local_timezone:
            raise exc.ArgumentError(
                "timezone and local_timezone are mutually exclusive"
            )
        super().__init__(timezone=timezone)
        self.local_timezone = local_timezone
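# Illustrative sketch (editor's example, not part of this module): the two
# flags select between the Oracle DDL variants; passing both raises
# ArgumentError per the guard above.
from sqlalchemy.dialects import oracle
from sqlalchemy.dialects.oracle import TIMESTAMP

print(TIMESTAMP(timezone=True).compile(dialect=oracle.dialect()))
# TIMESTAMP WITH TIME ZONE
print(TIMESTAMP(local_timezone=True).compile(dialect=oracle.dialect()))
# TIMESTAMP WITH LOCAL TIME ZONE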
class ROWID(sqltypes.TypeEngine):
    """Oracle Database ROWID type.

    When used in a cast() or similar, generates ROWID.

    """

    __visit_name__ = "ROWID"


class _OracleBoolean(sqltypes.Boolean):
    def get_dbapi_type(self, dbapi):
        return dbapi.NUMBER
@ -0,0 +1,167 @@
# dialects/postgresql/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors

from types import ModuleType

from . import array as arraylib  # noqa # keep above base and other dialects
from . import asyncpg  # noqa
from . import base
from . import pg8000  # noqa
from . import psycopg  # noqa
from . import psycopg2  # noqa
from . import psycopg2cffi  # noqa
from .array import All
from .array import Any
from .array import ARRAY
from .array import array
from .base import BIGINT
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DOMAIN
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTEGER
from .base import NUMERIC
from .base import REAL
from .base import SMALLINT
from .base import TEXT
from .base import UUID
from .base import VARCHAR
from .dml import Insert
from .dml import insert
from .ext import aggregate_order_by
from .ext import array_agg
from .ext import ExcludeConstraint
from .ext import phraseto_tsquery
from .ext import plainto_tsquery
from .ext import to_tsquery
from .ext import to_tsvector
from .ext import ts_headline
from .ext import websearch_to_tsquery
from .hstore import HSTORE
from .hstore import hstore
from .json import JSON
from .json import JSONB
from .json import JSONPATH
from .named_types import CreateDomainType
from .named_types import CreateEnumType
from .named_types import DropDomainType
from .named_types import DropEnumType
from .named_types import ENUM
from .named_types import NamedType
from .ranges import AbstractMultiRange
from .ranges import AbstractRange
from .ranges import AbstractSingleRange
from .ranges import DATEMULTIRANGE
from .ranges import DATERANGE
from .ranges import INT4MULTIRANGE
from .ranges import INT4RANGE
from .ranges import INT8MULTIRANGE
from .ranges import INT8RANGE
from .ranges import MultiRange
from .ranges import NUMMULTIRANGE
from .ranges import NUMRANGE
from .ranges import Range
from .ranges import TSMULTIRANGE
from .ranges import TSRANGE
from .ranges import TSTZMULTIRANGE
from .ranges import TSTZRANGE
from .types import BIT
from .types import BYTEA
from .types import CIDR
from .types import CITEXT
from .types import INET
from .types import INTERVAL
from .types import MACADDR
from .types import MACADDR8
from .types import MONEY
from .types import OID
from .types import REGCLASS
from .types import REGCONFIG
from .types import TIME
from .types import TIMESTAMP
from .types import TSQUERY
from .types import TSVECTOR


# Alias psycopg also as psycopg_async
psycopg_async = type(
    "psycopg_async", (ModuleType,), {"dialect": psycopg.dialect_async}
)

base.dialect = dialect = psycopg2.dialect
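# Illustrative sketch (editor's example, not part of this module): the
# aliases above make psycopg2 the default driver and expose the async
# psycopg variant under "psycopg_async"; the DSNs are hypothetical and the
# respective DBAPI drivers must be installed for these calls to succeed.
from sqlalchemy import create_engine
from sqlalchemy.ext.asyncio import create_async_engine

sync_engine = create_engine("postgresql://scott:tiger@localhost/test")
async_engine = create_async_engine(
    "postgresql+psycopg_async://scott:tiger@localhost/test"
)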
|
||||
|
||||
__all__ = (
|
||||
"INTEGER",
|
||||
"BIGINT",
|
||||
"SMALLINT",
|
||||
"VARCHAR",
|
||||
"CHAR",
|
||||
"TEXT",
|
||||
"NUMERIC",
|
||||
"FLOAT",
|
||||
"REAL",
|
||||
"INET",
|
||||
"CIDR",
|
||||
"CITEXT",
|
||||
"UUID",
|
||||
"BIT",
|
||||
"MACADDR",
|
||||
"MACADDR8",
|
||||
"MONEY",
|
||||
"OID",
|
||||
"REGCLASS",
|
||||
"REGCONFIG",
|
||||
"TSQUERY",
|
||||
"TSVECTOR",
|
||||
"DOUBLE_PRECISION",
|
||||
"TIMESTAMP",
|
||||
"TIME",
|
||||
"DATE",
|
||||
"BYTEA",
|
||||
"BOOLEAN",
|
||||
"INTERVAL",
|
||||
"ARRAY",
|
||||
"ENUM",
|
||||
"DOMAIN",
|
||||
"dialect",
|
||||
"array",
|
||||
"HSTORE",
|
||||
"hstore",
|
||||
"INT4RANGE",
|
||||
"INT8RANGE",
|
||||
"NUMRANGE",
|
||||
"DATERANGE",
|
||||
"INT4MULTIRANGE",
|
||||
"INT8MULTIRANGE",
|
||||
"NUMMULTIRANGE",
|
||||
"DATEMULTIRANGE",
|
||||
"TSVECTOR",
|
||||
"TSRANGE",
|
||||
"TSTZRANGE",
|
||||
"TSMULTIRANGE",
|
||||
"TSTZMULTIRANGE",
|
||||
"JSON",
|
||||
"JSONB",
|
||||
"JSONPATH",
|
||||
"Any",
|
||||
"All",
|
||||
"DropEnumType",
|
||||
"DropDomainType",
|
||||
"CreateDomainType",
|
||||
"NamedType",
|
||||
"CreateEnumType",
|
||||
"ExcludeConstraint",
|
||||
"Range",
|
||||
"aggregate_order_by",
|
||||
"array_agg",
|
||||
"insert",
|
||||
"Insert",
|
||||
)
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -0,0 +1,187 @@
# dialects/postgresql/_psycopg_common.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations

import decimal

from .array import ARRAY as PGARRAY
from .base import _DECIMAL_TYPES
from .base import _FLOAT_TYPES
from .base import _INT_TYPES
from .base import PGDialect
from .base import PGExecutionContext
from .hstore import HSTORE
from .pg_catalog import _SpaceVector
from .pg_catalog import INT2VECTOR
from .pg_catalog import OIDVECTOR
from ... import exc
from ... import types as sqltypes
from ... import util
from ...engine import processors

_server_side_id = util.counter()


class _PsycopgNumeric(sqltypes.Numeric):
    def bind_processor(self, dialect):
        return None

    def result_processor(self, dialect, coltype):
        if self.asdecimal:
            if coltype in _FLOAT_TYPES:
                return processors.to_decimal_processor_factory(
                    decimal.Decimal, self._effective_decimal_return_scale
                )
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                # psycopg returns Decimal natively for 1700
                return None
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )
        else:
            if coltype in _FLOAT_TYPES:
                # psycopg returns float natively for 701
                return None
            elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES:
                return processors.to_float
            else:
                raise exc.InvalidRequestError(
                    "Unknown PG numeric type: %d" % coltype
                )


class _PsycopgFloat(_PsycopgNumeric):
    __visit_name__ = "float"


class _PsycopgHStore(HSTORE):
    def bind_processor(self, dialect):
        if dialect._has_native_hstore:
            return None
        else:
            return super().bind_processor(dialect)

    def result_processor(self, dialect, coltype):
        if dialect._has_native_hstore:
            return None
        else:
            return super().result_processor(dialect, coltype)


class _PsycopgARRAY(PGARRAY):
    render_bind_cast = True


class _PsycopgINT2VECTOR(_SpaceVector, INT2VECTOR):
    pass


class _PsycopgOIDVECTOR(_SpaceVector, OIDVECTOR):
    pass


class _PGExecutionContext_common_psycopg(PGExecutionContext):
    def create_server_side_cursor(self):
        # use server-side cursors:
        # psycopg
        # https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#server-side-cursors
        # psycopg2
        # https://www.psycopg.org/docs/usage.html#server-side-cursors
        ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:])
        return self._dbapi_connection.cursor(ident)


class _PGDialect_common_psycopg(PGDialect):
    supports_statement_cache = True
    supports_server_side_cursors = True

    default_paramstyle = "pyformat"

    _has_native_hstore = True

    colspecs = util.update_copy(
        PGDialect.colspecs,
        {
            sqltypes.Numeric: _PsycopgNumeric,
            sqltypes.Float: _PsycopgFloat,
            HSTORE: _PsycopgHStore,
            sqltypes.ARRAY: _PsycopgARRAY,
            INT2VECTOR: _PsycopgINT2VECTOR,
            OIDVECTOR: _PsycopgOIDVECTOR,
        },
    )

    def __init__(
        self,
        client_encoding=None,
        use_native_hstore=True,
        **kwargs,
    ):
        PGDialect.__init__(self, **kwargs)
        if not use_native_hstore:
            self._has_native_hstore = False
        self.use_native_hstore = use_native_hstore
        self.client_encoding = client_encoding

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username="user", database="dbname")

        multihosts, multiports = self._split_multihost_from_url(url)

        if opts or url.query:
            if not opts:
                opts = {}
            if "port" in opts:
                opts["port"] = int(opts["port"])
            opts.update(url.query)

            if multihosts:
                opts["host"] = ",".join(multihosts)
                comma_ports = ",".join(str(p) if p else "" for p in multiports)
                if comma_ports:
                    opts["port"] = comma_ports
            return ([], opts)
        else:
            # no connection arguments whatsoever; psycopg2.connect()
            # requires that "dsn" be present as a blank string.
            return ([""], opts)

    def get_isolation_level_values(self, dbapi_connection):
        return (
            "AUTOCOMMIT",
            "READ COMMITTED",
            "READ UNCOMMITTED",
            "REPEATABLE READ",
            "SERIALIZABLE",
        )

    def set_deferrable(self, connection, value):
        connection.deferrable = value

    def get_deferrable(self, connection):
        return connection.deferrable

    def _do_autocommit(self, connection, value):
        connection.autocommit = value

    def do_ping(self, dbapi_connection):
        cursor = None
        before_autocommit = dbapi_connection.autocommit

        if not before_autocommit:
            dbapi_connection.autocommit = True
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute(self._dialect_specific_select_one)
        finally:
            cursor.close()
            if not before_autocommit and not dbapi_connection.closed:
                dbapi_connection.autocommit = before_autocommit

        return True
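# Illustrative sketch (editor's example, not part of this module): a URL
# naming several host:port pairs is flattened by create_connect_args()
# above into comma-separated libpq "host" / "port" entries. The DSN is
# hypothetical.
from sqlalchemy.engine import make_url

url = make_url(
    "postgresql+psycopg2://scott:tiger@/test?host=db1:5432&host=db2:5433"
)
# create_connect_args(url) would return ([], opts) with opts containing
# {"user": "scott", "password": "tiger", "dbname": "test",
#  "host": "db1,db2", "port": "5432,5433"}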
@ -0,0 +1,509 @@
# dialects/postgresql/array.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php


from __future__ import annotations

import re
from typing import Any as typing_Any
from typing import Iterable
from typing import Optional
from typing import Sequence
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union

from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import OVERLAP
from ... import types as sqltypes
from ... import util
from ...sql import expression
from ...sql import operators
from ...sql.visitors import InternalTraversal

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql._typing import _ColumnExpressionArgument
    from ...sql._typing import _TypeEngineArgument
    from ...sql.elements import ColumnElement
    from ...sql.elements import Grouping
    from ...sql.expression import BindParameter
    from ...sql.operators import OperatorType
    from ...sql.selectable import _SelectIterable
    from ...sql.type_api import _BindProcessorType
    from ...sql.type_api import _LiteralProcessorType
    from ...sql.type_api import _ResultProcessorType
    from ...sql.type_api import TypeEngine
    from ...sql.visitors import _TraverseInternalsType
    from ...util.typing import Self


_T = TypeVar("_T", bound=typing_Any)


def Any(
    other: typing_Any,
    arrexpr: _ColumnExpressionArgument[_T],
    operator: OperatorType = operators.eq,
) -> ColumnElement[bool]:
    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.any` method.
    See that method for details.

    """

    return arrexpr.any(other, operator)  # type: ignore[no-any-return, union-attr]  # noqa: E501


def All(
    other: typing_Any,
    arrexpr: _ColumnExpressionArgument[_T],
    operator: OperatorType = operators.eq,
) -> ColumnElement[bool]:
    """A synonym for the ARRAY-level :meth:`.ARRAY.Comparator.all` method.
    See that method for details.

    """

    return arrexpr.all(other, operator)  # type: ignore[no-any-return, union-attr]  # noqa: E501


class array(expression.ExpressionClauseList[_T]):
    """A PostgreSQL ARRAY literal.

    This is used to produce ARRAY literals in SQL expressions, e.g.::

        from sqlalchemy.dialects.postgresql import array
        from sqlalchemy.dialects import postgresql
        from sqlalchemy import select, func

        stmt = select(array([1, 2]) + array([3, 4, 5]))

        print(stmt.compile(dialect=postgresql.dialect()))

    Produces the SQL:

    .. sourcecode:: sql

        SELECT ARRAY[%(param_1)s, %(param_2)s] ||
            ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1

    An instance of :class:`.array` will always have the datatype
    :class:`_types.ARRAY`. The "inner" type of the array is inferred from the
    values present, unless the :paramref:`_postgresql.array.type_` keyword
    argument is passed::

        array(["foo", "bar"], type_=CHAR)

    When constructing an empty array, the :paramref:`_postgresql.array.type_`
    argument is particularly important, as the PostgreSQL server typically
    requires a cast to be rendered for the inner type in order to render an
    empty array. SQLAlchemy's compilation for the empty array will produce
    this cast so that::

        stmt = array([], type_=Integer)
        print(stmt.compile(dialect=postgresql.dialect()))

    Produces:

    .. sourcecode:: sql

        ARRAY[]::INTEGER[]

    as required by PostgreSQL for empty arrays.

    .. versionadded:: 2.0.40 added support to render empty PostgreSQL array
       literals with a required cast.

    Multidimensional arrays are produced by nesting :class:`.array`
    constructs. The dimensionality of the final :class:`_types.ARRAY` type is
    calculated by recursively adding the dimensions of the inner
    :class:`_types.ARRAY` type::

        stmt = select(
            array(
                [array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
            )
        )
        print(stmt.compile(dialect=postgresql.dialect()))

    Produces:

    .. sourcecode:: sql

        SELECT ARRAY[
            ARRAY[%(param_1)s, %(param_2)s],
            ARRAY[%(param_3)s, %(param_4)s],
            ARRAY[q, x]
        ] AS anon_1

    .. versionadded:: 1.3.6 added support for multidimensional array literals

    .. seealso::

        :class:`_postgresql.ARRAY`

    """  # noqa: E501

    __visit_name__ = "array"

    stringify_dialect = "postgresql"

    _traverse_internals: _TraverseInternalsType = [
        ("clauses", InternalTraversal.dp_clauseelement_tuple),
        ("type", InternalTraversal.dp_type),
    ]

    def __init__(
        self,
        clauses: Iterable[_T],
        *,
        type_: Optional[_TypeEngineArgument[_T]] = None,
        **kw: typing_Any,
    ):
        r"""Construct an ARRAY literal.

        :param clauses: iterable, such as a list, containing elements to be
         rendered in the array
        :param type\_: optional type. If omitted, the type is inferred
         from the contents of the array.

        """
        super().__init__(operators.comma_op, *clauses, **kw)

        main_type = (
            type_
            if type_ is not None
            else self.clauses[0].type if self.clauses else sqltypes.NULLTYPE
        )

        if isinstance(main_type, ARRAY):
            self.type = ARRAY(
                main_type.item_type,
                dimensions=(
                    main_type.dimensions + 1
                    if main_type.dimensions is not None
                    else 2
                ),
            )  # type: ignore[assignment]
        else:
            self.type = ARRAY(main_type)  # type: ignore[assignment]

    @property
    def _select_iterable(self) -> _SelectIterable:
        return (self,)

    def _bind_param(
        self,
        operator: OperatorType,
        obj: typing_Any,
        type_: Optional[TypeEngine[_T]] = None,
        _assume_scalar: bool = False,
    ) -> BindParameter[_T]:
        if _assume_scalar or operator is operators.getitem:
            return expression.BindParameter(
                None,
                obj,
                _compared_to_operator=operator,
                type_=type_,
                _compared_to_type=self.type,
                unique=True,
            )

        else:
            return array(
                [
                    self._bind_param(
                        operator, o, _assume_scalar=True, type_=type_
                    )
                    for o in obj
                ]
            )  # type: ignore[return-value]

    def self_group(
        self, against: Optional[OperatorType] = None
    ) -> Union[Self, Grouping[_T]]:
        if against in (operators.any_op, operators.all_op, operators.getitem):
            return expression.Grouping(self)
        else:
            return self


class ARRAY(sqltypes.ARRAY[_T]):
    """PostgreSQL ARRAY type.

    The :class:`_postgresql.ARRAY` type is constructed in the same way
    as the core :class:`_types.ARRAY` type; a member type is required, and a
    number of dimensions is recommended if the type is to be used for more
    than one dimension::

        from sqlalchemy.dialects import postgresql

        mytable = Table(
            "mytable",
            metadata,
            Column("data", postgresql.ARRAY(Integer, dimensions=2)),
        )

    The :class:`_postgresql.ARRAY` type provides all operations defined on the
    core :class:`_types.ARRAY` type, including support for "dimensions",
    indexed access, and simple matching such as
    :meth:`.types.ARRAY.Comparator.any` and
    :meth:`.types.ARRAY.Comparator.all`. The :class:`_postgresql.ARRAY` class
    also provides PostgreSQL-specific methods for containment operations,
    including :meth:`.postgresql.ARRAY.Comparator.contains`,
    :meth:`.postgresql.ARRAY.Comparator.contained_by`, and
    :meth:`.postgresql.ARRAY.Comparator.overlap`, e.g.::

        mytable.c.data.contains([1, 2])

    Indexed access is one-based by default, to match that of PostgreSQL;
    for zero-based indexed access, set
    :paramref:`_postgresql.ARRAY.zero_indexes`.

    Additionally, the :class:`_postgresql.ARRAY` type does not work directly
    in conjunction with the :class:`.ENUM` type. For a workaround, see the
    special type at :ref:`postgresql_array_of_enum`.

    .. container:: topic

        **Detecting Changes in ARRAY columns when using the ORM**

        The :class:`_postgresql.ARRAY` type, when used with the SQLAlchemy
        ORM, does not detect in-place mutations to the array. In order to
        detect these, the :mod:`sqlalchemy.ext.mutable` extension must be
        used, using the :class:`.MutableList` class::

            from sqlalchemy.dialects.postgresql import ARRAY
            from sqlalchemy.ext.mutable import MutableList


            class SomeOrmClass(Base):
                # ...

                data = Column(MutableList.as_mutable(ARRAY(Integer)))

        This extension will allow in-place changes to the array, such as
        ``.append()``, to produce events which will be detected by the
        unit of work. Note that changes to elements **inside** the array,
        including subarrays that are mutated in place, are **not** detected.

        Alternatively, assigning a new array value to an ORM element that
        replaces the old one will always trigger a change event.

    .. seealso::

        :class:`_types.ARRAY` - base array type

        :class:`_postgresql.array` - produces a literal array value.

    """

    def __init__(
        self,
        item_type: _TypeEngineArgument[_T],
        as_tuple: bool = False,
        dimensions: Optional[int] = None,
        zero_indexes: bool = False,
    ):
        """Construct an ARRAY.

        E.g.::

            Column("myarray", ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array. Note that
         dimensionality is irrelevant here, so multi-dimensional arrays like
         ``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
         ``ARRAY(ARRAY(Integer))`` or such.

        :param as_tuple=False: Specify whether return results
         should be converted to tuples from lists. DBAPIs such
         as psycopg2 return lists by default. When tuples are
         returned, the results are hashable.

        :param dimensions: if non-None, the ARRAY will assume a fixed
         number of dimensions. This will cause the DDL emitted for this
         ARRAY to include the exact number of bracket clauses ``[]``,
         and will also optimize the performance of the type overall.
         Note that PG arrays are always implicitly "non-dimensioned",
         meaning they can store any number of dimensions no matter how
         they were declared.

        :param zero_indexes=False: when True, index values will be converted
         between Python zero-based and PostgreSQL one-based indexes, e.g.
         a value of one will be added to all index values before passing
         to the database.

        """
        if isinstance(item_type, ARRAY):
            raise ValueError(
                "Do not nest ARRAY types; ARRAY(basetype) "
                "handles multi-dimensional arrays of basetype"
            )
        if isinstance(item_type, type):
            item_type = item_type()
        self.item_type = item_type
        self.as_tuple = as_tuple
        self.dimensions = dimensions
        self.zero_indexes = zero_indexes

    class Comparator(sqltypes.ARRAY.Comparator[_T]):
        """Define comparison operations for :class:`_types.ARRAY`.

        Note that these operations are in addition to those provided
        by the base :class:`.types.ARRAY.Comparator` class, including
        :meth:`.types.ARRAY.Comparator.any` and
        :meth:`.types.ARRAY.Comparator.all`.

        """

        def contains(
            self, other: typing_Any, **kwargs: typing_Any
        ) -> ColumnElement[bool]:
            """Boolean expression. Test if elements are a superset of the
            elements of the argument array expression.

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other: typing_Any) -> ColumnElement[bool]:
            """Boolean expression. Test if elements are a proper subset of the
            elements of the argument array expression.
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def overlap(self, other: typing_Any) -> ColumnElement[bool]:
            """Boolean expression. Test if array has elements in common with
            an argument array expression.
            """
            return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)

    comparator_factory = Comparator

    @util.memoized_property
    def _against_native_enum(self) -> bool:
        return (
            isinstance(self.item_type, sqltypes.Enum)
            and self.item_type.native_enum  # type: ignore[attr-defined]
        )

    def literal_processor(
        self, dialect: Dialect
    ) -> Optional[_LiteralProcessorType[_T]]:
        item_proc = self.item_type.dialect_impl(dialect).literal_processor(
            dialect
        )
        if item_proc is None:
            return None

        def to_str(elements: Iterable[typing_Any]) -> str:
            return f"ARRAY[{', '.join(elements)}]"

        def process(value: Sequence[typing_Any]) -> str:
            inner = self._apply_item_processor(
                value, item_proc, self.dimensions, to_str
            )
            return inner

        return process

    def bind_processor(
        self, dialect: Dialect
    ) -> Optional[_BindProcessorType[Sequence[typing_Any]]]:
        item_proc = self.item_type.dialect_impl(dialect).bind_processor(
            dialect
        )

        def process(
            value: Optional[Sequence[typing_Any]],
        ) -> Optional[list[typing_Any]]:
            if value is None:
                return value
            else:
                return self._apply_item_processor(
                    value, item_proc, self.dimensions, list
                )

        return process

    def result_processor(
        self, dialect: Dialect, coltype: object
    ) -> _ResultProcessorType[Sequence[typing_Any]]:
        item_proc = self.item_type.dialect_impl(dialect).result_processor(
            dialect, coltype
        )

        def process(
            value: Sequence[typing_Any],
        ) -> Optional[Sequence[typing_Any]]:
            if value is None:
                return value
            else:
                return self._apply_item_processor(
                    value,
                    item_proc,
                    self.dimensions,
                    tuple if self.as_tuple else list,
                )

        if self._against_native_enum:
            super_rp = process
            pattern = re.compile(r"^{(.*)}$")

            def handle_raw_string(value: str) -> list[str]:
                inner = pattern.match(value).group(1)  # type: ignore[union-attr]  # noqa: E501
                return _split_enum_values(inner)

            def process(
                value: Sequence[typing_Any],
            ) -> Optional[Sequence[typing_Any]]:
                if value is None:
                    return value
                # isinstance(value, str) is required to handle
                # the case where a TypeDecorator for an ARRAY of Enum is
                # used, as was required in SQLAlchemy < 1.3.17
                return super_rp(
                    handle_raw_string(value)
                    if isinstance(value, str)
                    else value
                )

        return process


def _split_enum_values(array_string: str) -> list[str]:
    if '"' not in array_string:
        # no escape char is present so it can just split on the comma
        return array_string.split(",") if array_string else []

    # handles quoted strings from:
    # r'abc,"quoted","also\\\\quoted", "quoted, comma", "esc \" quot", qpr'
    # returns
    # ['abc', 'quoted', 'also\\quoted', 'quoted, comma', 'esc " quot', 'qpr']
    text = array_string.replace(r"\"", "_$ESC_QUOTE$_")
    text = text.replace(r"\\", "\\")
    result = []
    on_quotes = re.split(r'(")', text)
    in_quotes = False
    for tok in on_quotes:
        if tok == '"':
            in_quotes = not in_quotes
        elif in_quotes:
            result.append(tok.replace("_$ESC_QUOTE$_", '"'))
        else:
            result.extend(re.findall(r"([^\s,]+),?", tok))
    return result
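# Illustrative sketch (editor's example, not part of this module):
# exercising _split_enum_values() on a simplified version of the quoted
# input documented in the comments above.
assert _split_enum_values('abc,"quoted","quoted, comma"') == [
    "abc",
    "quoted",
    "quoted, comma",
]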
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -0,0 +1,339 @@
# dialects/postgresql/dml.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations

from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union

from . import ext
from .._typing import _OnConflictConstraintT
from .._typing import _OnConflictIndexElementsT
from .._typing import _OnConflictIndexWhereT
from .._typing import _OnConflictSetT
from .._typing import _OnConflictWhereT
from ... import util
from ...sql import coercions
from ...sql import roles
from ...sql import schema
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import ColumnElement
from ...sql.elements import KeyedColumnElement
from ...sql.elements import TextClause
from ...sql.expression import alias
from ...util.typing import Self


__all__ = ("Insert", "insert")


def insert(table: _DMLTableArgument) -> Insert:
    """Construct a PostgreSQL-specific variant :class:`_postgresql.Insert`
    construct.

    .. container:: inherited_member

        The :func:`sqlalchemy.dialects.postgresql.insert` function creates
        a :class:`sqlalchemy.dialects.postgresql.Insert`. This class is based
        on the dialect-agnostic :class:`_sql.Insert` construct which may
        be constructed using the :func:`_sql.insert` function in
        SQLAlchemy Core.

    The :class:`_postgresql.Insert` construct includes the additional methods
    :meth:`_postgresql.Insert.on_conflict_do_update` and
    :meth:`_postgresql.Insert.on_conflict_do_nothing`.

    """
    return Insert(table)


class Insert(StandardInsert):
    """PostgreSQL-specific implementation of INSERT.

    Adds methods for PG-specific syntaxes such as ON CONFLICT.

    The :class:`_postgresql.Insert` object is created using the
    :func:`sqlalchemy.dialects.postgresql.insert` function.

    """

    stringify_dialect = "postgresql"
    inherit_cache = False

    @util.memoized_property
    def excluded(
        self,
    ) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
        """Provide the ``excluded`` namespace for an ON CONFLICT statement.

        PG's ON CONFLICT clause allows reference to the row that would
        be inserted, known as ``excluded``. This attribute provides
        all columns in this row to be referenceable.

        .. tip:: The :attr:`_postgresql.Insert.excluded` attribute is an
            instance of :class:`_expression.ColumnCollection`, which provides
            an interface the same as that of the :attr:`_schema.Table.c`
            collection described at :ref:`metadata_tables_and_columns`.
            With this collection, ordinary names are accessible like
            attributes (e.g. ``stmt.excluded.some_column``), but special
            names and dictionary method names should be accessed using
            indexed access, such as ``stmt.excluded["column name"]`` or
            ``stmt.excluded["values"]``. See the docstring for
            :class:`_expression.ColumnCollection` for further examples.

        .. seealso::

            :ref:`postgresql_insert_on_conflict` - example of how
            to use :attr:`_expression.Insert.excluded`

        """
        return alias(self.table, name="excluded").columns

    _on_conflict_exclusive = _exclusive_against(
        "_post_values_clause",
        msgs={
            "_post_values_clause": "This Insert construct already has "
            "an ON CONFLICT clause established"
        },
    )

    @_generative
    @_on_conflict_exclusive
    def on_conflict_do_update(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
        set_: _OnConflictSetT = None,
        where: _OnConflictWhereT = None,
    ) -> Self:
        r"""
        Specifies a DO UPDATE SET action for the ON CONFLICT clause.

        Either the ``constraint`` or ``index_elements`` argument is
        required, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`_schema.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        :param set\_:
         A dictionary or other mapping object
         where the keys are either names of columns in the target table,
         or :class:`_schema.Column` objects or other ORM-mapped columns
         matching that of the target table, and expressions or literals
         as values, specifying the ``SET`` actions to take.

         .. versionadded:: 1.4 The
            :paramref:`_postgresql.Insert.on_conflict_do_update.set_`
            parameter supports :class:`_schema.Column` objects from the
            target :class:`_schema.Table` as keys.

         .. warning:: This dictionary does **not** take into account
            Python-specified default UPDATE values or generation functions,
            e.g. those specified using :paramref:`_schema.Column.onupdate`.
            These values will not be exercised for an ON CONFLICT style of
            UPDATE, unless they are manually specified in the
            :paramref:`.Insert.on_conflict_do_update.set_` dictionary.

        :param where:
         Optional argument. An expression object representing a ``WHERE``
         clause that restricts the rows affected by ``DO UPDATE SET``. Rows
         not meeting the ``WHERE`` condition will not be updated (effectively
         a ``DO NOTHING`` for those rows).

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        self._post_values_clause = OnConflictDoUpdate(
            constraint, index_elements, index_where, set_, where
        )
        return self

    @_generative
    @_on_conflict_exclusive
    def on_conflict_do_nothing(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
    ) -> Self:
        """
        Specifies a DO NOTHING action for the ON CONFLICT clause.

        The ``constraint`` and ``index_elements`` arguments
        are optional, but only one of these can be specified.

        :param constraint:
         The name of a unique or exclusion constraint on the table,
         or the constraint object itself if it has a .name attribute.

        :param index_elements:
         A sequence consisting of string column names, :class:`_schema.Column`
         objects, or other column expression objects that will be used
         to infer a target index.

        :param index_where:
         Additional WHERE criterion that can be used to infer a
         conditional target index.

        .. seealso::

            :ref:`postgresql_insert_on_conflict`

        """
        self._post_values_clause = OnConflictDoNothing(
            constraint, index_elements, index_where
        )
        return self
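# Illustrative sketch (editor's example, not part of this module): a
# typical PostgreSQL upsert built from the methods above, combining an
# inferred index target with the ``excluded`` namespace; the table is
# hypothetical.
from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.dialects.postgresql import insert as pg_insert

users = Table(
    "users",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("name", String),
)

stmt = pg_insert(users).values(id=1, name="spongebob")
stmt = stmt.on_conflict_do_update(
    index_elements=[users.c.id],
    set_={"name": stmt.excluded.name},
)
# renders: INSERT INTO users (id, name) VALUES (%(id)s, %(name)s)
#          ON CONFLICT (id) DO UPDATE SET name = excluded.name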
class OnConflictClause(ClauseElement):
    stringify_dialect = "postgresql"

    constraint_target: Optional[str]
    inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
    inferred_target_whereclause: Optional[
        Union[ColumnElement[Any], TextClause]
    ]

    def __init__(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
    ):
        if constraint is not None:
            if not isinstance(constraint, str) and isinstance(
                constraint,
                (schema.Constraint, ext.ExcludeConstraint),
            ):
                constraint = getattr(constraint, "name") or constraint

        if constraint is not None:
            if index_elements is not None:
                raise ValueError(
                    "'constraint' and 'index_elements' are mutually exclusive"
                )

            if isinstance(constraint, str):
                self.constraint_target = constraint
                self.inferred_target_elements = None
                self.inferred_target_whereclause = None
            elif isinstance(constraint, schema.Index):
                index_elements = constraint.expressions
                index_where = constraint.dialect_options["postgresql"].get(
                    "where"
                )
            elif isinstance(constraint, ext.ExcludeConstraint):
                index_elements = constraint.columns
                index_where = constraint.where
            else:
                index_elements = constraint.columns
                index_where = constraint.dialect_options["postgresql"].get(
                    "where"
                )

        if index_elements is not None:
            self.constraint_target = None
            self.inferred_target_elements = [
                coercions.expect(roles.DDLConstraintColumnRole, column)
                for column in index_elements
            ]

            self.inferred_target_whereclause = (
                coercions.expect(
                    (
                        roles.StatementOptionRole
                        if isinstance(constraint, ext.ExcludeConstraint)
                        else roles.WhereHavingRole
                    ),
                    index_where,
                )
                if index_where is not None
                else None
            )

        elif constraint is None:
            self.constraint_target = self.inferred_target_elements = (
                self.inferred_target_whereclause
            ) = None


class OnConflictDoNothing(OnConflictClause):
    __visit_name__ = "on_conflict_do_nothing"


class OnConflictDoUpdate(OnConflictClause):
    __visit_name__ = "on_conflict_do_update"

    update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
    update_whereclause: Optional[ColumnElement[Any]]

    def __init__(
        self,
        constraint: _OnConflictConstraintT = None,
        index_elements: _OnConflictIndexElementsT = None,
        index_where: _OnConflictIndexWhereT = None,
        set_: _OnConflictSetT = None,
        where: _OnConflictWhereT = None,
    ):
        super().__init__(
            constraint=constraint,
            index_elements=index_elements,
            index_where=index_where,
        )

        if (
            self.inferred_target_elements is None
            and self.constraint_target is None
        ):
            raise ValueError(
                "Either constraint or index_elements, "
                "but not both, must be specified unless DO NOTHING"
            )

        if isinstance(set_, dict):
            if not set_:
                raise ValueError("set parameter dictionary must not be empty")
        elif isinstance(set_, ColumnCollection):
            set_ = dict(set_)
        else:
            raise ValueError(
                "set parameter must be a non-empty dictionary "
                "or a ColumnCollection such as the `.c.` collection "
                "of a Table object"
            )
        self.update_values_to_set = [
            (coercions.expect(roles.DMLColumnRole, key), value)
            for key, value in set_.items()
        ]
        self.update_whereclause = (
            coercions.expect(roles.WhereHavingRole, where)
            if where is not None
            else None
        )
@ -0,0 +1,501 @@
# dialects/postgresql/ext.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations

from typing import Any
from typing import TYPE_CHECKING
from typing import TypeVar

from . import types
from .array import ARRAY
from ...sql import coercions
from ...sql import elements
from ...sql import expression
from ...sql import functions
from ...sql import roles
from ...sql import schema
from ...sql.schema import ColumnCollectionConstraint
from ...sql.sqltypes import TEXT
from ...sql.visitors import InternalTraversal

_T = TypeVar("_T", bound=Any)

if TYPE_CHECKING:
    from ...sql.visitors import _TraverseInternalsType


class aggregate_order_by(expression.ColumnElement):
    """Represent a PostgreSQL aggregate order by expression.

    E.g.::

        from sqlalchemy.dialects.postgresql import aggregate_order_by

        expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
        stmt = select(expr)

    would represent the expression:

    .. sourcecode:: sql

        SELECT array_agg(a ORDER BY b DESC) FROM table;

    Similarly::

        expr = func.string_agg(
            table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
        )
        stmt = select(expr)

    would represent:

    .. sourcecode:: sql

        SELECT string_agg(a, ',' ORDER BY a) FROM table;

    .. versionchanged:: 1.2.13 - the ORDER BY argument may be multiple terms

    .. seealso::

        :class:`_functions.array_agg`

    """

    __visit_name__ = "aggregate_order_by"

    stringify_dialect = "postgresql"
    _traverse_internals: _TraverseInternalsType = [
        ("target", InternalTraversal.dp_clauseelement),
        ("type", InternalTraversal.dp_type),
        ("order_by", InternalTraversal.dp_clauseelement),
    ]

    def __init__(self, target, *order_by):
        self.target = coercions.expect(roles.ExpressionElementRole, target)
        self.type = self.target.type

        _lob = len(order_by)
        if _lob == 0:
            raise TypeError("at least one ORDER BY element is required")
        elif _lob == 1:
            self.order_by = coercions.expect(
                roles.ExpressionElementRole, order_by[0]
            )
        else:
            self.order_by = elements.ClauseList(
                *order_by, _literal_as_text_role=roles.ExpressionElementRole
            )

    def self_group(self, against=None):
        return self

    def get_children(self, **kwargs):
        return self.target, self.order_by

    def _copy_internals(self, clone=elements._clone, **kw):
        self.target = clone(self.target, **kw)
        self.order_by = clone(self.order_by, **kw)

    @property
    def _from_objects(self):
        return self.target._from_objects + self.order_by._from_objects


class ExcludeConstraint(ColumnCollectionConstraint):
    """A table-level EXCLUDE constraint.

    Defines an EXCLUDE constraint as described in the `PostgreSQL
    documentation`__.

    __ https://www.postgresql.org/docs/current/static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE

    """  # noqa

    __visit_name__ = "exclude_constraint"

    where = None
    inherit_cache = False

    create_drop_stringify_dialect = "postgresql"

    @elements._document_text_coercion(
        "where",
        ":class:`.ExcludeConstraint`",
        ":paramref:`.ExcludeConstraint.where`",
    )
    def __init__(self, *elements, **kw):
        r"""
        Create an :class:`.ExcludeConstraint` object.

        E.g.::

            const = ExcludeConstraint(
                (Column("period"), "&&"),
                (Column("group"), "="),
                where=(Column("group") != "some group"),
                ops={"group": "my_operator_class"},
            )

        The constraint is normally embedded into the :class:`_schema.Table`
        construct directly, or added later using :meth:`.append_constraint`::

            some_table = Table(
                "some_table",
                metadata,
                Column("id", Integer, primary_key=True),
                Column("period", TSRANGE()),
                Column("group", String),
            )

            some_table.append_constraint(
                ExcludeConstraint(
                    (some_table.c.period, "&&"),
                    (some_table.c.group, "="),
                    where=some_table.c.group != "some group",
                    name="some_table_excl_const",
                    ops={"group": "my_operator_class"},
                )
            )

        The exclude constraint defined in this example requires the
        ``btree_gist`` extension, which can be created using the
        command ``CREATE EXTENSION btree_gist;``.

        :param \*elements:

          A sequence of two-tuples of the form ``(column, operator)`` where
          "column" is either a :class:`_schema.Column` object, or a SQL
          expression element (e.g. ``func.int8range(table.from, table.to)``)
          or the name of a column as string, and "operator" is a string
          containing the operator to use (e.g. ``"&&"`` or ``"="``).

          In order to specify a column name when a :class:`_schema.Column`
          object is not available, while ensuring
          that any necessary quoting rules take effect, an ad-hoc
          :class:`_schema.Column` or :func:`_expression.column`
          object should be used.
          The ``column`` may also be a string SQL expression when
          passed as :func:`_expression.literal_column` or
          :func:`_expression.text`

        :param name:
          Optional, the in-database name of this constraint.

        :param deferrable:
          Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
          issuing DDL for this constraint.

        :param initially:
          Optional string. If set, emit INITIALLY <value> when issuing DDL
          for this constraint.

        :param using:
          Optional string. If set, emit USING <index_method> when issuing DDL
          for this constraint. Defaults to 'gist'.

        :param where:
          Optional SQL expression construct or literal SQL string.
          If set, emit WHERE <predicate> when issuing DDL
          for this constraint.

        :param ops:
          Optional dictionary. Used to define operator classes for the
          elements; works the same way as that of the
          :ref:`postgresql_ops <postgresql_operator_classes>`
          parameter specified to the :class:`_schema.Index` construct.

          .. versionadded:: 1.3.21

          .. seealso::

            :ref:`postgresql_operator_classes` - general description of how
            PostgreSQL operator classes are specified.

        """
        columns = []
        render_exprs = []
        self.operators = {}

        expressions, operators = zip(*elements)

        for (expr, column, strname, add_element), operator in zip(
            coercions.expect_col_expression_collection(
                roles.DDLConstraintColumnRole, expressions
            ),
            operators,
        ):
            if add_element is not None:
                columns.append(add_element)

            name = column.name if column is not None else strname

            if name is not None:
                # backwards compat
                self.operators[name] = operator

            render_exprs.append((expr, name, operator))

        self._render_exprs = render_exprs

        ColumnCollectionConstraint.__init__(
            self,
            *columns,
            name=kw.get("name"),
            deferrable=kw.get("deferrable"),
            initially=kw.get("initially"),
        )
        self.using = kw.get("using", "gist")
        where = kw.get("where")
        if where is not None:
            self.where = coercions.expect(roles.StatementOptionRole, where)

        self.ops = kw.get("ops", {})

    def _set_parent(self, table, **kw):
        super()._set_parent(table)

        self._render_exprs = [
            (
                expr if not isinstance(expr, str) else table.c[expr],
                name,
                operator,
            )
            for expr, name, operator in self._render_exprs
        ]

    def _copy(self, target_table=None, **kw):
        elements = [
            (
                schema._copy_expression(expr, self.parent, target_table),
                operator,
            )
            for expr, _, operator in self._render_exprs
        ]
        c = self.__class__(
            *elements,
            name=self.name,
            deferrable=self.deferrable,
            initially=self.initially,
            where=self.where,
            using=self.using,
        )
        c.dispatch._update(self.dispatch)
        return c


def array_agg(*arg, **kw):
    """PostgreSQL-specific form of :class:`_functions.array_agg`; ensures
    the return type is :class:`_postgresql.ARRAY` and not
    the plain :class:`_types.ARRAY`, unless an explicit ``type_``
    is passed.

    """
    kw["_default_array_type"] = ARRAY
    return functions.func.array_agg(*arg, **kw)


class _regconfig_fn(functions.GenericFunction[_T]):
    inherit_cache = True

    def __init__(self, *args, **kwargs):
        args = list(args)
        if len(args) > 1:
            initial_arg = coercions.expect(
                roles.ExpressionElementRole,
                args.pop(0),
                name=getattr(self, "name", None),
                apply_propagate_attrs=self,
                type_=types.REGCONFIG,
            )
            initial_arg = [initial_arg]
        else:
            initial_arg = []

        addtl_args = [
            coercions.expect(
                roles.ExpressionElementRole,
                c,
                name=getattr(self, "name", None),
                apply_propagate_attrs=self,
            )
            for c in args
        ]
        super().__init__(*(initial_arg + addtl_args), **kwargs)


class to_tsvector(_regconfig_fn):
    """The PostgreSQL ``to_tsvector`` SQL function.

    This function applies automatic casting of the REGCONFIG argument
    to use the :class:`_postgresql.REGCONFIG` datatype automatically,
    and applies a return type of :class:`_postgresql.TSVECTOR`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a
    PostgreSQL engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.to_tsvector` will be used automatically when invoking
    ``sqlalchemy.func.to_tsvector()``, ensuring the correct argument and
    return type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = types.TSVECTOR
||||
|
||||
|
||||
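# Illustrative usage (a sketch; "data_table" with a text column "document"
# is an assumption for the example).  Once the dialect is imported,
# func.to_tsvector() resolves to the class above; with two or more
# arguments, the first is coerced to REGCONFIG:
#
#     from sqlalchemy import func, select
#
#     # one argument: no REGCONFIG coercion takes place
#     stmt = select(func.to_tsvector(data_table.c.document))
#
#     # two arguments: "english" is rendered with a REGCONFIG cast, and
#     # the result compares as TSVECTOR
#     tsq = func.to_tsquery("english", "sql & database")
#     stmt = select(data_table.c.id).where(
#         func.to_tsvector("english", data_table.c.document).bool_op("@@")(tsq)
#     )

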
class to_tsquery(_regconfig_fn):
    """The PostgreSQL ``to_tsquery`` SQL function.

    This function automatically casts its REGCONFIG argument to the
    :class:`_postgresql.REGCONFIG` datatype and applies a return type of
    :class:`_postgresql.TSQUERY`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.to_tsquery` will be used automatically when invoking
    ``sqlalchemy.func.to_tsquery()``, ensuring the correct argument and return
    type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = types.TSQUERY


class plainto_tsquery(_regconfig_fn):
    """The PostgreSQL ``plainto_tsquery`` SQL function.

    This function automatically casts its REGCONFIG argument to the
    :class:`_postgresql.REGCONFIG` datatype and applies a return type of
    :class:`_postgresql.TSQUERY`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.plainto_tsquery` will be used automatically when
    invoking ``sqlalchemy.func.plainto_tsquery()``, ensuring the correct
    argument and return type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = types.TSQUERY


class phraseto_tsquery(_regconfig_fn):
    """The PostgreSQL ``phraseto_tsquery`` SQL function.

    This function automatically casts its REGCONFIG argument to the
    :class:`_postgresql.REGCONFIG` datatype and applies a return type of
    :class:`_postgresql.TSQUERY`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.phraseto_tsquery` will be used automatically when
    invoking ``sqlalchemy.func.phraseto_tsquery()``, ensuring the correct
    argument and return type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = types.TSQUERY


class websearch_to_tsquery(_regconfig_fn):
    """The PostgreSQL ``websearch_to_tsquery`` SQL function.

    This function automatically casts its REGCONFIG argument to the
    :class:`_postgresql.REGCONFIG` datatype and applies a return type of
    :class:`_postgresql.TSQUERY`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.websearch_to_tsquery` will be used automatically when
    invoking ``sqlalchemy.func.websearch_to_tsquery()``, ensuring the correct
    argument and return type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = types.TSQUERY


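# Illustrative sketch: on the PostgreSQL side, websearch_to_tsquery accepts
# free-form web-search style input ("quoted phrases", OR, -negation) rather
# than tsquery syntax.  Names below are assumptions for the example.
#
#     from sqlalchemy import func, select
#
#     tsq = func.websearch_to_tsquery("english", '"sql database" -oracle')
#     stmt = select(data_table.c.id).where(
#         func.to_tsvector("english", data_table.c.document).bool_op("@@")(tsq)
#     )

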
class ts_headline(_regconfig_fn):
    """The PostgreSQL ``ts_headline`` SQL function.

    This function automatically casts its REGCONFIG argument to the
    :class:`_postgresql.REGCONFIG` datatype and applies a return type of
    :class:`_types.TEXT`.

    Assuming the PostgreSQL dialect has been imported, either by invoking
    ``from sqlalchemy.dialects import postgresql``, or by creating a PostgreSQL
    engine using ``create_engine("postgresql...")``,
    :class:`_postgresql.ts_headline` will be used automatically when invoking
    ``sqlalchemy.func.ts_headline()``, ensuring the correct argument and return
    type handlers are used at compile and execution time.

    .. versionadded:: 2.0.0rc1

    """

    inherit_cache = True
    type = TEXT

    def __init__(self, *args, **kwargs):
        args = list(args)

        # parse types according to
        # https://www.postgresql.org/docs/current/textsearch-controls.html#TEXTSEARCH-HEADLINE
        if len(args) < 2:
            # invalid args; don't do anything
            has_regconfig = False
        elif (
            isinstance(args[1], elements.ColumnElement)
            and args[1].type._type_affinity is types.TSQUERY
        ):
            # tsquery is second argument, no regconfig argument
            has_regconfig = False
        else:
            has_regconfig = True

        if has_regconfig:
            initial_arg = coercions.expect(
                roles.ExpressionElementRole,
                args.pop(0),
                apply_propagate_attrs=self,
                name=getattr(self, "name", None),
                type_=types.REGCONFIG,
            )
            initial_arg = [initial_arg]
        else:
            initial_arg = []

        addtl_args = [
            coercions.expect(
                roles.ExpressionElementRole,
                c,
                name=getattr(self, "name", None),
                apply_propagate_attrs=self,
            )
            for c in args
        ]
        super().__init__(*(initial_arg + addtl_args), **kwargs)
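

# Illustrative sketch of the dispatch implemented above ("data_table" and
# its columns are assumptions for the example): ts_headline may be called
# with or without a leading REGCONFIG argument; the type check on the
# second argument decides whether the first receives the REGCONFIG cast.
#
#     from sqlalchemy import func, select
#
#     tsq = func.to_tsquery("english", "sql")
#
#     # no REGCONFIG: args[1] is typed TSQUERY, so no cast is applied
#     stmt = select(func.ts_headline(data_table.c.document, tsq))
#
#     # with REGCONFIG: "english" is coerced to types.REGCONFIG
#     stmt = select(func.ts_headline("english", data_table.c.document, tsq))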
@ -0,0 +1,406 @@
# dialects/postgresql/hstore.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors


import re

from .array import ARRAY
from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import GETITEM
from .operators import HAS_ALL
from .operators import HAS_ANY
from .operators import HAS_KEY
from ... import types as sqltypes
from ...sql import functions as sqlfunc


__all__ = ("HSTORE", "hstore")


class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
    """Represent the PostgreSQL HSTORE type.

    The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::

        data_table = Table(
            "data_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", HSTORE),
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(),
                {"data": {"key1": "value1", "key2": "value2"}},
            )

    :class:`.HSTORE` provides for a wide range of operations, including:

    * Index operations::

        data_table.c.data["some key"] == "some value"

    * Containment operations::

        data_table.c.data.has_key("some key")

        data_table.c.data.has_all(["one", "two", "three"])

    * Concatenation::

        data_table.c.data + {"k1": "v1"}

    For a full list of special methods see
    :class:`.HSTORE.comparator_factory`.

    .. container:: topic

        **Detecting Changes in HSTORE columns when using the ORM**

        For usage with the SQLAlchemy ORM, it may be desirable to combine the
        usage of :class:`.HSTORE` with the :class:`.MutableDict` dictionary
        provided by the :mod:`sqlalchemy.ext.mutable` extension.  This
        extension will allow "in-place" changes to the dictionary, e.g.
        addition of new keys or replacement/removal of existing keys to/from
        the current dictionary, to produce events which will be detected by
        the unit of work::

            from sqlalchemy.ext.mutable import MutableDict


            class MyClass(Base):
                __tablename__ = "data_table"

                id = Column(Integer, primary_key=True)
                data = Column(MutableDict.as_mutable(HSTORE))


            my_object = session.query(MyClass).one()

            # in-place mutation, requires Mutable extension
            # in order for the ORM to detect
            my_object.data["some_key"] = "some value"

            session.commit()

        When the :mod:`sqlalchemy.ext.mutable` extension is not used, the ORM
        will not be alerted to any changes to the contents of an existing
        dictionary, unless that dictionary value is re-assigned to the
        HSTORE-attribute itself, thus generating a change event.

    .. seealso::

        :class:`.hstore` - render the PostgreSQL ``hstore()`` function.


    """  # noqa: E501

    __visit_name__ = "HSTORE"
    hashable = False
    text_type = sqltypes.Text()

    def __init__(self, text_type=None):
        """Construct a new :class:`.HSTORE`.

        :param text_type: the type that should be used for indexed values.
         Defaults to :class:`_types.Text`.

        """
        if text_type is not None:
            self.text_type = text_type

    class Comparator(
        sqltypes.Indexable.Comparator, sqltypes.Concatenable.Comparator
    ):
        """Define comparison operations for :class:`.HSTORE`."""

        def has_key(self, other):
            """Boolean expression.  Test for presence of a key.  Note that
            the key may be a SQLA expression.
            """
            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)

        def has_all(self, other):
            """Boolean expression.  Test for presence of all keys in the
            hstore.
            """
            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)

        def has_any(self, other):
            """Boolean expression.  Test for presence of any key in the
            hstore.
            """
            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)

        def contains(self, other, **kwargs):
            """Boolean expression.  Test if the keys are a superset of
            (i.e. contain) the keys of the argument hstore expression.

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other):
            """Boolean expression.  Test if the keys are a subset of the
            keys of the argument hstore expression.
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def _setup_getitem(self, index):
            return GETITEM, index, self.type.text_type

        def defined(self, key):
            """Boolean expression.  Test for presence of a non-NULL value for
            the key.  Note that the key may be a SQLA expression.
            """
            return _HStoreDefinedFunction(self.expr, key)

        def delete(self, key):
            """HStore expression.  Returns the contents of this hstore with
            the given key deleted.  Note that the key may be a SQLA
            expression.
            """
            if isinstance(key, dict):
                key = _serialize_hstore(key)
            return _HStoreDeleteFunction(self.expr, key)

        def slice(self, array):
            """HStore expression.  Returns a subset of an hstore defined by
            array of keys.
            """
            return _HStoreSliceFunction(self.expr, array)

        def keys(self):
            """Text array expression.  Returns array of keys."""
            return _HStoreKeysFunction(self.expr)

        def vals(self):
            """Text array expression.  Returns array of values."""
            return _HStoreValsFunction(self.expr)

        def array(self):
            """Text array expression.  Returns array of alternating keys and
            values.
            """
            return _HStoreArrayFunction(self.expr)

        def matrix(self):
            """Text array expression.  Returns array of [key, value] pairs."""
            return _HStoreMatrixFunction(self.expr)

    comparator_factory = Comparator

    def bind_processor(self, dialect):
        # note that dialect-specific types like that of psycopg and
        # psycopg2 will override this method to allow driver-level conversion
        # instead, see _PsycopgHStore
        def process(value):
            if isinstance(value, dict):
                return _serialize_hstore(value)
            else:
                return value

        return process

    def result_processor(self, dialect, coltype):
        # note that dialect-specific types like that of psycopg and
        # psycopg2 will override this method to allow driver-level conversion
        # instead, see _PsycopgHStore
        def process(value):
            if value is not None:
                return _parse_hstore(value)
            else:
                return value

        return process


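# Illustrative sketch of querying with the comparator above ("data_table"
# is an assumed table with an HSTORE column "data"):
#
#     from sqlalchemy import select
#
#     stmt = select(data_table).where(data_table.c.data.has_key("k1"))
#     stmt = select(data_table.c.data["k1"])         # text, via GETITEM
#     stmt = select(data_table.c.data.delete("k1"))  # hstore minus a key

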
class hstore(sqlfunc.GenericFunction):
    """Construct an hstore value within a SQL expression using the
    PostgreSQL ``hstore()`` function.

    The :class:`.hstore` function accepts one or two arguments as described
    in the PostgreSQL documentation.

    E.g.::

        from sqlalchemy.dialects.postgresql import array, hstore

        select(hstore("key1", "value1"))

        select(
            hstore(
                array(["key1", "key2", "key3"]),
                array(["value1", "value2", "value3"]),
            )
        )

    .. seealso::

        :class:`.HSTORE` - the PostgreSQL ``HSTORE`` datatype.

    """

    type = HSTORE
    name = "hstore"
    inherit_cache = True


class _HStoreDefinedFunction(sqlfunc.GenericFunction):
    type = sqltypes.Boolean
    name = "defined"
    inherit_cache = True


class _HStoreDeleteFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = "delete"
    inherit_cache = True


class _HStoreSliceFunction(sqlfunc.GenericFunction):
    type = HSTORE
    name = "slice"
    inherit_cache = True


class _HStoreKeysFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "akeys"
    inherit_cache = True


class _HStoreValsFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "avals"
    inherit_cache = True


class _HStoreArrayFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "hstore_to_array"
    inherit_cache = True


class _HStoreMatrixFunction(sqlfunc.GenericFunction):
    type = ARRAY(sqltypes.Text)
    name = "hstore_to_matrix"
    inherit_cache = True


#
# parsing.  note that none of this is used with the psycopg2 backend,
# which provides its own native extensions.
#

# My best guess at the parsing rules of hstore literals, since no formal
# grammar is given.  This is mostly reverse engineered from PG's input parser
# behavior.
HSTORE_PAIR_RE = re.compile(
    r"""
    (
      "(?P<key> (\\ . | [^"])* )"  # Quoted key
    )
    [ ]* => [ ]*  # Pair operator, optional adjoining whitespace
    (
        (?P<value_null> NULL )  # NULL value
      | "(?P<value> (\\ . | [^"])* )"  # Quoted value
    )
    """,
    re.VERBOSE,
)

HSTORE_DELIMITER_RE = re.compile(
    r"""
    [ ]* , [ ]*
    """,
    re.VERBOSE,
)


def _parse_error(hstore_str, pos):
    """format an unmarshalling error."""

    ctx = 20
    hslen = len(hstore_str)

    parsed_tail = hstore_str[max(pos - ctx - 1, 0) : min(pos, hslen)]
    residual = hstore_str[min(pos, hslen) : min(pos + ctx + 1, hslen)]

    if len(parsed_tail) > ctx:
        parsed_tail = "[...]" + parsed_tail[1:]
    if len(residual) > ctx:
        residual = residual[:-1] + "[...]"

    return "After %r, could not parse residual at position %d: %r" % (
        parsed_tail,
        pos,
        residual,
    )


def _parse_hstore(hstore_str):
    """Parse an hstore from its literal string representation.

    Attempts to approximate PG's hstore input parsing rules as closely as
    possible.  Although currently this is not strictly necessary, since the
    current implementation of hstore's output syntax is stricter than what it
    accepts as input, the documentation makes no guarantees that will always
    be the case.

    """
    result = {}
    pos = 0
    pair_match = HSTORE_PAIR_RE.match(hstore_str)

    while pair_match is not None:
        key = pair_match.group("key").replace(r"\"", '"').replace("\\\\", "\\")
        if pair_match.group("value_null"):
            value = None
        else:
            value = (
                pair_match.group("value")
                .replace(r"\"", '"')
                .replace("\\\\", "\\")
            )
        result[key] = value

        pos += pair_match.end()

        delim_match = HSTORE_DELIMITER_RE.match(hstore_str[pos:])
        if delim_match is not None:
            pos += delim_match.end()

        pair_match = HSTORE_PAIR_RE.match(hstore_str[pos:])

    if pos != len(hstore_str):
        raise ValueError(_parse_error(hstore_str, pos))

    return result


def _serialize_hstore(val):
    """Serialize a dictionary into an hstore literal.  Keys and values must
    both be strings (except None for values).

    """

    def esc(s, position):
        if position == "value" and s is None:
            return "NULL"
        elif isinstance(s, str):
            return '"%s"' % s.replace("\\", "\\\\").replace('"', r"\"")
        else:
            raise ValueError(
                "%r in %s position is not a string." % (s, position)
            )

    return ", ".join(
        "%s=>%s" % (esc(k, "key"), esc(v, "value")) for k, v in val.items()
    )
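

# Illustrative round trip of the two helpers above, following the quoting
# rules they implement:
#
#     >>> _serialize_hstore({"key1": "value1", "key2": None})
#     '"key1"=>"value1", "key2"=>NULL'
#     >>> _parse_hstore('"key1"=>"value1", "key2"=>NULL')
#     {'key1': 'value1', 'key2': None}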
@ -0,0 +1,367 @@
# dialects/postgresql/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php

from __future__ import annotations

from typing import Any
from typing import Callable
from typing import List
from typing import Optional
from typing import TYPE_CHECKING
from typing import Union

from .array import ARRAY
from .array import array as _pg_array
from .operators import ASTEXT
from .operators import CONTAINED_BY
from .operators import CONTAINS
from .operators import DELETE_PATH
from .operators import HAS_ALL
from .operators import HAS_ANY
from .operators import HAS_KEY
from .operators import JSONPATH_ASTEXT
from .operators import PATH_EXISTS
from .operators import PATH_MATCH
from ... import types as sqltypes
from ...sql import cast
from ...sql._typing import _T

if TYPE_CHECKING:
    from ...engine.interfaces import Dialect
    from ...sql.elements import ColumnElement
    from ...sql.type_api import _BindProcessorType
    from ...sql.type_api import _LiteralProcessorType
    from ...sql.type_api import TypeEngine

__all__ = ("JSON", "JSONB")


class JSONPathType(sqltypes.JSON.JSONPathType):
    def _processor(
        self, dialect: Dialect, super_proc: Optional[Callable[[Any], Any]]
    ) -> Callable[[Any], Any]:
        def process(value: Any) -> Any:
            if isinstance(value, str):
                # If it's already a string assume that it's in json path
                # format. This allows using cast with json paths literals
                return value
            elif value:
                # otherwise, convert the sequence of keys into the
                # PostgreSQL path literal syntax, e.g. ("a", "b") -> "{a, b}"
                value = "{%s}" % (", ".join(map(str, value)))
            else:
                value = "{}"
            if super_proc:
                value = super_proc(value)
            return value

        return process

    def bind_processor(self, dialect: Dialect) -> _BindProcessorType[Any]:
        return self._processor(dialect, self.string_bind_processor(dialect))  # type: ignore[return-value]  # noqa: E501

    def literal_processor(
        self, dialect: Dialect
    ) -> _LiteralProcessorType[Any]:
        return self._processor(dialect, self.string_literal_processor(dialect))  # type: ignore[return-value]  # noqa: E501


class JSONPATH(JSONPathType):
    """JSON Path Type.

    This is usually required to cast literal values to json path when using
    JSON search functions such as ``jsonb_path_query_array`` or
    ``jsonb_path_exists``::

        stmt = sa.select(
            sa.func.jsonb_path_query_array(
                table.c.jsonb_col, cast("$.address.id", JSONPATH)
            )
        )

    """

    __visit_name__ = "JSONPATH"


class JSON(sqltypes.JSON):
    """Represent the PostgreSQL JSON type.

    :class:`_postgresql.JSON` is used automatically whenever the base
    :class:`_types.JSON` datatype is used against a PostgreSQL backend,
    however the base :class:`_types.JSON` datatype does not provide Python
    accessors for PostgreSQL-specific comparison methods such as
    :meth:`_postgresql.JSON.Comparator.astext`; additionally, to use
    PostgreSQL ``JSONB``, the :class:`_postgresql.JSONB` datatype should
    be used explicitly.

    .. seealso::

        :class:`_types.JSON` - main documentation for the generic
        cross-platform JSON datatype.

    The operators provided by the PostgreSQL version of :class:`_types.JSON`
    include:

    * Index operations (the ``->`` operator)::

        data_table.c.data["some key"]

        data_table.c.data[5]

    * Index operations returning text
      (the ``->>`` operator)::

        data_table.c.data["some key"].astext == "some value"

      Note that equivalent functionality is available via the
      :attr:`.JSON.Comparator.as_string` accessor.

    * Index operations with CAST
      (equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::

        data_table.c.data["some key"].astext.cast(Integer) == 5

      Note that equivalent functionality is available via the
      :attr:`.JSON.Comparator.as_integer` and similar accessors.

    * Path index operations (the ``#>`` operator)::

        data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]

    * Path index operations returning text (the ``#>>`` operator)::

        data_table.c.data[
            ("key_1", "key_2", 5, ..., "key_n")
        ].astext == "some value"

    Index operations return an expression object whose type defaults to
    :class:`_types.JSON`, so that further JSON-oriented instructions
    may be called upon the result type.

    Custom serializers and deserializers are specified at the dialect level,
    that is, using :func:`_sa.create_engine`.  The reason for this is that
    when using psycopg2, the DBAPI only allows serializers at the per-cursor
    or per-connection level.  E.g.::

        engine = create_engine(
            "postgresql+psycopg2://scott:tiger@localhost/test",
            json_serializer=my_serialize_fn,
            json_deserializer=my_deserialize_fn,
        )

    When using the psycopg2 dialect, the json_deserializer is registered
    against the database using ``psycopg2.extras.register_default_json``.

    .. seealso::

        :class:`_types.JSON` - Core level JSON type

        :class:`_postgresql.JSONB`

    """  # noqa

    render_bind_cast = True
    astext_type: TypeEngine[str] = sqltypes.Text()

    def __init__(
        self,
        none_as_null: bool = False,
        astext_type: Optional[TypeEngine[str]] = None,
    ):
        """Construct a :class:`_types.JSON` type.

        :param none_as_null: if True, persist the value ``None`` as a
         SQL NULL value, not the JSON encoding of ``null``.  Note that
         when this flag is False, the :func:`.null` construct can still
         be used to persist a NULL value::

             from sqlalchemy import null

             conn.execute(table.insert(), {"data": null()})

         .. seealso::

             :attr:`_types.JSON.NULL`

        :param astext_type: the type to use for the
         :attr:`.JSON.Comparator.astext`
         accessor on indexed attributes.  Defaults to :class:`_types.Text`.

        """
        super().__init__(none_as_null=none_as_null)
        if astext_type is not None:
            self.astext_type = astext_type

    class Comparator(sqltypes.JSON.Comparator[_T]):
        """Define comparison operations for :class:`_types.JSON`."""

        type: JSON

        @property
        def astext(self) -> ColumnElement[str]:
            """On an indexed expression, use the "astext" (e.g. "->>")
            conversion when rendered in SQL.

            E.g.::

                select(data_table.c.data["some key"].astext)

            .. seealso::

                :meth:`_expression.ColumnElement.cast`

            """
            if isinstance(self.expr.right.type, sqltypes.JSON.JSONPathType):
                return self.expr.left.operate(  # type: ignore[no-any-return]
                    JSONPATH_ASTEXT,
                    self.expr.right,
                    result_type=self.type.astext_type,
                )
            else:
                return self.expr.left.operate(  # type: ignore[no-any-return]
                    ASTEXT, self.expr.right, result_type=self.type.astext_type
                )

    comparator_factory = Comparator


class JSONB(JSON):
    """Represent the PostgreSQL JSONB type.

    The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
    e.g.::

        data_table = Table(
            "data_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("data", JSONB),
        )

        with engine.connect() as conn:
            conn.execute(
                data_table.insert(),
                {"data": {"key1": "value1", "key2": "value2"}},
            )

    The :class:`_postgresql.JSONB` type includes all operations provided by
    :class:`_types.JSON`, including the same behaviors for indexing
    operations.
    It also adds additional operators specific to JSONB, including
    :meth:`.JSONB.Comparator.has_key`, :meth:`.JSONB.Comparator.has_all`,
    :meth:`.JSONB.Comparator.has_any`, :meth:`.JSONB.Comparator.contains`,
    :meth:`.JSONB.Comparator.contained_by`,
    :meth:`.JSONB.Comparator.delete_path`,
    :meth:`.JSONB.Comparator.path_exists` and
    :meth:`.JSONB.Comparator.path_match`.

    Like the :class:`_types.JSON` type, the :class:`_postgresql.JSONB`
    type does not detect
    in-place changes when used with the ORM, unless the
    :mod:`sqlalchemy.ext.mutable` extension is used.

    Custom serializers and deserializers
    are shared with the :class:`_types.JSON` class,
    using the ``json_serializer``
    and ``json_deserializer`` keyword arguments.  These must be specified
    at the dialect level using :func:`_sa.create_engine`.  When using
    psycopg2, the serializers are associated with the jsonb type using
    ``psycopg2.extras.register_default_jsonb`` on a per-connection basis,
    in the same way that ``psycopg2.extras.register_default_json`` is used
    to register these handlers with the json type.

    .. seealso::

        :class:`_types.JSON`

    """

    __visit_name__ = "JSONB"

    class Comparator(JSON.Comparator[_T]):
        """Define comparison operations for :class:`_postgresql.JSONB`."""

        type: JSONB

        def has_key(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test for presence of a key (equivalent of
            the ``?`` operator).  Note that the key may be a SQLA expression.
            """
            return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)

        def has_all(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test for presence of all keys in jsonb
            (equivalent of the ``?&`` operator)
            """
            return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)

        def has_any(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test for presence of any key in jsonb
            (equivalent of the ``?|`` operator)
            """
            return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)

        def contains(self, other: Any, **kwargs: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test if the keys are a superset of
            (i.e. contain) the keys of the argument jsonb expression
            (equivalent of the ``@>`` operator).

            kwargs may be ignored by this operator but are required for API
            conformance.
            """
            return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)

        def contained_by(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test if the keys are a subset of the
            keys of the argument jsonb expression
            (equivalent of the ``<@`` operator).
            """
            return self.operate(
                CONTAINED_BY, other, result_type=sqltypes.Boolean
            )

        def delete_path(
            self, array: Union[List[str], _pg_array[str]]
        ) -> ColumnElement[JSONB]:
            """JSONB expression.  Deletes field or array element specified in
            the argument array (equivalent of the ``#-`` operator).

            The input may be a list of strings that will be coerced to an
            ``ARRAY`` or an instance of :func:`_postgresql.array`.

            .. versionadded:: 2.0
            """
            if not isinstance(array, _pg_array):
                array = _pg_array(array)
            right_side = cast(array, ARRAY(sqltypes.TEXT))
            return self.operate(DELETE_PATH, right_side, result_type=JSONB)

        def path_exists(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test for presence of item given by the
            argument JSONPath expression (equivalent of the ``@?`` operator).

            .. versionadded:: 2.0
            """
            return self.operate(
                PATH_EXISTS, other, result_type=sqltypes.Boolean
            )

        def path_match(self, other: Any) -> ColumnElement[bool]:
            """Boolean expression.  Test if JSONPath predicate given by the
            argument JSONPath expression matches
            (equivalent of the ``@@`` operator).

            Only the first item of the result is taken into account.

            .. versionadded:: 2.0
            """
            return self.operate(
                PATH_MATCH, other, result_type=sqltypes.Boolean
            )

    comparator_factory = Comparator
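

# Illustrative usage of the JSONB comparator above ("data_table" is an
# assumed table with a JSONB column "data"):
#
#     from sqlalchemy import select
#
#     # ? operator
#     stmt = select(data_table).where(data_table.c.data.has_key("k1"))
#
#     # @> operator
#     stmt = select(data_table).where(
#         data_table.c.data.contains({"k1": "v1"})
#     )
#
#     # #- operator; the list is coerced through postgresql.array
#     stmt = select(data_table.c.data.delete_path(["k1", "nested"]))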