Update 2025-04-24_11:44:19

Author: oib
Date: 2025-04-24 11:44:23 +02:00
Commit: e748c737f4
3408 changed files with 717481 additions and 0 deletions


@@ -0,0 +1,67 @@
# dialects/oracle/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from types import ModuleType
from . import base # noqa
from . import cx_oracle # noqa
from . import oracledb # noqa
from .base import BFILE
from .base import BINARY_DOUBLE
from .base import BINARY_FLOAT
from .base import BLOB
from .base import CHAR
from .base import CLOB
from .base import DATE
from .base import DOUBLE_PRECISION
from .base import FLOAT
from .base import INTERVAL
from .base import LONG
from .base import NCHAR
from .base import NCLOB
from .base import NUMBER
from .base import NVARCHAR
from .base import NVARCHAR2
from .base import RAW
from .base import REAL
from .base import ROWID
from .base import TIMESTAMP
from .base import VARCHAR
from .base import VARCHAR2
# Alias oracledb also as oracledb_async
oracledb_async = type(
"oracledb_async", (ModuleType,), {"dialect": oracledb.dialect_async}
)
base.dialect = dialect = cx_oracle.dialect
__all__ = (
"VARCHAR",
"NVARCHAR",
"CHAR",
"NCHAR",
"DATE",
"NUMBER",
"BLOB",
"BFILE",
"CLOB",
"NCLOB",
"TIMESTAMP",
"RAW",
"FLOAT",
"DOUBLE_PRECISION",
"BINARY_DOUBLE",
"BINARY_FLOAT",
"LONG",
"dialect",
"INTERVAL",
"VARCHAR2",
"NVARCHAR2",
"ROWID",
"REAL",
)
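
As a usage illustration (not part of the file above; connection details are
placeholders), the dialect names registered here are what allow engine URLs
such as ``oracle+oracledb://`` and ``oracle+oracledb_async://`` to resolve::

    from sqlalchemy import create_engine
    from sqlalchemy.ext.asyncio import create_async_engine

    # plain "oracle+oracledb" selects the synchronous dialect
    sync_engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
    )

    # "oracle+oracledb_async" resolves through the oracledb_async module
    # alias created above and selects the asyncio dialect
    async_engine = create_async_engine(
        "oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1"
    )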

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -0,0 +1,507 @@
# dialects/oracle/dictionary.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from .types import DATE
from .types import LONG
from .types import NUMBER
from .types import RAW
from .types import VARCHAR2
from ... import Column
from ... import MetaData
from ... import Table
from ... import table
from ...sql.sqltypes import CHAR
# constants
DB_LINK_PLACEHOLDER = "__$sa_dblink$__"
# tables
dual = table("dual")
dictionary_meta = MetaData()
# NOTE: all the dictionary_meta tables are aliases because Oracle does not
# like using the full table@dblink for every column in a query, and complains
# with ORA-00960: ambiguous column naming in select list
all_tables = Table(
"all_tables" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("tablespace_name", VARCHAR2(30)),
Column("cluster_name", VARCHAR2(128)),
Column("iot_name", VARCHAR2(128)),
Column("status", VARCHAR2(8)),
Column("pct_free", NUMBER),
Column("pct_used", NUMBER),
Column("ini_trans", NUMBER),
Column("max_trans", NUMBER),
Column("initial_extent", NUMBER),
Column("next_extent", NUMBER),
Column("min_extents", NUMBER),
Column("max_extents", NUMBER),
Column("pct_increase", NUMBER),
Column("freelists", NUMBER),
Column("freelist_groups", NUMBER),
Column("logging", VARCHAR2(3)),
Column("backed_up", VARCHAR2(1)),
Column("num_rows", NUMBER),
Column("blocks", NUMBER),
Column("empty_blocks", NUMBER),
Column("avg_space", NUMBER),
Column("chain_cnt", NUMBER),
Column("avg_row_len", NUMBER),
Column("avg_space_freelist_blocks", NUMBER),
Column("num_freelist_blocks", NUMBER),
Column("degree", VARCHAR2(10)),
Column("instances", VARCHAR2(10)),
Column("cache", VARCHAR2(5)),
Column("table_lock", VARCHAR2(8)),
Column("sample_size", NUMBER),
Column("last_analyzed", DATE),
Column("partitioned", VARCHAR2(3)),
Column("iot_type", VARCHAR2(12)),
Column("temporary", VARCHAR2(1)),
Column("secondary", VARCHAR2(1)),
Column("nested", VARCHAR2(3)),
Column("buffer_pool", VARCHAR2(7)),
Column("flash_cache", VARCHAR2(7)),
Column("cell_flash_cache", VARCHAR2(7)),
Column("row_movement", VARCHAR2(8)),
Column("global_stats", VARCHAR2(3)),
Column("user_stats", VARCHAR2(3)),
Column("duration", VARCHAR2(15)),
Column("skip_corrupt", VARCHAR2(8)),
Column("monitoring", VARCHAR2(3)),
Column("cluster_owner", VARCHAR2(128)),
Column("dependencies", VARCHAR2(8)),
Column("compression", VARCHAR2(8)),
Column("compress_for", VARCHAR2(30)),
Column("dropped", VARCHAR2(3)),
Column("read_only", VARCHAR2(3)),
Column("segment_created", VARCHAR2(3)),
Column("result_cache", VARCHAR2(7)),
Column("clustering", VARCHAR2(3)),
Column("activity_tracking", VARCHAR2(23)),
Column("dml_timestamp", VARCHAR2(25)),
Column("has_identity", VARCHAR2(3)),
Column("container_data", VARCHAR2(3)),
Column("inmemory", VARCHAR2(8)),
Column("inmemory_priority", VARCHAR2(8)),
Column("inmemory_distribute", VARCHAR2(15)),
Column("inmemory_compression", VARCHAR2(17)),
Column("inmemory_duplicate", VARCHAR2(13)),
Column("default_collation", VARCHAR2(100)),
Column("duplicated", VARCHAR2(1)),
Column("sharded", VARCHAR2(1)),
Column("externally_sharded", VARCHAR2(1)),
Column("externally_duplicated", VARCHAR2(1)),
Column("external", VARCHAR2(3)),
Column("hybrid", VARCHAR2(3)),
Column("cellmemory", VARCHAR2(24)),
Column("containers_default", VARCHAR2(3)),
Column("container_map", VARCHAR2(3)),
Column("extended_data_link", VARCHAR2(3)),
Column("extended_data_link_map", VARCHAR2(3)),
Column("inmemory_service", VARCHAR2(12)),
Column("inmemory_service_name", VARCHAR2(1000)),
Column("container_map_object", VARCHAR2(3)),
Column("memoptimize_read", VARCHAR2(8)),
Column("memoptimize_write", VARCHAR2(8)),
Column("has_sensitive_column", VARCHAR2(3)),
Column("admit_null", VARCHAR2(3)),
Column("data_link_dml_enabled", VARCHAR2(3)),
Column("logical_replication", VARCHAR2(8)),
).alias("a_tables")
all_views = Table(
"all_views" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("view_name", VARCHAR2(128), nullable=False),
Column("text_length", NUMBER),
Column("text", LONG),
Column("text_vc", VARCHAR2(4000)),
Column("type_text_length", NUMBER),
Column("type_text", VARCHAR2(4000)),
Column("oid_text_length", NUMBER),
Column("oid_text", VARCHAR2(4000)),
Column("view_type_owner", VARCHAR2(128)),
Column("view_type", VARCHAR2(128)),
Column("superview_name", VARCHAR2(128)),
Column("editioning_view", VARCHAR2(1)),
Column("read_only", VARCHAR2(1)),
Column("container_data", VARCHAR2(1)),
Column("bequeath", VARCHAR2(12)),
Column("origin_con_id", VARCHAR2(256)),
Column("default_collation", VARCHAR2(100)),
Column("containers_default", VARCHAR2(3)),
Column("container_map", VARCHAR2(3)),
Column("extended_data_link", VARCHAR2(3)),
Column("extended_data_link_map", VARCHAR2(3)),
Column("has_sensitive_column", VARCHAR2(3)),
Column("admit_null", VARCHAR2(3)),
Column("pdb_local_only", VARCHAR2(3)),
).alias("a_views")
all_sequences = Table(
"all_sequences" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("sequence_owner", VARCHAR2(128), nullable=False),
Column("sequence_name", VARCHAR2(128), nullable=False),
Column("min_value", NUMBER),
Column("max_value", NUMBER),
Column("increment_by", NUMBER, nullable=False),
Column("cycle_flag", VARCHAR2(1)),
Column("order_flag", VARCHAR2(1)),
Column("cache_size", NUMBER, nullable=False),
Column("last_number", NUMBER, nullable=False),
Column("scale_flag", VARCHAR2(1)),
Column("extend_flag", VARCHAR2(1)),
Column("sharded_flag", VARCHAR2(1)),
Column("session_flag", VARCHAR2(1)),
Column("keep_value", VARCHAR2(1)),
).alias("a_sequences")
all_users = Table(
"all_users" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("username", VARCHAR2(128), nullable=False),
Column("user_id", NUMBER, nullable=False),
Column("created", DATE, nullable=False),
Column("common", VARCHAR2(3)),
Column("oracle_maintained", VARCHAR2(1)),
Column("inherited", VARCHAR2(3)),
Column("default_collation", VARCHAR2(100)),
Column("implicit", VARCHAR2(3)),
Column("all_shard", VARCHAR2(3)),
Column("external_shard", VARCHAR2(3)),
).alias("a_users")
all_mviews = Table(
"all_mviews" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("mview_name", VARCHAR2(128), nullable=False),
Column("container_name", VARCHAR2(128), nullable=False),
Column("query", LONG),
Column("query_len", NUMBER(38)),
Column("updatable", VARCHAR2(1)),
Column("update_log", VARCHAR2(128)),
Column("master_rollback_seg", VARCHAR2(128)),
Column("master_link", VARCHAR2(128)),
Column("rewrite_enabled", VARCHAR2(1)),
Column("rewrite_capability", VARCHAR2(9)),
Column("refresh_mode", VARCHAR2(6)),
Column("refresh_method", VARCHAR2(8)),
Column("build_mode", VARCHAR2(9)),
Column("fast_refreshable", VARCHAR2(18)),
Column("last_refresh_type", VARCHAR2(8)),
Column("last_refresh_date", DATE),
Column("last_refresh_end_time", DATE),
Column("staleness", VARCHAR2(19)),
Column("after_fast_refresh", VARCHAR2(19)),
Column("unknown_prebuilt", VARCHAR2(1)),
Column("unknown_plsql_func", VARCHAR2(1)),
Column("unknown_external_table", VARCHAR2(1)),
Column("unknown_consider_fresh", VARCHAR2(1)),
Column("unknown_import", VARCHAR2(1)),
Column("unknown_trusted_fd", VARCHAR2(1)),
Column("compile_state", VARCHAR2(19)),
Column("use_no_index", VARCHAR2(1)),
Column("stale_since", DATE),
Column("num_pct_tables", NUMBER),
Column("num_fresh_pct_regions", NUMBER),
Column("num_stale_pct_regions", NUMBER),
Column("segment_created", VARCHAR2(3)),
Column("evaluation_edition", VARCHAR2(128)),
Column("unusable_before", VARCHAR2(128)),
Column("unusable_beginning", VARCHAR2(128)),
Column("default_collation", VARCHAR2(100)),
Column("on_query_computation", VARCHAR2(1)),
Column("auto", VARCHAR2(3)),
).alias("a_mviews")
all_tab_identity_cols = Table(
"all_tab_identity_cols" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_name", VARCHAR2(128), nullable=False),
Column("generation_type", VARCHAR2(10)),
Column("sequence_name", VARCHAR2(128), nullable=False),
Column("identity_options", VARCHAR2(298)),
).alias("a_tab_identity_cols")
all_tab_cols = Table(
"all_tab_cols" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_name", VARCHAR2(128), nullable=False),
Column("data_type", VARCHAR2(128)),
Column("data_type_mod", VARCHAR2(3)),
Column("data_type_owner", VARCHAR2(128)),
Column("data_length", NUMBER, nullable=False),
Column("data_precision", NUMBER),
Column("data_scale", NUMBER),
Column("nullable", VARCHAR2(1)),
Column("column_id", NUMBER),
Column("default_length", NUMBER),
Column("data_default", LONG),
Column("num_distinct", NUMBER),
Column("low_value", RAW(1000)),
Column("high_value", RAW(1000)),
Column("density", NUMBER),
Column("num_nulls", NUMBER),
Column("num_buckets", NUMBER),
Column("last_analyzed", DATE),
Column("sample_size", NUMBER),
Column("character_set_name", VARCHAR2(44)),
Column("char_col_decl_length", NUMBER),
Column("global_stats", VARCHAR2(3)),
Column("user_stats", VARCHAR2(3)),
Column("avg_col_len", NUMBER),
Column("char_length", NUMBER),
Column("char_used", VARCHAR2(1)),
Column("v80_fmt_image", VARCHAR2(3)),
Column("data_upgraded", VARCHAR2(3)),
Column("hidden_column", VARCHAR2(3)),
Column("virtual_column", VARCHAR2(3)),
Column("segment_column_id", NUMBER),
Column("internal_column_id", NUMBER, nullable=False),
Column("histogram", VARCHAR2(15)),
Column("qualified_col_name", VARCHAR2(4000)),
Column("user_generated", VARCHAR2(3)),
Column("default_on_null", VARCHAR2(3)),
Column("identity_column", VARCHAR2(3)),
Column("evaluation_edition", VARCHAR2(128)),
Column("unusable_before", VARCHAR2(128)),
Column("unusable_beginning", VARCHAR2(128)),
Column("collation", VARCHAR2(100)),
Column("collated_column_id", NUMBER),
).alias("a_tab_cols")
all_tab_comments = Table(
"all_tab_comments" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("table_type", VARCHAR2(11)),
Column("comments", VARCHAR2(4000)),
Column("origin_con_id", NUMBER),
).alias("a_tab_comments")
all_col_comments = Table(
"all_col_comments" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_name", VARCHAR2(128), nullable=False),
Column("comments", VARCHAR2(4000)),
Column("origin_con_id", NUMBER),
).alias("a_col_comments")
all_mview_comments = Table(
"all_mview_comments" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("mview_name", VARCHAR2(128), nullable=False),
Column("comments", VARCHAR2(4000)),
).alias("a_mview_comments")
all_ind_columns = Table(
"all_ind_columns" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("index_owner", VARCHAR2(128), nullable=False),
Column("index_name", VARCHAR2(128), nullable=False),
Column("table_owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_name", VARCHAR2(4000)),
Column("column_position", NUMBER, nullable=False),
Column("column_length", NUMBER, nullable=False),
Column("char_length", NUMBER),
Column("descend", VARCHAR2(4)),
Column("collated_column_id", NUMBER),
).alias("a_ind_columns")
all_indexes = Table(
"all_indexes" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("index_name", VARCHAR2(128), nullable=False),
Column("index_type", VARCHAR2(27)),
Column("table_owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("table_type", CHAR(11)),
Column("uniqueness", VARCHAR2(9)),
Column("compression", VARCHAR2(13)),
Column("prefix_length", NUMBER),
Column("tablespace_name", VARCHAR2(30)),
Column("ini_trans", NUMBER),
Column("max_trans", NUMBER),
Column("initial_extent", NUMBER),
Column("next_extent", NUMBER),
Column("min_extents", NUMBER),
Column("max_extents", NUMBER),
Column("pct_increase", NUMBER),
Column("pct_threshold", NUMBER),
Column("include_column", NUMBER),
Column("freelists", NUMBER),
Column("freelist_groups", NUMBER),
Column("pct_free", NUMBER),
Column("logging", VARCHAR2(3)),
Column("blevel", NUMBER),
Column("leaf_blocks", NUMBER),
Column("distinct_keys", NUMBER),
Column("avg_leaf_blocks_per_key", NUMBER),
Column("avg_data_blocks_per_key", NUMBER),
Column("clustering_factor", NUMBER),
Column("status", VARCHAR2(8)),
Column("num_rows", NUMBER),
Column("sample_size", NUMBER),
Column("last_analyzed", DATE),
Column("degree", VARCHAR2(40)),
Column("instances", VARCHAR2(40)),
Column("partitioned", VARCHAR2(3)),
Column("temporary", VARCHAR2(1)),
Column("generated", VARCHAR2(1)),
Column("secondary", VARCHAR2(1)),
Column("buffer_pool", VARCHAR2(7)),
Column("flash_cache", VARCHAR2(7)),
Column("cell_flash_cache", VARCHAR2(7)),
Column("user_stats", VARCHAR2(3)),
Column("duration", VARCHAR2(15)),
Column("pct_direct_access", NUMBER),
Column("ityp_owner", VARCHAR2(128)),
Column("ityp_name", VARCHAR2(128)),
Column("parameters", VARCHAR2(1000)),
Column("global_stats", VARCHAR2(3)),
Column("domidx_status", VARCHAR2(12)),
Column("domidx_opstatus", VARCHAR2(6)),
Column("funcidx_status", VARCHAR2(8)),
Column("join_index", VARCHAR2(3)),
Column("iot_redundant_pkey_elim", VARCHAR2(3)),
Column("dropped", VARCHAR2(3)),
Column("visibility", VARCHAR2(9)),
Column("domidx_management", VARCHAR2(14)),
Column("segment_created", VARCHAR2(3)),
Column("orphaned_entries", VARCHAR2(3)),
Column("indexing", VARCHAR2(7)),
Column("auto", VARCHAR2(3)),
).alias("a_indexes")
all_ind_expressions = Table(
"all_ind_expressions" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("index_owner", VARCHAR2(128), nullable=False),
Column("index_name", VARCHAR2(128), nullable=False),
Column("table_owner", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_expression", LONG),
Column("column_position", NUMBER, nullable=False),
).alias("a_ind_expressions")
all_constraints = Table(
"all_constraints" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128)),
Column("constraint_name", VARCHAR2(128)),
Column("constraint_type", VARCHAR2(1)),
Column("table_name", VARCHAR2(128)),
Column("search_condition", LONG),
Column("search_condition_vc", VARCHAR2(4000)),
Column("r_owner", VARCHAR2(128)),
Column("r_constraint_name", VARCHAR2(128)),
Column("delete_rule", VARCHAR2(9)),
Column("status", VARCHAR2(8)),
Column("deferrable", VARCHAR2(14)),
Column("deferred", VARCHAR2(9)),
Column("validated", VARCHAR2(13)),
Column("generated", VARCHAR2(14)),
Column("bad", VARCHAR2(3)),
Column("rely", VARCHAR2(4)),
Column("last_change", DATE),
Column("index_owner", VARCHAR2(128)),
Column("index_name", VARCHAR2(128)),
Column("invalid", VARCHAR2(7)),
Column("view_related", VARCHAR2(14)),
Column("origin_con_id", VARCHAR2(256)),
).alias("a_constraints")
all_cons_columns = Table(
"all_cons_columns" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("constraint_name", VARCHAR2(128), nullable=False),
Column("table_name", VARCHAR2(128), nullable=False),
Column("column_name", VARCHAR2(4000)),
Column("position", NUMBER),
).alias("a_cons_columns")
# TODO: figure out if this is still relevant, since there is no mention of it at
# https://docs.oracle.com/en/database/oracle/oracle-database/21/refrn/ALL_DB_LINKS.html
# original note:
# using user_db_links here since all_db_links appears
# to have more restricted permissions.
# https://docs.oracle.com/cd/B28359_01/server.111/b28310/ds_admin005.htm
# will need to hear from more users if we are doing
# the right thing here. See [ticket:2619]
all_db_links = Table(
"all_db_links" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("db_link", VARCHAR2(128), nullable=False),
Column("username", VARCHAR2(128)),
Column("host", VARCHAR2(2000)),
Column("created", DATE, nullable=False),
Column("hidden", VARCHAR2(3)),
Column("shard_internal", VARCHAR2(3)),
Column("valid", VARCHAR2(3)),
Column("intra_cdb", VARCHAR2(3)),
).alias("a_db_links")
all_synonyms = Table(
"all_synonyms" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128)),
Column("synonym_name", VARCHAR2(128)),
Column("table_owner", VARCHAR2(128)),
Column("table_name", VARCHAR2(128)),
Column("db_link", VARCHAR2(128)),
Column("origin_con_id", VARCHAR2(256)),
).alias("a_synonyms")
all_objects = Table(
"all_objects" + DB_LINK_PLACEHOLDER,
dictionary_meta,
Column("owner", VARCHAR2(128), nullable=False),
Column("object_name", VARCHAR2(128), nullable=False),
Column("subobject_name", VARCHAR2(128)),
Column("object_id", NUMBER, nullable=False),
Column("data_object_id", NUMBER),
Column("object_type", VARCHAR2(23)),
Column("created", DATE, nullable=False),
Column("last_ddl_time", DATE, nullable=False),
Column("timestamp", VARCHAR2(19)),
Column("status", VARCHAR2(7)),
Column("temporary", VARCHAR2(1)),
Column("generated", VARCHAR2(1)),
Column("secondary", VARCHAR2(1)),
Column("namespace", NUMBER, nullable=False),
Column("edition_name", VARCHAR2(128)),
Column("sharing", VARCHAR2(13)),
Column("editionable", VARCHAR2(1)),
Column("oracle_maintained", VARCHAR2(1)),
Column("application", VARCHAR2(1)),
Column("default_collation", VARCHAR2(100)),
Column("duplicated", VARCHAR2(1)),
Column("sharded", VARCHAR2(1)),
Column("created_appid", NUMBER),
Column("created_vsnid", NUMBER),
Column("modified_appid", NUMBER),
Column("modified_vsnid", NUMBER),
).alias("a_objects")
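
As a usage illustration (not part of the file above), these aliased dictionary
tables are consumed with ordinary Core constructs; the ``DB_LINK_PLACEHOLDER``
token embedded in each table name is later replaced by the dialect with the
actual ``@dblink`` suffix (or removed) before the query is executed::

    from sqlalchemy import bindparam, select

    query = (
        select(all_tables.c.table_name, all_tables.c.tablespace_name)
        .where(all_tables.c.owner == bindparam("owner"))
        .order_by(all_tables.c.table_name)
    )
    # str(query) still renders the table name with the raw placeholder at
    # this point; the substitution happens inside the dialect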


@@ -0,0 +1,947 @@
# dialects/oracle/oracledb.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r""".. dialect:: oracle+oracledb
:name: python-oracledb
:dbapi: oracledb
:connectstring: oracle+oracledb://user:pass@hostname:port[/dbname][?service_name=<service>[&key=value&key=value...]]
:url: https://oracle.github.io/python-oracledb/
Description
-----------
Python-oracledb is the Oracle Database driver for Python. It features a default
"thin" client mode that requires no dependencies, and an optional "thick" mode
that uses Oracle Client libraries. It supports SQLAlchemy features including
two phase transactions and Asyncio.
Python-oracledb is the renamed, updated cx_Oracle driver. Oracle is no longer
doing any releases in the cx_Oracle namespace.
The SQLAlchemy ``oracledb`` dialect provides both a sync and an async
implementation under the same dialect name. The proper version is
selected depending on how the engine is created:
* calling :func:`_sa.create_engine` with ``oracle+oracledb://...`` will
automatically select the sync version::
from sqlalchemy import create_engine
sync_engine = create_engine(
"oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
)
* calling :func:`_asyncio.create_async_engine` with ``oracle+oracledb://...``
will automatically select the async version::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine(
"oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
)
The asyncio version of the dialect may also be specified explicitly using the
``oracledb_async`` suffix::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine(
"oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1"
)
.. versionadded:: 2.0.25 added support for the async version of oracledb.
Thick mode support
------------------
By default, the python-oracledb driver runs in a "thin" mode that does not
require Oracle Client libraries to be installed. The driver also supports a
"thick" mode that uses Oracle Client libraries to get functionality such as
Oracle Application Continuity.
To enable thick mode, call `oracledb.init_oracle_client()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.init_oracle_client>`_
explicitly, or pass the parameter ``thick_mode=True`` to
:func:`_sa.create_engine`. To pass custom arguments to
``init_oracle_client()``, like the ``lib_dir`` path, a dict may be passed, for
example::
engine = sa.create_engine(
"oracle+oracledb://...",
thick_mode={
"lib_dir": "/path/to/oracle/client/lib",
"config_dir": "/path/to/network_config_file_directory",
"driver_name": "my-app : 1.0.0",
},
)
Note that passing a ``lib_dir`` path should only be done on macOS or
Windows. On Linux it does not behave as you might expect.
.. seealso::
python-oracledb documentation `Enabling python-oracledb Thick mode
<https://python-oracledb.readthedocs.io/en/latest/user_guide/initialization.html#enabling-python-oracledb-thick-mode>`_
Connecting to Oracle Database
-----------------------------
python-oracledb provides several methods of indicating the target database.
The dialect translates from a series of different URL forms.
Given the hostname, port and service name of the target database, you can
connect in SQLAlchemy using the ``service_name`` query string parameter::
engine = create_engine(
"oracle+oracledb://scott:tiger@hostname:port?service_name=myservice"
)
Connecting with Easy Connect strings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can pass any valid python-oracledb connection string as the ``dsn`` key
value in a :paramref:`_sa.create_engine.connect_args` dictionary. See
python-oracledb documentation `Oracle Net Services Connection Strings
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#oracle-net-services-connection-strings>`_.
For example, to use an `Easy Connect string
<https://download.oracle.com/ocomdocs/global/Oracle-Net-Easy-Connect-Plus.pdf>`_
with a timeout to prevent connection establishment from hanging if the network
transport to the database cannot be established in 30 seconds, and also setting
a keep-alive time of 60 seconds to stop idle network connections from being
terminated by a firewall::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
},
)
The Easy Connect syntax has been enhanced during the life of Oracle Database.
Review the documentation for your database version. The current documentation
is at `Understanding the Easy Connect Naming Method
<https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B0437826-43C1-49EC-A94D-B650B6A4A6EE>`_.
The general syntax is similar to:
.. sourcecode:: text
[[protocol:]//]host[:port][/[service_name]][?parameter_name=value{&parameter_name=value}]
Note that although the SQLAlchemy URL syntax ``hostname:port/dbname`` looks
like Oracle's Easy Connect syntax, it is different. SQLAlchemy's URL requires a
system identifier (SID) for the ``dbname`` component::
engine = create_engine("oracle+oracledb://scott:tiger@hostname:port/sid")
Easy Connect syntax does not support SIDs. It uses service names, which are
the preferred choice for connecting to Oracle Database.
Passing python-oracledb connect arguments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Other python-oracledb driver `connection options
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.connect>`_
can be passed in ``connect_args``. For example::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "hostname:port/myservice",
"events": True,
"mode": oracledb.AUTH_MODE_SYSDBA,
},
)
Connecting with tnsnames.ora TNS aliases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If no port, database name, or service name is provided, the dialect will use an
Oracle Database DSN "connection string". This takes the "hostname" portion of
the URL as the data source name. For example, if the ``tnsnames.ora`` file
contains a `TNS Alias
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#tns-aliases-for-connection-strings>`_
of ``myalias`` as below:
.. sourcecode:: text
myalias =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = mymachine.example.com)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = orclpdb1)
)
)
The python-oracledb dialect connects to this database service when ``myalias`` is the
hostname portion of the URL, without specifying a port, database name or
``service_name``::
engine = create_engine("oracle+oracledb://scott:tiger@myalias")
Connecting to Oracle Autonomous Database
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Users of Oracle Autonomous Database should either use the TNS Alias URL
shown above, or pass the TNS Alias as the ``dsn`` key value in a
:paramref:`_sa.create_engine.connect_args` dictionary.
If Oracle Autonomous Database is configured for mutual TLS ("mTLS")
connections, then additional configuration is required as shown in `Connecting
to Oracle Cloud Autonomous Databases
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#connecting-to-oracle-cloud-autonomous-databases>`_. In
summary, Thick mode users should configure file locations and set the wallet
path in ``sqlnet.ora`` appropriately::
e = create_engine(
"oracle+oracledb://@",
thick_mode={
# directory containing tnsnames.ora and cwallet.so
"config_dir": "/opt/oracle/wallet_dir",
},
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "mydb_high",
},
)
Thin mode users of mTLS should pass the appropriate directories and PEM wallet
password when creating the engine, similar to::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "mydb_high",
"config_dir": "/opt/oracle/wallet_dir", # directory containing tnsnames.ora
"wallet_location": "/opt/oracle/wallet_dir", # directory containing ewallet.pem
"wallet_password": "top secret", # password for the PEM file
},
)
Typically ``config_dir`` and ``wallet_location`` are the same directory, which
is where the Oracle Autonomous Database wallet zip file was extracted. Note
this directory should be protected.
Connection Pooling
------------------
Applications with multiple concurrent users should use connection pooling. A
minimal sized connection pool is also beneficial for long-running, single-user
applications that do not frequently use a connection.
The python-oracledb driver provides its own connection pool implementation that
may be used in place of SQLAlchemy's pooling functionality. The driver pool
gives support for high availability features such as dead connection detection,
connection draining for planned database downtime, support for Oracle
Application Continuity and Transparent Application Continuity, and gives
support for `Database Resident Connection Pooling (DRCP)
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
To take advantage of python-oracledb's pool, use the
:paramref:`_sa.create_engine.creator` parameter to provide a function that
returns a new connection, along with setting
:paramref:`_sa.create_engine.pool_class` to ``NullPool`` to disable
SQLAlchemy's pooling::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use the optional python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
)
engine = create_engine(
"oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
)
The above engine may then be used normally. Internally, python-oracledb handles
connection pooling::
with engine.connect() as conn:
print(conn.scalar(text("select 1 from dual")))
Refer to the python-oracledb documentation for `oracledb.create_pool()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.create_pool>`_
for the arguments that can be used when creating a connection pool.
.. _drcp:
Using Oracle Database Resident Connection Pooling (DRCP)
--------------------------------------------------------
When using Oracle Database's Database Resident Connection Pooling (DRCP), the
best practice is to specify a connection class and "purity". Refer to the
`python-oracledb documentation on DRCP
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
For example::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use the optional python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
cclass="MYCLASS",
purity=oracledb.PURITY_SELF,
)
engine = create_engine(
"oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
)
The above engine may then be used normally where python-oracledb handles
application connection pooling and Oracle Database additionally uses DRCP::
with engine.connect() as conn:
print(conn.scalar(text("select 1 from dual")))
If you wish to use different connection classes or purities for different
connections, then wrap ``pool.acquire()``::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
cclass="MYCLASS",
purity=oracledb.PURITY_SELF,
)
def creator():
return pool.acquire(cclass="MYOTHERCLASS", purity=oracledb.PURITY_NEW)
engine = create_engine(
"oracle+oracledb://", creator=creator, poolclass=NullPool
)
Engine Options consumed by the SQLAlchemy oracledb dialect outside of the driver
--------------------------------------------------------------------------------
There are also options that are consumed by the SQLAlchemy oracledb dialect
itself. These options are always passed directly to :func:`_sa.create_engine`,
such as::
e = create_engine("oracle+oracledb://user:pass@tnsalias", arraysize=500)
The parameters accepted by the oracledb dialect are as follows:
* ``arraysize`` - set the driver cursor.arraysize value. It defaults to
``None``, indicating that the driver default value of 100 should be used.
This setting controls how many rows are buffered when fetching rows, and can
have a significant effect on performance if increased for queries that return
large numbers of rows.
.. versionchanged:: 2.0.26 - changed the default value from 50 to None,
to use the default value of the driver itself.
* ``auto_convert_lobs`` - defaults to True; See :ref:`oracledb_lob`.
* ``coerce_to_decimal`` - see :ref:`oracledb_numeric` for detail.
* ``encoding_errors`` - see :ref:`oracledb_unicode_encoding_errors` for detail.
.. _oracledb_unicode:
Unicode
-------
As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings.
Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In python-oracledb, the encoding used for all character data is "UTF-8".
Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Core expression language handles unicode data by use of the
:class:`.Unicode` and :class:`.UnicodeText` datatypes. These types correspond
to the VARCHAR2 and CLOB Oracle Database datatypes by default. When using
these datatypes with Unicode data, it is expected that the database is
configured with a Unicode-aware character set so that the VARCHAR2 and CLOB
datatypes can accommodate the data.
In the case that Oracle Database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`, which will cause
the SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle Database
datatypes unless the ``use_nchar_for_unicode=True`` is passed to the dialect
when :func:`_sa.create_engine` is called.
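For example, the flag is passed directly to :func:`_sa.create_engine` (an
illustrative sketch; the connection details are placeholders)::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias",
        use_nchar_for_unicode=True,
    )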
.. _oracledb_unicode_encoding_errors:
Encoding Errors
^^^^^^^^^^^^^^^
For the unusual case that data in Oracle Database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`_ function, and
is passed both via python-oracledb's ``encodingErrors`` parameter consumed by
``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the
python-oracledb dialect makes use of both under different circumstances.
.. versionadded:: 1.3.11
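For example (an illustrative sketch with placeholder connection details), any
error handler accepted by Python's ``bytes.decode()``, such as ``"replace"`` or
``"ignore"``, may be given::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias",
        encoding_errors="replace",
    )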
.. _oracledb_setinputsizes:
Fine grained control over python-oracledb data binding with setinputsizes
-------------------------------------------------------------------------
The python-oracledb DBAPI has a deep and fundamental reliance upon the usage of
the DBAPI ``setinputsizes()`` call. The purpose of this call is to establish
the datatypes that are bound to a SQL statement for Python values being passed
as parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the python-oracledb DBAPI relies upon it heavily in
its interactions with the Oracle Database, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.
Users of the oracledb dialect are **strongly encouraged** to read through
python-oracledb's list of built-in datatype symbols at `Database Types
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#database-types>`_.
Note that in some cases, significant performance degradation can occur when
using these types versus not using them.
On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can
be used both for runtime visibility (e.g. logging) of the setinputsizes step as
well as to fully control how ``setinputsizes()`` is used on a per-statement
basis.
.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.do_setinputsizes`
Example 1 - logging all setinputsizes calls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example illustrates how to log the intermediary values from a
SQLAlchemy perspective before they are converted to the raw ``setinputsizes()``
parameter dictionary. The keys of the dictionary are :class:`.BindParameter`
objects which have a ``.key`` and a ``.type`` attribute::
from sqlalchemy import create_engine, event
engine = create_engine(
"oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
)
@event.listens_for(engine, "do_setinputsizes")
def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in inputsizes.items():
log.info(
"Bound parameter name: %s SQLAlchemy type: %r DBAPI object: %s",
bindparam.key,
bindparam.type,
dbapitype,
)
Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For performance, fetching LOB datatypes from Oracle Database is set by default
for the ``Text`` type within SQLAlchemy. This setting can be modified as
follows::
from sqlalchemy import create_engine, event
from oracledb import CLOB
engine = create_engine(
"oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
)
@event.listens_for(engine, "do_setinputsizes")
def _remove_clob(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in list(inputsizes.items()):
if dbapitype is CLOB:
del inputsizes[bindparam]
.. _oracledb_lob:
LOB Datatypes
--------------
LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and
BLOB. Oracle Database can efficiently return these datatypes as a single
buffer. SQLAlchemy makes use of type handlers to do this by default.
To disable the use of the type handlers and deliver LOB objects as classic
buffered objects with a ``read()`` method, the parameter
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`.
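For example (an illustrative sketch with placeholder connection details)::

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias",
        auto_convert_lobs=False,
    )

LOB values fetched on such an engine are then delivered as driver LOB objects
whose contents are retrieved with ``read()``.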
.. _oracledb_returning:
RETURNING Support
-----------------
The oracledb dialect implements RETURNING using OUT parameters. The dialect
supports RETURNING fully.
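For example, an illustrative sketch using a hypothetical table named
``my_table`` and an already-created ``engine``::

    from sqlalchemy import column, insert, table

    my_table = table("my_table", column("id"), column("data"))

    stmt = insert(my_table).values(data="some data").returning(my_table.c.id)

    with engine.connect() as conn:
        new_id = conn.execute(stmt).scalar_one()
        conn.commit()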
Two Phase Transaction Support
-----------------------------
Two phase transactions are fully supported with python-oracledb. (Thin mode
requires python-oracledb 2.3). APIs for two phase transactions are provided at
the Core level via :meth:`_engine.Connection.begin_twophase` and
:paramref:`_orm.Session.twophase` for transparent ORM use.
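For example, a minimal Core-level sketch (the ``engine`` and the statement are
placeholders; an xid may also be passed explicitly to
:meth:`_engine.Connection.begin_twophase`)::

    from sqlalchemy import text

    with engine.connect() as conn:
        tx = conn.begin_twophase()  # the dialect generates an xid
        conn.execute(text("INSERT INTO my_table (data) VALUES ('x')"))
        tx.prepare()
        tx.commit()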
.. versionchanged:: 2.0.32 added support for two phase transactions
.. _oracledb_numeric:
Precision Numerics
------------------
SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle Database, the ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle
Database-specific :class:`_oracle.NUMBER` type takes this into account as well.
The oracledb dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the particular flavor of :class:`.Numeric` in
use, and are also used when no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle Database may send incomplete or ambiguous
information about the numeric types being returned, such as a query where the
numeric types are buried under multiple levels of subquery. The type handlers
do their best to make the right decision in all cases, deferring to the
underlying python-oracledb DBAPI for all those cases where the driver can make
the best decision.
When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
values which specify precision and scale as Python ``Decimal`` objects. To
disable this coercion to decimal for performance reasons, pass the flag
``coerce_to_decimal=False`` to :func:`_sa.create_engine`::
engine = create_engine(
"oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False
)
The ``coerce_to_decimal`` flag only impacts the results of plain string
SQL statements that are not otherwise associated with a :class:`.Numeric`
SQLAlchemy type (or a subclass of such).
.. versionchanged:: 1.2 The numeric handling system for the oracle dialects has
been reworked to take advantage of newer driver features as well as better
integration of outputtypehandlers.
.. versionadded:: 2.0.0 added support for the python-oracledb driver.
""" # noqa
from __future__ import annotations
import collections
import re
from typing import Any
from typing import TYPE_CHECKING
from . import cx_oracle as _cx_oracle
from ... import exc
from ... import pool
from ...connectors.asyncio import AsyncAdapt_dbapi_connection
from ...connectors.asyncio import AsyncAdapt_dbapi_cursor
from ...connectors.asyncio import AsyncAdapt_dbapi_ss_cursor
from ...connectors.asyncio import AsyncAdaptFallback_dbapi_connection
from ...engine import default
from ...util import asbool
from ...util import await_fallback
from ...util import await_only
if TYPE_CHECKING:
from oracledb import AsyncConnection
from oracledb import AsyncCursor
class OracleExecutionContext_oracledb(
_cx_oracle.OracleExecutionContext_cx_oracle
):
pass
class OracleDialect_oracledb(_cx_oracle.OracleDialect_cx_oracle):
supports_statement_cache = True
execution_ctx_cls = OracleExecutionContext_oracledb
driver = "oracledb"
_min_version = (1,)
def __init__(
self,
auto_convert_lobs=True,
coerce_to_decimal=True,
arraysize=None,
encoding_errors=None,
thick_mode=None,
**kwargs,
):
super().__init__(
auto_convert_lobs,
coerce_to_decimal,
arraysize,
encoding_errors,
**kwargs,
)
if self.dbapi is not None and (
thick_mode or isinstance(thick_mode, dict)
):
kw = thick_mode if isinstance(thick_mode, dict) else {}
self.dbapi.init_oracle_client(**kw)
@classmethod
def import_dbapi(cls):
import oracledb
return oracledb
@classmethod
def is_thin_mode(cls, connection):
return connection.connection.dbapi_connection.thin
@classmethod
def get_async_dialect_cls(cls, url):
return OracleDialectAsync_oracledb
def _load_version(self, dbapi_module):
version = (0, 0, 0)
if dbapi_module is not None:
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", dbapi_module.version)
if m:
version = tuple(
int(x) for x in m.group(1, 2, 3) if x is not None
)
self.oracledb_ver = version
if (
self.oracledb_ver > (0, 0, 0)
and self.oracledb_ver < self._min_version
):
raise exc.InvalidRequestError(
f"oracledb version {self._min_version} and above are supported"
)
def do_begin_twophase(self, connection, xid):
conn_xis = connection.connection.xid(*xid)
connection.connection.tpc_begin(conn_xis)
connection.connection.info["oracledb_xid"] = conn_xis
def do_prepare_twophase(self, connection, xid):
should_commit = connection.connection.tpc_prepare()
connection.info["oracledb_should_commit"] = should_commit
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if recover:
conn_xid = connection.connection.xid(*xid)
else:
conn_xid = None
connection.connection.tpc_rollback(conn_xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
conn_xid = None
if not is_prepared:
should_commit = connection.connection.tpc_prepare()
elif recover:
conn_xid = connection.connection.xid(*xid)
should_commit = True
else:
should_commit = connection.info["oracledb_should_commit"]
if should_commit:
connection.connection.tpc_commit(conn_xid)
def do_recover_twophase(self, connection):
return [
# oracledb seems to return bytes
(
fi,
gti.decode() if isinstance(gti, bytes) else gti,
bq.decode() if isinstance(bq, bytes) else bq,
)
for fi, gti, bq in connection.connection.tpc_recover()
]
def _check_max_identifier_length(self, connection):
if self.oracledb_ver >= (2, 5):
max_len = connection.connection.max_identifier_length
if max_len is not None:
return max_len
return super()._check_max_identifier_length(connection)
class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
_cursor: AsyncCursor
__slots__ = ()
@property
def outputtypehandler(self):
return self._cursor.outputtypehandler
@outputtypehandler.setter
def outputtypehandler(self, value):
self._cursor.outputtypehandler = value
def var(self, *args, **kwargs):
return self._cursor.var(*args, **kwargs)
def close(self):
self._rows.clear()
self._cursor.close()
def setinputsizes(self, *args: Any, **kwargs: Any) -> Any:
return self._cursor.setinputsizes(*args, **kwargs)
def _aenter_cursor(self, cursor: AsyncCursor) -> AsyncCursor:
try:
return cursor.__enter__()
except Exception as error:
self._adapt_connection._handle_exception(error)
async def _execute_async(self, operation, parameters):
# override to not use mutex, oracledb already has a mutex
if parameters is None:
result = await self._cursor.execute(operation)
else:
result = await self._cursor.execute(operation, parameters)
if self._cursor.description and not self.server_side:
self._rows = collections.deque(await self._cursor.fetchall())
return result
async def _executemany_async(
self,
operation,
seq_of_parameters,
):
# override to not use mutex, oracledb already has a mutex
return await self._cursor.executemany(operation, seq_of_parameters)
def __enter__(self):
return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
self.close()
class AsyncAdapt_oracledb_ss_cursor(
AsyncAdapt_dbapi_ss_cursor, AsyncAdapt_oracledb_cursor
):
__slots__ = ()
def close(self) -> None:
if self._cursor is not None:
self._cursor.close()
self._cursor = None # type: ignore
class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection):
_connection: AsyncConnection
__slots__ = ()
thin = True
_cursor_cls = AsyncAdapt_oracledb_cursor
_ss_cursor_cls = None
@property
def autocommit(self):
return self._connection.autocommit
@autocommit.setter
def autocommit(self, value):
self._connection.autocommit = value
@property
def outputtypehandler(self):
return self._connection.outputtypehandler
@outputtypehandler.setter
def outputtypehandler(self, value):
self._connection.outputtypehandler = value
@property
def version(self):
return self._connection.version
@property
def stmtcachesize(self):
return self._connection.stmtcachesize
@stmtcachesize.setter
def stmtcachesize(self, value):
self._connection.stmtcachesize = value
@property
def max_identifier_length(self):
return self._connection.max_identifier_length
def cursor(self):
return AsyncAdapt_oracledb_cursor(self)
def ss_cursor(self):
return AsyncAdapt_oracledb_ss_cursor(self)
def xid(self, *args: Any, **kwargs: Any) -> Any:
return self._connection.xid(*args, **kwargs)
def tpc_begin(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_begin(*args, **kwargs))
def tpc_commit(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_commit(*args, **kwargs))
def tpc_prepare(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_prepare(*args, **kwargs))
def tpc_recover(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_recover(*args, **kwargs))
def tpc_rollback(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_rollback(*args, **kwargs))
class AsyncAdaptFallback_oracledb_connection(
AsyncAdaptFallback_dbapi_connection, AsyncAdapt_oracledb_connection
):
__slots__ = ()
class OracledbAdaptDBAPI:
def __init__(self, oracledb) -> None:
self.oracledb = oracledb
for k, v in self.oracledb.__dict__.items():
if k != "connect":
self.__dict__[k] = v
def connect(self, *arg, **kw):
async_fallback = kw.pop("async_fallback", False)
creator_fn = kw.pop("async_creator_fn", self.oracledb.connect_async)
if asbool(async_fallback):
return AsyncAdaptFallback_oracledb_connection(
self, await_fallback(creator_fn(*arg, **kw))
)
else:
return AsyncAdapt_oracledb_connection(
self, await_only(creator_fn(*arg, **kw))
)
class OracleExecutionContextAsync_oracledb(OracleExecutionContext_oracledb):
# restore default create cursor
create_cursor = default.DefaultExecutionContext.create_cursor
def create_default_cursor(self):
# copy of OracleExecutionContext_cx_oracle.create_cursor
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def create_server_side_cursor(self):
c = self._dbapi_connection.ss_cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
class OracleDialectAsync_oracledb(OracleDialect_oracledb):
is_async = True
supports_server_side_cursors = True
supports_statement_cache = True
execution_ctx_cls = OracleExecutionContextAsync_oracledb
_min_version = (2,)
# thick_mode mode is not supported by asyncio, oracledb will raise
@classmethod
def import_dbapi(cls):
import oracledb
return OracledbAdaptDBAPI(oracledb)
@classmethod
def get_pool_class(cls, url):
async_fallback = url.query.get("async_fallback", False)
if asbool(async_fallback):
return pool.FallbackAsyncAdaptedQueuePool
else:
return pool.AsyncAdaptedQueuePool
def get_driver_connection(self, connection):
return connection._connection
dialect = OracleDialect_oracledb
dialect_async = OracleDialectAsync_oracledb


@@ -0,0 +1,220 @@
# dialects/oracle/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from ... import create_engine
from ... import exc
from ... import inspect
from ...engine import url as sa_url
from ...testing.provision import configure_follower
from ...testing.provision import create_db
from ...testing.provision import drop_all_schema_objects_post_tables
from ...testing.provision import drop_all_schema_objects_pre_tables
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import set_default_schema_on_connection
from ...testing.provision import stop_test_class_outside_fixtures
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import update_db_opts
@create_db.for_db("oracle")
def _oracle_create_db(cfg, eng, ident):
# NOTE: make sure you've run "ALTER DATABASE default tablespace users" or
# similar, so that the default tablespace is not "system"; reflection will
# fail otherwise
with eng.begin() as conn:
conn.exec_driver_sql("create user %s identified by xe" % ident)
conn.exec_driver_sql("create user %s_ts1 identified by xe" % ident)
conn.exec_driver_sql("create user %s_ts2 identified by xe" % ident)
conn.exec_driver_sql("grant dba to %s" % (ident,))
conn.exec_driver_sql("grant unlimited tablespace to %s" % ident)
conn.exec_driver_sql("grant unlimited tablespace to %s_ts1" % ident)
conn.exec_driver_sql("grant unlimited tablespace to %s_ts2" % ident)
# these are needed to create materialized views
conn.exec_driver_sql("grant create table to %s" % ident)
conn.exec_driver_sql("grant create table to %s_ts1" % ident)
conn.exec_driver_sql("grant create table to %s_ts2" % ident)
@configure_follower.for_db("oracle")
def _oracle_configure_follower(config, ident):
config.test_schema = "%s_ts1" % ident
config.test_schema_2 = "%s_ts2" % ident
def _ora_drop_ignore(conn, dbname):
try:
conn.exec_driver_sql("drop user %s cascade" % dbname)
log.info("Reaped db: %s", dbname)
return True
except exc.DatabaseError as err:
log.warning("couldn't drop db: %s", err)
return False
@drop_all_schema_objects_pre_tables.for_db("oracle")
def _ora_drop_all_schema_objects_pre_tables(cfg, eng):
_purge_recyclebin(eng)
_purge_recyclebin(eng, cfg.test_schema)
@drop_all_schema_objects_post_tables.for_db("oracle")
def _ora_drop_all_schema_objects_post_tables(cfg, eng):
with eng.begin() as conn:
for syn in conn.dialect._get_synonyms(conn, None, None, None):
conn.exec_driver_sql(f"drop synonym {syn['synonym_name']}")
for syn in conn.dialect._get_synonyms(
conn, cfg.test_schema, None, None
):
conn.exec_driver_sql(
f"drop synonym {cfg.test_schema}.{syn['synonym_name']}"
)
for tmp_table in inspect(conn).get_temp_table_names():
conn.exec_driver_sql(f"drop table {tmp_table}")
@drop_db.for_db("oracle")
def _oracle_drop_db(cfg, eng, ident):
with eng.begin() as conn:
# cx_Oracle seems to occasionally leak open connections when a large
# suite is run, even if we confirm we have zero references to
# connection objects.
# while there is a "kill session" command in Oracle Database,
# it unfortunately does not release the connection sufficiently.
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)
_ora_drop_ignore(conn, "%s_ts2" % ident)
@stop_test_class_outside_fixtures.for_db("oracle")
def _ora_stop_test_class_outside_fixtures(config, db, cls):
try:
_purge_recyclebin(db)
except exc.DatabaseError as err:
log.warning("purge recyclebin command failed: %s", err)
# clear statement cache on all connections that were used
# https://github.com/oracle/python-cx_Oracle/issues/519
for cx_oracle_conn in _all_conns:
try:
sc = cx_oracle_conn.stmtcachesize
except db.dialect.dbapi.InterfaceError:
# connection closed
pass
else:
cx_oracle_conn.stmtcachesize = 0
cx_oracle_conn.stmtcachesize = sc
_all_conns.clear()
def _purge_recyclebin(eng, schema=None):
with eng.begin() as conn:
if schema is None:
# run magic command to get rid of identity sequences
# https://floo.bar/2019/11/29/drop-the-underlying-sequence-of-an-identity-column/ # noqa: E501
conn.exec_driver_sql("purge recyclebin")
else:
# per user: https://community.oracle.com/tech/developers/discussion/2255402/how-to-clear-dba-recyclebin-for-a-particular-user # noqa: E501
for owner, object_name, type_ in conn.exec_driver_sql(
"select owner, object_name,type from "
"dba_recyclebin where owner=:schema and type='TABLE'",
{"schema": conn.dialect.denormalize_name(schema)},
).all():
conn.exec_driver_sql(f'purge {type_} {owner}."{object_name}"')
_all_conns = set()
@post_configure_engine.for_db("oracle")
def _oracle_post_configure_engine(url, engine, follower_ident):
from sqlalchemy import event
@event.listens_for(engine, "checkout")
def checkout(dbapi_con, con_record, con_proxy):
_all_conns.add(dbapi_con)
@event.listens_for(engine, "checkin")
def checkin(dbapi_connection, connection_record):
# work around cx_Oracle issue:
# https://github.com/oracle/python-cx_Oracle/issues/530
# invalidate oracle connections that had 2pc set up
if "cx_oracle_xid" in connection_record.info:
connection_record.invalidate()
@run_reap_dbs.for_db("oracle")
def _reap_oracle_dbs(url, idents):
log.info("db reaper connecting to %r", url)
eng = create_engine(url)
with eng.begin() as conn:
log.info("identifiers in file: %s", ", ".join(idents))
to_reap = conn.exec_driver_sql(
"select u.username from all_users u where username "
"like 'TEST_%' and not exists (select username "
"from v$session where username=u.username)"
)
all_names = {username.lower() for (username,) in to_reap}
to_drop = set()
for name in all_names:
if name.endswith("_ts1") or name.endswith("_ts2"):
continue
elif name in idents:
to_drop.add(name)
if "%s_ts1" % name in all_names:
to_drop.add("%s_ts1" % name)
if "%s_ts2" % name in all_names:
to_drop.add("%s_ts2" % name)
dropped = total = 0
for total, username in enumerate(to_drop, 1):
if _ora_drop_ignore(conn, username):
dropped += 1
log.info(
"Dropped %d out of %d stale databases detected", dropped, total
)
@follower_url_from_main.for_db("oracle")
def _oracle_follower_url_from_main(url, ident):
url = sa_url.make_url(url)
return url.set(username=ident, password="xe")
@temp_table_keyword_args.for_db("oracle")
def _oracle_temp_table_keyword_args(cfg, eng):
return {
"prefixes": ["GLOBAL TEMPORARY"],
"oracle_on_commit": "PRESERVE ROWS",
}
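# Illustrative note (not part of the original module): a Table created with
# these keyword arguments, i.e. prefixes=["GLOBAL TEMPORARY"] and
# oracle_on_commit="PRESERVE ROWS", emits DDL along the lines of
# "CREATE GLOBAL TEMPORARY TABLE ... ON COMMIT PRESERVE ROWS".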
@set_default_schema_on_connection.for_db("oracle")
def _oracle_set_default_schema_on_connection(
cfg, dbapi_connection, schema_name
):
cursor = dbapi_connection.cursor()
cursor.execute("ALTER SESSION SET CURRENT_SCHEMA=%s" % schema_name)
cursor.close()
@update_db_opts.for_db("oracle")
def _update_db_opts(db_url, db_opts, options):
"""Set database options (db_opts) for a test database that we created."""
if (
options.oracledb_thick_mode
and sa_url.make_url(db_url).get_driver_name() == "oracledb"
):
db_opts["thick_mode"] = True

View File

@@ -0,0 +1,316 @@
# dialects/oracle/types.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from __future__ import annotations
import datetime as dt
from typing import Optional
from typing import Type
from typing import TYPE_CHECKING
from ... import exc
from ...sql import sqltypes
from ...types import NVARCHAR
from ...types import VARCHAR
if TYPE_CHECKING:
from ...engine.interfaces import Dialect
from ...sql.type_api import _LiteralProcessorType
class RAW(sqltypes._Binary):
__visit_name__ = "RAW"
OracleRaw = RAW
class NCLOB(sqltypes.Text):
__visit_name__ = "NCLOB"
class VARCHAR2(VARCHAR):
__visit_name__ = "VARCHAR2"
NVARCHAR2 = NVARCHAR
class NUMBER(sqltypes.Numeric, sqltypes.Integer):
__visit_name__ = "NUMBER"
def __init__(self, precision=None, scale=None, asdecimal=None):
if asdecimal is None:
asdecimal = bool(scale and scale > 0)
super().__init__(precision=precision, scale=scale, asdecimal=asdecimal)
def adapt(self, impltype):
ret = super().adapt(impltype)
# leave a hint for the DBAPI handler
ret._is_oracle_number = True
return ret
@property
def _type_affinity(self):
if bool(self.scale and self.scale > 0):
return sqltypes.Numeric
else:
return sqltypes.Integer
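# A brief illustrative sketch (not part of the dialect): how the scale
# argument drives both the asdecimal default and the type affinity above.
def _example_number_affinity():  # pragma: no cover
    with_scale = NUMBER(10, 2)  # asdecimal=True, affinity sqltypes.Numeric
    no_scale = NUMBER(10)       # asdecimal=False, affinity sqltypes.Integer
    return with_scale._type_affinity, no_scale._type_affinity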
class FLOAT(sqltypes.FLOAT):
"""Oracle Database FLOAT.
This is the same as :class:`_sqltypes.FLOAT` except that
an Oracle Database-specific :paramref:`_oracle.FLOAT.binary_precision`
parameter is accepted, and
the :paramref:`_sqltypes.Float.precision` parameter is not accepted.
Oracle Database FLOAT types indicate precision in terms of "binary
precision", which defaults to 126. For a REAL type, the value is 63. This
parameter does not cleanly map to a specific number of decimal places but
is roughly equivalent to the desired number of decimal places divided by
0.30103.
.. versionadded:: 2.0
"""
__visit_name__ = "FLOAT"
def __init__(
self,
binary_precision=None,
asdecimal=False,
decimal_return_scale=None,
):
r"""
Construct a FLOAT
:param binary_precision: Oracle Database binary precision value to be
rendered in DDL. This may be approximated to the number of decimal
characters using the formula "decimal precision = 0.30103 * binary
precision". The default value used by Oracle Database for FLOAT /
DOUBLE PRECISION is 126.
:param asdecimal: See :paramref:`_sqltypes.Float.asdecimal`
:param decimal_return_scale: See
:paramref:`_sqltypes.Float.decimal_return_scale`
"""
super().__init__(
asdecimal=asdecimal, decimal_return_scale=decimal_return_scale
)
self.binary_precision = binary_precision
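# Illustrative sketch only: applying the "decimal precision = 0.30103 *
# binary precision" rule of thumb documented above.  The value 16 is an
# arbitrary choice for demonstration.
def _example_float_binary_precision():  # pragma: no cover
    f = FLOAT(binary_precision=16)
    # 16 * 0.30103 ~= 4.8, i.e. roughly 5 significant decimal digits
    return f.binary_precision * 0.30103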
class BINARY_DOUBLE(sqltypes.Double):
"""Implement the Oracle ``BINARY_DOUBLE`` datatype.
This datatype differs from the Oracle Database ``DOUBLE PRECISION`` datatype
in that it
delivers a true 8-byte FP value. The datatype may be combined with a
generic :class:`.Double` datatype using :meth:`.TypeEngine.with_variant`.
.. seealso::
:ref:`oracle_float_support`
"""
__visit_name__ = "BINARY_DOUBLE"
class BINARY_FLOAT(sqltypes.Float):
"""Implement the Oracle ``BINARY_FLOAT`` datatype.
This datatype differs from the Oracle ``FLOAT`` datatype in that it
delivers a true 4-byte FP value. The datatype may be combined with a
generic :class:`.Float` datatype using :meth:`.TypeEngine.with_variant`.
.. seealso::
:ref:`oracle_float_support`
"""
__visit_name__ = "BINARY_FLOAT"
class BFILE(sqltypes.LargeBinary):
__visit_name__ = "BFILE"
class LONG(sqltypes.Text):
__visit_name__ = "LONG"
class _OracleDateLiteralRender:
def _literal_processor_datetime(self, dialect):
def process(value):
if getattr(value, "microsecond", None):
value = (
f"""TO_TIMESTAMP"""
f"""('{value.isoformat().replace("T", " ")}', """
"""'YYYY-MM-DD HH24:MI:SS.FF')"""
)
else:
value = (
f"""TO_DATE"""
f"""('{value.isoformat().replace("T", " ")}', """
"""'YYYY-MM-DD HH24:MI:SS')"""
)
return value
return process
def _literal_processor_date(self, dialect):
def process(value):
if getattr(value, "microsecond", None):
value = (
f"""TO_TIMESTAMP"""
f"""('{value.isoformat().split("T")[0]}', """
"""'YYYY-MM-DD')"""
)
else:
value = (
f"""TO_DATE"""
f"""('{value.isoformat().split("T")[0]}', """
"""'YYYY-MM-DD')"""
)
return value
return process
class DATE(_OracleDateLiteralRender, sqltypes.DateTime):
"""Provide the Oracle Database DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this reflects the fact that the Oracle Database
``DATE`` type supports a time value.
"""
__visit_name__ = "DATE"
def literal_processor(self, dialect):
return self._literal_processor_datetime(dialect)
def _compare_type_affinity(self, other):
return other._type_affinity in (sqltypes.DateTime, sqltypes.Date)
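# Illustrative sketch only: what the literal processors above render for a
# datetime without microseconds.  The dialect argument is unused by the
# processor itself, so None is passed purely for demonstration.
def _example_date_literal():  # pragma: no cover
    process = DATE().literal_processor(None)
    # returns "TO_DATE('2024-01-15 09:30:00', 'YYYY-MM-DD HH24:MI:SS')"
    return process(dt.datetime(2024, 1, 15, 9, 30, 0))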
class _OracleDate(_OracleDateLiteralRender, sqltypes.Date):
def literal_processor(self, dialect):
return self._literal_processor_date(dialect)
class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
__visit_name__ = "INTERVAL"
def __init__(self, day_precision=None, second_precision=None):
"""Construct an INTERVAL.
Note that only DAY TO SECOND intervals are currently supported.
This is due to a lack of support for YEAR TO MONTH intervals
within available DBAPIs.
:param day_precision: the day precision value. This is the number of
digits to store for the day field. Defaults to "2".
:param second_precision: the second precision value. This is the
number of digits to store for the fractional seconds field.
Defaults to "6".
"""
self.day_precision = day_precision
self.second_precision = second_precision
@classmethod
def _adapt_from_generic_interval(cls, interval):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@classmethod
def adapt_emulated_to_native(
cls, interval: sqltypes.Interval, **kw # type: ignore[override]
):
return INTERVAL(
day_precision=interval.day_precision,
second_precision=interval.second_precision,
)
@property
def _type_affinity(self):
return sqltypes.Interval
def as_generic(self, allow_nulltype=False):
return sqltypes.Interval(
native=True,
second_precision=self.second_precision,
day_precision=self.day_precision,
)
@property
def python_type(self) -> Type[dt.timedelta]:
return dt.timedelta
def literal_processor(
self, dialect: Dialect
) -> Optional[_LiteralProcessorType[dt.timedelta]]:
def process(value: dt.timedelta) -> str:
return f"NUMTODSINTERVAL({value.total_seconds()}, 'SECOND')"
return process
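# Illustrative sketch only: the precision arguments described in the
# docstring above, plus the NUMTODSINTERVAL rendering performed by
# literal_processor().  The dialect argument is unused, so None is passed
# purely for demonstration.
def _example_interval_literal():  # pragma: no cover
    process = INTERVAL(day_precision=2, second_precision=6).literal_processor(
        None
    )
    # returns "NUMTODSINTERVAL(93600.0, 'SECOND')"
    return process(dt.timedelta(days=1, hours=2))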
class TIMESTAMP(sqltypes.TIMESTAMP):
"""Oracle Database implementation of ``TIMESTAMP``, which supports
additional Oracle Database-specific modes.
.. versionadded:: 2.0
"""
def __init__(self, timezone: bool = False, local_timezone: bool = False):
"""Construct a new :class:`_oracle.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
use Oracle Database's ``TIMESTAMP WITH TIME ZONE`` datatype.
:param local_timezone: boolean. Indicates that the TIMESTAMP type
should use Oracle Database's ``TIMESTAMP WITH LOCAL TIME ZONE``
datatype.
"""
if timezone and local_timezone:
raise exc.ArgumentError(
"timezone and local_timezone are mutually exclusive"
)
super().__init__(timezone=timezone)
self.local_timezone = local_timezone
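# Illustrative sketch only: the three rendering modes selected by the flags
# documented above; passing both flags raises ArgumentError.
def _example_timestamp_modes():  # pragma: no cover
    plain = TIMESTAMP()                        # TIMESTAMP
    with_tz = TIMESTAMP(timezone=True)         # TIMESTAMP WITH TIME ZONE
    with_ltz = TIMESTAMP(local_timezone=True)  # TIMESTAMP WITH LOCAL TIME ZONE
    return plain, with_tz, with_ltz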
class ROWID(sqltypes.TypeEngine):
"""Oracle Database ROWID type.
When used in a cast() or similar, generates ROWID.
"""
__visit_name__ = "ROWID"
class _OracleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER