Update 2025-04-13_16:25:39

Author: root
Date: 2025-04-13 16:25:41 +02:00
Commit: 4c711360d3
2979 changed files with 666585 additions and 0 deletions


@@ -0,0 +1,57 @@
# dialects/sqlite/__init__.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from . import aiosqlite # noqa
from . import base # noqa
from . import pysqlcipher # noqa
from . import pysqlite # noqa
from .base import BLOB
from .base import BOOLEAN
from .base import CHAR
from .base import DATE
from .base import DATETIME
from .base import DECIMAL
from .base import FLOAT
from .base import INTEGER
from .base import JSON
from .base import NUMERIC
from .base import REAL
from .base import SMALLINT
from .base import TEXT
from .base import TIME
from .base import TIMESTAMP
from .base import VARCHAR
from .dml import Insert
from .dml import insert
# default dialect
base.dialect = dialect = pysqlite.dialect
__all__ = (
"BLOB",
"BOOLEAN",
"CHAR",
"DATE",
"DATETIME",
"DECIMAL",
"FLOAT",
"INTEGER",
"JSON",
"NUMERIC",
"SMALLINT",
"TEXT",
"TIME",
"TIMESTAMP",
"VARCHAR",
"REAL",
"Insert",
"insert",
"dialect",
)


@@ -0,0 +1,421 @@
# dialects/sqlite/aiosqlite.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
.. dialect:: sqlite+aiosqlite
:name: aiosqlite
:dbapi: aiosqlite
:connectstring: sqlite+aiosqlite:///file_path
:url: https://pypi.org/project/aiosqlite/
The aiosqlite dialect provides support for the SQLAlchemy asyncio interface
running on top of pysqlite.
aiosqlite is a wrapper around pysqlite that uses a background thread for
each connection. It does not actually use non-blocking IO, as SQLite
databases are not socket-based. However it does provide a working asyncio
interface that's useful for testing and prototyping purposes.
Using a special asyncio mediation layer, the aiosqlite dialect is usable
as the backend for the :ref:`SQLAlchemy asyncio <asyncio_toplevel>`
extension package.
This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("sqlite+aiosqlite:///filename")
The URL passes through all arguments to the ``pysqlite`` driver, so all
connection arguments are the same as they are for that of :ref:`pysqlite`.
.. _aiosqlite_udfs:
User-Defined Functions
----------------------
aiosqlite extends pysqlite to support async, so we can create our own user-defined functions (UDFs)
in Python and use them directly in SQLite queries as described here: :ref:`pysqlite_udfs`.
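For example (a minimal sketch; the ``reverse_string`` UDF shown here is
illustrative), a function may be registered on each new connection using the
``connect`` event on :attr:`_asyncio.AsyncEngine.sync_engine`::

    from sqlalchemy import event
    from sqlalchemy.ext.asyncio import create_async_engine

    engine = create_async_engine("sqlite+aiosqlite:///myfile.db")


    def reverse_string(value):
        return value[::-1]


    @event.listens_for(engine.sync_engine, "connect")
    def register_udf(dbapi_connection, connection_record):
        # the adapted aiosqlite connection exposes a synchronous
        # create_function() wrapper around the awaitable driver method
        dbapi_connection.create_function("reverse_string", 1, reverse_string)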
.. _aiosqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL (asyncio version)
-------------------------------------------------------------------------
Similarly to pysqlite, aiosqlite does not support the SAVEPOINT feature.
The solution is similar to :ref:`pysqlite_serializable`, achieved with the
following event listeners applied to the async engine::
from sqlalchemy import create_engine, event
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("sqlite+aiosqlite:///myfile.db")
@event.listens_for(engine.sync_engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable aiosqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine.sync_engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.exec_driver_sql("BEGIN")
.. warning:: When using the above recipe, it is advised to not use the
:paramref:`.Connection.execution_options.isolation_level` setting on
:class:`_engine.Connection` and :func:`_sa.create_engine`
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
.. _aiosqlite_pooling:
Pooling Behavior
----------------
The SQLAlchemy ``aiosqlite`` DBAPI establishes the connection pool differently
based on the kind of SQLite database that's requested:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.StaticPool`. This pool maintains a single
connection, so that all access to the engine
  uses the same ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.AsyncAdaptedQueuePool` as the source of connections.
.. versionchanged:: 2.0.38
SQLite file database engines now use :class:`.AsyncAdaptedQueuePool` by default.
    Previously, :class:`.NullPool` was used. The :class:`.NullPool` class
may be used by specifying it via the
:paramref:`_sa.create_engine.poolclass` parameter.
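
For example, the pooling behavior for a file-based database may be overridden
with a sketch like the following (:class:`.NullPool` here disables pooling
entirely)::

    from sqlalchemy.ext.asyncio import create_async_engine
    from sqlalchemy.pool import NullPool

    engine = create_async_engine(
        "sqlite+aiosqlite:///myfile.db", poolclass=NullPool
    )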
""" # noqa
import asyncio
from collections import deque
from functools import partial
from .base import SQLiteExecutionContext
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
from ... import util
from ...engine import AdaptedConnection
from ...util.concurrency import await_fallback
from ...util.concurrency import await_only
class AsyncAdapt_aiosqlite_cursor:
# TODO: base on connectors/asyncio.py
# see #10415
__slots__ = (
"_adapt_connection",
"_connection",
"description",
"await_",
"_rows",
"arraysize",
"rowcount",
"lastrowid",
)
server_side = False
def __init__(self, adapt_connection):
self._adapt_connection = adapt_connection
self._connection = adapt_connection._connection
self.await_ = adapt_connection.await_
self.arraysize = 1
self.rowcount = -1
self.description = None
self._rows = deque()
def close(self):
self._rows.clear()
def execute(self, operation, parameters=None):
try:
_cursor = self.await_(self._connection.cursor())
if parameters is None:
self.await_(_cursor.execute(operation))
else:
self.await_(_cursor.execute(operation, parameters))
if _cursor.description:
self.description = _cursor.description
self.lastrowid = self.rowcount = -1
if not self.server_side:
self._rows = deque(self.await_(_cursor.fetchall()))
else:
self.description = None
self.lastrowid = _cursor.lastrowid
self.rowcount = _cursor.rowcount
if not self.server_side:
self.await_(_cursor.close())
else:
self._cursor = _cursor
except Exception as error:
self._adapt_connection._handle_exception(error)
def executemany(self, operation, seq_of_parameters):
try:
_cursor = self.await_(self._connection.cursor())
self.await_(_cursor.executemany(operation, seq_of_parameters))
self.description = None
self.lastrowid = _cursor.lastrowid
self.rowcount = _cursor.rowcount
self.await_(_cursor.close())
except Exception as error:
self._adapt_connection._handle_exception(error)
def setinputsizes(self, *inputsizes):
pass
def __iter__(self):
while self._rows:
yield self._rows.popleft()
def fetchone(self):
if self._rows:
return self._rows.popleft()
else:
return None
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = list(self._rows)
self._rows.clear()
return retval
class AsyncAdapt_aiosqlite_ss_cursor(AsyncAdapt_aiosqlite_cursor):
# TODO: base on connectors/asyncio.py
# see #10415
__slots__ = "_cursor"
server_side = True
def __init__(self, *arg, **kw):
super().__init__(*arg, **kw)
self._cursor = None
def close(self):
if self._cursor is not None:
self.await_(self._cursor.close())
self._cursor = None
def fetchone(self):
return self.await_(self._cursor.fetchone())
def fetchmany(self, size=None):
if size is None:
size = self.arraysize
return self.await_(self._cursor.fetchmany(size=size))
def fetchall(self):
return self.await_(self._cursor.fetchall())
class AsyncAdapt_aiosqlite_connection(AdaptedConnection):
await_ = staticmethod(await_only)
__slots__ = ("dbapi",)
def __init__(self, dbapi, connection):
self.dbapi = dbapi
self._connection = connection
@property
def isolation_level(self):
return self._connection.isolation_level
@isolation_level.setter
def isolation_level(self, value):
# aiosqlite's isolation_level setter works outside the Thread
# that it's supposed to, necessitating setting check_same_thread=False.
# for improved stability, we instead invent our own awaitable version
# using aiosqlite's async queue directly.
def set_iso(connection, value):
connection.isolation_level = value
function = partial(set_iso, self._connection._conn, value)
future = asyncio.get_event_loop().create_future()
self._connection._tx.put_nowait((future, function))
try:
return self.await_(future)
except Exception as error:
self._handle_exception(error)
def create_function(self, *args, **kw):
try:
self.await_(self._connection.create_function(*args, **kw))
except Exception as error:
self._handle_exception(error)
def cursor(self, server_side=False):
if server_side:
return AsyncAdapt_aiosqlite_ss_cursor(self)
else:
return AsyncAdapt_aiosqlite_cursor(self)
def execute(self, *args, **kw):
return self.await_(self._connection.execute(*args, **kw))
def rollback(self):
try:
self.await_(self._connection.rollback())
except Exception as error:
self._handle_exception(error)
def commit(self):
try:
self.await_(self._connection.commit())
except Exception as error:
self._handle_exception(error)
def close(self):
try:
self.await_(self._connection.close())
except ValueError:
            # it is undocumented for aiosqlite that a ValueError is
            # raised if .close() is called more than once, which is
            # both not customary for a DBAPI and is also not a DBAPI.Error
            # exception. This is now fixed in aiosqlite via my PR
            # https://github.com/omnilib/aiosqlite/pull/238, so we can be
            # assured this will not become some other kind of exception,
            # since it doesn't raise anymore.
pass
except Exception as error:
self._handle_exception(error)
def _handle_exception(self, error):
if (
isinstance(error, ValueError)
and error.args[0] == "no active connection"
):
raise self.dbapi.sqlite.OperationalError(
"no active connection"
) from error
else:
raise error
class AsyncAdaptFallback_aiosqlite_connection(AsyncAdapt_aiosqlite_connection):
__slots__ = ()
await_ = staticmethod(await_fallback)
class AsyncAdapt_aiosqlite_dbapi:
def __init__(self, aiosqlite, sqlite):
self.aiosqlite = aiosqlite
self.sqlite = sqlite
self.paramstyle = "qmark"
self._init_dbapi_attributes()
def _init_dbapi_attributes(self):
for name in (
"DatabaseError",
"Error",
"IntegrityError",
"NotSupportedError",
"OperationalError",
"ProgrammingError",
"sqlite_version",
"sqlite_version_info",
):
setattr(self, name, getattr(self.aiosqlite, name))
for name in ("PARSE_COLNAMES", "PARSE_DECLTYPES"):
setattr(self, name, getattr(self.sqlite, name))
for name in ("Binary",):
setattr(self, name, getattr(self.sqlite, name))
def connect(self, *arg, **kw):
async_fallback = kw.pop("async_fallback", False)
creator_fn = kw.pop("async_creator_fn", None)
if creator_fn:
connection = creator_fn(*arg, **kw)
else:
connection = self.aiosqlite.connect(*arg, **kw)
# it's a Thread. you'll thank us later
connection.daemon = True
if util.asbool(async_fallback):
return AsyncAdaptFallback_aiosqlite_connection(
self,
await_fallback(connection),
)
else:
return AsyncAdapt_aiosqlite_connection(
self,
await_only(connection),
)
class SQLiteExecutionContext_aiosqlite(SQLiteExecutionContext):
def create_server_side_cursor(self):
return self._dbapi_connection.cursor(server_side=True)
class SQLiteDialect_aiosqlite(SQLiteDialect_pysqlite):
driver = "aiosqlite"
supports_statement_cache = True
is_async = True
supports_server_side_cursors = True
execution_ctx_cls = SQLiteExecutionContext_aiosqlite
@classmethod
def import_dbapi(cls):
return AsyncAdapt_aiosqlite_dbapi(
__import__("aiosqlite"), __import__("sqlite3")
)
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.AsyncAdaptedQueuePool
else:
return pool.StaticPool
def is_disconnect(self, e, connection, cursor):
if isinstance(
e, self.dbapi.OperationalError
) and "no active connection" in str(e):
return True
return super().is_disconnect(e, connection, cursor)
def get_driver_connection(self, connection):
return connection._connection
dialect = SQLiteDialect_aiosqlite

File diff suppressed because it is too large


@@ -0,0 +1,263 @@
# dialects/sqlite/dml.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
from __future__ import annotations
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from .._typing import _OnConflictIndexElementsT
from .._typing import _OnConflictIndexWhereT
from .._typing import _OnConflictSetT
from .._typing import _OnConflictWhereT
from ... import util
from ...sql import coercions
from ...sql import roles
from ...sql import schema
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import ColumnElement
from ...sql.elements import KeyedColumnElement
from ...sql.elements import TextClause
from ...sql.expression import alias
from ...util.typing import Self
__all__ = ("Insert", "insert")
def insert(table: _DMLTableArgument) -> Insert:
"""Construct a sqlite-specific variant :class:`_sqlite.Insert`
construct.
.. container:: inherited_member
The :func:`sqlalchemy.dialects.sqlite.insert` function creates
a :class:`sqlalchemy.dialects.sqlite.Insert`. This class is based
on the dialect-agnostic :class:`_sql.Insert` construct which may
be constructed using the :func:`_sql.insert` function in
SQLAlchemy Core.
The :class:`_sqlite.Insert` construct includes additional methods
:meth:`_sqlite.Insert.on_conflict_do_update`,
:meth:`_sqlite.Insert.on_conflict_do_nothing`.
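
        E.g. (a minimal sketch; ``my_table`` is assumed to be an existing
        :class:`_schema.Table`)::

            from sqlalchemy.dialects.sqlite import insert

            stmt = insert(my_table).values(id=1, data="inserted value")
            stmt = stmt.on_conflict_do_update(
                index_elements=[my_table.c.id],
                set_=dict(data=stmt.excluded.data),
            )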
"""
return Insert(table)
class Insert(StandardInsert):
"""SQLite-specific implementation of INSERT.
Adds methods for SQLite-specific syntaxes such as ON CONFLICT.
The :class:`_sqlite.Insert` object is created using the
:func:`sqlalchemy.dialects.sqlite.insert` function.
.. versionadded:: 1.4
.. seealso::
:ref:`sqlite_on_conflict_insert`
"""
stringify_dialect = "sqlite"
inherit_cache = False
@util.memoized_property
def excluded(
self,
) -> ReadOnlyColumnCollection[str, KeyedColumnElement[Any]]:
"""Provide the ``excluded`` namespace for an ON CONFLICT statement
SQLite's ON CONFLICT clause allows reference to the row that would
be inserted, known as ``excluded``. This attribute provides
all columns in this row to be referenceable.
.. tip:: The :attr:`_sqlite.Insert.excluded` attribute is an instance
of :class:`_expression.ColumnCollection`, which provides an
interface the same as that of the :attr:`_schema.Table.c`
collection described at :ref:`metadata_tables_and_columns`.
With this collection, ordinary names are accessible like attributes
(e.g. ``stmt.excluded.some_column``), but special names and
dictionary method names should be accessed using indexed access,
such as ``stmt.excluded["column name"]`` or
``stmt.excluded["values"]``. See the docstring for
:class:`_expression.ColumnCollection` for further examples.
"""
return alias(self.table, name="excluded").columns
_on_conflict_exclusive = _exclusive_against(
"_post_values_clause",
msgs={
"_post_values_clause": "This Insert construct already has "
"an ON CONFLICT clause established"
},
)
@_generative
@_on_conflict_exclusive
def on_conflict_do_update(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
) -> Self:
r"""
Specifies a DO UPDATE SET action for ON CONFLICT clause.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index or unique constraint.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
:param set\_:
A dictionary or other mapping object
where the keys are either names of columns in the target table,
or :class:`_schema.Column` objects or other ORM-mapped columns
matching that of the target table, and expressions or literals
as values, specifying the ``SET`` actions to take.
.. versionadded:: 1.4 The
:paramref:`_sqlite.Insert.on_conflict_do_update.set_`
parameter supports :class:`_schema.Column` objects from the target
:class:`_schema.Table` as keys.
.. warning:: This dictionary does **not** take into account
Python-specified default UPDATE values or generation functions,
e.g. those specified using :paramref:`_schema.Column.onupdate`.
These values will not be exercised for an ON CONFLICT style of
UPDATE, unless they are manually specified in the
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. An expression object representing a ``WHERE``
clause that restricts the rows affected by ``DO UPDATE SET``. Rows not
meeting the ``WHERE`` condition will not be updated (effectively a
``DO NOTHING`` for those rows).
"""
self._post_values_clause = OnConflictDoUpdate(
index_elements, index_where, set_, where
)
return self
@_generative
@_on_conflict_exclusive
def on_conflict_do_nothing(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
) -> Self:
"""
Specifies a DO NOTHING action for ON CONFLICT clause.
:param index_elements:
A sequence consisting of string column names, :class:`_schema.Column`
objects, or other column expression objects that will be used
to infer a target index or unique constraint.
:param index_where:
Additional WHERE criterion that can be used to infer a
conditional target index.
"""
self._post_values_clause = OnConflictDoNothing(
index_elements, index_where
)
return self
class OnConflictClause(ClauseElement):
stringify_dialect = "sqlite"
inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
inferred_target_whereclause: Optional[
Union[ColumnElement[Any], TextClause]
]
def __init__(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
):
if index_elements is not None:
self.inferred_target_elements = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in index_elements
]
self.inferred_target_whereclause = (
coercions.expect(
roles.WhereHavingRole,
index_where,
)
if index_where is not None
else None
)
else:
self.inferred_target_elements = (
self.inferred_target_whereclause
) = None
class OnConflictDoNothing(OnConflictClause):
__visit_name__ = "on_conflict_do_nothing"
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
update_whereclause: Optional[ColumnElement[Any]]
def __init__(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
):
super().__init__(
index_elements=index_elements,
index_where=index_where,
)
if isinstance(set_, dict):
if not set_:
raise ValueError("set parameter dictionary must not be empty")
elif isinstance(set_, ColumnCollection):
set_ = dict(set_)
else:
raise ValueError(
"set parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update_values_to_set = [
(coercions.expect(roles.DMLColumnRole, key), value)
for key, value in set_.items()
]
self.update_whereclause = (
coercions.expect(roles.WhereHavingRole, where)
if where is not None
else None
)


@@ -0,0 +1,92 @@
# dialects/sqlite/json.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
from ... import types as sqltypes
class JSON(sqltypes.JSON):
"""SQLite JSON type.
SQLite supports JSON as of version 3.9 through its JSON1_ extension. Note
that JSON1_ is a
`loadable extension <https://www.sqlite.org/loadext.html>`_ and as such
may not be available, or may require run-time loading.
:class:`_sqlite.JSON` is used automatically whenever the base
:class:`_types.JSON` datatype is used against a SQLite backend.
.. seealso::
:class:`_types.JSON` - main documentation for the generic
cross-platform JSON datatype.
The :class:`_sqlite.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function wrapped in the ``JSON_QUOTE`` function at the database level.
Extracted values are quoted in order to ensure that the results are
always JSON string values.
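
    E.g. (a minimal sketch; ``data_table`` is assumed to have a column
    ``data`` of this type)::

        from sqlalchemy import select

        stmt = select(data_table.c.data["some_key"].as_string())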
.. versionadded:: 1.3
.. _JSON1: https://www.sqlite.org/json1.html
"""
# Note: these objects currently match exactly those of MySQL, however since
# these are not generalizable to all JSON implementations, remain separately
# implemented for each dialect.
class _FormatTypeMixin:
def _format_value(self, value):
raise NotImplementedError()
def bind_processor(self, dialect):
super_proc = self.string_bind_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
def literal_processor(self, dialect):
super_proc = self.string_literal_processor(dialect)
def process(value):
value = self._format_value(value)
if super_proc:
value = super_proc(value)
return value
return process
class JSONIndexType(_FormatTypeMixin, sqltypes.JSON.JSONIndexType):
def _format_value(self, value):
if isinstance(value, int):
value = "$[%s]" % value
else:
value = '$."%s"' % value
return value
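    # e.g. (illustrative) an integer index 5 is rendered as the JSON path
    # "$[5]", while a string key "some_key" becomes '$."some_key"'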
class JSONPathType(_FormatTypeMixin, sqltypes.JSON.JSONPathType):
def _format_value(self, value):
return "$%s" % (
"".join(
[
"[%s]" % elem if isinstance(elem, int) else '."%s"' % elem
for elem in value
]
)
)
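    # e.g. (illustrative) the path ["key", 1] is rendered as '$."key"[1]'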


@@ -0,0 +1,198 @@
# dialects/sqlite/provision.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
import os
import re
from ... import exc
from ...engine import url as sa_url
from ...testing.provision import create_db
from ...testing.provision import drop_db
from ...testing.provision import follower_url_from_main
from ...testing.provision import generate_driver_url
from ...testing.provision import log
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import stop_test_class_outside_fixtures
from ...testing.provision import temp_table_keyword_args
from ...testing.provision import upsert
# TODO: I can't get this to build dynamically with pytest-xdist procs
_drivernames = {
"pysqlite",
"aiosqlite",
"pysqlcipher",
"pysqlite_numeric",
"pysqlite_dollar",
}
def _format_url(url, driver, ident):
"""given a sqlite url + desired driver + ident, make a canonical
URL out of it
"""
url = sa_url.make_url(url)
if driver is None:
driver = url.get_driver_name()
filename = url.database
needs_enc = driver == "pysqlcipher"
name_token = None
if filename and filename != ":memory:":
assert "test_schema" not in filename
tokens = re.split(r"[_\.]", filename)
new_filename = f"{driver}"
for token in tokens:
if token in _drivernames:
if driver is None:
driver = token
continue
elif token in ("db", "enc"):
continue
elif name_token is None:
name_token = token.strip("_")
assert name_token, f"sqlite filename has no name token: {url.database}"
new_filename = f"{name_token}_{driver}"
if ident:
new_filename += f"_{ident}"
new_filename += ".db"
if needs_enc:
new_filename += ".enc"
url = url.set(database=new_filename)
if needs_enc:
url = url.set(password="test")
url = url.set(drivername="sqlite+%s" % (driver,))
return url
@generate_driver_url.for_db("sqlite")
def generate_driver_url(url, driver, query_str):
url = _format_url(url, driver, None)
try:
url.get_dialect()
except exc.NoSuchModuleError:
return None
else:
return url
@follower_url_from_main.for_db("sqlite")
def _sqlite_follower_url_from_main(url, ident):
return _format_url(url, None, ident)
@post_configure_engine.for_db("sqlite")
def _sqlite_post_configure_engine(url, engine, follower_ident):
from sqlalchemy import event
if follower_ident:
attach_path = f"{follower_ident}_{engine.driver}_test_schema.db"
else:
attach_path = f"{engine.driver}_test_schema.db"
@event.listens_for(engine, "connect")
def connect(dbapi_connection, connection_record):
# use file DBs in all cases, memory acts kind of strangely
# as an attached
# NOTE! this has to be done *per connection*. New sqlite connection,
# as we get with say, QueuePool, the attaches are gone.
# so schemes to delete those attached files have to be done at the
# filesystem level and not rely upon what attachments are in a
# particular SQLite connection
dbapi_connection.execute(
f'ATTACH DATABASE "{attach_path}" AS test_schema'
)
@event.listens_for(engine, "engine_disposed")
def dispose(engine):
"""most databases should be dropped using
stop_test_class_outside_fixtures
however a few tests like AttachedDBTest might not get triggered on
that main hook
"""
if os.path.exists(attach_path):
os.remove(attach_path)
filename = engine.url.database
if filename and filename != ":memory:" and os.path.exists(filename):
os.remove(filename)
@create_db.for_db("sqlite")
def _sqlite_create_db(cfg, eng, ident):
pass
@drop_db.for_db("sqlite")
def _sqlite_drop_db(cfg, eng, ident):
_drop_dbs_w_ident(eng.url.database, eng.driver, ident)
def _drop_dbs_w_ident(databasename, driver, ident):
    for path in os.listdir("."):
        # match on filename suffixes directly; os.path.split() separates
        # the directory from the filename rather than the name from the
        # extension, so the original name/ext comparison could not match
        if ident in path and path.endswith((".db", ".db.enc")):
log.info("deleting SQLite database file: %s", path)
os.remove(path)
@stop_test_class_outside_fixtures.for_db("sqlite")
def stop_test_class_outside_fixtures(config, db, cls):
db.dispose()
@temp_table_keyword_args.for_db("sqlite")
def _sqlite_temp_table_keyword_args(cfg, eng):
return {"prefixes": ["TEMPORARY"]}
@run_reap_dbs.for_db("sqlite")
def _reap_sqlite_dbs(url, idents):
log.info("db reaper connecting to %r", url)
log.info("identifiers in file: %s", ", ".join(idents))
url = sa_url.make_url(url)
for ident in idents:
for drivername in _drivernames:
_drop_dbs_w_ident(url.database, drivername, ident)
@upsert.for_db("sqlite")
def _upsert(
cfg, table, returning, *, set_lambda=None, sort_by_parameter_order=False
):
from sqlalchemy.dialects.sqlite import insert
stmt = insert(table)
if set_lambda:
stmt = stmt.on_conflict_do_update(set_=set_lambda(stmt.excluded))
else:
stmt = stmt.on_conflict_do_nothing()
stmt = stmt.returning(
*returning, sort_by_parameter_order=sort_by_parameter_order
)
return stmt


@@ -0,0 +1,157 @@
# dialects/sqlite/pysqlcipher.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: sqlcipher 3 or pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase@/file_path[?kdf_iter=<iter>]
Dialect for support of DBAPIs that make use of the
`SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
Driver
------
Current dialect selection logic is:
* If the :paramref:`_sa.create_engine.module` parameter supplies a DBAPI module,
that module is used.
* Otherwise for Python 3, choose https://pypi.org/project/sqlcipher3/
* If not available, fall back to https://pypi.org/project/pysqlcipher3/
* For Python 2, https://pypi.org/project/pysqlcipher/ is used.
.. warning:: The ``pysqlcipher3`` and ``pysqlcipher`` DBAPI drivers are no
longer maintained; the ``sqlcipher3`` driver as of this writing appears
to be current. For future compatibility, any pysqlcipher-compatible DBAPI
may be used as follows::
import sqlcipher_compatible_driver
from sqlalchemy import create_engine
e = create_engine(
"sqlite+pysqlcipher://:password@/dbname.db",
module=sqlcipher_compatible_driver,
)
These drivers make use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database file to be
encrypted.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine("sqlite+pysqlcipher://:testing@/foo.db")
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine("sqlite+pysqlcipher://:testing@//path/to/foo.db")
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``,
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine(
"sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000"
)
.. warning:: Previous versions of SQLAlchemy did not take into consideration
   the encryption-related pragmas passed in the url string; these were
   silently ignored. This may cause errors when opening files saved by a
   previous SQLAlchemy version if the encryption options do not match.
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`_sa.create_engine.poolclass` parameter; the
:class:`.StaticPool` may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
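
For example (a minimal sketch), :class:`.NullPool` may be selected with::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    e = create_engine(
        "sqlite+pysqlcipher://:testing@/foo.db", poolclass=NullPool
    )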
""" # noqa
from .pysqlite import SQLiteDialect_pysqlite
from ... import pool
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
driver = "pysqlcipher"
supports_statement_cache = True
pragmas = ("kdf_iter", "cipher", "cipher_page_size", "cipher_use_hmac")
@classmethod
def import_dbapi(cls):
try:
import sqlcipher3 as sqlcipher
except ImportError:
pass
else:
return sqlcipher
from pysqlcipher3 import dbapi2 as sqlcipher
return sqlcipher
@classmethod
def get_pool_class(cls, url):
return pool.SingletonThreadPool
def on_connect_url(self, url):
super_on_connect = super().on_connect_url(url)
# pull the info we need from the URL early. Even though URL
# is immutable, we don't want any in-place changes to the URL
# to affect things
passphrase = url.password or ""
url_query = dict(url.query)
def on_connect(conn):
cursor = conn.cursor()
cursor.execute('pragma key="%s"' % passphrase)
for prag in self.pragmas:
value = url_query.get(prag, None)
if value is not None:
cursor.execute('pragma %s="%s"' % (prag, value))
cursor.close()
if super_on_connect:
super_on_connect(conn)
return on_connect
def create_connect_args(self, url):
plain_url = url._replace(password=None)
plain_url = plain_url.difference_update_query(self.pragmas)
return super().create_connect_args(plain_url)
dialect = SQLiteDialect_pysqlcipher


@@ -0,0 +1,771 @@
# dialects/sqlite/pysqlite.py
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
.. dialect:: sqlite+pysqlite
:name: pysqlite
:dbapi: sqlite3
:connectstring: sqlite+pysqlite:///file_path
:url: https://docs.python.org/library/sqlite3.html
Note that ``pysqlite`` is the same driver as the ``sqlite3``
module included with the Python distribution.
Driver
------
The ``sqlite3`` Python DBAPI is standard on all modern Python versions;
for CPython and PyPy, no additional installation is necessary.
Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is:
.. sourcecode:: text
driver://user:pass@host/database
This means that the actual filename to be used starts with the characters to
the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine("sqlite:///path/to/database.db")
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine("sqlite:////path/to/database.db")
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine("sqlite:///C:\\path\\to\\database.db")
To use sqlite ``:memory:`` database specify it as the filename using
``sqlite:///:memory:``. It's also the default if no filepath is
present, specifying only ``sqlite://`` and nothing else::
# in-memory database (note three slashes)
e = create_engine("sqlite:///:memory:")
# also in-memory database
e2 = create_engine("sqlite://")
.. _pysqlite_uri_connections:
URI Connections
^^^^^^^^^^^^^^^
Modern versions of SQLite support an alternative system of connecting using a
`driver level URI <https://www.sqlite.org/uri.html>`_, which has the advantage
that additional driver-level arguments can be passed including options such as
"read only". The Python sqlite3 driver supports this mode under modern Python
3 versions. The SQLAlchemy pysqlite driver supports this mode of use by
specifying "uri=true" in the URL query string. The SQLite-level "URI" is kept
as the "database" portion of the SQLAlchemy url (that is, following a slash)::
e = create_engine("sqlite:///file:path/to/database?mode=ro&uri=true")
.. note:: The "uri=true" parameter must appear in the **query string**
of the URL. It will not currently work as expected if it is only
present in the :paramref:`_sa.create_engine.connect_args`
parameter dictionary.
The logic reconciles the simultaneous presence of SQLAlchemy's query string and
SQLite's query string by separating out the parameters that belong to the
Python sqlite3 driver vs. those that belong to the SQLite URI. This is
achieved through the use of a fixed list of parameters known to be accepted by
the Python side of the driver. For example, to include a URL that indicates
the Python sqlite3 "timeout" and "check_same_thread" parameters, along with the
SQLite "mode" and "nolock" parameters, they can all be passed together on the
query string::
e = create_engine(
"sqlite:///file:path/to/database?"
"check_same_thread=true&timeout=10&mode=ro&nolock=1&uri=true"
)
Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
sqlite3.connect(
"file:path/to/database?mode=ro&nolock=1",
check_same_thread=True,
timeout=10,
uri=True,
)
Regarding future parameters added to either the Python or native drivers: new
parameter names added to the SQLite URI scheme should be automatically
accommodated by this scheme. New parameter names added to the Python driver
side can be accommodated by specifying them in the
:paramref:`_sa.create_engine.connect_args` dictionary,
until dialect support is
added by SQLAlchemy. For the less likely case that the native SQLite driver
adds a new parameter name that overlaps with one of the existing, known Python
driver parameters (such as "timeout" perhaps), SQLAlchemy's dialect would
require adjustment for the URL scheme to continue to support this.
As is always the case for all SQLAlchemy dialects, the entire "URL" process
can be bypassed in :func:`_sa.create_engine` through the use of the
:paramref:`_sa.create_engine.creator`
parameter which allows for a custom callable
that creates a Python sqlite3 driver level connection directly.
.. versionadded:: 1.3.9
.. seealso::
`Uniform Resource Identifiers <https://www.sqlite.org/uri.html>`_ - in
the SQLite documentation
.. _pysqlite_regexp:
Regular Expression Support
---------------------------
.. versionadded:: 1.4
Support for the :meth:`_sql.ColumnOperators.regexp_match` operator is provided
using Python's re.search_ function. SQLite itself does not include a working
regular expression operator; instead, it includes a non-implemented placeholder
operator ``REGEXP`` that calls a user-defined function that must be provided.
SQLAlchemy's implementation makes use of the pysqlite create_function_ hook
as follows::
def regexp(a, b):
return re.search(a, b) is not None
sqlite_connection.create_function(
"regexp",
2,
regexp,
)
There is currently no support for regular expression flags as a separate
argument, as these are not supported by SQLite's REGEXP operator, however these
may be included inline within the regular expression string. See `Python regular expressions`_ for
details.
.. seealso::
`Python regular expressions`_: Documentation for Python's regular expression syntax.
.. _create_function: https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function
.. _re.search: https://docs.python.org/3/library/re.html#re.search
.. _Python regular expressions: https://docs.python.org/3/library/re.html#re.search
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine(
"sqlite://",
connect_args={
"detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
},
native_datetime=True,
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
DATETIME or TIME types...confused yet ?) will not perform any bind parameter
or result processing. Execution of "func.current_date()" will return a string.
"func.current_timestamp()" is registered as returning a DATETIME type in
SQLAlchemy, so this function still receives SQLAlchemy-level result
processing.
.. _pysqlite_threading_pooling:
Threading/Pooling Behavior
---------------------------
The ``sqlite3`` DBAPI by default prohibits the use of a particular connection
in a thread which is not the one in which it was created. As SQLite has
matured, its behavior under multiple threads has improved, and even includes
options for memory only databases to be used in multiple threads.
The thread prohibition is known as "check same thread" and may be controlled
using the ``sqlite3`` parameter ``check_same_thread``, which will disable or
enable this check. SQLAlchemy's default behavior here is to set
``check_same_thread`` to ``False`` automatically whenever a file-based database
is in use, to establish compatibility with the default pool class
:class:`.QueuePool`.
The SQLAlchemy ``pysqlite`` DBAPI establishes the connection pool differently
based on the kind of SQLite database that's requested:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.SingletonThreadPool`. This pool maintains a single
connection per thread, so that all access to the engine within the current
thread use the same ``:memory:`` database - other threads would access a
different ``:memory:`` database. The ``check_same_thread`` parameter
defaults to ``True``.
* When a file-based database is specified, the dialect will use
:class:`.QueuePool` as the source of connections. At the same time,
the ``check_same_thread`` flag is set to False by default unless overridden.
.. versionchanged:: 2.0
SQLite file database engines now use :class:`.QueuePool` by default.
    Previously, :class:`.NullPool` was used. The :class:`.NullPool` class
may be used by specifying it via the
:paramref:`_sa.create_engine.poolclass` parameter.
Disabling Connection Pooling for File Databases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Pooling may be disabled for a file based database by specifying the
:class:`.NullPool` implementation for the :func:`_sa.create_engine.poolclass`
parameter::
from sqlalchemy import NullPool
engine = create_engine("sqlite:///myfile.db", poolclass=NullPool)
It's been observed that the :class:`.NullPool` implementation incurs an
extremely small performance overhead for repeated checkouts due to the lack of
connection re-use implemented by :class:`.QueuePool`. However, it still
may be beneficial to use this class if the application is experiencing
issues with files being locked.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a ``:memory:`` database in a multithreaded scenario, the same
connection object must be shared among threads, since the database exists
only within the scope of that connection. The
:class:`.StaticPool` implementation will maintain a single connection
globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine(
"sqlite://",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Due to the way SQLite deals with temporary tables, if you wish to use a
temporary table in a file-based SQLite database across multiple checkouts
from the connection pool, such as when using an ORM :class:`.Session` where
the temporary table should continue to remain after :meth:`.Session.commit` or
:meth:`.Session.rollback` is called, a pool which maintains a single
connection must be used. Use :class:`.SingletonThreadPool` if the scope is
only needed within the current thread, or :class:`.StaticPool` if the scope is
needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine("sqlite:///mydb.db", poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine("sqlite:///mydb.db", poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
closed out in a non deterministic way.
Dealing with Mixed String / Binary Columns
------------------------------------------------------
The SQLite database is weakly typed, and as such it is possible when using
binary values, which in Python are represented as ``b'some string'``, that a
particular SQLite database can have data values within different rows where
some of them will be returned as a ``b''`` value by the Pysqlite driver, and
others will be returned as Python strings, e.g. ``''`` values. This situation
is not known to occur if the SQLAlchemy :class:`.LargeBinary` datatype is used
consistently, however if a particular SQLite database has data that was
inserted using the Pysqlite driver directly, or when using the SQLAlchemy
:class:`.String` type which was later changed to :class:`.LargeBinary`, the
table will not be consistently readable because SQLAlchemy's
:class:`.LargeBinary` datatype does not handle strings so it has no way of
"encoding" a value that is in string format.
To deal with a SQLite table that has mixed string / binary data in the
same column, use a custom type that will check each row individually::
from sqlalchemy import String
from sqlalchemy import TypeDecorator
class MixedBinary(TypeDecorator):
impl = String
cache_ok = True
def process_result_value(self, value, dialect):
if isinstance(value, str):
value = bytes(value, "utf-8")
elif value is not None:
value = bytes(value)
return value
Then use the above ``MixedBinary`` datatype in the place where
:class:`.LargeBinary` would normally be used.
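
For example (a minimal sketch; ``my_table`` and its columns are
illustrative)::

    from sqlalchemy import Column, Integer, MetaData, Table

    my_table = Table(
        "my_table",
        MetaData(),
        Column("id", Integer, primary_key=True),
        Column("data", MixedBinary),
    )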
.. _pysqlite_serializable:
Serializable isolation / Savepoints / Transactional DDL
-------------------------------------------------------
In the section :ref:`sqlite_concurrency`, we refer to the pysqlite
driver's assortment of issues that prevent several features of SQLite
from working correctly. The pysqlite DBAPI driver has several
long-standing bugs which impact the correctness of its transactional
behavior. In its default mode of operation, SQLite features such as
SERIALIZABLE isolation, transactional DDL, and SAVEPOINT support are
non-functional, and in order to use these features, workarounds must
be taken.
The issue is essentially that the driver attempts to second-guess the user's
intent, failing to start transactions and sometimes ending them prematurely, in
an effort to minimize the SQLite database's file locking behavior, even
though SQLite itself uses "shared" locks for read-only activities.
SQLAlchemy chooses to not alter this behavior by default, as it is the
long-expected behavior of the pysqlite driver; if and when the pysqlite
driver repairs these issues, that would be a driver towards changing
SQLAlchemy's defaults.
The good news is that with a few events, we can implement transactional
support fully, by disabling pysqlite's feature entirely and emitting BEGIN
ourselves. This is achieved using two event listeners::
from sqlalchemy import create_engine, event
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
conn.exec_driver_sql("BEGIN")
.. warning:: When using the above recipe, it is advised to not use the
:paramref:`.Connection.execution_options.isolation_level` setting on
:class:`_engine.Connection` and :func:`_sa.create_engine`
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
Above, we intercept a new pysqlite connection and disable any transactional
integration. Then, at the point at which SQLAlchemy knows that transaction
scope is to begin, we emit ``"BEGIN"`` ourselves.
When we take control of ``"BEGIN"``, we can also control directly SQLite's
locking modes, introduced at
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_,
by adding the desired locking mode to our ``"BEGIN"``::
@event.listens_for(engine, "begin")
def do_begin(conn):
conn.exec_driver_sql("BEGIN EXCLUSIVE")
.. seealso::
`BEGIN TRANSACTION <https://sqlite.org/lang_transaction.html>`_ -
on the SQLite site
`sqlite3 SELECT does not BEGIN a transaction <https://bugs.python.org/issue9924>`_ -
on the Python bug tracker
`sqlite3 module breaks transactions and potentially corrupts data <https://bugs.python.org/issue10740>`_ -
on the Python bug tracker
.. _pysqlite_udfs:
User-Defined Functions
----------------------
pysqlite supports a `create_function() <https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function>`_
method that allows us to create our own user-defined functions (UDFs) in Python and use them directly in SQLite queries.
These functions are registered with a specific DBAPI Connection.
SQLAlchemy uses connection pooling with file-based SQLite databases, so we need to ensure that the UDF is attached to the
connection when it is created. That is accomplished with an event listener::
from sqlalchemy import create_engine
from sqlalchemy import event
from sqlalchemy import text
def udf():
return "udf-ok"
engine = create_engine("sqlite:///./db_file")
@event.listens_for(engine, "connect")
def connect(conn, rec):
conn.create_function("udf", 0, udf)
for i in range(5):
with engine.connect() as conn:
print(conn.scalar(text("SELECT UDF()")))
""" # noqa
import math
import os
import re
from .base import DATE
from .base import DATETIME
from .base import SQLiteDialect
from ... import exc
from ... import pool
from ... import types as sqltypes
from ... import util
class _SQLite_pysqliteTimeStamp(DATETIME):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATETIME.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATETIME.result_processor(self, dialect, coltype)
class _SQLite_pysqliteDate(DATE):
def bind_processor(self, dialect):
if dialect.native_datetime:
return None
else:
return DATE.bind_processor(self, dialect)
def result_processor(self, dialect, coltype):
if dialect.native_datetime:
return None
else:
return DATE.result_processor(self, dialect, coltype)
class SQLiteDialect_pysqlite(SQLiteDialect):
default_paramstyle = "qmark"
supports_statement_cache = True
returns_native_bytes = True
colspecs = util.update_copy(
SQLiteDialect.colspecs,
{
sqltypes.Date: _SQLite_pysqliteDate,
sqltypes.TIMESTAMP: _SQLite_pysqliteTimeStamp,
},
)
description_encoding = None
driver = "pysqlite"
@classmethod
def import_dbapi(cls):
from sqlite3 import dbapi2 as sqlite
return sqlite
@classmethod
def _is_url_file_db(cls, url):
if (url.database and url.database != ":memory:") and (
url.query.get("mode", None) != "memory"
):
return True
else:
return False
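        # e.g. (illustrative) sqlite:///foo.db is a file database, while
        # sqlite://, sqlite:///:memory: and ?mode=memory URIs are not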
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.QueuePool
else:
return pool.SingletonThreadPool
def _get_server_version_info(self, connection):
return self.dbapi.sqlite_version_info
_isolation_lookup = SQLiteDialect._isolation_lookup.union(
{
"AUTOCOMMIT": None,
}
)
def set_isolation_level(self, dbapi_connection, level):
if level == "AUTOCOMMIT":
dbapi_connection.isolation_level = None
else:
dbapi_connection.isolation_level = ""
return super().set_isolation_level(dbapi_connection, level)
def on_connect(self):
def regexp(a, b):
if b is None:
return None
return re.search(a, b) is not None
if util.py38 and self._get_server_version_info(None) >= (3, 9):
# sqlite must be greater than 3.8.3 for deterministic=True
# https://docs.python.org/3/library/sqlite3.html#sqlite3.Connection.create_function
# the check is more conservative since there were still issues
# with following 3.8 sqlite versions
create_func_kw = {"deterministic": True}
else:
create_func_kw = {}
def set_regexp(dbapi_connection):
dbapi_connection.create_function(
"regexp", 2, regexp, **create_func_kw
)
def floor_func(dbapi_connection):
# NOTE: floor is optionally present in sqlite 3.35+ , however
# as it is normally non-present we deliver floor() unconditionally
# for now.
# https://www.sqlite.org/lang_mathfunc.html
dbapi_connection.create_function(
"floor", 1, math.floor, **create_func_kw
)
fns = [set_regexp, floor_func]
def connect(conn):
for fn in fns:
fn(conn)
return connect
def create_connect_args(self, url):
if url.username or url.password or url.host or url.port:
raise exc.ArgumentError(
"Invalid SQLite URL: %s\n"
"Valid SQLite URL forms are:\n"
" sqlite:///:memory: (or, sqlite://)\n"
" sqlite:///relative/path/to/file.db\n"
" sqlite:////absolute/path/to/file.db" % (url,)
)
# theoretically, this list can be augmented, at least as far as
# parameter names accepted by sqlite3/pysqlite, using
# inspect.getfullargspec(). for the moment this seems like overkill
# as these parameters don't change very often, and as always,
# parameters passed to connect_args will always go to the
# sqlite3/pysqlite driver.
pysqlite_args = [
("uri", bool),
("timeout", float),
("isolation_level", str),
("detect_types", int),
("check_same_thread", bool),
("cached_statements", int),
]
opts = url.query
pysqlite_opts = {}
for key, type_ in pysqlite_args:
util.coerce_kw_type(opts, key, type_, dest=pysqlite_opts)
if pysqlite_opts.get("uri", False):
uri_opts = dict(opts)
# here, we are actually separating the parameters that go to
# sqlite3/pysqlite vs. those that go the SQLite URI. What if
# two names conflict? again, this seems to be not the case right
# now, and in the case that new names are added to
# either side which overlap, again the sqlite3/pysqlite parameters
# can be passed through connect_args instead of in the URL.
# If SQLite native URIs add a parameter like "timeout" that
# we already have listed here for the python driver, then we need
# to adjust for that here.
for key, type_ in pysqlite_args:
uri_opts.pop(key, None)
filename = url.database
if uri_opts:
# sorting of keys is for unit test support
filename += "?" + (
"&".join(
"%s=%s" % (key, uri_opts[key])
for key in sorted(uri_opts)
)
)
else:
filename = url.database or ":memory:"
if filename != ":memory:":
filename = os.path.abspath(filename)
pysqlite_opts.setdefault(
"check_same_thread", not self._is_url_file_db(url)
)
return ([filename], pysqlite_opts)
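        # e.g. (illustrative) "sqlite:///file:data.db?mode=ro&timeout=10&uri=true"
        # yields (["file:data.db?mode=ro"],
        #         {"uri": True, "timeout": 10.0, "check_same_thread": False})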
def is_disconnect(self, e, connection, cursor):
return isinstance(
e, self.dbapi.ProgrammingError
) and "Cannot operate on a closed database." in str(e)
dialect = SQLiteDialect_pysqlite
class _SQLiteDialect_pysqlite_numeric(SQLiteDialect_pysqlite):
"""numeric dialect for testing only
internal use only. This dialect is **NOT** supported by SQLAlchemy
and may change at any time.
"""
supports_statement_cache = True
default_paramstyle = "numeric"
driver = "pysqlite_numeric"
_first_bind = ":1"
_not_in_statement_regexp = None
def __init__(self, *arg, **kw):
kw.setdefault("paramstyle", "numeric")
super().__init__(*arg, **kw)
def create_connect_args(self, url):
arg, opts = super().create_connect_args(url)
opts["factory"] = self._fix_sqlite_issue_99953()
return arg, opts
def _fix_sqlite_issue_99953(self):
import sqlite3
first_bind = self._first_bind
if self._not_in_statement_regexp:
nis = self._not_in_statement_regexp
def _test_sql(sql):
m = nis.search(sql)
assert not m, f"Found {nis.pattern!r} in {sql!r}"
else:
def _test_sql(sql):
pass
def _numeric_param_as_dict(parameters):
if parameters:
assert isinstance(parameters, tuple)
return {
str(idx): value for idx, value in enumerate(parameters, 1)
}
else:
return ()
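            # e.g. (illustrative) positional parameters ("a", "b") become
            # {"1": "a", "2": "b"}, matching the :1 / :2 (or $1 / $2)
            # numeric placeholders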
class SQLiteFix99953Cursor(sqlite3.Cursor):
def execute(self, sql, parameters=()):
_test_sql(sql)
if first_bind in sql:
parameters = _numeric_param_as_dict(parameters)
return super().execute(sql, parameters)
def executemany(self, sql, parameters):
_test_sql(sql)
if first_bind in sql:
parameters = [
_numeric_param_as_dict(p) for p in parameters
]
return super().executemany(sql, parameters)
class SQLiteFix99953Connection(sqlite3.Connection):
def cursor(self, factory=None):
if factory is None:
factory = SQLiteFix99953Cursor
return super().cursor(factory=factory)
def execute(self, sql, parameters=()):
_test_sql(sql)
if first_bind in sql:
parameters = _numeric_param_as_dict(parameters)
return super().execute(sql, parameters)
def executemany(self, sql, parameters):
_test_sql(sql)
if first_bind in sql:
parameters = [
_numeric_param_as_dict(p) for p in parameters
]
return super().executemany(sql, parameters)
return SQLiteFix99953Connection
class _SQLiteDialect_pysqlite_dollar(_SQLiteDialect_pysqlite_numeric):
"""numeric dialect that uses $ for testing only
internal use only. This dialect is **NOT** supported by SQLAlchemy
and may change at any time.
"""
supports_statement_cache = True
default_paramstyle = "numeric_dollar"
driver = "pysqlite_dollar"
_first_bind = "$1"
_not_in_statement_regexp = re.compile(r"[^\d]:\d+")
def __init__(self, *arg, **kw):
kw.setdefault("paramstyle", "numeric_dollar")
super().__init__(*arg, **kw)