
Check 09.02.2025

This commit is contained in:
MoonTestUse1
2025-02-09 01:11:49 +06:00
parent ce52f8a23a
commit 0aa3ef8fc2
5827 changed files with 14316 additions and 1906434 deletions

View File

@@ -1,5 +1,5 @@
# dialects/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/_typing.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -12,14 +12,19 @@ from typing import Mapping
from typing import Optional
from typing import Union
from ..sql._typing import _DDLColumnArgument
from ..sql.elements import DQLDMLClauseElement
from ..sql import roles
from ..sql.base import ColumnCollection
from ..sql.schema import Column
from ..sql.schema import ColumnCollectionConstraint
from ..sql.schema import Index
_OnConflictConstraintT = Union[str, ColumnCollectionConstraint, Index, None]
_OnConflictIndexElementsT = Optional[Iterable[_DDLColumnArgument]]
_OnConflictIndexWhereT = Optional[DQLDMLClauseElement]
_OnConflictSetT = Optional[Mapping[Any, Any]]
_OnConflictWhereT = Union[DQLDMLClauseElement, str, None]
_OnConflictIndexElementsT = Optional[
Iterable[Union[Column[Any], str, roles.DDLConstraintColumnRole]]
]
_OnConflictIndexWhereT = Optional[roles.WhereHavingRole]
_OnConflictSetT = Optional[
Union[Mapping[Any, Any], ColumnCollection[Any, Any]]
]
_OnConflictWhereT = Optional[roles.WhereHavingRole]
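A minimal sketch of what the widened annotations permit, assuming the 2.0-series dialect API (the table here is illustrative): ``set_`` may now be a ``ColumnCollection`` such as ``stmt.excluded``, not only a mapping::

from sqlalchemy import Column, Integer, MetaData, String, Table
from sqlalchemy.dialects.sqlite import insert

users = Table(
    "users",
    MetaData(),
    Column("id", Integer, primary_key=True),
    Column("name", String(50)),
)
stmt = insert(users).values(id=1, name="spongebob")
stmt = stmt.on_conflict_do_update(
    index_elements=[users.c.id],
    set_=stmt.excluded,  # a ColumnCollection, not a Mapping
)
print(stmt)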

View File

@@ -1,5 +1,5 @@
# dialects/mssql/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/mssql/aioodbc.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -32,13 +32,12 @@ This dialect should normally be used only with the
styles are otherwise equivalent to those documented in the pyodbc section::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine(
"mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
"driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
)
"""
from __future__ import annotations
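A short usage sketch for the engine shown above, using the standard asyncio pattern; the server address and credentials are the illustrative ones from the docstring::

import asyncio

from sqlalchemy import text
from sqlalchemy.ext.asyncio import create_async_engine

async def main():
    engine = create_async_engine(
        "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
        "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
    )
    async with engine.connect() as conn:
        result = await conn.execute(text("SELECT 1"))
        print(result.scalar())
    await engine.dispose()

asyncio.run(main())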

View File

@@ -1,5 +1,5 @@
# dialects/mssql/base.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -9,7 +9,6 @@
"""
.. dialect:: mssql
:name: Microsoft SQL Server
:full_support: 2017
:normal_support: 2012+
:best_effort: 2005+
@@ -40,9 +39,12 @@ considered to be the identity column - unless it is associated with a
from sqlalchemy import Table, MetaData, Column, Integer
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True),
Column('x', Integer))
t = Table(
"t",
m,
Column("id", Integer, primary_key=True),
Column("x", Integer),
)
m.create_all(engine)
The above example will generate DDL as:
@@ -60,9 +62,12 @@ specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer))
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer),
)
m.create_all(engine)
To add the ``IDENTITY`` keyword to a non-primary key column, specify
@@ -72,9 +77,12 @@ To add the ``IDENTITY`` keyword to a non-primary key column, specify
is set to ``False`` on any integer primary key column::
m = MetaData()
t = Table('t', m,
Column('id', Integer, primary_key=True, autoincrement=False),
Column('x', Integer, autoincrement=True))
t = Table(
"t",
m,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("x", Integer, autoincrement=True),
)
m.create_all(engine)
.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
@@ -137,14 +145,12 @@ parameters passed to the :class:`_schema.Identity` object::
from sqlalchemy import Table, Integer, Column, Identity
test = Table(
'test', metadata,
"test",
metadata,
Column(
'id',
Integer,
primary_key=True,
Identity(start=100, increment=10)
"id", Integer, primary_key=True, Identity(start=100, increment=10)
),
Column('name', String(20))
Column("name", String(20)),
)
The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -154,7 +160,7 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
CREATE TABLE test (
id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
name VARCHAR(20) NULL,
)
)
.. note::
@@ -187,6 +193,7 @@ type deployed to the SQL Server database can be specified as ``Numeric`` using
Base = declarative_base()
class TestTable(Base):
__tablename__ = "test"
id = Column(
@@ -212,8 +219,9 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
from sqlalchemy import TypeDecorator
class NumericAsInteger(TypeDecorator):
'''normalize floating point return values into ints'''
"normalize floating point return values into ints"
impl = Numeric(10, 0, asdecimal=False)
cache_ok = True
@@ -223,6 +231,7 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
value = int(value)
return value
class TestTable(Base):
__tablename__ = "test"
id = Column(
@@ -271,11 +280,11 @@ The process for fetching this value has several variants:
fetched in order to receive the value. Given a table as::
t = Table(
't',
"t",
metadata,
Column('id', Integer, primary_key=True),
Column('x', Integer),
implicit_returning=False
Column("id", Integer, primary_key=True),
Column("x", Integer),
implicit_returning=False,
)
an INSERT will look like:
@@ -301,12 +310,13 @@ statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::
m = MetaData()
t = Table('t', m, Column('id', Integer, primary_key=True),
Column('x', Integer))
t = Table(
"t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
)
m.create_all(engine)
with engine.begin() as conn:
conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
conn.execute(t.insert(), {"id": 1, "x": 1}, {"id": 2, "x": 2})
The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
@@ -342,7 +352,11 @@ The :class:`.Sequence` object creates "real" sequences, i.e.,
>>> from sqlalchemy import Sequence
>>> from sqlalchemy.schema import CreateSequence
>>> from sqlalchemy.dialects import mssql
>>> print(CreateSequence(Sequence("my_seq", start=1)).compile(dialect=mssql.dialect()))
>>> print(
... CreateSequence(Sequence("my_seq", start=1)).compile(
... dialect=mssql.dialect()
... )
... )
{printsql}CREATE SEQUENCE my_seq START WITH 1
For integer primary key generation, SQL Server's ``IDENTITY`` construct should
@@ -376,12 +390,12 @@ more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::
my_table = Table(
'my_table', metadata,
Column('my_data', VARCHAR(None)),
Column('my_n_data', NVARCHAR(None))
"my_table",
metadata,
Column("my_data", VARCHAR(None)),
Column("my_n_data", NVARCHAR(None)),
)
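For reference (not shown in this hunk), the columns above are expected to render in the CREATE TABLE as:

.. sourcecode:: sql

my_data VARCHAR(max) NULL,
my_n_data NVARCHAR(max) NULL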
Collation Support
-----------------
@@ -389,10 +403,13 @@ Character collations are supported by the base string types,
specified by the string argument "collation"::
from sqlalchemy import VARCHAR
Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
Column("login", VARCHAR(32, collation="Latin1_General_CI_AS"))
When such a column is associated with a :class:`_schema.Table`, the
CREATE TABLE statement for this column will yield::
CREATE TABLE statement for this column will yield:
.. sourcecode:: sql
login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
@@ -412,7 +429,9 @@ versions when no OFFSET clause is present. A statement such as::
select(some_table).limit(5)
will render similarly to::
will render similarly to:
.. sourcecode:: sql
SELECT TOP 5 col1, col2.. FROM table
@@ -422,7 +441,9 @@ LIMIT and OFFSET, or just OFFSET alone, will be rendered using the
select(some_table).order_by(some_table.c.col3).limit(5).offset(10)
will render similarly to::
will render similarly to:
.. sourcecode:: sql
SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
ROW_NUMBER() OVER (ORDER BY col3) AS
@@ -475,16 +496,13 @@ each new connection.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mssql+pyodbc://scott:tiger@ms_2008",
isolation_level="REPEATABLE READ"
"mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
connection = connection.execution_options(isolation_level="READ COMMITTED")
Valid values for ``isolation_level`` include:
@@ -534,7 +552,6 @@ will remain consistent with the state of the transaction::
mssql_engine = create_engine(
"mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
# disable default reset-on-return scheme
pool_reset_on_return=None,
)
@@ -563,13 +580,17 @@ Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
construct::
construct:
.. sourcecode:: sql
name VARCHAR(20) NULL
If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
render::
render:
.. sourcecode:: sql
name VARCHAR(20)
@@ -625,8 +646,9 @@ behavior of this flag is as follows:
* The flag can be set to either ``True`` or ``False`` when the dialect
is created, typically via :func:`_sa.create_engine`::
eng = create_engine("mssql+pymssql://user:pass@host/db",
deprecate_large_types=True)
eng = create_engine(
"mssql+pymssql://user:pass@host/db", deprecate_large_types=True
)
* Complete control over whether the "old" or "new" types are rendered is
available in all SQLAlchemy versions by using the UPPERCASE type objects
@@ -648,9 +670,10 @@ at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::
Table(
"some_table", metadata,
"some_table",
metadata,
Column("q", String(50)),
schema="mydatabase.dbo"
schema="mydatabase.dbo",
)
When performing operations such as table or component reflection, a schema
@@ -662,9 +685,10 @@ components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::
Table(
"some_table", metadata,
"some_table",
metadata,
Column("q", String(50)),
schema="MyDataBase.dbo"
schema="MyDataBase.dbo",
)
The above schema would be rendered as ``[MyDataBase].dbo``, and also in
@@ -677,21 +701,22 @@ Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::
Table(
"some_table", metadata,
"some_table",
metadata,
Column("q", String(50)),
schema="[MyDataBase.dbo]"
schema="[MyDataBase.dbo]",
)
To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::
Table(
"some_table", metadata,
"some_table",
metadata,
Column("q", String(50)),
schema="[MyDataBase.Period].[MyOwner.Dot]"
schema="[MyDataBase.Period].[MyOwner.Dot]",
)
.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
identifier delimiters splitting the schema into separate database
and owner tokens, to allow dots within either name itself.
@@ -706,10 +731,11 @@ schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::
account_table = Table(
'account', metadata,
Column('id', Integer, primary_key=True),
Column('info', String(100)),
schema="customer_schema"
"account",
metadata,
Column("id", Integer, primary_key=True),
Column("info", String(100)),
schema="customer_schema",
)
this legacy mode of rendering would assume that "customer_schema.account"
@@ -752,37 +778,55 @@ which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.
To generate a clustered primary key use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True))
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=True),
)
which will render the table, for example, as::
which will render the table, for example, as:
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y))
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY CLUSTERED (x, y)
)
Similarly, we can generate a clustered unique constraint using::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::
Table('my_table', metadata,
Column('x', ...),
Column('y', ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False))
Table(
"my_table",
metadata,
Column("x", ...),
Column("y", ...),
PrimaryKeyConstraint("x", "y", mssql_clustered=False),
)
which will render the table, for example, as::
which will render the table, for example, as:
CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y))
.. sourcecode:: sql
CREATE TABLE my_table (
x INTEGER NOT NULL,
y INTEGER NOT NULL,
PRIMARY KEY NONCLUSTERED (x, y)
)
Columnstore Index Support
-------------------------
@@ -820,7 +864,7 @@ INCLUDE
The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::
Index("my_index", table.c.x, mssql_include=['y'])
Index("my_index", table.c.x, mssql_include=["y"])
would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
@@ -875,18 +919,19 @@ To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
# ...,
implicit_returning=False
implicit_returning=False,
)
Declarative form::
class MyClass(Base):
# ...
__table_args__ = {'implicit_returning':False}
__table_args__ = {"implicit_returning": False}
.. _mssql_rowcount_versioning:
@@ -920,7 +965,9 @@ isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
following ALTER DATABASE commands executed at the SQL prompt:
.. sourcecode:: sql
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
@@ -1555,29 +1602,6 @@ class MSUUid(sqltypes.Uuid):
return process
def _sentinel_value_resolver(self, dialect):
if not self.native_uuid:
# dealing entirely with strings going in and out of
# CHAR(32)
return None
# true if we expect the returned UUID values to be strings
# pymssql sends UUID objects back, pyodbc sends strings,
# however pyodbc converts them to uppercase coming back, so
# need special logic here
character_based_uuid = not dialect.supports_native_uuid
if character_based_uuid:
# we sent UUID objects in all cases, see bind_processor()
def process(uuid_value):
return str(uuid_value).upper()
return process
elif not self.as_uuid:
return _python_UUID
else:
return None
class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
__visit_name__ = "UNIQUEIDENTIFIER"
@@ -1841,7 +1865,6 @@ class MSExecutionContext(default.DefaultExecutionContext):
_enable_identity_insert = False
_select_lastrowid = False
_lastrowid = None
_rowcount = None
dialect: MSDialect
@@ -1961,13 +1984,6 @@ class MSExecutionContext(default.DefaultExecutionContext):
def get_lastrowid(self):
return self._lastrowid
@property
def rowcount(self):
if self._rowcount is not None:
return self._rowcount
else:
return self.cursor.rowcount
def handle_dbapi_exception(self, e):
if self._enable_identity_insert:
try:
@@ -2019,6 +2035,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
self.tablealiases = {}
super().__init__(*args, **kwargs)
def _format_frame_clause(self, range_, **kw):
kw["literal_execute"] = True
return super()._format_frame_clause(range_, **kw)
def _with_legacy_schema_aliasing(fn):
def decorate(self, *arg, **kw):
if self.dialect.legacy_schema_aliasing:
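On the new ``_format_frame_clause`` hook above: forcing ``literal_execute`` means window-frame bounds are rendered inline as literals at execution time rather than as bound parameters. A hedged sketch of the effect, with illustrative table and column names::

from sqlalchemy import Column, Integer, MetaData, Table, func, select
from sqlalchemy.dialects import mssql

t = Table("t", MetaData(), Column("x", Integer), Column("y", Integer))
stmt = select(func.sum(t.c.x).over(order_by=t.c.y, range_=(-2, 2)))
print(stmt.compile(dialect=mssql.dialect()))
# at execution time the frame bounds are expected to render inline, e.g.
# ... RANGE BETWEEN 2 PRECEDING AND 2 FOLLOWING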

View File

@@ -1,5 +1,5 @@
# dialects/mssql/information_schema.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/mssql/json.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -54,9 +54,7 @@ class JSON(sqltypes.JSON):
dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor
should be used::
stmt = select(
data_table.c.data["some key"].as_json()
).where(
stmt = select(data_table.c.data["some key"].as_json()).where(
data_table.c.data["some key"].as_json() == {"sub": "structure"}
)
@@ -67,9 +65,7 @@ class JSON(sqltypes.JSON):
:meth:`_types.JSON.Comparator.as_integer`,
:meth:`_types.JSON.Comparator.as_float`::
stmt = select(
data_table.c.data["some key"].as_string()
).where(
stmt = select(data_table.c.data["some key"].as_string()).where(
data_table.c.data["some key"].as_string() == "some string"
)
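For context, on SQL Server these accessors compile to the built-in JSON functions: ``JSON_VALUE`` for the scalar accessors such as ``as_string()``, ``JSON_QUERY`` for ``as_json()``. A sketch with an illustrative table::

from sqlalchemy import Column, MetaData, Table, select
from sqlalchemy.dialects import mssql
from sqlalchemy.dialects.mssql import JSON

data_table = Table("data_table", MetaData(), Column("data", JSON))
stmt = select(data_table.c.data["some key"].as_string())
print(stmt.compile(dialect=mssql.dialect()))
# expected to render along the lines of:
# SELECT JSON_VALUE(data_table.data, :data_1) AS anon_1 FROM data_table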

View File

@@ -1,5 +1,5 @@
# dialects/mssql/provision.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -22,10 +22,17 @@ from ...testing.provision import generate_driver_url
from ...testing.provision import get_temp_table_name
from ...testing.provision import log
from ...testing.provision import normalize_sequence
from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args
@post_configure_engine.for_db("mssql")
def post_configure_engine(url, engine, follower_ident):
if engine.driver == "pyodbc":
engine.dialect.dbapi.pooling = False
@generate_driver_url.for_db("mssql")
def generate_driver_url(url, driver, query_str):
backend = url.get_backend_name()

View File

@@ -1,5 +1,5 @@
# dialects/mssql/pymssql.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -103,6 +103,7 @@ class MSDialect_pymssql(MSDialect):
"message 20006", # Write to the server failed
"message 20017", # Unexpected EOF from the server
"message 20047", # DBPROCESS is dead or not enabled
"The server failed to resume the transaction",
):
if msg in str(e):
return True
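These message fragments feed the dialect's ``is_disconnect()`` check, which marks the underlying DBAPI connection invalid so the pool can discard and replace it. A hedged sketch of the usual pairing (URL is illustrative)::

from sqlalchemy import create_engine

# pool_pre_ping complements the is_disconnect() list: a dead connection
# is detected and recycled before the statement is attempted.
engine = create_engine(
    "mssql+pymssql://scott:tiger@host/db", pool_pre_ping=True
)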

View File

@@ -1,5 +1,5 @@
# dialects/mssql/pyodbc.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -30,7 +30,9 @@ is configured on the client, a basic DSN-based connection looks like::
engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")
Which above, will pass the following connection string to PyODBC::
Which above, will pass the following connection string to PyODBC:
.. sourcecode:: text
DSN=some_dsn;UID=scott;PWD=tiger
@@ -49,7 +51,9 @@ When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::
engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server")
engine = create_engine(
"mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
)
The ``driver`` keyword is significant to the pyodbc dialect and must be
specified in lowercase.
@@ -69,6 +73,7 @@ internally::
The equivalent URL can be constructed using :class:`_sa.engine.URL`::
from sqlalchemy.engine import URL
connection_url = URL.create(
"mssql+pyodbc",
username="scott",
@@ -83,7 +88,6 @@ The equivalent URL can be constructed using :class:`_sa.engine.URL`::
},
)
Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -94,8 +98,11 @@ using the parameter ``odbc_connect``. A :class:`_sa.engine.URL` object
can help make this easier::
from sqlalchemy.engine import URL
connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
connection_url = URL.create("mssql+pyodbc", query={"odbc_connect": connection_string})
connection_url = URL.create(
"mssql+pyodbc", query={"odbc_connect": connection_string}
)
engine = create_engine(connection_url)
@@ -127,7 +134,8 @@ database using Azure credentials::
from sqlalchemy.engine.url import URL
from azure import identity
SQL_COPT_SS_ACCESS_TOKEN = 1256 # Connection option for access tokens, as defined in msodbcsql.h
# Connection option for access tokens, as defined in msodbcsql.h
SQL_COPT_SS_ACCESS_TOKEN = 1256
TOKEN_URL = "https://database.windows.net/" # The token URL for any Azure SQL database
connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"
@@ -136,14 +144,19 @@ database using Azure credentials::
azure_credentials = identity.DefaultAzureCredential()
@event.listens_for(engine, "do_connect")
def provide_token(dialect, conn_rec, cargs, cparams):
# remove the "Trusted_Connection" parameter that SQLAlchemy adds
cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")
# create token credential
raw_token = azure_credentials.get_token(TOKEN_URL).token.encode("utf-16-le")
token_struct = struct.pack(f"<I{len(raw_token)}s", len(raw_token), raw_token)
raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
"utf-16-le"
)
token_struct = struct.pack(
f"<I{len(raw_token)}s", len(raw_token), raw_token
)
# apply it to keyword arguments
cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}
@@ -176,7 +189,9 @@ emit a ``.rollback()`` after an operation had a failure of some kind.
This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
the SQL Server dialect via the :func:`_sa.create_engine` function as follows::
engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True)
engine = create_engine(
connection_url, ignore_no_transaction_on_rollback=True
)
Using the above parameter, the dialect will catch ``ProgrammingError``
exceptions raised during ``connection.rollback()`` and emit a warning
@@ -236,7 +251,6 @@ behavior and pass long strings as varchar(max)/nvarchar(max) using the
},
)
Pyodbc Pooling / connection close behavior
------------------------------------------
@@ -301,7 +315,8 @@ Server dialect supports this parameter by passing the
engine = create_engine(
"mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
fast_executemany=True)
fast_executemany=True,
)
.. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
intended effect of this PyODBC feature taking effect for all INSERT

View File

@@ -1,5 +1,5 @@
# dialects/mysql/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -53,7 +53,8 @@ from .base import YEAR
from .dml import Insert
from .dml import insert
from .expression import match
from ...util import compat
from .mariadb import INET4
from .mariadb import INET6
# default dialect
base.dialect = dialect = mysqldb.dialect
@@ -71,6 +72,8 @@ __all__ = (
"DOUBLE",
"ENUM",
"FLOAT",
"INET4",
"INET6",
"INTEGER",
"INTEGER",
"JSON",

View File

@@ -1,5 +1,5 @@
# dialects/mysql/aiomysql.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors <see AUTHORS
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
@@ -23,10 +23,14 @@ This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4")
engine = create_async_engine(
"mysql+aiomysql://user:pass@hostname/dbname?charset=utf8mb4"
)
""" # noqa
from collections import deque
from .pymysql import MySQLDialect_pymysql
from ... import pool
from ... import util
@@ -57,7 +61,7 @@ class AsyncAdapt_aiomysql_cursor:
# see https://github.com/aio-libs/aiomysql/issues/543
self._cursor = self.await_(cursor.__aenter__())
self._rows = []
self._rows = deque()
@property
def description(self):
@@ -87,7 +91,7 @@ class AsyncAdapt_aiomysql_cursor:
# exhausting rows, which we already have done for sync cursor.
# another option would be to emulate aiosqlite dialect and assign
# cursor only if we are doing server side cursor operation.
self._rows[:] = []
self._rows.clear()
def execute(self, operation, parameters=None):
return self.await_(self._execute_async(operation, parameters))
@@ -106,7 +110,7 @@ class AsyncAdapt_aiomysql_cursor:
# of that here since our default result is not async.
# we could just as easily grab "_rows" here and be done with it
# but this is safer.
self._rows = list(await self._cursor.fetchall())
self._rows = deque(await self._cursor.fetchall())
return result
async def _executemany_async(self, operation, seq_of_parameters):
@@ -118,11 +122,11 @@ class AsyncAdapt_aiomysql_cursor:
def __iter__(self):
while self._rows:
yield self._rows.pop(0)
yield self._rows.popleft()
def fetchone(self):
if self._rows:
return self._rows.pop(0)
return self._rows.popleft()
else:
return None
@@ -130,13 +134,12 @@ class AsyncAdapt_aiomysql_cursor:
if size is None:
size = self.arraysize
retval = self._rows[0:size]
self._rows[:] = self._rows[size:]
return retval
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = self._rows[:]
self._rows[:] = []
retval = list(self._rows)
self._rows.clear()
return retval
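The switch from ``list`` to ``collections.deque`` above is a performance fix: ``list.pop(0)`` shifts every remaining element (O(n) per call), while ``deque.popleft()`` is O(1), so draining a large pre-fetched result no longer degrades quadratically. A standalone sketch of the fetchmany pattern used above::

from collections import deque

rows = deque([(1,), (2,), (3,), (4,)])

def fetchmany(rows, size):
    # pop from the left up to "size" times without copying the remainder
    return [rows.popleft() for _ in range(min(size, len(rows)))]

assert fetchmany(rows, 2) == [(1,), (2,)]
assert list(rows) == [(3,), (4,)]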

View File

@@ -1,5 +1,5 @@
# dialects/mysql/asyncmy.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors <see AUTHORS
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
@@ -21,10 +21,13 @@ This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4")
engine = create_async_engine(
"mysql+asyncmy://user:pass@hostname/dbname?charset=utf8mb4"
)
""" # noqa
from collections import deque
from contextlib import asynccontextmanager
from .pymysql import MySQLDialect_pymysql
@@ -56,7 +59,7 @@ class AsyncAdapt_asyncmy_cursor:
cursor = self._connection.cursor()
self._cursor = self.await_(cursor.__aenter__())
self._rows = []
self._rows = deque()
@property
def description(self):
@@ -86,7 +89,7 @@ class AsyncAdapt_asyncmy_cursor:
# exhausting rows, which we already have done for sync cursor.
# another option would be to emulate aiosqlite dialect and assign
# cursor only if we are doing server side cursor operation.
self._rows[:] = []
self._rows.clear()
def execute(self, operation, parameters=None):
return self.await_(self._execute_async(operation, parameters))
@@ -108,7 +111,7 @@ class AsyncAdapt_asyncmy_cursor:
# of that here since our default result is not async.
# we could just as easily grab "_rows" here and be done with it
# but this is safer.
self._rows = list(await self._cursor.fetchall())
self._rows = deque(await self._cursor.fetchall())
return result
async def _executemany_async(self, operation, seq_of_parameters):
@@ -120,11 +123,11 @@ class AsyncAdapt_asyncmy_cursor:
def __iter__(self):
while self._rows:
yield self._rows.pop(0)
yield self._rows.popleft()
def fetchone(self):
if self._rows:
return self._rows.pop(0)
return self._rows.popleft()
else:
return None
@@ -132,13 +135,12 @@ class AsyncAdapt_asyncmy_cursor:
if size is None:
size = self.arraysize
retval = self._rows[0:size]
self._rows[:] = self._rows[size:]
return retval
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = self._rows[:]
self._rows[:] = []
retval = list(self._rows)
self._rows.clear()
return retval

View File

@@ -1,5 +1,5 @@
# dialects/mysql/base.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -11,7 +11,6 @@ r"""
.. dialect:: mysql
:name: MySQL / MariaDB
:full_support: 5.6, 5.7, 8.0 / 10.8, 10.9
:normal_support: 5.6+ / 10+
:best_effort: 5.0.2+ / 5.0.2+
@@ -35,7 +34,9 @@ syntactical and behavioral differences that SQLAlchemy accommodates automaticall
To connect to a MariaDB database, no changes to the database URL are required::
engine = create_engine("mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
engine = create_engine(
"mysql+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4"
)
Upon first connect, the SQLAlchemy dialect employs a
server version detection scheme that determines if the
@@ -53,7 +54,9 @@ useful for the case where an application makes use of MariaDB-specific features
and is not compatible with a MySQL database. To use this mode of operation,
replace the "mysql" token in the above URL with "mariadb"::
engine = create_engine("mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4")
engine = create_engine(
"mariadb+pymysql://user:pass@some_mariadb/dbname?charset=utf8mb4"
)
The above engine, upon first connect, will raise an error if the server version
detection detects that the backing database is not MariaDB.
@@ -99,7 +102,7 @@ the :paramref:`_sa.create_engine.pool_recycle` option which ensures that
a connection will be discarded and replaced with a new one if it has been
present in the pool for a fixed number of seconds::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
engine = create_engine("mysql+mysqldb://...", pool_recycle=3600)
For more comprehensive disconnect detection of pooled connections, including
accommodation of server restarts and network issues, a pre-ping approach may
@@ -123,12 +126,14 @@ To accommodate the rendering of these arguments, specify the form
``ENGINE`` of ``InnoDB``, ``CHARSET`` of ``utf8mb4``, and ``KEY_BLOCK_SIZE``
of ``1024``::
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mysql_charset='utf8mb4',
mysql_key_block_size="1024"
)
Table(
"mytable",
metadata,
Column("data", String(32)),
mysql_engine="InnoDB",
mysql_charset="utf8mb4",
mysql_key_block_size="1024",
)
When supporting :ref:`mysql_mariadb_only_mode` mode, similar keys against
the "mariadb" prefix must be included as well. The values can of course
@@ -137,19 +142,17 @@ be maintained::
# support both "mysql" and "mariadb-only" engine URLs
Table('mytable', metadata,
Column('data', String(32)),
mysql_engine='InnoDB',
mariadb_engine='InnoDB',
mysql_charset='utf8mb4',
mariadb_charset='utf8',
mysql_key_block_size="1024"
mariadb_key_block_size="1024"
)
Table(
"mytable",
metadata,
Column("data", String(32)),
mysql_engine="InnoDB",
mariadb_engine="InnoDB",
mysql_charset="utf8mb4",
mariadb_charset="utf8",
mysql_key_block_size="1024",
mariadb_key_block_size="1024",
)
The MySQL / MariaDB dialects will normally transfer any keyword specified as
``mysql_keyword_name`` to be rendered as ``KEYWORD_NAME`` in the
@@ -215,16 +218,14 @@ techniques are used.
To set isolation level using :func:`_sa.create_engine`::
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
"mysql+mysqldb://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED",
)
To set using per-connection execution options::
connection = engine.connect()
connection = connection.execution_options(
isolation_level="READ COMMITTED"
)
connection = connection.execution_options(isolation_level="READ COMMITTED")
Valid values for ``isolation_level`` include:
@@ -256,8 +257,8 @@ When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
the first :class:`.Integer` primary key column which is not marked as a
foreign key::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
>>> t = Table(
... "mytable", metadata, Column("mytable_id", Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
@@ -271,10 +272,12 @@ This flag
can also be used to enable auto-increment on a secondary column in a
multi-column key for some storage engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
Table(
"mytable",
metadata,
Column("gid", Integer, primary_key=True, autoincrement=False),
Column("id", Integer, primary_key=True),
)
.. _mysql_ss_cursors:
@@ -292,7 +295,9 @@ Server side cursors are enabled on a per-statement basis by using the
option::
with engine.connect() as conn:
result = conn.execution_options(stream_results=True).execute(text("select * from table"))
result = conn.execution_options(stream_results=True).execute(
text("select * from table")
)
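A follow-on sketch for consuming such a streamed result incrementally, assuming the standard ``Result.partitions()`` API::

with engine.connect() as conn:
    result = conn.execution_options(stream_results=True).execute(
        text("select * from table")
    )
    for partition in result.partitions(100):
        for row in partition:
            print(row)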
Note that some kinds of SQL statements may not be supported with
server side cursors; generally, only SQL statements that return rows should be
@@ -320,7 +325,8 @@ a connection. This is typically delivered using the ``charset`` parameter
in the URL, such as::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4"
)
This charset is the **client character set** for the connection. Some
MySQL DBAPIs will default this to a value such as ``latin1``, and some
@@ -340,7 +346,8 @@ charset is preferred, if supported by both the database as well as the client
DBAPI, as in::
e = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4")
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4"
)
All modern DBAPIs should support the ``utf8mb4`` charset.
@@ -362,7 +369,9 @@ Dealing with Binary Data Warnings and Unicode
MySQL versions 5.6, 5.7 and later (not MariaDB at the time of this writing) now
emit a warning when attempting to pass binary data to the database, while a
character set encoding is also in place, when the binary data itself is not
valid for that encoding::
valid for that encoding:
.. sourcecode:: text
default.py:509: Warning: (1300, "Invalid utf8mb4 character string:
'F9876A'")
@@ -372,7 +381,9 @@ This warning is due to the fact that the MySQL client library is attempting to
interpret the binary string as a unicode object even if a datatype such
as :class:`.LargeBinary` is in use. To resolve this, the SQL statement requires
a binary "character set introducer" be present before any non-NULL value
that renders like this::
that renders like this:
.. sourcecode:: sql
INSERT INTO table (data) VALUES (_binary %s)
@@ -382,12 +393,13 @@ string parameter ``binary_prefix=true`` to the URL to repair this warning::
# mysqlclient
engine = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
"mysql+mysqldb://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true"
)
# PyMySQL
engine = create_engine(
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true")
"mysql+pymysql://scott:tiger@localhost/test?charset=utf8mb4&binary_prefix=true"
)
The ``binary_prefix`` flag may or may not be supported by other MySQL drivers.
@@ -430,7 +442,10 @@ the ``first_connect`` and ``connect`` events::
from sqlalchemy import create_engine, event
eng = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo='debug')
eng = create_engine(
"mysql+mysqldb://scott:tiger@localhost/test", echo="debug"
)
# `insert=True` will ensure this is the very first listener to run
@event.listens_for(eng, "connect", insert=True)
@@ -438,6 +453,7 @@ the ``first_connect`` and ``connect`` events::
cursor = dbapi_connection.cursor()
cursor.execute("SET sql_mode = 'STRICT_ALL_TABLES'")
conn = eng.connect()
In the example illustrated above, the "connect" event will invoke the "SET"
@@ -454,8 +470,8 @@ MySQL / MariaDB SQL Extensions
Many of the MySQL / MariaDB SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
table.select(table.c.password == func.md5("plaintext"))
table.select(table.c.username.op("regexp")("^[a-d]"))
And of course any valid SQL statement can be executed as a string as well.
@@ -468,11 +484,18 @@ available.
* SELECT pragma, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
select(...).prefix_with(['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
select(...).prefix_with(["HIGH_PRIORITY", "SQL_SMALL_RESULT"])
* UPDATE with LIMIT::
update(..., mysql_limit=10, mariadb_limit=10)
update(...).with_dialect_options(mysql_limit=10, mariadb_limit=10)
* DELETE
with LIMIT::
delete(...).with_dialect_options(mysql_limit=10, mariadb_limit=10)
.. versionadded:: 2.0.37 Added delete with limit
* optimizer hints, use :meth:`_expression.Select.prefix_with` and
:meth:`_query.Query.prefix_with`::
@@ -484,14 +507,16 @@ available.
select(...).with_hint(some_table, "USE INDEX xyz")
* MATCH operator support::
from sqlalchemy.dialects.mysql import match
select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
.. seealso::
:class:`_mysql.match`
* MATCH
operator support::
from sqlalchemy.dialects.mysql import match
select(...).where(match(col1, col2, against="some expr").in_boolean_mode())
.. seealso::
:class:`_mysql.match`
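A runnable sketch of the UPDATE/DELETE-with-LIMIT options listed above, using an illustrative table::

from sqlalchemy import Column, Integer, MetaData, Table, delete

t = Table("t", MetaData(), Column("id", Integer, primary_key=True))
stmt = delete(t).where(t.c.id > 10).with_dialect_options(mysql_limit=5)
# on a MySQL engine this is expected to compile as:
# DELETE FROM t WHERE t.id > %s LIMIT 5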
INSERT/DELETE...RETURNING
-------------------------
@@ -508,17 +533,15 @@ To specify an explicit ``RETURNING`` clause, use the
# INSERT..RETURNING
result = connection.execute(
table.insert().
values(name='foo').
returning(table.c.col1, table.c.col2)
table.insert().values(name="foo").returning(table.c.col1, table.c.col2)
)
print(result.all())
# DELETE..RETURNING
result = connection.execute(
table.delete().
where(table.c.name=='foo').
returning(table.c.col1, table.c.col2)
table.delete()
.where(table.c.name == "foo")
.returning(table.c.col1, table.c.col2)
)
print(result.all())
@@ -545,12 +568,11 @@ the generative method :meth:`~.mysql.Insert.on_duplicate_key_update`:
>>> from sqlalchemy.dialects.mysql import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
... id="some_existing_id", data="inserted value"
... )
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data=insert_stmt.inserted.data,
... status='U'
... data=insert_stmt.inserted.data, status="U"
... )
>>> print(on_duplicate_key_stmt)
{printsql}INSERT INTO my_table (id, data) VALUES (%s, %s)
@@ -575,8 +597,8 @@ as values:
.. sourcecode:: pycon+sql
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
... id="some_existing_id", data="inserted value"
... )
>>> on_duplicate_key_stmt = insert_stmt.on_duplicate_key_update(
... data="some data",
@@ -639,13 +661,11 @@ table:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh')
... id="some_id", data="inserted value", author="jlh"
... )
>>> do_update_stmt = stmt.on_duplicate_key_update(
... data="updated value",
... author=stmt.inserted.author
... data="updated value", author=stmt.inserted.author
... )
>>> print(do_update_stmt)
@@ -690,13 +710,13 @@ MySQL and MariaDB both provide an option to create index entries with a certain
become part of the index. SQLAlchemy provides this feature via the
``mysql_length`` and/or ``mariadb_length`` parameters::
Index('my_index', my_table.c.data, mysql_length=10, mariadb_length=10)
Index("my_index", my_table.c.data, mysql_length=10, mariadb_length=10)
Index('a_b_idx', my_table.c.a, my_table.c.b, mysql_length={'a': 4,
'b': 9})
Index("a_b_idx", my_table.c.a, my_table.c.b, mysql_length={"a": 4, "b": 9})
Index('a_b_idx', my_table.c.a, my_table.c.b, mariadb_length={'a': 4,
'b': 9})
Index(
"a_b_idx", my_table.c.a, my_table.c.b, mariadb_length={"a": 4, "b": 9}
)
Prefix lengths are given in characters for nonbinary string types and in bytes
for binary string types. The value passed to the keyword argument *must* be
@@ -713,7 +733,7 @@ MySQL storage engines permit you to specify an index prefix when creating
an index. SQLAlchemy provides this feature via the
``mysql_prefix`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_prefix='FULLTEXT')
Index("my_index", my_table.c.data, mysql_prefix="FULLTEXT")
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX, so it *must* be a valid index prefix for your MySQL
@@ -730,11 +750,13 @@ Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash', mariadb_using='hash')
Index(
"my_index", my_table.c.data, mysql_using="hash", mariadb_using="hash"
)
As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash', mariadb_using='hash')
PrimaryKeyConstraint("data", mysql_using="hash", mariadb_using="hash")
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
@@ -753,9 +775,12 @@ CREATE FULLTEXT INDEX in MySQL also supports a "WITH PARSER" option. This
is available using the keyword argument ``mysql_with_parser``::
Index(
'my_index', my_table.c.data,
mysql_prefix='FULLTEXT', mysql_with_parser="ngram",
mariadb_prefix='FULLTEXT', mariadb_with_parser="ngram",
"my_index",
my_table.c.data,
mysql_prefix="FULLTEXT",
mysql_with_parser="ngram",
mariadb_prefix="FULLTEXT",
mariadb_with_parser="ngram",
)
.. versionadded:: 1.3
@@ -782,6 +807,7 @@ them ignored on a MySQL / MariaDB backend, use a custom compile rule::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import ForeignKeyConstraint
@compiles(ForeignKeyConstraint, "mysql", "mariadb")
def process(element, compiler, **kw):
element.deferrable = element.initially = None
@@ -803,10 +829,12 @@ very common ``MyISAM`` MySQL storage engine, the information loaded by table
reflection will not include foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload_with=engine
)
Table(
"mytable",
metadata,
ForeignKeyConstraint(["other_id"], ["othertable.other_id"]),
autoload_with=engine,
)
.. seealso::
@@ -878,13 +906,15 @@ parameter and pass a textual clause that also includes the ON UPDATE clause::
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column("id", Integer, primary_key=True),
Column("data", String(50)),
Column(
'last_updated',
"last_updated",
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
server_default=text(
"CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
),
),
)
The same instructions apply to use of the :class:`_types.DateTime` and
@@ -895,34 +925,37 @@ The same instructions apply to use of the :class:`_types.DateTime` and
mytable = Table(
"mytable",
metadata,
Column('id', Integer, primary_key=True),
Column('data', String(50)),
Column("id", Integer, primary_key=True),
Column("data", String(50)),
Column(
'last_updated',
"last_updated",
DateTime,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP")
)
server_default=text(
"CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
),
),
)
Even though the :paramref:`_schema.Column.server_onupdate` feature does not
generate this DDL, it still may be desirable to signal to the ORM that this
updated value should be fetched. This syntax looks like the following::
from sqlalchemy.schema import FetchedValue
class MyClass(Base):
__tablename__ = 'mytable'
__tablename__ = "mytable"
id = Column(Integer, primary_key=True)
data = Column(String(50))
last_updated = Column(
TIMESTAMP,
server_default=text("CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"),
server_onupdate=FetchedValue()
server_default=text(
"CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP"
),
server_onupdate=FetchedValue(),
)
.. _mysql_timestamp_null:
TIMESTAMP Columns and NULL
@@ -932,7 +965,9 @@ MySQL historically enforces that a column which specifies the
TIMESTAMP datatype implicitly includes a default value of
CURRENT_TIMESTAMP, even though this is not stated, and additionally
sets the column as NOT NULL, the opposite behavior vs. that of all
other datatypes::
other datatypes:
.. sourcecode:: text
mysql> CREATE TABLE ts_test (
-> a INTEGER,
@@ -977,19 +1012,24 @@ SQLAlchemy also emits NOT NULL for TIMESTAMP columns that do specify
from sqlalchemy.dialects.mysql import TIMESTAMP
m = MetaData()
t = Table('ts_test', m,
Column('a', Integer),
Column('b', Integer, nullable=False),
Column('c', TIMESTAMP),
Column('d', TIMESTAMP, nullable=False)
)
t = Table(
"ts_test",
m,
Column("a", Integer),
Column("b", Integer, nullable=False),
Column("c", TIMESTAMP),
Column("d", TIMESTAMP, nullable=False),
)
from sqlalchemy import create_engine
e = create_engine("mysql+mysqldb://scott:tiger@localhost/test", echo=True)
m.create_all(e)
output::
output:
.. sourcecode:: sql
CREATE TABLE ts_test (
a INTEGER,
@@ -1349,7 +1389,7 @@ class MySQLCompiler(compiler.SQLCompiler):
clauses = []
requires_mysql8_alias = (
requires_mysql8_alias = statement.select is None and (
self.dialect._requires_alias_for_on_duplicate_key
)
@@ -1359,10 +1399,17 @@ class MySQLCompiler(compiler.SQLCompiler):
else:
_on_dup_alias_name = "new"
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate.update):
val = on_duplicate.update[column.key]
on_duplicate_update = {
coercions.expect_as_key(roles.DMLColumnRole, key): value
for key, value in on_duplicate.update.items()
}
# traverses through all table columns to preserve table column order
for column in (col for col in cols if col.key in on_duplicate_update):
val = on_duplicate_update[column.key]
# TODO: this coercion should be up front. we can't cache
# SQL constructs with non-bound literals buried in them
if coercions._is_literal(val):
val = elements.BindParameter(None, val, type_=column.type)
value_text = self.process(val.self_group(), use_schema=False)
@@ -1400,7 +1447,7 @@ class MySQLCompiler(compiler.SQLCompiler):
name_text = self.preparer.quote(column.name)
clauses.append("%s = %s" % (name_text, value_text))
non_matching = set(on_duplicate.update) - {c.key for c in cols}
non_matching = set(on_duplicate_update) - {c.key for c in cols}
if non_matching:
util.warn(
"Additional column names not matching "
@@ -1678,8 +1725,15 @@ class MySQLCompiler(compiler.SQLCompiler):
def update_limit_clause(self, update_stmt):
limit = update_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit:
return "LIMIT %s" % limit
if limit is not None:
return f"LIMIT {int(limit)}"
else:
return None
def delete_limit_clause(self, delete_stmt):
limit = delete_stmt.kwargs.get("%s_limit" % self.dialect.name, None)
if limit is not None:
return f"LIMIT {int(limit)}"
else:
return None
@@ -1850,7 +1904,15 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
else:
default = self.get_column_default_string(column)
if default is not None:
colspec.append("DEFAULT " + default)
if (
isinstance(
column.server_default.arg, functions.FunctionElement
)
and self.dialect._support_default_function
):
colspec.append(f"DEFAULT ({default})")
else:
colspec.append("DEFAULT " + default)
return " ".join(colspec)
def post_create_table(self, table):
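The branch above wraps functional server defaults in parentheses, which MySQL 8.0.13+ and MariaDB 10.2.1+ require for expression defaults (see ``_support_default_function`` later in this file). A hedged sketch with an illustrative table::

from sqlalchemy import Column, MetaData, String, Table, func

t = Table(
    "t",
    MetaData(),
    Column("u", String(36), server_default=func.uuid()),
)
# on a supporting server the column DDL is expected to render as:
# u VARCHAR(36) DEFAULT (uuid())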
@@ -2380,6 +2442,8 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def _visit_enumerated_values(self, name, type_, enumerated_values):
quoted_enums = []
for e in enumerated_values:
if self.dialect.identifier_preparer._double_percents:
e = e.replace("%", "%%")
quoted_enums.append("'%s'" % e.replace("'", "''"))
return self._extend_string(
type_, {}, "%s(%s)" % (name, ",".join(quoted_enums))
@@ -2493,6 +2557,7 @@ class MySQLDialect(default.DefaultDialect):
construct_arguments = [
(sa_schema.Table, {"*": None}),
(sql.Update, {"limit": None}),
(sql.Delete, {"limit": None}),
(sa_schema.PrimaryKeyConstraint, {"using": None}),
(
sa_schema.Index,
@@ -2893,6 +2958,17 @@ class MySQLDialect(default.DefaultDialect):
# ref https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-17.html#mysqld-8-0-17-feature # noqa
return self.server_version_info >= (8, 0, 17)
@property
def _support_default_function(self):
if not self.server_version_info:
return False
elif self.is_mariadb:
# ref https://mariadb.com/kb/en/mariadb-1021-release-notes/
return self.server_version_info >= (10, 2, 1)
else:
# ref https://dev.mysql.com/doc/refman/8.0/en/data-type-defaults.html # noqa
return self.server_version_info >= (8, 0, 13)
@property
def _is_mariadb(self):
return self.is_mariadb
@@ -3050,29 +3126,47 @@ class MySQLDialect(default.DefaultDialect):
return s
default_schema_name = connection.dialect.default_schema_name
col_tuples = [
(
lower(rec["referred_schema"] or default_schema_name),
lower(rec["referred_table"]),
col_name,
)
for rec in fkeys
for col_name in rec["referred_columns"]
]
if col_tuples:
correct_for_wrong_fk_case = connection.execute(
sql.text(
"""
select table_schema, table_name, column_name
from information_schema.columns
where (table_schema, table_name, lower(column_name)) in
:table_data;
"""
).bindparams(sql.bindparam("table_data", expanding=True)),
dict(table_data=col_tuples),
# NOTE: using (table_schema, table_name, lower(column_name)) in (...)
# is very slow since mysql does not seem able to properly use indexes.
# Unpack the where condition instead.
schema_by_table_by_column = defaultdict(lambda: defaultdict(list))
for rec in fkeys:
sch = lower(rec["referred_schema"] or default_schema_name)
tbl = lower(rec["referred_table"])
for col_name in rec["referred_columns"]:
schema_by_table_by_column[sch][tbl].append(col_name)
if schema_by_table_by_column:
condition = sql.or_(
*(
sql.and_(
_info_columns.c.table_schema == schema,
sql.or_(
*(
sql.and_(
_info_columns.c.table_name == table,
sql.func.lower(
_info_columns.c.column_name
).in_(columns),
)
for table, columns in tables.items()
)
),
)
for schema, tables in schema_by_table_by_column.items()
)
)
select = sql.select(
_info_columns.c.table_schema,
_info_columns.c.table_name,
_info_columns.c.column_name,
).where(condition)
correct_for_wrong_fk_case = connection.execute(select)
# in casing=0, table name and schema name come back in their
# exact case.
# in casing=1, table name and schema name come back in lower
@@ -3445,3 +3539,12 @@ class _DecodingRow:
return item.decode(self.charset)
else:
return item
_info_columns = sql.table(
"columns",
sql.column("table_schema", VARCHAR(64)),
sql.column("table_name", VARCHAR(64)),
sql.column("column_name", VARCHAR(64)),
schema="information_schema",
)

View File

@@ -1,5 +1,5 @@
# dialects/mysql/cymysql.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/mysql/dml.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,6 +7,7 @@
from __future__ import annotations
from typing import Any
from typing import Dict
from typing import List
from typing import Mapping
from typing import Optional
@@ -141,7 +142,11 @@ class Insert(StandardInsert):
in :ref:`tutorial_parameter_ordered_updates`::
insert().on_duplicate_key_update(
[("name", "some name"), ("value", "some value")])
[
("name", "some name"),
("value", "some value"),
]
)
.. versionchanged:: 1.3 parameters can be specified as a dictionary
or list of 2-tuples; the latter form provides for parameter
@@ -181,6 +186,7 @@ class OnDuplicateClause(ClauseElement):
_parameter_ordering: Optional[List[str]] = None
update: Dict[str, Any]
stringify_dialect = "mysql"
def __init__(

View File

@@ -1,5 +1,5 @@
# dialects/mysql/enumerated.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -28,7 +28,7 @@ class ENUM(sqltypes.NativeForEmulated, sqltypes.Enum, _StringType):
E.g.::
Column('myenum', ENUM("foo", "bar", "baz"))
Column("myenum", ENUM("foo", "bar", "baz"))
:param enums: The range of valid values for this ENUM. Values in
enums are not quoted, they will be escaped and surrounded by single
@@ -102,8 +102,7 @@ class SET(_StringType):
E.g.::
Column('myset', SET("foo", "bar", "baz"))
Column("myset", SET("foo", "bar", "baz"))
The list of potential values is required in the case that this
set will be used to generate DDL for a table, or if the

View File

@@ -1,5 +1,5 @@
# dialects/mysql/expression.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -38,7 +38,9 @@ class match(Generative, elements.BinaryExpression):
.order_by(desc(match_expr))
)
Would produce SQL resembling::
Would produce SQL resembling:
.. sourcecode:: sql
SELECT id, firstname, lastname
FROM user

View File

@@ -1,5 +1,5 @@
# dialects/mysql/json.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/mysql/mariadb.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,6 +7,34 @@
# mypy: ignore-errors
from .base import MariaDBIdentifierPreparer
from .base import MySQLDialect
from .base import MySQLTypeCompiler
from ...sql import sqltypes
class INET4(sqltypes.TypeEngine[str]):
"""INET4 column type for MariaDB
.. versionadded:: 2.0.37
"""
__visit_name__ = "INET4"
class INET6(sqltypes.TypeEngine[str]):
"""INET6 column type for MariaDB
.. versionadded:: 2.0.37
"""
__visit_name__ = "INET6"
class MariaDBTypeCompiler(MySQLTypeCompiler):
def visit_INET4(self, type_, **kwargs) -> str:
return "INET4"
def visit_INET6(self, type_, **kwargs) -> str:
return "INET6"
class MariaDBDialect(MySQLDialect):
@@ -14,6 +42,7 @@ class MariaDBDialect(MySQLDialect):
supports_statement_cache = True
name = "mariadb"
preparer = MariaDBIdentifierPreparer
type_compiler_cls = MariaDBTypeCompiler
def loader(driver):
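A usage sketch for the new MariaDB types (type names from the diff; the table layout is illustrative)::

from sqlalchemy import Column, MetaData, Table
from sqlalchemy.dialects.mysql import INET4, INET6

hosts = Table(
    "hosts",
    MetaData(),
    Column("addr4", INET4()),
    Column("addr6", INET6()),
)
# on a mariadb:// engine these columns render as INET4 / INET6 in the
# CREATE TABLE; the generic mysql dialect does not define these types.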

View File

@@ -1,5 +1,5 @@
# dialects/mysql/mariadbconnector.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -87,13 +87,6 @@ class MySQLExecutionContext_mariadbconnector(MySQLExecutionContext):
if self.isinsert and self.compiled.postfetch_lastrowid:
self._lastrowid = self.cursor.lastrowid
@property
def rowcount(self):
if self._rowcount is not None:
return self._rowcount
else:
return self.cursor.rowcount
def get_lastrowid(self):
return self._lastrowid
@@ -172,6 +165,7 @@ class MySQLDialect_mariadbconnector(MySQLDialect):
def create_connect_args(self, url):
opts = url.translate_connect_args()
opts.update(url.query)
int_params = [
"connect_timeout",
@@ -186,6 +180,7 @@ class MySQLDialect_mariadbconnector(MySQLDialect):
"ssl_verify_cert",
"ssl",
"pool_reset_connection",
"compress",
]
for key in int_params:


@@ -1,5 +1,5 @@
# dialects/mysql/mysqlconnector.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -96,6 +96,7 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
util.coerce_kw_type(opts, "allow_local_infile", bool)
util.coerce_kw_type(opts, "autocommit", bool)
util.coerce_kw_type(opts, "buffered", bool)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "compress", bool)
util.coerce_kw_type(opts, "connection_timeout", int)
util.coerce_kw_type(opts, "connect_timeout", int)


@@ -1,5 +1,5 @@
# dialects/mysql/mysqldb.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -48,9 +48,9 @@ key "ssl", which may be specified using the
"ssl": {
"ca": "/home/gord/client-ssl/ca.pem",
"cert": "/home/gord/client-ssl/client-cert.pem",
"key": "/home/gord/client-ssl/client-key.pem"
"key": "/home/gord/client-ssl/client-key.pem",
}
}
},
)
For convenience, the following keys may also be specified inline within the URL
@@ -74,7 +74,9 @@ Using MySQLdb with Google Cloud SQL
-----------------------------------
Google Cloud SQL now recommends use of the MySQLdb dialect. Connect
using a URL like the following::
using a URL like the following:
.. sourcecode:: text
mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename>
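For example, with hypothetical database, project and instance names::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mysql+mysqldb://root@/mydb"
        "?unix_socket=/cloudsql/my-project:my-instance"
    )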
@@ -97,12 +99,7 @@ from ... import util
class MySQLExecutionContext_mysqldb(MySQLExecutionContext):
@property
def rowcount(self):
if hasattr(self, "_rowcount"):
return self._rowcount
else:
return self.cursor.rowcount
pass
class MySQLCompiler_mysqldb(MySQLCompiler):
@@ -217,7 +214,7 @@ class MySQLDialect_mysqldb(MySQLDialect):
util.coerce_kw_type(opts, "read_timeout", int)
util.coerce_kw_type(opts, "write_timeout", int)
util.coerce_kw_type(opts, "client_flag", int)
util.coerce_kw_type(opts, "local_infile", int)
util.coerce_kw_type(opts, "local_infile", bool)
# Note: using either of the below will cause all strings to be
# returned as Unicode, both in raw SQL operations and with column
# types like String and MSString.


@@ -1,5 +1,5 @@
# dialects/mysql/provision.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -40,6 +40,9 @@ def generate_driver_url(url, driver, query_str):
drivername="%s+%s" % (backend, driver)
).update_query_string(query_str)
if driver == "mariadbconnector":
new_url = new_url.difference_update_query(["charset"])
try:
new_url.get_dialect()
except exc.NoSuchModuleError:


@@ -1,5 +1,5 @@
# dialects/mysql/pymysql.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -41,7 +41,6 @@ necessary to indicate ``ssl_check_hostname=false`` in PyMySQL::
"&ssl_check_hostname=false"
)
MySQL-Python Compatibility
--------------------------


@@ -1,5 +1,5 @@
# dialects/mysql/pyodbc.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -30,14 +30,15 @@ r"""
Pass through exact pyodbc connection string::
import urllib
connection_string = (
'DRIVER=MySQL ODBC 8.0 ANSI Driver;'
'SERVER=localhost;'
'PORT=3307;'
'DATABASE=mydb;'
'UID=root;'
'PWD=(whatever);'
'charset=utf8mb4;'
"DRIVER=MySQL ODBC 8.0 ANSI Driver;"
"SERVER=localhost;"
"PORT=3307;"
"DATABASE=mydb;"
"UID=root;"
"PWD=(whatever);"
"charset=utf8mb4;"
)
params = urllib.parse.quote_plus(connection_string)
connection_uri = "mysql+pyodbc:///?odbc_connect=%s" % params


@@ -1,5 +1,5 @@
# dialects/mysql/reflection.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -505,7 +505,7 @@ class MySQLTableDefinitionParser:
#
# unique constraints come back as KEYs
kw = quotes.copy()
kw["on"] = "RESTRICT|CASCADE|SET NULL|NO ACTION"
kw["on"] = "RESTRICT|CASCADE|SET NULL|NO ACTION|SET DEFAULT"
self._re_fk_constraint = _re_compile(
r" "
r"CONSTRAINT +"


@@ -1,5 +1,5 @@
# dialects/mysql/reserved_words.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -282,6 +282,7 @@ RESERVED_WORDS_MARIADB = {
}
)
# https://dev.mysql.com/doc/refman/8.3/en/keywords.html
# https://dev.mysql.com/doc/refman/8.0/en/keywords.html
# https://dev.mysql.com/doc/refman/5.7/en/keywords.html
# https://dev.mysql.com/doc/refman/5.6/en/keywords.html
@@ -403,6 +404,7 @@ RESERVED_WORDS_MYSQL = {
"int4",
"int8",
"integer",
"intersect",
"interval",
"into",
"io_after_gtids",
@@ -468,6 +470,7 @@ RESERVED_WORDS_MYSQL = {
"outfile",
"over",
"parse_gcol_expr",
"parallel",
"partition",
"percent_rank",
"persist",
@@ -476,6 +479,7 @@ RESERVED_WORDS_MYSQL = {
"primary",
"procedure",
"purge",
"qualify",
"range",
"rank",
"read",


@@ -1,5 +1,5 @@
# dialects/mysql/types.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -499,7 +499,7 @@ class YEAR(sqltypes.TypeEngine):
class TEXT(_StringType, sqltypes.TEXT):
"""MySQL TEXT type, for text up to 2^16 characters."""
"""MySQL TEXT type, for character storage encoded up to 2^16 bytes."""
__visit_name__ = "TEXT"
@@ -508,7 +508,7 @@ class TEXT(_StringType, sqltypes.TEXT):
:param length: Optional, if provided the server may optimize storage
by substituting the smallest TEXT type sufficient to store
``length`` characters.
``length`` bytes of characters.
:param charset: Optional, a column-level character set for this string
value. Takes precedence to 'ascii' or 'unicode' short-hand.
@@ -535,7 +535,7 @@ class TEXT(_StringType, sqltypes.TEXT):
class TINYTEXT(_StringType):
"""MySQL TINYTEXT type, for text up to 2^8 characters."""
"""MySQL TINYTEXT type, for character storage encoded up to 2^8 bytes."""
__visit_name__ = "TINYTEXT"
@@ -567,7 +567,8 @@ class TINYTEXT(_StringType):
class MEDIUMTEXT(_StringType):
"""MySQL MEDIUMTEXT type, for text up to 2^24 characters."""
"""MySQL MEDIUMTEXT type, for character storage encoded up
to 2^24 bytes."""
__visit_name__ = "MEDIUMTEXT"
@@ -599,7 +600,7 @@ class MEDIUMTEXT(_StringType):
class LONGTEXT(_StringType):
"""MySQL LONGTEXT type, for text up to 2^32 characters."""
"""MySQL LONGTEXT type, for character storage encoded up to 2^32 bytes."""
__visit_name__ = "LONGTEXT"
@@ -683,7 +684,7 @@ class CHAR(_StringType, sqltypes.CHAR):
super().__init__(length=length, **kwargs)
@classmethod
def _adapt_string_for_cast(self, type_):
def _adapt_string_for_cast(cls, type_):
# copy the given string type into a CHAR
# for the purposes of rendering a CAST expression
type_ = sqltypes.to_instance(type_)


@@ -1,5 +1,5 @@
# dialects/oracle/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

File diff suppressed because it is too large


@@ -1,5 +1,5 @@
# dialects/oracle/cx_oracle.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,13 +7,18 @@
# mypy: ignore-errors
r"""
.. dialect:: oracle+cx_oracle
r""".. dialect:: oracle+cx_oracle
:name: cx-Oracle
:dbapi: cx_oracle
:connectstring: oracle+cx_oracle://user:pass@hostname:port[/dbname][?service_name=<service>[&key=value&key=value...]]
:url: https://oracle.github.io/python-cx_Oracle/
Description
-----------
cx_Oracle was the original driver for Oracle Database. It was superseded by
python-oracledb which should be used instead.
DSN vs. Hostname connections
-----------------------------
@@ -23,27 +28,41 @@ dialect translates from a series of different URL forms.
Hostname Connections with Easy Connect Syntax
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Given a hostname, port and service name of the target Oracle Database, for
example from Oracle's `Easy Connect syntax
<https://cx-oracle.readthedocs.io/en/latest/user_guide/connection_handling.html#easy-connect-syntax-for-connection-strings>`_,
then connect in SQLAlchemy using the ``service_name`` query string parameter::
Given a hostname, port and service name of the target database, for example
from Oracle Database's Easy Connect syntax, then connect in SQLAlchemy using the
``service_name`` query string parameter::
engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:port/?service_name=myservice&encoding=UTF-8&nencoding=UTF-8")
engine = create_engine(
"oracle+cx_oracle://scott:tiger@hostname:port?service_name=myservice&encoding=UTF-8&nencoding=UTF-8"
)
The `full Easy Connect syntax
<https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B0437826-43C1-49EC-A94D-B650B6A4A6EE>`_
is not supported. Instead, use a ``tnsnames.ora`` file and connect using a
DSN.
Note that the default driver value for encoding and nencoding was changed to
“UTF-8” in cx_Oracle 8.0 so these parameters can be omitted when using that
version, or later.
Connections with tnsnames.ora or Oracle Cloud
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
To use a full Easy Connect string, pass it as the ``dsn`` key value in a
:paramref:`_sa.create_engine.connect_args` dictionary::
Alternatively, if no port, database name, or ``service_name`` is provided, the
dialect will use an Oracle DSN "connection string". This takes the "hostname"
portion of the URL as the data source name. For example, if the
``tnsnames.ora`` file contains a `Net Service Name
<https://cx-oracle.readthedocs.io/en/latest/user_guide/connection_handling.html#net-service-names-for-connection-strings>`_
of ``myalias`` as below::
import cx_Oracle
e = create_engine(
"oracle+cx_oracle://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
},
)
Connections with tnsnames.ora or to Oracle Autonomous Database
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Alternatively, if no port, database name, or service name is provided, the
dialect will use an Oracle Database DSN "connection string". This takes the
"hostname" portion of the URL as the data source name. For example, if the
``tnsnames.ora`` file contains a TNS Alias of ``myalias`` as below:
.. sourcecode:: text
myalias =
(DESCRIPTION =
@@ -58,19 +77,22 @@ The cx_Oracle dialect connects to this database service when ``myalias`` is the
hostname portion of the URL, without specifying a port, database name or
``service_name``::
engine = create_engine("oracle+cx_oracle://scott:tiger@myalias/?encoding=UTF-8&nencoding=UTF-8")
engine = create_engine("oracle+cx_oracle://scott:tiger@myalias")
Users of Oracle Cloud should use this syntax and also configure the cloud
Users of Oracle Autonomous Database should use this syntax. If the database is
configured for mutual TLS ("mTLS"), then you must also configure the cloud
wallet as shown in cx_Oracle documentation `Connecting to Autonomous Databases
<https://cx-oracle.readthedocs.io/en/latest/user_guide/connection_handling.html#connecting-to-autononmous-databases>`_.
<https://cx-oracle.readthedocs.io/en/latest/user_guide/connection_handling.html#autonomousdb>`_.
SID Connections
^^^^^^^^^^^^^^^
To use Oracle's obsolete SID connection syntax, the SID can be passed in a
"database name" portion of the URL as below::
To use Oracle Database's obsolete System Identifier connection syntax, the SID
can be passed in a "database name" portion of the URL::
engine = create_engine("oracle+cx_oracle://scott:tiger@hostname:1521/dbname?encoding=UTF-8&nencoding=UTF-8")
engine = create_engine(
"oracle+cx_oracle://scott:tiger@hostname:port/dbname"
)
Above, the DSN passed to cx_Oracle is created by ``cx_Oracle.makedsn()`` as
follows::
@@ -79,17 +101,23 @@ follows::
>>> cx_Oracle.makedsn("hostname", 1521, sid="dbname")
'(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=hostname)(PORT=1521))(CONNECT_DATA=(SID=dbname)))'
Note that although the SQLAlchemy syntax ``hostname:port/dbname`` looks like
Oracle's Easy Connect syntax it is different. It uses a SID in place of the
service name required by Easy Connect. The Easy Connect syntax does not
support SIDs.
Passing cx_Oracle connect arguments
-----------------------------------
Additional connection arguments can usually be passed via the URL
query string; particular symbols like ``cx_Oracle.SYSDBA`` are intercepted
and converted to the correct symbol::
Additional connection arguments can usually be passed via the URL query string;
particular symbols like ``SYSDBA`` are intercepted and converted to the correct
symbol::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true")
"oracle+cx_oracle://user:pass@dsn?encoding=UTF-8&nencoding=UTF-8&mode=SYSDBA&events=true"
)
.. versionchanged:: 1.3 the cx_oracle dialect now accepts all argument names
.. versionchanged:: 1.3 the cx_Oracle dialect now accepts all argument names
within the URL string itself, to be passed to the cx_Oracle DBAPI. As
was the case earlier but not correctly documented, the
:paramref:`_sa.create_engine.connect_args` parameter also accepts all
@@ -100,19 +128,20 @@ string, use the :paramref:`_sa.create_engine.connect_args` dictionary.
Any cx_Oracle parameter value and/or constant may be passed, such as::
import cx_Oracle
e = create_engine(
"oracle+cx_oracle://user:pass@dsn",
connect_args={
"encoding": "UTF-8",
"nencoding": "UTF-8",
"mode": cx_Oracle.SYSDBA,
"events": True
}
"events": True,
},
)
Note that the default value for ``encoding`` and ``nencoding`` was changed to
"UTF-8" in cx_Oracle 8.0 so these parameters can be omitted when using that
version, or later.
Note that the default driver value for ``encoding`` and ``nencoding`` was
changed to "UTF-8" in cx_Oracle 8.0 so these parameters can be omitted when
using that version, or later.
Options consumed by the SQLAlchemy cx_Oracle dialect outside of the driver
--------------------------------------------------------------------------
@@ -122,7 +151,8 @@ itself. These options are always passed directly to :func:`_sa.create_engine`
, such as::
e = create_engine(
"oracle+cx_oracle://user:pass@dsn", coerce_to_decimal=False)
"oracle+cx_oracle://user:pass@dsn", coerce_to_decimal=False
)
The parameters accepted by the cx_oracle dialect are as follows:
@@ -130,8 +160,7 @@ The parameters accepted by the cx_oracle dialect are as follows:
to ``None``, indicating that the driver default should be used (typically
the value is 100). This setting controls how many rows are buffered when
fetching rows, and can have a significant effect on performance when
modified. The setting is used for both ``cx_Oracle`` as well as
``oracledb``.
modified.
.. versionchanged:: 2.0.26 - changed the default value from 50 to None,
to use the default value of the driver itself.
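For example, a larger fetch buffer may be configured engine-wide; the
connect string below is a placeholder::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@tnsalias", arraysize=500
    )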
@@ -147,10 +176,16 @@ The parameters accepted by the cx_oracle dialect are as follows:
Using cx_Oracle SessionPool
---------------------------
The cx_Oracle library provides its own connection pool implementation that may
be used in place of SQLAlchemy's pooling functionality. This can be achieved
by using the :paramref:`_sa.create_engine.creator` parameter to provide a
function that returns a new connection, along with setting
The cx_Oracle driver provides its own connection pool implementation that may
be used in place of SQLAlchemy's pooling functionality. The driver pool
supports Oracle Database features such as dead connection detection, connection
draining for planned database downtime, support for Oracle Application
Continuity and Transparent Application Continuity, and gives support for
Database Resident Connection Pooling (DRCP).
Using the driver pool can be achieved by using the
:paramref:`_sa.create_engine.creator` parameter to provide a function that
returns a new connection, along with setting
:paramref:`_sa.create_engine.pool_class` to ``NullPool`` to disable
SQLAlchemy's pooling::
@@ -159,32 +194,41 @@ SQLAlchemy's pooling::
from sqlalchemy.pool import NullPool
pool = cx_Oracle.SessionPool(
user="scott", password="tiger", dsn="orclpdb",
min=2, max=5, increment=1, threaded=True,
encoding="UTF-8", nencoding="UTF-8"
user="scott",
password="tiger",
dsn="orclpdb",
min=1,
max=4,
increment=1,
threaded=True,
encoding="UTF-8",
nencoding="UTF-8",
)
engine = create_engine("oracle+cx_oracle://", creator=pool.acquire, poolclass=NullPool)
engine = create_engine(
"oracle+cx_oracle://", creator=pool.acquire, poolclass=NullPool
)
The above engine may then be used normally where cx_Oracle's pool handles
connection pooling::
with engine.connect() as conn:
print(conn.scalar("select 1 FROM dual"))
print(conn.scalar("select 1 from dual"))
As well as providing a scalable solution for multi-user applications, the
cx_Oracle session pool supports some Oracle features such as DRCP and
`Application Continuity
<https://cx-oracle.readthedocs.io/en/latest/user_guide/ha.html#application-continuity-ac>`_.
Note that the pool creation parameters ``threaded``, ``encoding`` and
``nencoding`` were deprecated in later cx_Oracle releases.
Using Oracle Database Resident Connection Pooling (DRCP)
--------------------------------------------------------
When using Oracle's `DRCP
<https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-015CA8C1-2386-4626-855D-CC546DDC1086>`_,
the best practice is to pass a connection class and "purity" when acquiring a
connection from the SessionPool. Refer to the `cx_Oracle DRCP documentation
When using Oracle Database's DRCP, the best practice is to pass a connection
class and "purity" when acquiring a connection from the SessionPool. Refer to
the `cx_Oracle DRCP documentation
<https://cx-oracle.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
This can be achieved by wrapping ``pool.acquire()``::
@@ -194,21 +238,33 @@ This can be achieved by wrapping ``pool.acquire()``::
from sqlalchemy.pool import NullPool
pool = cx_Oracle.SessionPool(
user="scott", password="tiger", dsn="orclpdb",
min=2, max=5, increment=1, threaded=True,
encoding="UTF-8", nencoding="UTF-8"
user="scott",
password="tiger",
dsn="orclpdb",
min=2,
max=5,
increment=1,
threaded=True,
encoding="UTF-8",
nencoding="UTF-8",
)
def creator():
return pool.acquire(cclass="MYCLASS", purity=cx_Oracle.ATTR_PURITY_SELF)
engine = create_engine("oracle+cx_oracle://", creator=creator, poolclass=NullPool)
def creator():
return pool.acquire(
cclass="MYCLASS", purity=cx_Oracle.ATTR_PURITY_SELF
)
engine = create_engine(
"oracle+cx_oracle://", creator=creator, poolclass=NullPool
)
The above engine may then be used normally where cx_Oracle handles session
pooling and Oracle Database additionally uses DRCP::
with engine.connect() as conn:
print(conn.scalar("select 1 FROM dual"))
print(conn.scalar("select 1 from dual"))
.. _cx_oracle_unicode:
@@ -216,24 +272,28 @@ Unicode
-------
As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings. In all cases however, the driver requires an explicit
Unicode strings. In all cases however, the driver requires an explicit
encoding configuration.
Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The long accepted standard for establishing client encoding for nearly all
Oracle related software is via the `NLS_LANG <https://www.oracle.com/database/technologies/faq-nls-lang.html>`_
environment variable. cx_Oracle like most other Oracle drivers will use
this environment variable as the source of its encoding configuration. The
format of this variable is idiosyncratic; a typical value would be
``AMERICAN_AMERICA.AL32UTF8``.
Oracle Database related software is via the `NLS_LANG
<https://www.oracle.com/database/technologies/faq-nls-lang.html>`_ environment
variable. Older versions of cx_Oracle use this environment variable as the
source of its encoding configuration. The format of this variable is
Language_Territory.CharacterSet; a typical value would be
``AMERICAN_AMERICA.AL32UTF8``. cx_Oracle version 8 and later use the character
set "UTF-8" by default, and ignore the character set component of NLS_LANG.
The cx_Oracle driver also supports a programmatic alternative which is to
pass the ``encoding`` and ``nencoding`` parameters directly to its
``.connect()`` function. These can be present in the URL as follows::
The cx_Oracle driver also supported a programmatic alternative which is to pass
the ``encoding`` and ``nencoding`` parameters directly to its ``.connect()``
function. These can be present in the URL as follows::
engine = create_engine("oracle+cx_oracle://scott:tiger@orclpdb/?encoding=UTF-8&nencoding=UTF-8")
engine = create_engine(
"oracle+cx_oracle://scott:tiger@tnsalias?encoding=UTF-8&nencoding=UTF-8"
)
For the meaning of the ``encoding`` and ``nencoding`` parameters, please
consult
@@ -248,25 +308,24 @@ consult
Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Core expression language handles unicode data by use of the :class:`.Unicode`
and :class:`.UnicodeText`
datatypes. These types correspond to the VARCHAR2 and CLOB Oracle datatypes by
default. When using these datatypes with Unicode data, it is expected that
the Oracle database is configured with a Unicode-aware character set, as well
as that the ``NLS_LANG`` environment variable is set appropriately, so that
the VARCHAR2 and CLOB datatypes can accommodate the data.
The Core expression language handles unicode data by use of the
:class:`.Unicode` and :class:`.UnicodeText` datatypes. These types correspond
to the VARCHAR2 and CLOB Oracle Database datatypes by default. When using
these datatypes with Unicode data, it is expected that the database is
configured with a Unicode-aware character set, as well as that the ``NLS_LANG``
environment variable is set appropriately (this applies to older versions of
cx_Oracle), so that the VARCHAR2 and CLOB datatypes can accommodate the data.
In the case that the Oracle database is not configured with a Unicode character
In the case that Oracle Database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`,
which will cause the
SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`, which will cause
the SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle datatypes
unless the ``use_nchar_for_unicode=True`` is passed to the dialect
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle Database
datatypes unless the ``use_nchar_for_unicode=True`` is passed to the dialect
when :func:`_sa.create_engine` is called.
@@ -275,7 +334,7 @@ SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
Encoding Errors
^^^^^^^^^^^^^^^
For the unusual case that data in the Oracle database is present with a broken
For the unusual case that data in Oracle Database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
@@ -293,13 +352,13 @@ Fine grained control over cx_Oracle data binding performance with setinputsizes
-------------------------------------------------------------------------------
The cx_Oracle DBAPI has a deep and fundamental reliance upon the usage of the
DBAPI ``setinputsizes()`` call. The purpose of this call is to establish the
DBAPI ``setinputsizes()`` call. The purpose of this call is to establish the
datatypes that are bound to a SQL statement for Python values being passed as
parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the cx_Oracle DBAPI relies upon it heavily in its
interactions with the Oracle client interface, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
interactions with the Oracle Database client interface, and in some scenarios
it is not possible for SQLAlchemy to know exactly how data should be bound, as
some settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.
Users of the cx_Oracle dialect are **strongly encouraged** to read through
@@ -328,13 +387,16 @@ objects which have a ``.key`` and a ``.type`` attribute::
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in inputsizes.items():
log.info(
"Bound parameter name: %s SQLAlchemy type: %r "
"DBAPI object: %s",
bindparam.key, bindparam.type, dbapitype)
log.info(
"Bound parameter name: %s SQLAlchemy type: %r DBAPI object: %s",
bindparam.key,
bindparam.type,
dbapitype,
)
Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -348,12 +410,28 @@ series. This setting can be modified as follows::
engine = create_engine("oracle+cx_oracle://scott:tiger@host/xe")
@event.listens_for(engine, "do_setinputsizes")
def _remove_clob(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in list(inputsizes.items()):
if dbapitype is CLOB:
del inputsizes[bindparam]
.. _cx_oracle_lob:
LOB Datatypes
--------------
LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and
BLOB. Modern versions of cx_Oracle is optimized for these datatypes to be
delivered as a single buffer. As such, SQLAlchemy makes use of these newer type
handlers by default.
To disable the use of newer type handlers and deliver LOB objects as classic
buffered objects with a ``read()`` method, the parameter
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`,
which takes place only engine-wide.
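For example, with a placeholder connect string::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+cx_oracle://scott:tiger@tnsalias", auto_convert_lobs=False
    )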
.. _cx_oracle_returning:
RETURNING Support
@@ -362,29 +440,12 @@ RETURNING Support
The cx_Oracle dialect implements RETURNING using OUT parameters.
The dialect supports RETURNING fully.
.. _cx_oracle_lob:
LOB Datatypes
--------------
LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and
BLOB. Modern versions of cx_Oracle and oracledb are optimized for these
datatypes to be delivered as a single buffer. As such, SQLAlchemy makes use of
these newer type handlers by default.
To disable the use of newer type handlers and deliver LOB objects as classic
buffered objects with a ``read()`` method, the parameter
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`,
which takes place only engine-wide.
Two Phase Transactions Not Supported
-------------------------------------
------------------------------------
Two phase transactions are **not supported** under cx_Oracle due to poor
driver support. As of cx_Oracle 6.0b1, the interface for
two phase transactions has been changed to be more of a direct pass-through
to the underlying OCI layer with less automation. The additional logic
to support this system is not implemented in SQLAlchemy.
Two phase transactions are **not supported** under cx_Oracle due to poor driver
support. The newer :ref:`oracledb` dialect however **does** support two phase
transactions.
.. _cx_oracle_numeric:
@@ -395,20 +456,21 @@ SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle, Oracle's ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle-specific
:class:`_oracle.NUMBER` type takes this into account as well.
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle Database, the ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle
Database-specific :class:`_oracle.NUMBER` type takes this into account as well.
The cx_Oracle dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the particular flavor of :class:`.Numeric` in
use, as well as if no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle may sends incomplete or ambiguous information
about the numeric types being returned, such as a query where the numeric types
are buried under multiple levels of subquery. The type handlers do their best
to make the right decision in all cases, deferring to the underlying cx_Oracle
DBAPI for all those cases where the driver can make the best decision.
use, as well as if no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle Database may send incomplete or ambiguous
information about the numeric types being returned, such as a query where the
numeric types are buried under multiple levels of subquery. The type handlers
do their best to make the right decision in all cases, deferring to the
underlying cx_Oracle DBAPI for all those cases where the driver can make the
best decision.
When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
@@ -1291,8 +1353,13 @@ class OracleDialect_cx_oracle(OracleDialect):
cx_Oracle.CLOB,
cx_Oracle.NCLOB,
):
typ = (
cx_Oracle.DB_TYPE_VARCHAR
if default_type is cx_Oracle.CLOB
else cx_Oracle.DB_TYPE_NVARCHAR
)
return cursor.var(
cx_Oracle.DB_TYPE_NVARCHAR,
typ,
_CX_ORACLE_MAGIC_LOB_SIZE,
cursor.arraysize,
**dialect._cursor_var_unicode_kwargs,
@@ -1423,13 +1490,6 @@ class OracleDialect_cx_oracle(OracleDialect):
return False
def create_xid(self):
"""create a two-phase transaction ID.
this id will be passed to do_begin_twophase(), do_rollback_twophase(),
do_commit_twophase(). its format is unspecified.
"""
id_ = random.randint(0, 2**128)
return (0x1234, "%032x" % id_, "%032x" % 9)


@@ -1,5 +1,5 @@
# dialects/oracle/dictionary.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,77 +1,595 @@
# dialects/oracle/oracledb.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: ignore-errors
r"""
.. dialect:: oracle+oracledb
r""".. dialect:: oracle+oracledb
:name: python-oracledb
:dbapi: oracledb
:connectstring: oracle+oracledb://user:pass@hostname:port[/dbname][?service_name=<service>[&key=value&key=value...]]
:url: https://oracle.github.io/python-oracledb/
python-oracledb is released by Oracle to supersede the cx_Oracle driver.
It is fully compatible with cx_Oracle and features both a "thin" client
mode that requires no dependencies, as well as a "thick" mode that uses
the Oracle Client Interface in the same way as cx_Oracle.
Description
-----------
.. seealso::
Python-oracledb is the Oracle Database driver for Python. It features a default
"thin" client mode that requires no dependencies, and an optional "thick" mode
that uses Oracle Client libraries. It supports SQLAlchemy features including
two phase transactions and Asyncio.
:ref:`cx_oracle` - all of cx_Oracle's notes apply to the oracledb driver
as well.
Python-oracledb is the renamed, updated cx_Oracle driver. Oracle is no longer
doing any releases in the cx_Oracle namespace.
The SQLAlchemy ``oracledb`` dialect provides both a sync and an async
implementation under the same dialect name. The proper version is
selected depending on how the engine is created:
* calling :func:`_sa.create_engine` with ``oracle+oracledb://...`` will
automatically select the sync version, e.g.::
automatically select the sync version::
from sqlalchemy import create_engine
sync_engine = create_engine("oracle+oracledb://scott:tiger@localhost/?service_name=XEPDB1")
* calling :func:`_asyncio.create_async_engine` with
``oracle+oracledb://...`` will automatically select the async version,
e.g.::
sync_engine = create_engine(
"oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
)
* calling :func:`_asyncio.create_async_engine` with ``oracle+oracledb://...``
will automatically select the async version::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine("oracle+oracledb://scott:tiger@localhost/?service_name=XEPDB1")
The asyncio version of the dialect may also be specified explicitly using the
``oracledb_async`` suffix, as::
asyncio_engine = create_async_engine(
"oracle+oracledb://scott:tiger@localhost?service_name=FREEPDB1"
)
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine("oracle+oracledb_async://scott:tiger@localhost/?service_name=XEPDB1")
The asyncio version of the dialect may also be specified explicitly using the
``oracledb_async`` suffix::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine(
"oracle+oracledb_async://scott:tiger@localhost?service_name=FREEPDB1"
)
.. versionadded:: 2.0.25 added support for the async version of oracledb.
Thick mode support
------------------
By default the ``python-oracledb`` is started in thin mode, that does not
require oracle client libraries to be installed in the system. The
``python-oracledb`` driver also support a "thick" mode, that behaves
similarly to ``cx_oracle`` and requires that Oracle Client Interface (OCI)
is installed.
By default, the python-oracledb driver runs in a "thin" mode that does not
require Oracle Client libraries to be installed. The driver also supports a
"thick" mode that uses Oracle Client libraries to get functionality such as
Oracle Application Continuity.
To enable this mode, the user may call ``oracledb.init_oracle_client``
manually, or by passing the parameter ``thick_mode=True`` to
:func:`_sa.create_engine`. To pass custom arguments to ``init_oracle_client``,
like the ``lib_dir`` path, a dict may be passed to this parameter, as in::
To enable thick mode, call `oracledb.init_oracle_client()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.init_oracle_client>`_
explicitly, or pass the parameter ``thick_mode=True`` to
:func:`_sa.create_engine`. To pass custom arguments to
``init_oracle_client()``, like the ``lib_dir`` path, a dict may be passed, for
example::
engine = sa.create_engine("oracle+oracledb://...", thick_mode={
"lib_dir": "/path/to/oracle/client/lib", "driver_name": "my-app"
})
engine = sa.create_engine(
"oracle+oracledb://...",
thick_mode={
"lib_dir": "/path/to/oracle/client/lib",
"config_dir": "/path/to/network_config_file_directory",
"driver_name": "my-app : 1.0.0",
},
)
Note that passing a ``lib_dir`` path should only be done on macOS or
Windows. On Linux it does not behave as you might expect.
.. seealso::
https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.init_oracle_client
python-oracledb documentation `Enabling python-oracledb Thick mode
<https://python-oracledb.readthedocs.io/en/latest/user_guide/initialization.html#enabling-python-oracledb-thick-mode>`_
Connecting to Oracle Database
-----------------------------
python-oracledb provides several methods of indicating the target database.
The dialect translates from a series of different URL forms.
Given the hostname, port and service name of the target database, you can
connect in SQLAlchemy using the ``service_name`` query string parameter::
engine = create_engine(
"oracle+oracledb://scott:tiger@hostname:port?service_name=myservice"
)
Connecting with Easy Connect strings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
You can pass any valid python-oracledb connection string as the ``dsn`` key
value in a :paramref:`_sa.create_engine.connect_args` dictionary. See
python-oracledb documentation `Oracle Net Services Connection Strings
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#oracle-net-services-connection-strings>`_.
For example to use an `Easy Connect string
<https://download.oracle.com/ocomdocs/global/Oracle-Net-Easy-Connect-Plus.pdf>`_
with a timeout to prevent connection establishment from hanging if the network
transport to the database cannot be established in 30 seconds, and also setting
a keep-alive time of 60 seconds to stop idle network connections from being
terminated by a firewall::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "hostname:port/myservice?transport_connect_timeout=30&expire_time=60",
},
)
The Easy Connect syntax has been enhanced during the life of Oracle Database.
Review the documentation for your database version. The current documentation
is at `Understanding the Easy Connect Naming Method
<https://www.oracle.com/pls/topic/lookup?ctx=dblatest&id=GUID-B0437826-43C1-49EC-A94D-B650B6A4A6EE>`_.
The general syntax is similar to:
.. sourcecode:: text
[[protocol:]//]host[:port][/[service_name]][?parameter_name=value{&parameter_name=value}]
Note that although the SQLAlchemy URL syntax ``hostname:port/dbname`` looks
like Oracle's Easy Connect syntax, it is different. SQLAlchemy's URL requires a
system identifier (SID) for the ``dbname`` component::
engine = create_engine("oracle+oracledb://scott:tiger@hostname:port/sid")
Easy Connect syntax does not support SIDs. It uses service names, which are
the preferred choice for connecting to Oracle Database.
Passing python-oracledb connect arguments
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Other python-oracledb driver `connection options
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.connect>`_
can be passed in ``connect_args``. For example::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "hostname:port/myservice",
"events": True,
"mode": oracledb.AUTH_MODE_SYSDBA,
},
)
Connecting with tnsnames.ora TNS aliases
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
If no port, database name, or service name is provided, the dialect will use an
Oracle Database DSN "connection string". This takes the "hostname" portion of
the URL as the data source name. For example, if the ``tnsnames.ora`` file
contains a `TNS Alias
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#tns-aliases-for-connection-strings>`_
of ``myalias`` as below:
.. sourcecode:: text
myalias =
(DESCRIPTION =
(ADDRESS = (PROTOCOL = TCP)(HOST = mymachine.example.com)(PORT = 1521))
(CONNECT_DATA =
(SERVER = DEDICATED)
(SERVICE_NAME = orclpdb1)
)
)
The python-oracledb dialect connects to this database service when ``myalias`` is the
hostname portion of the URL, without specifying a port, database name or
``service_name``::
engine = create_engine("oracle+oracledb://scott:tiger@myalias")
Connecting to Oracle Autonomous Database
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Users of Oracle Autonomous Database should either use the TNS Alias URL
shown above, or pass the TNS Alias as the ``dsn`` key value in a
:paramref:`_sa.create_engine.connect_args` dictionary.
If Oracle Autonomous Database is configured for mutual TLS ("mTLS")
connections, then additional configuration is required as shown in `Connecting
to Oracle Cloud Autonomous Databases
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#connecting-to-oracle-cloud-autonomous-databases>`_. In
summary, Thick mode users should configure file locations and set the wallet
path in ``sqlnet.ora`` appropriately::
e = create_engine(
"oracle+oracledb://@",
thick_mode={
# directory containing tnsnames.ora and cwallet.so
"config_dir": "/opt/oracle/wallet_dir",
},
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "mydb_high",
},
)
Thin mode users of mTLS should pass the appropriate directories and PEM wallet
password when creating the engine, similar to::
e = create_engine(
"oracle+oracledb://@",
connect_args={
"user": "scott",
"password": "tiger",
"dsn": "mydb_high",
"config_dir": "/opt/oracle/wallet_dir", # directory containing tnsnames.ora
"wallet_location": "/opt/oracle/wallet_dir", # directory containing ewallet.pem
"wallet_password": "top secret", # password for the PEM file
},
)
Typically ``config_dir`` and ``wallet_location`` are the same directory, which
is where the Oracle Autonomous Database wallet zip file was extracted. Note
this directory should be protected.
Connection Pooling
------------------
Applications with multiple concurrent users should use connection pooling. A
minimal sized connection pool is also beneficial for long-running, single-user
applications that do not frequently use a connection.
The python-oracledb driver provides its own connection pool implementation that
may be used in place of SQLAlchemy's pooling functionality. The driver pool
gives support for high availability features such as dead connection detection,
connection draining for planned database downtime, support for Oracle
Application Continuity and Transparent Application Continuity, and gives
support for `Database Resident Connection Pooling (DRCP)
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
To take advantage of python-oracledb's pool, use the
:paramref:`_sa.create_engine.creator` parameter to provide a function that
returns a new connection, along with setting
:paramref:`_sa.create_engine.pool_class` to ``NullPool`` to disable
SQLAlchemy's pooling::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use the optional python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
)
engine = create_engine(
"oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
)
The above engine may then be used normally. Internally, python-oracledb handles
connection pooling::
with engine.connect() as conn:
print(conn.scalar(text("select 1 from dual")))
Refer to the python-oracledb documentation for `oracledb.create_pool()
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#oracledb.create_pool>`_
for the arguments that can be used when creating a connection pool.
.. _drcp:
Using Oracle Database Resident Connection Pooling (DRCP)
--------------------------------------------------------
When using Oracle Database's Database Resident Connection Pooling (DRCP), the
best practice is to specify a connection class and "purity". Refer to the
`python-oracledb documentation on DRCP
<https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#database-resident-connection-pooling-drcp>`_.
For example::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use the optional python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
cclass="MYCLASS",
purity=oracledb.PURITY_SELF,
)
engine = create_engine(
"oracle+oracledb://", creator=pool.acquire, poolclass=NullPool
)
The above engine may then be used normally where python-oracledb handles
application connection pooling and Oracle Database additionally uses DRCP::
with engine.connect() as conn:
print(conn.scalar(text("select 1 from dual")))
If you wish to use different connection classes or purities for different
connections, then wrap ``pool.acquire()``::
import oracledb
from sqlalchemy import create_engine
from sqlalchemy import text
from sqlalchemy.pool import NullPool
# Uncomment to use python-oracledb Thick mode.
# Review the python-oracledb doc for the appropriate parameters
# oracledb.init_oracle_client(<your parameters>)
pool = oracledb.create_pool(
user="scott",
password="tiger",
dsn="localhost:1521/freepdb1",
min=1,
max=4,
increment=1,
cclass="MYCLASS",
purity=oracledb.PURITY_SELF,
)
.. versionadded:: 2.0.0 added support for oracledb driver.
def creator():
return pool.acquire(cclass="MYOTHERCLASS", purity=oracledb.PURITY_NEW)
engine = create_engine(
"oracle+oracledb://", creator=creator, poolclass=NullPool
)
Engine Options consumed by the SQLAlchemy oracledb dialect outside of the driver
--------------------------------------------------------------------------------
There are also options that are consumed by the SQLAlchemy oracledb dialect
itself. These options are always passed directly to :func:`_sa.create_engine`,
such as::
e = create_engine("oracle+oracledb://user:pass@tnsalias", arraysize=500)
The parameters accepted by the oracledb dialect are as follows:
* ``arraysize`` - set the driver cursor.arraysize value. It defaults to
``None``, indicating that the driver default value of 100 should be used.
This setting controls how many rows are buffered when fetching rows, and can
have a significant effect on performance if increased for queries that return
large numbers of rows.
.. versionchanged:: 2.0.26 - changed the default value from 50 to None,
to use the default value of the driver itself.
* ``auto_convert_lobs`` - defaults to True; See :ref:`oracledb_lob`.
* ``coerce_to_decimal`` - see :ref:`oracledb_numeric` for detail.
* ``encoding_errors`` - see :ref:`oracledb_unicode_encoding_errors` for detail.
.. _oracledb_unicode:
Unicode
-------
As is the case for all DBAPIs under Python 3, all strings are inherently
Unicode strings.
Ensuring the Correct Client Encoding
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
In python-oracledb, the encoding used for all character data is "UTF-8".
Unicode-specific Column datatypes
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The Core expression language handles unicode data by use of the
:class:`.Unicode` and :class:`.UnicodeText` datatypes. These types correspond
to the VARCHAR2 and CLOB Oracle Database datatypes by default. When using
these datatypes with Unicode data, it is expected that the database is
configured with a Unicode-aware character set so that the VARCHAR2 and CLOB
datatypes can accommodate the data.
In the case that Oracle Database is not configured with a Unicode character
set, the two options are to use the :class:`_types.NCHAR` and
:class:`_oracle.NCLOB` datatypes explicitly, or to pass the flag
``use_nchar_for_unicode=True`` to :func:`_sa.create_engine`, which will cause
the SQLAlchemy dialect to use NCHAR/NCLOB for the :class:`.Unicode` /
:class:`.UnicodeText` datatypes instead of VARCHAR/CLOB.
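For example, with a placeholder TNS alias::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias", use_nchar_for_unicode=True
    )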
.. versionchanged:: 1.3 The :class:`.Unicode` and :class:`.UnicodeText`
datatypes now correspond to the ``VARCHAR2`` and ``CLOB`` Oracle Database
datatypes unless the ``use_nchar_for_unicode=True`` is passed to the dialect
when :func:`_sa.create_engine` is called.
.. _oracledb_unicode_encoding_errors:
Encoding Errors
^^^^^^^^^^^^^^^
For the unusual case that data in Oracle Database is present with a broken
encoding, the dialect accepts a parameter ``encoding_errors`` which will be
passed to Unicode decoding functions in order to affect how decoding errors are
handled. The value is ultimately consumed by the Python `decode
<https://docs.python.org/3/library/stdtypes.html#bytes.decode>`_ function, and
is passed both via python-oracledb's ``encodingErrors`` parameter consumed by
``Cursor.var()``, as well as SQLAlchemy's own decoding function, as the
python-oracledb dialect makes use of both under different circumstances.
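For example, to substitute the Unicode replacement character for undecodable
bytes (the connect string is a placeholder)::

    from sqlalchemy import create_engine

    engine = create_engine(
        "oracle+oracledb://scott:tiger@tnsalias", encoding_errors="replace"
    )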
.. versionadded:: 1.3.11
.. _oracledb_setinputsizes:
Fine grained control over python-oracledb data binding with setinputsizes
-------------------------------------------------------------------------
The python-oracledb DBAPI has a deep and fundamental reliance upon the usage of
the DBAPI ``setinputsizes()`` call. The purpose of this call is to establish
the datatypes that are bound to a SQL statement for Python values being passed
as parameters. While virtually no other DBAPI assigns any use to the
``setinputsizes()`` call, the python-oracledb DBAPI relies upon it heavily in
its interactions with the Oracle Database, and in some scenarios it is not
possible for SQLAlchemy to know exactly how data should be bound, as some
settings can cause profoundly different performance characteristics, while
altering the type coercion behavior at the same time.
Users of the oracledb dialect are **strongly encouraged** to read through
python-oracledb's list of built-in datatype symbols at `Database Types
<https://python-oracledb.readthedocs.io/en/latest/api_manual/module.html#database-types>`_
Note that in some cases, significant performance degradation can occur when
using these types vs. not.
On the SQLAlchemy side, the :meth:`.DialectEvents.do_setinputsizes` event can
be used both for runtime visibility (e.g. logging) of the setinputsizes step as
well as to fully control how ``setinputsizes()`` is used on a per-statement
basis.
.. versionadded:: 1.2.9 Added :meth:`.DialectEvents.setinputsizes`
Example 1 - logging all setinputsizes calls
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
The following example illustrates how to log the intermediary values from a
SQLAlchemy perspective before they are converted to the raw ``setinputsizes()``
parameter dictionary. The keys of the dictionary are :class:`.BindParameter`
objects which have a ``.key`` and a ``.type`` attribute::
from sqlalchemy import create_engine, event
engine = create_engine(
"oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
)
@event.listens_for(engine, "do_setinputsizes")
def _log_setinputsizes(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in inputsizes.items():
log.info(
"Bound parameter name: %s SQLAlchemy type: %r DBAPI object: %s",
bindparam.key,
bindparam.type,
dbapitype,
)
Example 2 - remove all bindings to CLOB
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
For performance, fetching LOB datatypes from Oracle Database is set by default
for the ``Text`` type within SQLAlchemy. This setting can be modified as
follows::
from sqlalchemy import create_engine, event
from oracledb import CLOB
engine = create_engine(
"oracle+oracledb://scott:tiger@localhost:1521?service_name=freepdb1"
)
@event.listens_for(engine, "do_setinputsizes")
def _remove_clob(inputsizes, cursor, statement, parameters, context):
for bindparam, dbapitype in list(inputsizes.items()):
if dbapitype is CLOB:
del inputsizes[bindparam]
.. _oracledb_lob:
LOB Datatypes
--------------
LOB datatypes refer to the "large object" datatypes such as CLOB, NCLOB and
BLOB. Oracle Database can efficiently return these datatypes as a single
buffer. SQLAlchemy makes use of type handlers to do this by default.
To disable the use of the type handlers and deliver LOB objects as classic
buffered objects with a ``read()`` method, the parameter
``auto_convert_lobs=False`` may be passed to :func:`_sa.create_engine`.
.. _oracledb_returning:
RETURNING Support
-----------------
The oracledb dialect implements RETURNING using OUT parameters. The dialect
supports RETURNING fully.
Two Phase Transaction Support
-----------------------------
Two phase transactions are fully supported with python-oracledb. (Thin mode
requires python-oracledb 2.3). APIs for two phase transactions are provided at
the Core level via :meth:`_engine.Connection.begin_twophase` and
:paramref:`_orm.Session.twophase` for transparent ORM use.
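A minimal Core-level sketch; the connect string and table are hypothetical::

    from sqlalchemy import create_engine, text

    engine = create_engine(
        "oracle+oracledb://scott:tiger@localhost?service_name=freepdb1"
    )

    with engine.connect() as conn:
        tx = conn.begin_twophase()
        conn.execute(text("insert into my_table (x) values (1)"))
        tx.prepare()
        tx.commit()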
.. versionchanged:: 2.0.32 added support for two phase transactions
.. _oracledb_numeric:
Precision Numerics
------------------
SQLAlchemy's numeric types can handle receiving and returning values as Python
``Decimal`` objects or float objects. When a :class:`.Numeric` object, or a
subclass such as :class:`.Float`, :class:`_oracle.DOUBLE_PRECISION` etc. is in
use, the :paramref:`.Numeric.asdecimal` flag determines if values should be
coerced to ``Decimal`` upon return, or returned as float objects. To make
matters more complicated under Oracle Database, the ``NUMBER`` type can also
represent integer values if the "scale" is zero, so the Oracle
Database-specific :class:`_oracle.NUMBER` type takes this into account as well.
The oracledb dialect makes extensive use of connection- and cursor-level
"outputtypehandler" callables in order to coerce numeric values as requested.
These callables are specific to the particular flavor of :class:`.Numeric` in
use, as well as if no SQLAlchemy typing objects are present. There are
observed scenarios where Oracle Database may send incomplete or ambiguous
information about the numeric types being returned, such as a query where the
numeric types are buried under multiple levels of subquery. The type handlers
do their best to make the right decision in all cases, deferring to the
underlying python-oracledb DBAPI for all those cases where the driver can make
the best decision.
When no typing objects are present, as when executing plain SQL strings, a
default "outputtypehandler" is present which will generally return numeric
values which specify precision and scale as Python ``Decimal`` objects. To
disable this coercion to decimal for performance reasons, pass the flag
``coerce_to_decimal=False`` to :func:`_sa.create_engine`::
engine = create_engine(
"oracle+oracledb://scott:tiger@tnsalias", coerce_to_decimal=False
)
The ``coerce_to_decimal`` flag only impacts the results of plain string
SQL statements that are not otherwise associated with a :class:`.Numeric`
SQLAlchemy type (or a subclass of such).
.. versionchanged:: 1.2 The numeric handling system for the oracle dialects has
been reworked to take advantage of newer driver features as well as better
integration of outputtypehandlers.
.. versionadded:: 2.0.0 added support for the python-oracledb driver.
""" # noqa
from __future__ import annotations
@@ -81,12 +599,14 @@ import re
from typing import Any
from typing import TYPE_CHECKING
from .cx_oracle import OracleDialect_cx_oracle as _OracleDialect_cx_oracle
from . import cx_oracle as _cx_oracle
from ... import exc
from ... import pool
from ...connectors.asyncio import AsyncAdapt_dbapi_connection
from ...connectors.asyncio import AsyncAdapt_dbapi_cursor
from ...connectors.asyncio import AsyncAdapt_dbapi_ss_cursor
from ...connectors.asyncio import AsyncAdaptFallback_dbapi_connection
from ...engine import default
from ...util import asbool
from ...util import await_fallback
from ...util import await_only
@@ -96,8 +616,16 @@ if TYPE_CHECKING:
from oracledb import AsyncCursor
class OracleDialect_oracledb(_OracleDialect_cx_oracle):
class OracleExecutionContext_oracledb(
_cx_oracle.OracleExecutionContext_cx_oracle
):
pass
class OracleDialect_oracledb(_cx_oracle.OracleDialect_cx_oracle):
supports_statement_cache = True
execution_ctx_cls = OracleExecutionContext_oracledb
driver = "oracledb"
_min_version = (1,)
@@ -155,6 +683,56 @@ class OracleDialect_oracledb(_OracleDialect_cx_oracle):
f"oracledb version {self._min_version} and above are supported"
)
def do_begin_twophase(self, connection, xid):
conn_xid = connection.connection.xid(*xid)
connection.connection.tpc_begin(conn_xid)
connection.connection.info["oracledb_xid"] = conn_xid
def do_prepare_twophase(self, connection, xid):
should_commit = connection.connection.tpc_prepare()
connection.info["oracledb_should_commit"] = should_commit
def do_rollback_twophase(
self, connection, xid, is_prepared=True, recover=False
):
if recover:
conn_xid = connection.connection.xid(*xid)
else:
conn_xid = None
connection.connection.tpc_rollback(conn_xid)
def do_commit_twophase(
self, connection, xid, is_prepared=True, recover=False
):
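# three cases: the transaction was never prepared (prepare it now so
# tpc_commit() can proceed), we are recovering an xid returned by
# tpc_recover(), or we are committing a transaction prepared earlier
# in this session, whose prepare() result was stored in connection.info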
conn_xid = None
if not is_prepared:
should_commit = connection.connection.tpc_prepare()
elif recover:
conn_xid = connection.connection.xid(*xid)
should_commit = True
else:
should_commit = connection.info["oracledb_should_commit"]
if should_commit:
connection.connection.tpc_commit(conn_xid)
def do_recover_twophase(self, connection):
return [
# oracledb seems to return bytes
(
fi,
gti.decode() if isinstance(gti, bytes) else gti,
bq.decode() if isinstance(bq, bytes) else bq,
)
for fi, gti, bq in connection.connection.tpc_recover()
]
def _check_max_identifier_length(self, connection):
if self.oracledb_ver >= (2, 5):
max_len = connection.connection.max_identifier_length
if max_len is not None:
return max_len
return super()._check_max_identifier_length(connection)
class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
_cursor: AsyncCursor
@@ -185,7 +763,7 @@ class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
self._adapt_connection._handle_exception(error)
async def _execute_async(self, operation, parameters):
# override to not use mutex, oracledb already has mutex
# override to not use mutex, oracledb already has a mutex
if parameters is None:
result = await self._cursor.execute(operation)
@@ -201,7 +779,7 @@ class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
operation,
seq_of_parameters,
):
# override to not use mutex, oracledb already has mutex
# override to not use mutex, oracledb already has a mutex
return await self._cursor.executemany(operation, seq_of_parameters)
def __enter__(self):
@@ -211,6 +789,17 @@ class AsyncAdapt_oracledb_cursor(AsyncAdapt_dbapi_cursor):
self.close()
class AsyncAdapt_oracledb_ss_cursor(
AsyncAdapt_dbapi_ss_cursor, AsyncAdapt_oracledb_cursor
):
__slots__ = ()
def close(self) -> None:
if self._cursor is not None:
self._cursor.close()
self._cursor = None # type: ignore
class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection):
_connection: AsyncConnection
__slots__ = ()
@@ -248,9 +837,34 @@ class AsyncAdapt_oracledb_connection(AsyncAdapt_dbapi_connection):
def stmtcachesize(self, value):
self._connection.stmtcachesize = value
@property
def max_identifier_length(self):
return self._connection.max_identifier_length
def cursor(self):
return AsyncAdapt_oracledb_cursor(self)
def ss_cursor(self):
return AsyncAdapt_oracledb_ss_cursor(self)
def xid(self, *args: Any, **kwargs: Any) -> Any:
return self._connection.xid(*args, **kwargs)
def tpc_begin(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_begin(*args, **kwargs))
def tpc_commit(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_commit(*args, **kwargs))
def tpc_prepare(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_prepare(*args, **kwargs))
def tpc_recover(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_recover(*args, **kwargs))
def tpc_rollback(self, *args: Any, **kwargs: Any) -> Any:
return self.await_(self._connection.tpc_rollback(*args, **kwargs))
class AsyncAdaptFallback_oracledb_connection(
AsyncAdaptFallback_dbapi_connection, AsyncAdapt_oracledb_connection
@@ -281,9 +895,31 @@ class OracledbAdaptDBAPI:
)
class OracleExecutionContextAsync_oracledb(OracleExecutionContext_oracledb):
# restore default create cursor
create_cursor = default.DefaultExecutionContext.create_cursor
def create_default_cursor(self):
# copy of OracleExecutionContext_cx_oracle.create_cursor
c = self._dbapi_connection.cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def create_server_side_cursor(self):
c = self._dbapi_connection.ss_cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
class OracleDialectAsync_oracledb(OracleDialect_oracledb):
is_async = True
supports_server_side_cursors = True
supports_statement_cache = True
execution_ctx_cls = OracleExecutionContextAsync_oracledb
_min_version = (2,)

View File

@@ -1,5 +1,5 @@
# dialects/oracle/provision.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -89,7 +89,7 @@ def _oracle_drop_db(cfg, eng, ident):
# cx_Oracle seems to occasionally leak open connections when a large
# suite is run, even if we confirm we have zero references to
# connection objects.
# while there is a "kill session" command in Oracle,
# while there is a "kill session" command in Oracle Database,
# it unfortunately does not release the connection sufficiently.
_ora_drop_ignore(conn, ident)
_ora_drop_ignore(conn, "%s_ts1" % ident)

View File

@@ -1,5 +1,5 @@
# dialects/oracle/types.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -64,17 +64,18 @@ class NUMBER(sqltypes.Numeric, sqltypes.Integer):
class FLOAT(sqltypes.FLOAT):
"""Oracle FLOAT.
"""Oracle Database FLOAT.
This is the same as :class:`_sqltypes.FLOAT` except that
an Oracle-specific :paramref:`_oracle.FLOAT.binary_precision`
an Oracle Database-specific :paramref:`_oracle.FLOAT.binary_precision`
parameter is accepted, and
the :paramref:`_sqltypes.Float.precision` parameter is not accepted.
Oracle FLOAT types indicate precision in terms of "binary precision", which
defaults to 126. For a REAL type, the value is 63. This parameter does not
cleanly map to a specific number of decimal places but is roughly
equivalent to the desired number of decimal places divided by 0.3103.
Oracle Database FLOAT types indicate precision in terms of "binary
precision", which defaults to 126. For a REAL type, the value is 63. This
parameter does not cleanly map to a specific number of decimal places but
is roughly equivalent to the desired number of decimal places divided by
0.30103.
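For example, for roughly five decimal digits of precision, a binary
precision of 17 may be used, since 5 / 0.30103 is approximately 16.6
(a sketch)::

    from sqlalchemy.dialects.oracle import FLOAT

    Column("value", FLOAT(binary_precision=17))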
.. versionadded:: 2.0
@@ -91,10 +92,11 @@ class FLOAT(sqltypes.FLOAT):
r"""
Construct a FLOAT
:param binary_precision: Oracle binary precision value to be rendered
in DDL. This may be approximated to the number of decimal characters
using the formula "decimal precision = 0.30103 * binary precision".
The default value used by Oracle for FLOAT / DOUBLE PRECISION is 126.
:param binary_precision: Oracle Database binary precision value to be
rendered in DDL. This may be approximated to the number of decimal
characters using the formula "decimal precision = 0.30103 * binary
precision". The default value used by Oracle Database for FLOAT /
DOUBLE PRECISION is 126.
:param asdecimal: See :paramref:`_sqltypes.Float.asdecimal`
@@ -109,10 +111,36 @@ class FLOAT(sqltypes.FLOAT):
class BINARY_DOUBLE(sqltypes.Double):
"""Implement the Oracle ``BINARY_DOUBLE`` datatype.
This datatype differs from the Oracle ``DOUBLE`` datatype in that it
delivers a true 8-byte FP value. The datatype may be combined with a
generic :class:`.Double` datatype using :meth:`.TypeEngine.with_variant`.
.. seealso::
:ref:`oracle_float_support`
"""
__visit_name__ = "BINARY_DOUBLE"
class BINARY_FLOAT(sqltypes.Float):
"""Implement the Oracle ``BINARY_FLOAT`` datatype.
This datatype differs from the Oracle ``FLOAT`` datatype in that it
delivers a true 4-byte FP value. The datatype may be combined with a
generic :class:`.Float` datatype using :meth:`.TypeEngine.with_variant`.
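For example, a sketch of a generic :class:`.Float` that renders
``BINARY_FLOAT`` only on Oracle Database::

    from sqlalchemy import Float
    from sqlalchemy.dialects.oracle import BINARY_FLOAT

    Column("value", Float().with_variant(BINARY_FLOAT(), "oracle"))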
.. seealso::
:ref:`oracle_float_support`
"""
__visit_name__ = "BINARY_FLOAT"
@@ -163,10 +191,10 @@ class _OracleDateLiteralRender:
class DATE(_OracleDateLiteralRender, sqltypes.DateTime):
"""Provide the oracle DATE type.
"""Provide the Oracle Database DATE type.
This type has no special Python behavior, except that it subclasses
:class:`_types.DateTime`; this is to suit the fact that the Oracle
:class:`_types.DateTime`; this is to suit the fact that the Oracle Database
``DATE`` type supports a time value.
"""
@@ -246,8 +274,8 @@ class INTERVAL(sqltypes.NativeForEmulated, sqltypes._AbstractInterval):
class TIMESTAMP(sqltypes.TIMESTAMP):
"""Oracle implementation of ``TIMESTAMP``, which supports additional
Oracle-specific modes
"""Oracle Database implementation of ``TIMESTAMP``, which supports
additional Oracle Database-specific modes
.. versionadded:: 2.0
@@ -257,10 +285,11 @@ class TIMESTAMP(sqltypes.TIMESTAMP):
"""Construct a new :class:`_oracle.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
use Oracle's ``TIMESTAMP WITH TIME ZONE`` datatype.
use Oracle Database's ``TIMESTAMP WITH TIME ZONE`` datatype.
:param local_timezone: boolean. Indicates that the TIMESTAMP type
should use Oracle's ``TIMESTAMP WITH LOCAL TIME ZONE`` datatype.
should use Oracle Database's ``TIMESTAMP WITH LOCAL TIME ZONE``
datatype.
"""
@@ -273,7 +302,7 @@ class TIMESTAMP(sqltypes.TIMESTAMP):
class ROWID(sqltypes.TypeEngine):
"""Oracle ROWID type.
"""Oracle Database ROWID type.
When used in a cast() or similar, generates ROWID.

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -8,7 +8,7 @@
from types import ModuleType
from . import array as arraylib # noqa # must be above base and other dialects
from . import array as arraylib # noqa # keep above base and other dialects
from . import asyncpg # noqa
from . import base
from . import pg8000 # noqa

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/_psycopg_common.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/array.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -54,11 +54,13 @@ class array(expression.ExpressionClauseList[_T]):
from sqlalchemy.dialects import postgresql
from sqlalchemy import select, func
stmt = select(array([1,2]) + array([3,4,5]))
stmt = select(array([1, 2]) + array([3, 4, 5]))
print(stmt.compile(dialect=postgresql.dialect()))
Produces the SQL::
Produces the SQL:
.. sourcecode:: sql
SELECT ARRAY[%(param_1)s, %(param_2)s] ||
ARRAY[%(param_3)s, %(param_4)s, %(param_5)s] AS anon_1
@@ -67,7 +69,7 @@ class array(expression.ExpressionClauseList[_T]):
:class:`_types.ARRAY`. The "inner" type of the array is inferred from
the values present, unless the ``type_`` keyword argument is passed::
array(['foo', 'bar'], type_=CHAR)
array(["foo", "bar"], type_=CHAR)
Multidimensional arrays are produced by nesting :class:`.array` constructs.
The dimensionality of the final :class:`_types.ARRAY`
@@ -76,16 +78,21 @@ class array(expression.ExpressionClauseList[_T]):
type::
stmt = select(
array([
array([1, 2]), array([3, 4]), array([column('q'), column('x')])
])
array(
[array([1, 2]), array([3, 4]), array([column("q"), column("x")])]
)
)
print(stmt.compile(dialect=postgresql.dialect()))
Produces::
Produces:
SELECT ARRAY[ARRAY[%(param_1)s, %(param_2)s],
ARRAY[%(param_3)s, %(param_4)s], ARRAY[q, x]] AS anon_1
.. sourcecode:: sql
SELECT ARRAY[
ARRAY[%(param_1)s, %(param_2)s],
ARRAY[%(param_3)s, %(param_4)s],
ARRAY[q, x]
] AS anon_1
.. versionadded:: 1.3.6 added support for multidimensional array literals
@@ -93,7 +100,7 @@ class array(expression.ExpressionClauseList[_T]):
:class:`_postgresql.ARRAY`
"""
""" # noqa: E501
__visit_name__ = "array"
@@ -166,9 +173,11 @@ class ARRAY(sqltypes.ARRAY):
from sqlalchemy.dialects import postgresql
mytable = Table("mytable", metadata,
Column("data", postgresql.ARRAY(Integer, dimensions=2))
)
mytable = Table(
"mytable",
metadata,
Column("data", postgresql.ARRAY(Integer, dimensions=2)),
)
The :class:`_postgresql.ARRAY` type provides all operations defined on the
core :class:`_types.ARRAY` type, including support for "dimensions",
@@ -183,8 +192,9 @@ class ARRAY(sqltypes.ARRAY):
mytable.c.data.contains([1, 2])
The :class:`_postgresql.ARRAY` type may not be supported on all
PostgreSQL DBAPIs; it is currently known to work on psycopg2 only.
Indexed access is one-based by default, to match that of PostgreSQL;
for zero-based indexed access, set
:paramref:`_postgresql.ARRAY.zero_indexes`.
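For example, a sketch using the hypothetical ``mytable`` above::

    # one-based index, matching PostgreSQL semantics
    mytable.c.data[1]

    # opt into zero-based indexed access at the type level
    Column("data", postgresql.ARRAY(Integer, zero_indexes=True))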
Additionally, the :class:`_postgresql.ARRAY`
type does not work directly in
@@ -203,6 +213,7 @@ class ARRAY(sqltypes.ARRAY):
from sqlalchemy.dialects.postgresql import ARRAY
from sqlalchemy.ext.mutable import MutableList
class SomeOrmClass(Base):
# ...
@@ -224,41 +235,6 @@ class ARRAY(sqltypes.ARRAY):
"""
class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`_types.ARRAY`.
Note that these operations are in addition to those provided
by the base :class:`.types.ARRAY.Comparator` class, including
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`.
"""
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
kwargs may be ignored by this operator but are required for API
conformance.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
def __init__(
self,
item_type: _TypeEngineArgument[Any],
@@ -270,7 +246,7 @@ class ARRAY(sqltypes.ARRAY):
E.g.::
Column('myarray', ARRAY(Integer))
Column("myarray", ARRAY(Integer))
Arguments are:
@@ -310,6 +286,41 @@ class ARRAY(sqltypes.ARRAY):
self.dimensions = dimensions
self.zero_indexes = zero_indexes
class Comparator(sqltypes.ARRAY.Comparator):
"""Define comparison operations for :class:`_types.ARRAY`.
Note that these operations are in addition to those provided
by the base :class:`.types.ARRAY.Comparator` class, including
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`.
"""
def contains(self, other, **kwargs):
"""Boolean expression. Test if elements are a superset of the
elements of the argument array expression.
kwargs may be ignored by this operator but are required for API
conformance.
"""
return self.operate(CONTAINS, other, result_type=sqltypes.Boolean)
def contained_by(self, other):
"""Boolean expression. Test if elements are a proper subset of the
elements of the argument array expression.
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
)
def overlap(self, other):
"""Boolean expression. Test if array has elements in common with
an argument array expression.
"""
return self.operate(OVERLAP, other, result_type=sqltypes.Boolean)
comparator_factory = Comparator
@property
def hashable(self):
return self.as_tuple

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/asyncpg.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors <see AUTHORS
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
@@ -23,7 +23,10 @@ This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname")
engine = create_async_engine(
"postgresql+asyncpg://user:pass@hostname/dbname"
)
.. versionadded:: 1.4
@@ -78,11 +81,15 @@ asyncpg dialect, therefore is handled as a DBAPI argument, not a dialect
argument)::
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500")
engine = create_async_engine(
"postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=500"
)
To disable the prepared statement cache, use a value of zero::
engine = create_async_engine("postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0")
engine = create_async_engine(
"postgresql+asyncpg://user:pass@hostname/dbname?prepared_statement_cache_size=0"
)
.. versionadded:: 1.4.0b2 Added ``prepared_statement_cache_size`` for asyncpg.
@@ -112,8 +119,8 @@ To disable the prepared statement cache, use a value of zero::
.. _asyncpg_prepared_statement_name:
Prepared Statement Name
-----------------------
Prepared Statement Name with PGBouncer
--------------------------------------
By default, asyncpg enumerates prepared statements in numeric order, which
can lead to errors if a name has already been taken for another prepared
@@ -128,10 +135,10 @@ a prepared statement is prepared::
from uuid import uuid4
engine = create_async_engine(
"postgresql+asyncpg://user:pass@hostname/dbname",
"postgresql+asyncpg://user:pass@somepgbouncer/dbname",
poolclass=NullPool,
connect_args={
'prepared_statement_name_func': lambda: f'__asyncpg_{uuid4()}__',
"prepared_statement_name_func": lambda: f"__asyncpg_{uuid4()}__",
},
)
@@ -141,7 +148,7 @@ a prepared statement is prepared::
https://github.com/sqlalchemy/sqlalchemy/issues/6467
.. warning:: To prevent a buildup of useless prepared statements in
.. warning:: When using PGBouncer, to prevent a buildup of useless prepared statements in
your application, it's important to use the :class:`.NullPool` pool
class, and to configure PgBouncer to use `DISCARD <https://www.postgresql.org/docs/current/sql-discard.html>`_
when returning connections. The DISCARD command is used to release resources held by the db connection,
@@ -171,7 +178,7 @@ client using this setting passed to :func:`_asyncio.create_async_engine`::
from __future__ import annotations
import collections
from collections import deque
import decimal
import json as _py_json
import re
@@ -258,20 +265,20 @@ class AsyncpgInteger(sqltypes.Integer):
render_bind_cast = True
class AsyncpgSmallInteger(sqltypes.SmallInteger):
render_bind_cast = True
class AsyncpgBigInteger(sqltypes.BigInteger):
render_bind_cast = True
class AsyncpgJSON(json.JSON):
render_bind_cast = True
def result_processor(self, dialect, coltype):
return None
class AsyncpgJSONB(json.JSONB):
render_bind_cast = True
def result_processor(self, dialect, coltype):
return None
@@ -487,7 +494,7 @@ class AsyncAdapt_asyncpg_cursor:
def __init__(self, adapt_connection):
self._adapt_connection = adapt_connection
self._connection = adapt_connection._connection
self._rows = []
self._rows = deque()
self._cursor = None
self.description = None
self.arraysize = 1
@@ -495,7 +502,7 @@ class AsyncAdapt_asyncpg_cursor:
self._invalidate_schema_cache_asof = 0
def close(self):
self._rows[:] = []
self._rows.clear()
def _handle_exception(self, error):
self._adapt_connection._handle_exception(error)
@@ -535,11 +542,12 @@ class AsyncAdapt_asyncpg_cursor:
self._cursor = await prepared_stmt.cursor(*parameters)
self.rowcount = -1
else:
self._rows = await prepared_stmt.fetch(*parameters)
self._rows = deque(await prepared_stmt.fetch(*parameters))
status = prepared_stmt.get_statusmsg()
reg = re.match(
r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)", status
r"(?:SELECT|UPDATE|DELETE|INSERT \d+) (\d+)",
status or "",
)
if reg:
self.rowcount = int(reg.group(1))
@@ -583,11 +591,11 @@ class AsyncAdapt_asyncpg_cursor:
def __iter__(self):
while self._rows:
yield self._rows.pop(0)
yield self._rows.popleft()
def fetchone(self):
if self._rows:
return self._rows.pop(0)
return self._rows.popleft()
else:
return None
@@ -595,13 +603,12 @@ class AsyncAdapt_asyncpg_cursor:
if size is None:
size = self.arraysize
retval = self._rows[0:size]
self._rows[:] = self._rows[size:]
return retval
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = self._rows[:]
self._rows[:] = []
retval = list(self._rows)
self._rows.clear()
return retval
@@ -611,23 +618,21 @@ class AsyncAdapt_asyncpg_ss_cursor(AsyncAdapt_asyncpg_cursor):
def __init__(self, adapt_connection):
super().__init__(adapt_connection)
self._rowbuffer = None
self._rowbuffer = deque()
def close(self):
self._cursor = None
self._rowbuffer = None
self._rowbuffer.clear()
def _buffer_rows(self):
assert self._cursor is not None
new_rows = self._adapt_connection.await_(self._cursor.fetch(50))
self._rowbuffer = collections.deque(new_rows)
self._rowbuffer.extend(new_rows)
def __aiter__(self):
return self
async def __anext__(self):
if not self._rowbuffer:
self._buffer_rows()
while True:
while self._rowbuffer:
yield self._rowbuffer.popleft()
@@ -650,21 +655,19 @@ class AsyncAdapt_asyncpg_ss_cursor(AsyncAdapt_asyncpg_cursor):
if not self._rowbuffer:
self._buffer_rows()
buf = list(self._rowbuffer)
lb = len(buf)
assert self._cursor is not None
rb = self._rowbuffer
lb = len(rb)
if size > lb:
buf.extend(
rb.extend(
self._adapt_connection.await_(self._cursor.fetch(size - lb))
)
result = buf[0:size]
self._rowbuffer = collections.deque(buf[size:])
return result
return [rb.popleft() for _ in range(min(size, len(rb)))]
def fetchall(self):
ret = list(self._rowbuffer) + list(
self._adapt_connection.await_(self._all())
)
ret = list(self._rowbuffer)
ret.extend(self._adapt_connection.await_(self._all()))
self._rowbuffer.clear()
return ret
@@ -714,7 +717,7 @@ class AsyncAdapt_asyncpg_connection(AdaptedConnection):
):
self.dbapi = dbapi
self._connection = connection
self.isolation_level = self._isolation_setting = "read_committed"
self.isolation_level = self._isolation_setting = None
self.readonly = False
self.deferrable = False
self._transaction = None
@@ -849,25 +852,45 @@ class AsyncAdapt_asyncpg_connection(AdaptedConnection):
else:
return AsyncAdapt_asyncpg_cursor(self)
async def _rollback_and_discard(self):
try:
await self._transaction.rollback()
finally:
# if asyncpg .rollback() was actually called, then whether or
# not it raised or succeeded, the transaction is done, discard it
self._transaction = None
self._started = False
async def _commit_and_discard(self):
try:
await self._transaction.commit()
finally:
# if asyncpg .commit() was actually called, then whether or
# not it raised or succeeded, the transaction is done, discard it
self._transaction = None
self._started = False
def rollback(self):
if self._started:
try:
self.await_(self._transaction.rollback())
except Exception as error:
self._handle_exception(error)
finally:
self.await_(self._rollback_and_discard())
self._transaction = None
self._started = False
except Exception as error:
# don't dereference asyncpg transaction if we didn't
# actually try to call rollback() on it
self._handle_exception(error)
def commit(self):
if self._started:
try:
self.await_(self._transaction.commit())
except Exception as error:
self._handle_exception(error)
finally:
self.await_(self._commit_and_discard())
self._transaction = None
self._started = False
except Exception as error:
# don't dereference asyncpg transaction if we didn't
# actually try to call commit() on it
self._handle_exception(error)
def close(self):
self.rollback()
@@ -881,9 +904,10 @@ class AsyncAdapt_asyncpg_connection(AdaptedConnection):
try:
# try to gracefully close; see #10717
# timeout added in asyncpg 0.14.0 December 2017
self.await_(self._connection.close(timeout=2))
self.await_(asyncio.shield(self._connection.close(timeout=2)))
except (
asyncio.TimeoutError,
asyncio.CancelledError,
OSError,
self.dbapi.asyncpg.PostgresError,
):
@@ -1032,6 +1056,7 @@ class PGDialect_asyncpg(PGDialect):
INTERVAL: AsyncPgInterval,
sqltypes.Boolean: AsyncpgBoolean,
sqltypes.Integer: AsyncpgInteger,
sqltypes.SmallInteger: AsyncpgSmallInteger,
sqltypes.BigInteger: AsyncpgBigInteger,
sqltypes.Numeric: AsyncpgNumeric,
sqltypes.Float: AsyncpgFloat,

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/dml.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,7 +7,10 @@
from __future__ import annotations
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from . import ext
from .._typing import _OnConflictConstraintT
@@ -26,7 +29,9 @@ from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import ColumnElement
from ...sql.elements import KeyedColumnElement
from ...sql.elements import TextClause
from ...sql.expression import alias
from ...util.typing import Self
@@ -153,11 +158,10 @@ class Insert(StandardInsert):
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. If present, can be a literal SQL
string or an acceptable expression for a ``WHERE`` clause
that restricts the rows affected by ``DO UPDATE SET``. Rows
not meeting the ``WHERE`` condition will not be updated
(effectively a ``DO NOTHING`` for those rows).
Optional argument. An expression object representing a ``WHERE``
clause that restricts the rows affected by ``DO UPDATE SET``. Rows not
meeting the ``WHERE`` condition will not be updated (effectively a
``DO NOTHING`` for those rows).
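For example, a sketch assuming a hypothetical ``users`` table::

    from sqlalchemy.dialects.postgresql import insert

    stmt = insert(users).values(id=1, name="foo")
    stmt = stmt.on_conflict_do_update(
        index_elements=[users.c.id],
        set_=dict(name=stmt.excluded.name),
        where=(users.c.name != stmt.excluded.name),
    )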
.. seealso::
@@ -212,8 +216,10 @@ class OnConflictClause(ClauseElement):
stringify_dialect = "postgresql"
constraint_target: Optional[str]
inferred_target_elements: _OnConflictIndexElementsT
inferred_target_whereclause: _OnConflictIndexWhereT
inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
inferred_target_whereclause: Optional[
Union[ColumnElement[Any], TextClause]
]
def __init__(
self,
@@ -254,8 +260,24 @@ class OnConflictClause(ClauseElement):
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
self.inferred_target_elements = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in index_elements
]
self.inferred_target_whereclause = (
coercions.expect(
(
roles.StatementOptionRole
if isinstance(constraint, ext.ExcludeConstraint)
else roles.WhereHavingRole
),
index_where,
)
if index_where is not None
else None
)
elif constraint is None:
self.constraint_target = self.inferred_target_elements = (
self.inferred_target_whereclause
@@ -269,6 +291,9 @@ class OnConflictDoNothing(OnConflictClause):
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
update_whereclause: Optional[ColumnElement[Any]]
def __init__(
self,
constraint: _OnConflictConstraintT = None,
@@ -307,4 +332,8 @@ class OnConflictDoUpdate(OnConflictClause):
(coercions.expect(roles.DMLColumnRole, key), value)
for key, value in set_.items()
]
self.update_whereclause = where
self.update_whereclause = (
coercions.expect(roles.WhereHavingRole, where)
if where is not None
else None
)

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/ext.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -35,22 +35,26 @@ class aggregate_order_by(expression.ColumnElement):
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select(expr)
would represent the expression::
would represent the expression:
.. sourcecode:: sql
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
table.c.a, aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select(expr)
Would represent::
Would represent:
.. sourcecode:: sql
SELECT string_agg(a, ',' ORDER BY a) FROM table;
@@ -131,10 +135,10 @@ class ExcludeConstraint(ColumnCollectionConstraint):
E.g.::
const = ExcludeConstraint(
(Column('period'), '&&'),
(Column('group'), '='),
where=(Column('group') != 'some group'),
ops={'group': 'my_operator_class'}
(Column("period"), "&&"),
(Column("group"), "="),
where=(Column("group") != "some group"),
ops={"group": "my_operator_class"},
)
The constraint is normally embedded into the :class:`_schema.Table`
@@ -142,19 +146,20 @@ class ExcludeConstraint(ColumnCollectionConstraint):
directly, or added later using :meth:`.append_constraint`::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('period', TSRANGE()),
Column('group', String)
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("period", TSRANGE()),
Column("group", String),
)
some_table.append_constraint(
ExcludeConstraint(
(some_table.c.period, '&&'),
(some_table.c.group, '='),
where=some_table.c.group != 'some group',
name='some_table_excl_const',
ops={'group': 'my_operator_class'}
(some_table.c.period, "&&"),
(some_table.c.group, "="),
where=some_table.c.group != "some group",
name="some_table_excl_const",
ops={"group": "my_operator_class"},
)
)

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/hstore.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -28,28 +28,29 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
The :class:`.HSTORE` type stores dictionaries containing strings, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', HSTORE)
data_table = Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", HSTORE),
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
data_table.insert(), {"data": {"key1": "value1", "key2": "value2"}}
)
:class:`.HSTORE` provides for a wide range of operations, including:
* Index operations::
data_table.c.data['some key'] == 'some value'
data_table.c.data["some key"] == "some value"
* Containment operations::
data_table.c.data.has_key('some key')
data_table.c.data.has_key("some key")
data_table.c.data.has_all(['one', 'two', 'three'])
data_table.c.data.has_all(["one", "two", "three"])
* Concatenation::
@@ -72,17 +73,19 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
from sqlalchemy.ext.mutable import MutableDict
class MyClass(Base):
__tablename__ = 'data_table'
__tablename__ = "data_table"
id = Column(Integer, primary_key=True)
data = Column(MutableDict.as_mutable(HSTORE))
my_object = session.query(MyClass).one()
# in-place mutation, requires Mutable extension
# in order for the ORM to detect
my_object.data['some_key'] = 'some value'
my_object.data["some_key"] = "some value"
session.commit()
@@ -96,7 +99,7 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
:class:`.hstore` - render the PostgreSQL ``hstore()`` function.
"""
""" # noqa: E501
__visit_name__ = "HSTORE"
hashable = False
@@ -192,6 +195,9 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
comparator_factory = Comparator
def bind_processor(self, dialect):
# note that dialect-specific types like that of psycopg and
# psycopg2 will override this method to allow driver-level conversion
# instead, see _PsycopgHStore
def process(value):
if isinstance(value, dict):
return _serialize_hstore(value)
@@ -201,6 +207,9 @@ class HSTORE(sqltypes.Indexable, sqltypes.Concatenable, sqltypes.TypeEngine):
return process
def result_processor(self, dialect, coltype):
# note that dialect-specific types like that of psycopg and
# psycopg2 will override this method to allow driver-level conversion
# instead, see _PsycopgHStore
def process(value):
if value is not None:
return _parse_hstore(value)
@@ -221,12 +230,12 @@ class hstore(sqlfunc.GenericFunction):
from sqlalchemy.dialects.postgresql import array, hstore
select(hstore('key1', 'value1'))
select(hstore("key1", "value1"))
select(
hstore(
array(['key1', 'key2', 'key3']),
array(['value1', 'value2', 'value3'])
array(["key1", "key2", "key3"]),
array(["value1", "value2", "value3"]),
)
)

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/json.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -90,14 +90,14 @@ class JSON(sqltypes.JSON):
* Index operations (the ``->`` operator)::
data_table.c.data['some key']
data_table.c.data["some key"]
data_table.c.data[5]
* Index operations returning text
(the ``->>`` operator)::
* Index operations returning text (the ``->>`` operator)::
data_table.c.data['some key'].astext == 'some value'
data_table.c.data["some key"].astext == "some value"
Note that equivalent functionality is available via the
:attr:`.JSON.Comparator.as_string` accessor.
@@ -105,18 +105,20 @@ class JSON(sqltypes.JSON):
* Index operations with CAST
(equivalent to ``CAST(col ->> ['some key'] AS <type>)``)::
data_table.c.data['some key'].astext.cast(Integer) == 5
data_table.c.data["some key"].astext.cast(Integer) == 5
Note that equivalent functionality is available via the
:attr:`.JSON.Comparator.as_integer` and similar accessors.
* Path index operations (the ``#>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]
* Path index operations returning text (the ``#>>`` operator)::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')].astext == 'some value'
data_table.c.data[
("key_1", "key_2", 5, ..., "key_n")
].astext == "some value"
Index operations return an expression object whose type defaults to
:class:`_types.JSON`,
@@ -128,10 +130,11 @@ class JSON(sqltypes.JSON):
using psycopg2, the DBAPI only allows serializers at the per-cursor
or per-connection level. E.g.::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn
)
engine = create_engine(
"postgresql+psycopg2://scott:tiger@localhost/test",
json_serializer=my_serialize_fn,
json_deserializer=my_deserialize_fn,
)
When using the psycopg2 dialect, the json_deserializer is registered
against the database using ``psycopg2.extras.register_default_json``.
@@ -144,6 +147,7 @@ class JSON(sqltypes.JSON):
""" # noqa
render_bind_cast = True
astext_type = sqltypes.Text()
def __init__(self, none_as_null=False, astext_type=None):
@@ -155,7 +159,8 @@ class JSON(sqltypes.JSON):
be used to persist a NULL value::
from sqlalchemy import null
conn.execute(table.insert(), data=null())
conn.execute(table.insert(), {"data": null()})
.. seealso::
@@ -180,7 +185,7 @@ class JSON(sqltypes.JSON):
E.g.::
select(data_table.c.data['some key'].astext)
select(data_table.c.data["some key"].astext)
.. seealso::
@@ -207,15 +212,16 @@ class JSONB(JSON):
The :class:`_postgresql.JSONB` type stores arbitrary JSONB format data,
e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSONB)
data_table = Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", JSONB),
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
data = {"key1": "value1", "key2": "value2"}
data_table.insert(), {"data": {"key1": "value1", "key2": "value2"}}
)
The :class:`_postgresql.JSONB` type includes all operations provided by
@@ -256,22 +262,27 @@ class JSONB(JSON):
"""Define comparison operations for :class:`_types.JSON`."""
def has_key(self, other):
"""Boolean expression. Test for presence of a key. Note that the
key may be a SQLA expression.
"""Boolean expression. Test for presence of a key (equivalent of
the ``?`` operator). Note that the key may be a SQLA expression.
"""
return self.operate(HAS_KEY, other, result_type=sqltypes.Boolean)
def has_all(self, other):
"""Boolean expression. Test for presence of all keys in jsonb"""
"""Boolean expression. Test for presence of all keys in jsonb
(equivalent of the ``?&`` operator)
"""
return self.operate(HAS_ALL, other, result_type=sqltypes.Boolean)
def has_any(self, other):
"""Boolean expression. Test for presence of any key in jsonb"""
"""Boolean expression. Test for presence of any key in jsonb
(equivalent of the ``?|`` operator)
"""
return self.operate(HAS_ANY, other, result_type=sqltypes.Boolean)
def contains(self, other, **kwargs):
"""Boolean expression. Test if keys (or array) are a superset
of/contained the keys of the argument jsonb expression.
of, or contain, the keys of the argument jsonb expression
(equivalent of the ``@>`` operator).
kwargs may be ignored by this operator but are required for API
conformance.
@@ -280,7 +291,8 @@ class JSONB(JSON):
def contained_by(self, other):
"""Boolean expression. Test if keys are a proper subset of the
keys of the argument jsonb expression.
keys of the argument jsonb expression
(equivalent of the ``<@`` operator).
"""
return self.operate(
CONTAINED_BY, other, result_type=sqltypes.Boolean
@@ -288,7 +300,7 @@ class JSONB(JSON):
def delete_path(self, array):
"""JSONB expression. Deletes field or array element specified in
the argument array.
the argument array (equivalent of the ``#-`` operator).
The input may be a list of strings that will be coerced to an
``ARRAY`` or an instance of :class:`_postgresql.array`.
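E.g., a sketch assuming the ``data_table`` from the earlier examples::

    data_table.c.data.delete_path(["key_1", "key_2"])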
@@ -302,7 +314,7 @@ class JSONB(JSON):
def path_exists(self, other):
"""Boolean expression. Test for presence of item given by the
argument JSONPath expression.
argument JSONPath expression (equivalent of the ``@?`` operator).
.. versionadded:: 2.0
"""
@@ -312,7 +324,8 @@ class JSONB(JSON):
def path_match(self, other):
"""Boolean expression. Test if JSONPath predicate given by the
argument JSONPath expression matches.
argument JSONPath expression matches
(equivalent of the ``@@`` operator).
Only the first item of the result is taken into account.
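E.g., a sketch assuming the ``data_table`` from the earlier examples::

    data_table.c.data.path_match("$.key_1 > 5")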

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/named_types.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -185,8 +185,10 @@ class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
:meth:`_schema.Table.drop`
methods are called::
table = Table('sometable', metadata,
Column('some_enum', ENUM('a', 'b', 'c', name='myenum'))
table = Table(
"sometable",
metadata,
Column("some_enum", ENUM("a", "b", "c", name="myenum")),
)
table.create(engine) # will emit CREATE ENUM and CREATE TABLE
@@ -197,21 +199,17 @@ class ENUM(NamedType, type_api.NativeForEmulated, sqltypes.Enum):
:class:`_postgresql.ENUM` independently, and associate it with the
:class:`_schema.MetaData` object itself::
my_enum = ENUM('a', 'b', 'c', name='myenum', metadata=metadata)
my_enum = ENUM("a", "b", "c", name="myenum", metadata=metadata)
t1 = Table('sometable_one', metadata,
Column('some_enum', myenum)
)
t1 = Table("sometable_one", metadata, Column("some_enum", myenum))
t2 = Table('sometable_two', metadata,
Column('some_enum', myenum)
)
t2 = Table("sometable_two", metadata, Column("some_enum", myenum))
When this pattern is used, care must still be taken at the level
of individual table creates. Emitting CREATE TABLE without also
specifying ``checkfirst=True`` will still cause issues::
t1.create(engine) # will fail: no such type 'myenum'
t1.create(engine) # will fail: no such type 'myenum'
If we specify ``checkfirst=True``, the individual table-level create
operation will check for the ``ENUM`` and create if not exists::
@@ -387,14 +385,12 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
A domain is essentially a data type with optional constraints
that restrict the allowed set of values. E.g.::
PositiveInt = DOMAIN(
"pos_int", Integer, check="VALUE > 0", not_null=True
)
PositiveInt = DOMAIN("pos_int", Integer, check="VALUE > 0", not_null=True)
UsPostalCode = DOMAIN(
"us_postal_code",
Text,
check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'"
check="VALUE ~ '^\d{5}$' OR VALUE ~ '^\d{5}-\d{4}$'",
)
See the `PostgreSQL documentation`__ for additional details
@@ -403,7 +399,7 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
.. versionadded:: 2.0
"""
""" # noqa: E501
DDLGenerator = DomainGenerator
DDLDropper = DomainDropper
@@ -416,10 +412,10 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
data_type: _TypeEngineArgument[Any],
*,
collation: Optional[str] = None,
default: Optional[Union[str, elements.TextClause]] = None,
default: Union[elements.TextClause, str, None] = None,
constraint_name: Optional[str] = None,
not_null: Optional[bool] = None,
check: Optional[str] = None,
check: Union[elements.TextClause, str, None] = None,
create_type: bool = True,
**kw: Any,
):
@@ -463,7 +459,7 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
self.default = default
self.collation = collation
self.constraint_name = constraint_name
self.not_null = not_null
self.not_null = bool(not_null)
if check is not None:
check = coercions.expect(roles.DDLExpressionRole, check)
self.check = check
@@ -474,6 +470,20 @@ class DOMAIN(NamedType, sqltypes.SchemaType):
def __test_init__(cls):
return cls("name", sqltypes.Integer)
def adapt(self, impl, **kw):
if self.default:
kw["default"] = self.default
if self.constraint_name is not None:
kw["constraint_name"] = self.constraint_name
if self.not_null:
kw["not_null"] = self.not_null
if self.check is not None:
kw["check"] = str(self.check)
if self.create_type:
kw["create_type"] = self.create_type
return super().adapt(impl, **kw)
class CreateEnumType(schema._CreateDropBase):
__visit_name__ = "create_enum_type"

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/operators.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/pg8000.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors <see AUTHORS
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors <see AUTHORS
# file>
#
# This module is part of SQLAlchemy and is released under
@@ -27,19 +27,21 @@ PostgreSQL ``client_encoding`` parameter; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
# encoding
# client_encoding = sql_ascii # actually, defaults to database encoding
client_encoding = utf8
The ``client_encoding`` can be overridden for a session by executing the SQL:
SET CLIENT_ENCODING TO 'utf8';
.. sourcecode:: sql
SET CLIENT_ENCODING TO 'utf8';
SQLAlchemy will execute this SQL on all new connections based on the value
passed to :func:`_sa.create_engine` using the ``client_encoding`` parameter::
engine = create_engine(
"postgresql+pg8000://user:pass@host/dbname", client_encoding='utf8')
"postgresql+pg8000://user:pass@host/dbname", client_encoding="utf8"
)
.. _pg8000_ssl:
@@ -50,6 +52,7 @@ pg8000 accepts a Python ``SSLContext`` object which may be specified using the
:paramref:`_sa.create_engine.connect_args` dictionary::
import ssl
ssl_context = ssl.create_default_context()
engine = sa.create_engine(
"postgresql+pg8000://scott:tiger@192.168.0.199/test",
@@ -61,6 +64,7 @@ or does not match the host name (as seen from the client), it may also be
necessary to disable hostname checking::
import ssl
ssl_context = ssl.create_default_context()
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/pg_catalog.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -77,7 +77,7 @@ RELKINDS_MAT_VIEW = ("m",)
RELKINDS_ALL_TABLE_LIKE = RELKINDS_TABLE + RELKINDS_VIEW + RELKINDS_MAT_VIEW
# tables
pg_catalog_meta = MetaData()
pg_catalog_meta = MetaData(schema="pg_catalog")
pg_namespace = Table(
"pg_namespace",
@@ -85,7 +85,6 @@ pg_namespace = Table(
Column("oid", OID),
Column("nspname", NAME),
Column("nspowner", OID),
schema="pg_catalog",
)
pg_class = Table(
@@ -120,7 +119,6 @@ pg_class = Table(
Column("relispartition", Boolean, info={"server_version": (10,)}),
Column("relrewrite", OID, info={"server_version": (11,)}),
Column("reloptions", ARRAY(Text)),
schema="pg_catalog",
)
pg_type = Table(
@@ -155,7 +153,6 @@ pg_type = Table(
Column("typndims", Integer),
Column("typcollation", OID, info={"server_version": (9, 1)}),
Column("typdefault", Text),
schema="pg_catalog",
)
pg_index = Table(
@@ -182,7 +179,6 @@ pg_index = Table(
Column("indoption", INT2VECTOR),
Column("indexprs", PG_NODE_TREE),
Column("indpred", PG_NODE_TREE),
schema="pg_catalog",
)
pg_attribute = Table(
@@ -209,7 +205,6 @@ pg_attribute = Table(
Column("attislocal", Boolean),
Column("attinhcount", Integer),
Column("attcollation", OID, info={"server_version": (9, 1)}),
schema="pg_catalog",
)
pg_constraint = Table(
@@ -235,7 +230,6 @@ pg_constraint = Table(
Column("connoinherit", Boolean, info={"server_version": (9, 2)}),
Column("conkey", ARRAY(SmallInteger)),
Column("confkey", ARRAY(SmallInteger)),
schema="pg_catalog",
)
pg_sequence = Table(
@@ -249,7 +243,6 @@ pg_sequence = Table(
Column("seqmin", BigInteger),
Column("seqcache", BigInteger),
Column("seqcycle", Boolean),
schema="pg_catalog",
info={"server_version": (10,)},
)
@@ -260,7 +253,6 @@ pg_attrdef = Table(
Column("adrelid", OID),
Column("adnum", SmallInteger),
Column("adbin", PG_NODE_TREE),
schema="pg_catalog",
)
pg_description = Table(
@@ -270,7 +262,6 @@ pg_description = Table(
Column("classoid", OID),
Column("objsubid", Integer),
Column("description", Text(collation="C")),
schema="pg_catalog",
)
pg_enum = Table(
@@ -280,7 +271,6 @@ pg_enum = Table(
Column("enumtypid", OID),
Column("enumsortorder", Float(), info={"server_version": (9, 1)}),
Column("enumlabel", NAME),
schema="pg_catalog",
)
pg_am = Table(
@@ -290,5 +280,21 @@ pg_am = Table(
Column("amname", NAME),
Column("amhandler", REGPROC, info={"server_version": (9, 6)}),
Column("amtype", CHAR, info={"server_version": (9, 6)}),
schema="pg_catalog",
)
pg_collation = Table(
"pg_collation",
pg_catalog_meta,
Column("oid", OID, info={"server_version": (9, 3)}),
Column("collname", NAME),
Column("collnamespace", OID),
Column("collowner", OID),
Column("collprovider", CHAR, info={"server_version": (10,)}),
Column("collisdeterministic", Boolean, info={"server_version": (12,)}),
Column("collencoding", Integer),
Column("collcollate", Text),
Column("collctype", Text),
Column("colliculocale", Text),
Column("collicurules", Text, info={"server_version": (16,)}),
Column("collversion", Text, info={"server_version": (10,)}),
)

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/provision.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -97,7 +97,7 @@ def drop_all_schema_objects_pre_tables(cfg, eng):
for xid in conn.exec_driver_sql(
"select gid from pg_prepared_xacts"
).scalars():
conn.execute("ROLLBACK PREPARED '%s'" % xid)
conn.exec_driver_sql("ROLLBACK PREPARED '%s'" % xid)
@drop_all_schema_objects_post_tables.for_db("postgresql")

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/psycopg.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -29,20 +29,29 @@ selected depending on how the engine is created:
automatically select the sync version, e.g.::
from sqlalchemy import create_engine
sync_engine = create_engine("postgresql+psycopg://scott:tiger@localhost/test")
sync_engine = create_engine(
"postgresql+psycopg://scott:tiger@localhost/test"
)
* calling :func:`_asyncio.create_async_engine` with
``postgresql+psycopg://...`` will automatically select the async version,
e.g.::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine("postgresql+psycopg://scott:tiger@localhost/test")
asyncio_engine = create_async_engine(
"postgresql+psycopg://scott:tiger@localhost/test"
)
The asyncio version of the dialect may also be specified explicitly using the
``psycopg_async`` suffix, as::
from sqlalchemy.ext.asyncio import create_async_engine
asyncio_engine = create_async_engine("postgresql+psycopg_async://scott:tiger@localhost/test")
asyncio_engine = create_async_engine(
"postgresql+psycopg_async://scott:tiger@localhost/test"
)
.. seealso::
@@ -50,9 +59,42 @@ The asyncio version of the dialect may also be specified explicitly using the
dialect shares most of its behavior with the ``psycopg2`` dialect.
Further documentation is available there.
Using a different Cursor class
------------------------------
One of the differences between ``psycopg`` and the older ``psycopg2``
is how bound parameters are handled: ``psycopg2`` would bind them
client side, while ``psycopg`` by default will bind them server side.
It's possible to configure ``psycopg`` to do client side binding by
specifying the ``cursor_factory`` to be ``ClientCursor`` when creating
the engine::
from psycopg import ClientCursor
client_side_engine = create_engine(
"postgresql+psycopg://...",
connect_args={"cursor_factory": ClientCursor},
)
Similarly when using an async engine the ``AsyncClientCursor`` can be
specified::
from psycopg import AsyncClientCursor
client_side_engine = create_async_engine(
"postgresql+psycopg://...",
connect_args={"cursor_factory": AsyncClientCursor},
)
.. seealso::
`Client-side-binding cursors <https://www.psycopg.org/psycopg3/docs/advanced/cursors.html#client-side-binding-cursors>`_
""" # noqa
from __future__ import annotations
from collections import deque
import logging
import re
from typing import cast
@@ -93,8 +135,6 @@ class _PGREGCONFIG(REGCONFIG):
class _PGJSON(JSON):
render_bind_cast = True
def bind_processor(self, dialect):
return self._make_bind_processor(None, dialect._psycopg_Json)
@@ -103,8 +143,6 @@ class _PGJSON(JSON):
class _PGJSONB(JSONB):
render_bind_cast = True
def bind_processor(self, dialect):
return self._make_bind_processor(None, dialect._psycopg_Jsonb)
@@ -368,10 +406,12 @@ class PGDialect_psycopg(_PGDialect_common_psycopg):
# register the adapter for connections made subsequent to
# this one
assert self._psycopg_adapters_map
register_hstore(info, self._psycopg_adapters_map)
# register the adapter for this connection
register_hstore(info, connection.connection)
assert connection.connection
register_hstore(info, connection.connection.driver_connection)
@classmethod
def import_dbapi(cls):
@@ -532,7 +572,7 @@ class AsyncAdapt_psycopg_cursor:
def __init__(self, cursor, await_) -> None:
self._cursor = cursor
self.await_ = await_
self._rows = []
self._rows = deque()
def __getattr__(self, name):
return getattr(self._cursor, name)
@@ -559,24 +599,19 @@ class AsyncAdapt_psycopg_cursor:
# eq/ne
if res and res.status == self._psycopg_ExecStatus.TUPLES_OK:
rows = self.await_(self._cursor.fetchall())
if not isinstance(rows, list):
self._rows = list(rows)
else:
self._rows = rows
self._rows = deque(rows)
return result
def executemany(self, query, params_seq):
return self.await_(self._cursor.executemany(query, params_seq))
def __iter__(self):
# TODO: try to avoid pop(0) on a list
while self._rows:
yield self._rows.pop(0)
yield self._rows.popleft()
def fetchone(self):
if self._rows:
# TODO: try to avoid pop(0) on a list
return self._rows.pop(0)
return self._rows.popleft()
else:
return None
@@ -584,13 +619,12 @@ class AsyncAdapt_psycopg_cursor:
if size is None:
size = self._cursor.arraysize
retval = self._rows[0:size]
self._rows = self._rows[size:]
return retval
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = self._rows
self._rows = []
retval = list(self._rows)
self._rows.clear()
return retval

View File

@@ -1,5 +1,5 @@
# dialects/postgresql/psycopg2.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -88,7 +88,6 @@ connection URI::
"postgresql+psycopg2://scott:tiger@192.168.0.199:5432/test?sslmode=require"
)
Unix Domain Connections
------------------------
@@ -103,13 +102,17 @@ in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
create_engine(
"postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql"
)
.. warning:: The format accepted here allows for a hostname in the main URL
in addition to the "host" query string argument. **When using this URL
format, the initial host is silently ignored**. That is, this URL::
engine = create_engine("postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2")
engine = create_engine(
"postgresql+psycopg2://user:password@myhost1/dbname?host=myhost2"
)
Above, the hostname ``myhost1`` is **silently ignored and discarded.** The
host which is connected is the ``myhost2`` host.
@@ -190,7 +193,7 @@ any or all elements of the connection string.
For this form, the URL can be passed without any elements other than the
initial scheme::
engine = create_engine('postgresql+psycopg2://')
engine = create_engine("postgresql+psycopg2://")
In the above form, a blank "dsn" string is passed to the ``psycopg2.connect()``
function which in turn represents an empty DSN passed to libpq.
@@ -242,7 +245,7 @@ Psycopg2 Fast Execution Helpers
Modern versions of psycopg2 include a feature known as
`Fast Execution Helpers \
<https://initd.org/psycopg/docs/extras.html#fast-execution-helpers>`_, which
<https://www.psycopg.org/docs/extras.html#fast-execution-helpers>`_, which
have been shown in benchmarking to improve psycopg2's executemany()
performance, primarily with INSERT statements, by at least
an order of magnitude.
@@ -264,8 +267,8 @@ used feature. The use of this extension may be enabled using the
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values_plus_batch')
executemany_mode="values_plus_batch",
)
Possible options for ``executemany_mode`` include:
@@ -311,8 +314,10 @@ is below::
engine = create_engine(
"postgresql+psycopg2://scott:tiger@host/dbname",
executemany_mode='values_plus_batch',
insertmanyvalues_page_size=5000, executemany_batch_page_size=500)
executemany_mode="values_plus_batch",
insertmanyvalues_page_size=5000,
executemany_batch_page_size=500,
)
.. seealso::
@@ -338,7 +343,9 @@ in the following ways:
passed in the database URL; this parameter is consumed by the underlying
``libpq`` PostgreSQL client library::
engine = create_engine("postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8")
engine = create_engine(
"postgresql+psycopg2://user:pass@host/dbname?client_encoding=utf8"
)
Alternatively, the above ``client_encoding`` value may be passed using
:paramref:`_sa.create_engine.connect_args` for programmatic establishment with
@@ -346,7 +353,7 @@ in the following ways:
engine = create_engine(
"postgresql+psycopg2://user:pass@host/dbname",
connect_args={'client_encoding': 'utf8'}
connect_args={"client_encoding": "utf8"},
)
* For all PostgreSQL versions, psycopg2 supports a client-side encoding
@@ -355,8 +362,7 @@ in the following ways:
``client_encoding`` parameter passed to :func:`_sa.create_engine`::
engine = create_engine(
"postgresql+psycopg2://user:pass@host/dbname",
client_encoding="utf8"
"postgresql+psycopg2://user:pass@host/dbname", client_encoding="utf8"
)
.. tip:: The above ``client_encoding`` parameter admittedly is very similar
@@ -375,11 +381,9 @@ in the following ways:
# postgresql.conf file
# client_encoding = sql_ascii # actually, defaults to database
# encoding
# encoding
client_encoding = utf8
Transactions
------------
@@ -426,15 +430,15 @@ is set to the ``logging.INFO`` level, notice messages will be logged::
import logging
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
Above, it is assumed that logging is configured externally. If this is not
the case, configuration such as ``logging.basicConfig()`` must be utilized::
import logging
logging.basicConfig() # log messages to stdout
logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO)
logging.basicConfig() # log messages to stdout
logging.getLogger("sqlalchemy.dialects.postgresql").setLevel(logging.INFO)
.. seealso::
@@ -471,8 +475,10 @@ textual HSTORE expression. If this behavior is not desired, disable the
use of the hstore extension by setting ``use_native_hstore`` to ``False`` as
follows::
engine = create_engine("postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False)
engine = create_engine(
"postgresql+psycopg2://scott:tiger@localhost/test",
use_native_hstore=False,
)
The ``HSTORE`` type is **still supported** when the
``psycopg2.extensions.register_hstore()`` extension is not used. It merely
@@ -844,33 +850,43 @@ class PGDialect_psycopg2(_PGDialect_common_psycopg):
# checks based on strings. in the case that .closed
# didn't cut it, fall back onto these.
str_e = str(e).partition("\n")[0]
for msg in [
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
# psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL SYSCALL error: Operation timed out",
"SSL SYSCALL error: Bad address",
]:
for msg in self._is_disconnect_messages:
idx = str_e.find(msg)
if idx >= 0 and '"' not in str_e[:idx]:
return True
return False
@util.memoized_property
def _is_disconnect_messages(self):
return (
# these error messages from libpq: interfaces/libpq/fe-misc.c
# and interfaces/libpq/fe-secure.c.
"terminating connection",
"closed the connection",
"connection not open",
"could not receive data from server",
"could not send data to server",
# psycopg2 client errors, psycopg2/connection.h,
# psycopg2/cursor.h
"connection already closed",
"cursor already closed",
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
"losed the connection unexpectedly",
# these can occur in newer SSL
"connection has been closed unexpectedly",
"SSL error: decryption failed or bad record mac",
"SSL SYSCALL error: Bad file descriptor",
"SSL SYSCALL error: EOF detected",
"SSL SYSCALL error: Operation timed out",
"SSL SYSCALL error: Bad address",
# This can occur in OpenSSL 1 when an unexpected EOF occurs.
# https://www.openssl.org/docs/man1.1.1/man3/SSL_get_error.html#BUGS
# It may also occur in newer OpenSSL for a non-recoverable I/O
# error as a result of a system call that does not set 'errno'
# in libc.
"SSL SYSCALL error: Success",
)
dialect = PGDialect_psycopg2
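
The guard ``'"' not in str_e[:idx]`` used in the loop above avoids false
positives when an error message merely quotes SQL or an identifier that
happens to contain one of these phrases. A sketch of the matching logic in
isolation (``looks_disconnected`` is a hypothetical helper)::

    def looks_disconnected(exc_text, messages):
        # only the first line of the exception text is considered
        str_e = exc_text.partition("\n")[0]
        for msg in messages:
            idx = str_e.find(msg)
            # a preceding double quote means the phrase is quoted material,
            # not the driver's own error message
            if idx >= 0 and '"' not in str_e[:idx]:
                return True
        return False

    msgs = ("connection already closed",)
    assert looks_disconnected("connection already closed", msgs)
    assert not looks_disconnected(
        'relation "connection already closed" is gone', msgs
    )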


@@ -1,5 +1,5 @@
# dialects/postgresql/psycopg2cffi.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# dialects/postgresql/ranges.py
# Copyright (C) 2013-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -360,6 +360,8 @@ class Range(Generic[_T]):
else:
return self._contains_value(value)
__contains__ = contains
def overlaps(self, other: Range[_T]) -> bool:
"Determine whether this range overlaps with `other`."


@@ -1,5 +1,5 @@
# dialects/postgresql/types.py
# Copyright (C) 2013-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2013-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -94,12 +94,11 @@ class MONEY(sqltypes.TypeEngine[str]):
from sqlalchemy import Dialect
from sqlalchemy import TypeDecorator
class NumericMoney(TypeDecorator):
impl = MONEY
def process_result_value(
self, value: Any, dialect: Dialect
) -> None:
def process_result_value(self, value: Any, dialect: Dialect) -> None:
if value is not None:
# adjust this for the currency and numeric
m = re.match(r"\$([\d.]+)", value)
@@ -114,6 +113,7 @@ class MONEY(sqltypes.TypeEngine[str]):
from sqlalchemy import cast
from sqlalchemy import TypeDecorator
class NumericMoney(TypeDecorator):
impl = MONEY
@@ -122,7 +122,7 @@ class MONEY(sqltypes.TypeEngine[str]):
.. versionadded:: 1.2
"""
""" # noqa: E501
__visit_name__ = "MONEY"


@@ -1,5 +1,5 @@
# dialects/sqlite/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# dialects/sqlite/aiosqlite.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -31,6 +31,7 @@ This dialect should normally be used only with the
:func:`_asyncio.create_async_engine` engine creation function::
from sqlalchemy.ext.asyncio import create_async_engine
engine = create_async_engine("sqlite+aiosqlite:///filename")
The URL passes through all arguments to the ``pysqlite`` driver, so all
@@ -58,12 +59,14 @@ The solution is similar to :ref:`pysqlite_serializable`. This is achieved by the
engine = create_async_engine("sqlite+aiosqlite:///myfile.db")
@event.listens_for(engine.sync_engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable aiosqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine.sync_engine, "begin")
def do_begin(conn):
# emit our own BEGIN
@@ -75,9 +78,32 @@ The solution is similar to :ref:`pysqlite_serializable`. This is achieved by the
with the SQLite driver,
as this function necessarily will also alter the ".isolation_level" setting.
.. _aiosqlite_pooling:
Pooling Behavior
----------------
The SQLAlchemy ``aiosqlite`` DBAPI establishes the connection pool differently
based on the kind of SQLite database that's requested:
* When a ``:memory:`` SQLite database is specified, the dialect by default
will use :class:`.StaticPool`. This pool maintains a single
connection, so that all access to the engine
uses the same ``:memory:`` database.
* When a file-based database is specified, the dialect will use
:class:`.AsyncAdaptedQueuePool` as the source of connections.
.. versionchanged:: 2.0.38
SQLite file database engines now use :class:`.AsyncAdaptedQueuePool` by default.
Previously, :class:`.NullPool` was used. The :class:`.NullPool` class
may be used by specifying it via the
:paramref:`_sa.create_engine.poolclass` parameter.
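
To restore the previous behavior for a file-based database, pass the pool
class explicitly (a sketch, assuming the ``aiosqlite`` driver is installed)::

    from sqlalchemy.ext.asyncio import create_async_engine
    from sqlalchemy.pool import NullPool

    # override the AsyncAdaptedQueuePool default for file databases
    engine = create_async_engine(
        "sqlite+aiosqlite:///myfile.db", poolclass=NullPool
    )
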
""" # noqa
import asyncio
from collections import deque
from functools import partial
from .base import SQLiteExecutionContext
@@ -113,10 +139,10 @@ class AsyncAdapt_aiosqlite_cursor:
self.arraysize = 1
self.rowcount = -1
self.description = None
self._rows = []
self._rows = deque()
def close(self):
self._rows[:] = []
self._rows.clear()
def execute(self, operation, parameters=None):
try:
@@ -132,7 +158,7 @@ class AsyncAdapt_aiosqlite_cursor:
self.lastrowid = self.rowcount = -1
if not self.server_side:
self._rows = self.await_(_cursor.fetchall())
self._rows = deque(self.await_(_cursor.fetchall()))
else:
self.description = None
self.lastrowid = _cursor.lastrowid
@@ -161,11 +187,11 @@ class AsyncAdapt_aiosqlite_cursor:
def __iter__(self):
while self._rows:
yield self._rows.pop(0)
yield self._rows.popleft()
def fetchone(self):
if self._rows:
return self._rows.pop(0)
return self._rows.popleft()
else:
return None
@@ -173,13 +199,12 @@ class AsyncAdapt_aiosqlite_cursor:
if size is None:
size = self.arraysize
retval = self._rows[0:size]
self._rows[:] = self._rows[size:]
return retval
rr = self._rows
return [rr.popleft() for _ in range(min(size, len(rr)))]
def fetchall(self):
retval = self._rows[:]
self._rows[:] = []
retval = list(self._rows)
self._rows.clear()
return retval
@@ -377,7 +402,7 @@ class SQLiteDialect_aiosqlite(SQLiteDialect_pysqlite):
@classmethod
def get_pool_class(cls, url):
if cls._is_url_file_db(url):
return pool.NullPool
return pool.AsyncAdaptedQueuePool
else:
return pool.StaticPool
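
The effect of ``get_pool_class()`` above can be observed on the resulting
engines; a sketch (assuming SQLAlchemy 2.0.38+ and an installed ``aiosqlite``
driver)::

    from sqlalchemy.ext.asyncio import create_async_engine
    from sqlalchemy.pool import AsyncAdaptedQueuePool, StaticPool

    mem_engine = create_async_engine("sqlite+aiosqlite://")
    file_engine = create_async_engine("sqlite+aiosqlite:///somefile.db")

    # :memory: databases share one connection; file databases get a queue pool
    assert isinstance(mem_engine.sync_engine.pool, StaticPool)
    assert isinstance(file_engine.sync_engine.pool, AsyncAdaptedQueuePool)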


@@ -1,5 +1,5 @@
# dialects/sqlite/base.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,10 +7,9 @@
# mypy: ignore-errors
r"""
r'''
.. dialect:: sqlite
:name: SQLite
:full_support: 3.36.0
:normal_support: 3.12+
:best_effort: 3.7.16+
@@ -70,9 +69,12 @@ To specifically render the AUTOINCREMENT keyword on the primary key column
when rendering DDL, add the flag ``sqlite_autoincrement=True`` to the Table
construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Table(
"sometable",
metadata,
Column("id", Integer, primary_key=True),
sqlite_autoincrement=True,
)
Allowing autoincrement behavior for SQLAlchemy types other than Integer/INTEGER
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -92,8 +94,13 @@ One approach to achieve this is to use :class:`.Integer` on SQLite
only using :meth:`.TypeEngine.with_variant`::
table = Table(
"my_table", metadata,
Column("id", BigInteger().with_variant(Integer, "sqlite"), primary_key=True)
"my_table",
metadata,
Column(
"id",
BigInteger().with_variant(Integer, "sqlite"),
primary_key=True,
),
)
Another is to use a subclass of :class:`.BigInteger` that overrides its DDL
@@ -102,21 +109,23 @@ name to be ``INTEGER`` when compiled against SQLite::
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
class SLBigInteger(BigInteger):
pass
@compiles(SLBigInteger, 'sqlite')
@compiles(SLBigInteger, "sqlite")
def bi_c(element, compiler, **kw):
return "INTEGER"
@compiles(SLBigInteger)
def bi_c(element, compiler, **kw):
return compiler.visit_BIGINT(element, **kw)
table = Table(
"my_table", metadata,
Column("id", SLBigInteger(), primary_key=True)
"my_table", metadata, Column("id", SLBigInteger(), primary_key=True)
)
.. seealso::
@@ -236,26 +245,24 @@ To specify an explicit ``RETURNING`` clause, use the
# INSERT..RETURNING
result = connection.execute(
table.insert().
values(name='foo').
returning(table.c.col1, table.c.col2)
table.insert().values(name="foo").returning(table.c.col1, table.c.col2)
)
print(result.all())
# UPDATE..RETURNING
result = connection.execute(
table.update().
where(table.c.name=='foo').
values(name='bar').
returning(table.c.col1, table.c.col2)
table.update()
.where(table.c.name == "foo")
.values(name="bar")
.returning(table.c.col1, table.c.col2)
)
print(result.all())
# DELETE..RETURNING
result = connection.execute(
table.delete().
where(table.c.name=='foo').
returning(table.c.col1, table.c.col2)
table.delete()
.where(table.c.name == "foo")
.returning(table.c.col1, table.c.col2)
)
print(result.all())
@@ -318,6 +325,7 @@ new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
@@ -380,13 +388,16 @@ ABORT, FAIL, IGNORE, and REPLACE. For example, to add a UNIQUE constraint
that specifies the IGNORE algorithm::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer),
UniqueConstraint('id', 'data', sqlite_on_conflict='IGNORE')
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", Integer),
UniqueConstraint("id", "data", sqlite_on_conflict="IGNORE"),
)
The above renders CREATE TABLE DDL as::
The above renders CREATE TABLE DDL as:
.. sourcecode:: sql
CREATE TABLE some_table (
id INTEGER NOT NULL,
@@ -403,13 +414,17 @@ be added to the :class:`_schema.Column` as well, which will be added to the
UNIQUE constraint in the DDL::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, unique=True,
sqlite_on_conflict_unique='IGNORE')
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"data", Integer, unique=True, sqlite_on_conflict_unique="IGNORE"
),
)
rendering::
rendering:
.. sourcecode:: sql
CREATE TABLE some_table (
id INTEGER NOT NULL,
@@ -422,13 +437,17 @@ To apply the FAIL algorithm for a NOT NULL constraint,
``sqlite_on_conflict_not_null`` is used::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', Integer, nullable=False,
sqlite_on_conflict_not_null='FAIL')
"some_table",
metadata,
Column("id", Integer, primary_key=True),
Column(
"data", Integer, nullable=False, sqlite_on_conflict_not_null="FAIL"
),
)
this renders the column inline ON CONFLICT phrase::
this renders the column inline ON CONFLICT phrase:
.. sourcecode:: sql
CREATE TABLE some_table (
id INTEGER NOT NULL,
@@ -440,13 +459,20 @@ this renders the column inline ON CONFLICT phrase::
Similarly, for an inline primary key, use ``sqlite_on_conflict_primary_key``::
some_table = Table(
'some_table', metadata,
Column('id', Integer, primary_key=True,
sqlite_on_conflict_primary_key='FAIL')
"some_table",
metadata,
Column(
"id",
Integer,
primary_key=True,
sqlite_on_conflict_primary_key="FAIL",
),
)
SQLAlchemy renders the PRIMARY KEY constraint separately, so the conflict
resolution algorithm is applied to the constraint itself::
resolution algorithm is applied to the constraint itself:
.. sourcecode:: sql
CREATE TABLE some_table (
id INTEGER NOT NULL,
@@ -456,7 +482,7 @@ resolution algorithm is applied to the constraint itself::
.. _sqlite_on_conflict_insert:
INSERT...ON CONFLICT (Upsert)
-----------------------------------
-----------------------------
.. seealso:: This section describes the :term:`DML` version of "ON CONFLICT" for
SQLite, which occurs within an INSERT statement. For "ON CONFLICT" as
@@ -484,21 +510,18 @@ and :meth:`_sqlite.Insert.on_conflict_do_nothing`:
>>> from sqlalchemy.dialects.sqlite import insert
>>> insert_stmt = insert(my_table).values(
... id='some_existing_id',
... data='inserted value')
... id="some_existing_id", data="inserted value"
... )
>>> do_update_stmt = insert_stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... index_elements=["id"], set_=dict(data="updated value")
... )
>>> print(do_update_stmt)
{printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
ON CONFLICT (id) DO UPDATE SET data = ?{stop}
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(
... index_elements=['id']
... )
>>> do_nothing_stmt = insert_stmt.on_conflict_do_nothing(index_elements=["id"])
>>> print(do_nothing_stmt)
{printsql}INSERT INTO my_table (id, data) VALUES (?, ?)
@@ -529,13 +552,13 @@ Both methods supply the "target" of the conflict using column inference:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(user_email='a@b.com', data='inserted data')
>>> stmt = insert(my_table).values(user_email="a@b.com", data="inserted data")
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=[my_table.c.user_email],
... index_where=my_table.c.user_email.like('%@gmail.com'),
... set_=dict(data=stmt.excluded.data)
... )
... index_where=my_table.c.user_email.like("%@gmail.com"),
... set_=dict(data=stmt.excluded.data),
... )
>>> print(do_update_stmt)
{printsql}INSERT INTO my_table (data, user_email) VALUES (?, ?)
@@ -555,11 +578,10 @@ for UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = insert(my_table).values(id="some_id", data="inserted value")
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value')
... index_elements=["id"], set_=dict(data="updated value")
... )
>>> print(do_update_stmt)
@@ -587,14 +609,12 @@ would have been inserted had the constraint not failed:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... id="some_id", data="inserted value", author="jlh"
... )
>>> do_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author)
... index_elements=["id"],
... set_=dict(data="updated value", author=stmt.excluded.author),
... )
>>> print(do_update_stmt)
@@ -611,15 +631,13 @@ parameter, which will limit those rows which receive an UPDATE:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(
... id='some_id',
... data='inserted value',
... author='jlh'
... id="some_id", data="inserted value", author="jlh"
... )
>>> on_update_stmt = stmt.on_conflict_do_update(
... index_elements=['id'],
... set_=dict(data='updated value', author=stmt.excluded.author),
... where=(my_table.c.status == 2)
... index_elements=["id"],
... set_=dict(data="updated value", author=stmt.excluded.author),
... where=(my_table.c.status == 2),
... )
>>> print(on_update_stmt)
{printsql}INSERT INTO my_table (id, data, author) VALUES (?, ?, ?)
@@ -636,8 +654,8 @@ using the :meth:`_sqlite.Insert.on_conflict_do_nothing` method:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = stmt.on_conflict_do_nothing(index_elements=['id'])
>>> stmt = insert(my_table).values(id="some_id", data="inserted value")
>>> stmt = stmt.on_conflict_do_nothing(index_elements=["id"])
>>> print(stmt)
{printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT (id) DO NOTHING
@@ -648,7 +666,7 @@ occurs:
.. sourcecode:: pycon+sql
>>> stmt = insert(my_table).values(id='some_id', data='inserted value')
>>> stmt = insert(my_table).values(id="some_id", data="inserted value")
>>> stmt = stmt.on_conflict_do_nothing()
>>> print(stmt)
{printsql}INSERT INTO my_table (id, data) VALUES (?, ?) ON CONFLICT DO NOTHING
@@ -708,11 +726,16 @@ Partial Indexes
A partial index, e.g. one which uses a WHERE clause, can be specified
with the DDL system using the argument ``sqlite_where``::
tbl = Table('testtbl', m, Column('data', Integer))
idx = Index('test_idx1', tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10))
tbl = Table("testtbl", m, Column("data", Integer))
idx = Index(
"test_idx1",
tbl.c.data,
sqlite_where=and_(tbl.c.data > 5, tbl.c.data < 10),
)
The index will be rendered at create time as::
The index will be rendered at create time as:
.. sourcecode:: sql
CREATE INDEX test_idx1 ON testtbl (data)
WHERE data > 5 AND data < 10
@@ -732,7 +755,11 @@ The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
import sqlite3
assert sqlite3.sqlite_version_info < (3, 10, 0), "bug is fixed in this version"
assert sqlite3.sqlite_version_info < (
3,
10,
0,
), "bug is fixed in this version"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
@@ -742,17 +769,22 @@ The bug, entirely outside of SQLAlchemy, can be illustrated thusly::
cursor.execute("insert into x (a, b) values (2, 2)")
cursor.execute("select x.a, x.b from x")
assert [c[0] for c in cursor.description] == ['a', 'b']
assert [c[0] for c in cursor.description] == ["a", "b"]
cursor.execute('''
cursor.execute(
"""
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
assert [c[0] for c in cursor.description] == ['a', 'b'], \
[c[0] for c in cursor.description]
"""
)
assert [c[0] for c in cursor.description] == ["a", "b"], [
c[0] for c in cursor.description
]
The second assertion fails::
The second assertion fails:
.. sourcecode:: text
Traceback (most recent call last):
File "test.py", line 19, in <module>
@@ -780,11 +812,13 @@ to filter these out::
result = conn.exec_driver_sql("select x.a, x.b from x")
assert result.keys() == ["a", "b"]
result = conn.exec_driver_sql('''
result = conn.exec_driver_sql(
"""
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
"""
)
assert result.keys() == ["a", "b"]
Note that above, even though SQLAlchemy filters out the dots, *both
@@ -808,16 +842,20 @@ contain dots, and the functionality of :meth:`_engine.CursorResult.keys` and
the ``sqlite_raw_colnames`` execution option may be provided, either on a
per-:class:`_engine.Connection` basis::
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql('''
result = conn.execution_options(sqlite_raw_colnames=True).exec_driver_sql(
"""
select x.a, x.b from x where a=1
union
select x.a, x.b from x where a=2
''')
"""
)
assert result.keys() == ["x.a", "x.b"]
or on a per-:class:`_engine.Engine` basis::
engine = create_engine("sqlite://", execution_options={"sqlite_raw_colnames": True})
engine = create_engine(
"sqlite://", execution_options={"sqlite_raw_colnames": True}
)
When using the per-:class:`_engine.Engine` execution option, note that
**Core and ORM queries that use UNION may not function properly**.
@@ -832,12 +870,18 @@ dialect in conjunction with the :class:`_schema.Table` construct:
Table("some_table", metadata, ..., sqlite_with_rowid=False)
* ``STRICT``::
Table("some_table", metadata, ..., sqlite_strict=True)
.. versionadded:: 2.0.37
.. seealso::
`SQLite CREATE TABLE options
<https://www.sqlite.org/lang_createtable.html>`_
.. _sqlite_include_internal:
Reflecting internal schema tables
@@ -866,7 +910,7 @@ passed to methods such as :meth:`_schema.MetaData.reflect` or
`SQLite Internal Schema Objects <https://www.sqlite.org/fileformat2.html#intschema>`_ - in the SQLite
documentation.
""" # noqa
''' # noqa
from __future__ import annotations
import datetime
@@ -980,7 +1024,9 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"%(year)04d-%(month)02d-%(day)02d %(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
e.g.:
.. sourcecode:: text
2021-03-15 12:05:57.105542
@@ -996,9 +1042,11 @@ class DATETIME(_DateTimeMixin, sqltypes.DateTime):
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(storage_format="%(year)04d/%(month)02d/%(day)02d "
"%(hour)02d:%(minute)02d:%(second)02d",
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)"
dt = DATETIME(
storage_format=(
"%(year)04d/%(month)02d/%(day)02d %(hour)02d:%(minute)02d:%(second)02d"
),
regexp=r"(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)",
)
:param storage_format: format string which will be applied to the dict
@@ -1088,7 +1136,9 @@ class DATE(_DateTimeMixin, sqltypes.Date):
"%(year)04d-%(month)02d-%(day)02d"
e.g.::
e.g.:
.. sourcecode:: text
2011-03-15
@@ -1106,9 +1156,9 @@ class DATE(_DateTimeMixin, sqltypes.Date):
from sqlalchemy.dialects.sqlite import DATE
d = DATE(
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)")
)
storage_format="%(month)02d/%(day)02d/%(year)04d",
regexp=re.compile("(?P<month>\d+)/(?P<day>\d+)/(?P<year>\d+)"),
)
:param storage_format: format string which will be applied to the
dict with keys year, month, and day.
@@ -1162,7 +1212,9 @@ class TIME(_DateTimeMixin, sqltypes.Time):
"%(hour)02d:%(minute)02d:%(second)02d.%(microsecond)06d"
e.g.::
e.g.:
.. sourcecode:: text
12:05:57.10558
@@ -1178,9 +1230,9 @@ class TIME(_DateTimeMixin, sqltypes.Time):
import re
from sqlalchemy.dialects.sqlite import TIME
t = TIME(storage_format="%(hour)02d-%(minute)02d-"
"%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
t = TIME(
storage_format="%(hour)02d-%(minute)02d-%(second)02d-%(microsecond)06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?"),
)
:param storage_format: format string which will be applied to the dict
@@ -1429,9 +1481,7 @@ class SQLiteCompiler(compiler.SQLCompiler):
return self._generate_generic_binary(binary, " NOT REGEXP ", **kw)
def _on_conflict_target(self, clause, **kw):
if clause.constraint_target is not None:
target_text = "(%s)" % clause.constraint_target
elif clause.inferred_target_elements is not None:
if clause.inferred_target_elements is not None:
target_text = "(%s)" % ", ".join(
(
self.preparer.quote(c)
@@ -1445,7 +1495,7 @@ class SQLiteCompiler(compiler.SQLCompiler):
clause.inferred_target_whereclause,
include_table=False,
use_schema=False,
literal_binds=True,
literal_execute=True,
)
else:
@@ -1528,6 +1578,13 @@ class SQLiteCompiler(compiler.SQLCompiler):
return "ON CONFLICT %s DO UPDATE SET %s" % (target_text, action_text)
def visit_bitwise_xor_op_binary(self, binary, operator, **kw):
# sqlite has no xor. Use "a XOR b" = "(a | b) - (a & b)".
kw["eager_grouping"] = True
or_ = self._generate_generic_binary(binary, " | ", **kw)
and_ = self._generate_generic_binary(binary, " & ", **kw)
return f"({or_} - {and_})"
class SQLiteDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
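
The identity used by ``visit_bitwise_xor_op_binary()`` above holds because the
set bits of ``a & b`` are always a subset of those of ``a | b``, so the
subtraction never borrows; a quick check::

    for a in range(-8, 8):
        for b in range(-8, 8):
            assert (a | b) - (a & b) == a ^ b
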
@@ -1701,9 +1758,12 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
return text
def post_create_table(self, table):
text = ""
if table.dialect_options["sqlite"]["with_rowid"] is False:
return "\n WITHOUT ROWID"
return ""
text += "\n WITHOUT ROWID"
if table.dialect_options["sqlite"]["strict"] is True:
text += "\n STRICT"
return text
class SQLiteTypeCompiler(compiler.GenericTypeCompiler):
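
With the revised ``post_create_table()``, both table options may be combined;
a sketch of the expected output (assuming SQLAlchemy 2.0.37+)::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects import sqlite
    from sqlalchemy.schema import CreateTable

    t = Table(
        "some_table",
        MetaData(),
        Column("id", Integer, primary_key=True),
        sqlite_with_rowid=False,
        sqlite_strict=True,
    )
    # the emitted CREATE TABLE ends with both WITHOUT ROWID and STRICT
    print(CreateTable(t).compile(dialect=sqlite.dialect()))
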
@@ -1938,6 +1998,7 @@ class SQLiteDialect(default.DefaultDialect):
{
"autoincrement": False,
"with_rowid": True,
"strict": False,
},
),
(sa_schema.Index, {"where": None}),
@@ -2231,6 +2292,14 @@ class SQLiteDialect(default.DefaultDialect):
tablesql = self._get_table_sql(
connection, table_name, schema, **kw
)
# remove create table
match = re.match(
r"create table .*?\((.*)\)$",
tablesql.strip(),
re.DOTALL | re.IGNORECASE,
)
assert match, f"create table not found in {tablesql}"
tablesql = match.group(1).strip()
columns.append(
self._get_column_info(
@@ -2285,7 +2354,10 @@ class SQLiteDialect(default.DefaultDialect):
if generated:
sqltext = ""
if tablesql:
pattern = r"[^,]*\s+AS\s+\(([^,]*)\)\s*(?:virtual|stored)?"
pattern = (
r"[^,]*\s+GENERATED\s+ALWAYS\s+AS"
r"\s+\((.*)\)\s*(?:virtual|stored)?"
)
match = re.search(
re.escape(name) + pattern, tablesql, re.IGNORECASE
)
@@ -2570,8 +2642,8 @@ class SQLiteDialect(default.DefaultDialect):
return
UNIQUE_PATTERN = r'(?:CONSTRAINT "?(.+?)"? +)?UNIQUE *\((.+?)\)'
INLINE_UNIQUE_PATTERN = (
r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?) '
r"+[a-z0-9_ ]+? +UNIQUE"
r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?)[\t ]'
r"+[a-z0-9_ ]+?[\t ]+UNIQUE"
)
for match in re.finditer(UNIQUE_PATTERN, table_data, re.I):
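
The revised ``INLINE_UNIQUE_PATTERN`` accepts tabs as well as spaces between
the column name, its type, and the ``UNIQUE`` keyword; a quick check (the
type name here is parenthesis-free, since the pattern's middle character
class admits only word characters and spaces)::

    import re

    INLINE_UNIQUE_PATTERN = (
        r'(?:(".+?")|(?:[\[`])?([a-z0-9_]+)(?:[\]`])?)[\t ]'
        r"+[a-z0-9_ ]+?[\t ]+UNIQUE"
    )

    # a tab-separated column definition now matches, not only spaces
    ddl = "CREATE TABLE t (\n\tname\tVARCHAR\tUNIQUE\n)"
    m = re.search(INLINE_UNIQUE_PATTERN, ddl, re.I)
    assert m is not None and m.group(2) == "name"
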
@@ -2606,15 +2678,21 @@ class SQLiteDialect(default.DefaultDialect):
connection, table_name, schema=schema, **kw
)
CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?" r"CHECK *\( *(.+) *\),? *"
cks = []
# NOTE: we aren't using re.S here because we actually are
# taking advantage of each CHECK constraint being all on one
# line in the table definition in order to delineate. This
# NOTE NOTE NOTE
# DO NOT CHANGE THIS REGULAR EXPRESSION. There is no known way
# to parse CHECK constraints that contain newlines themselves using
# regular expressions, and the approach here relies upon each
# individual
# CHECK constraint being on a single line by itself. This
# necessarily makes assumptions as to how the CREATE TABLE
# was emitted.
# was emitted. A more comprehensive DDL parsing solution would be
# needed to improve upon the current situation. See #11840 for
# background
CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?CHECK *\( *(.+) *\),? *"
cks = []
for match in re.finditer(CHECK_PATTERN, table_data or "", re.I):
name = match.group(1)
if name:
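
Because ``re.S`` is not used, ``.`` stops at newlines and each CHECK
constraint is effectively scanned on its own line; a quick illustration of
that per-line assumption (hypothetical DDL)::

    import re

    CHECK_PATTERN = r"(?:CONSTRAINT (.+) +)?CHECK *\( *(.+) *\),? *"

    table_data = (
        "CREATE TABLE t (\n"
        "    x INTEGER CONSTRAINT positive CHECK (x > 0),\n"
        "    y INTEGER CHECK (y < 100)\n"
        ")"
    )
    found = [
        (m.group(1), m.group(2))
        for m in re.finditer(CHECK_PATTERN, table_data, re.I)
    ]
    assert found == [("positive", "x > 0"), (None, "y < 100")]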


@@ -1,5 +1,5 @@
# dialects/sqlite/dml.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -7,6 +7,10 @@
from __future__ import annotations
from typing import Any
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from .._typing import _OnConflictIndexElementsT
from .._typing import _OnConflictIndexWhereT
@@ -15,6 +19,7 @@ from .._typing import _OnConflictWhereT
from ... import util
from ...sql import coercions
from ...sql import roles
from ...sql import schema
from ...sql._typing import _DMLTableArgument
from ...sql.base import _exclusive_against
from ...sql.base import _generative
@@ -22,7 +27,9 @@ from ...sql.base import ColumnCollection
from ...sql.base import ReadOnlyColumnCollection
from ...sql.dml import Insert as StandardInsert
from ...sql.elements import ClauseElement
from ...sql.elements import ColumnElement
from ...sql.elements import KeyedColumnElement
from ...sql.elements import TextClause
from ...sql.expression import alias
from ...util.typing import Self
@@ -141,11 +148,10 @@ class Insert(StandardInsert):
:paramref:`.Insert.on_conflict_do_update.set_` dictionary.
:param where:
Optional argument. If present, can be a literal SQL
string or an acceptable expression for a ``WHERE`` clause
that restricts the rows affected by ``DO UPDATE SET``. Rows
not meeting the ``WHERE`` condition will not be updated
(effectively a ``DO NOTHING`` for those rows).
Optional argument. An expression object representing a ``WHERE``
clause that restricts the rows affected by ``DO UPDATE SET``. Rows not
meeting the ``WHERE`` condition will not be updated (effectively a
``DO NOTHING`` for those rows).
"""
@@ -184,9 +190,10 @@ class Insert(StandardInsert):
class OnConflictClause(ClauseElement):
stringify_dialect = "sqlite"
constraint_target: None
inferred_target_elements: _OnConflictIndexElementsT
inferred_target_whereclause: _OnConflictIndexWhereT
inferred_target_elements: Optional[List[Union[str, schema.Column[Any]]]]
inferred_target_whereclause: Optional[
Union[ColumnElement[Any], TextClause]
]
def __init__(
self,
@@ -194,11 +201,20 @@ class OnConflictClause(ClauseElement):
index_where: _OnConflictIndexWhereT = None,
):
if index_elements is not None:
self.constraint_target = None
self.inferred_target_elements = index_elements
self.inferred_target_whereclause = index_where
self.inferred_target_elements = [
coercions.expect(roles.DDLConstraintColumnRole, column)
for column in index_elements
]
self.inferred_target_whereclause = (
coercions.expect(
roles.WhereHavingRole,
index_where,
)
if index_where is not None
else None
)
else:
self.constraint_target = self.inferred_target_elements = (
self.inferred_target_elements = (
self.inferred_target_whereclause
) = None
@@ -210,6 +226,9 @@ class OnConflictDoNothing(OnConflictClause):
class OnConflictDoUpdate(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
update_values_to_set: List[Tuple[Union[schema.Column[Any], str], Any]]
update_whereclause: Optional[ColumnElement[Any]]
def __init__(
self,
index_elements: _OnConflictIndexElementsT = None,
@@ -237,4 +256,8 @@ class OnConflictDoUpdate(OnConflictClause):
(coercions.expect(roles.DMLColumnRole, key), value)
for key, value in set_.items()
]
self.update_whereclause = where
self.update_whereclause = (
coercions.expect(roles.WhereHavingRole, where)
if where is not None
else None
)
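
With the coercion above, ``where`` must now be a SQL expression construct
rather than a plain string; a usage sketch::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.dialects.sqlite import insert

    my_table = Table(
        "my_table",
        MetaData(),
        Column("id", Integer, primary_key=True),
        Column("data", String),
        Column("status", Integer),
    )

    stmt = insert(my_table).values(id=1, data="d")
    stmt = stmt.on_conflict_do_update(
        index_elements=[my_table.c.id],
        set_={"data": stmt.excluded.data},
        # an expression object, not a raw SQL string
        where=my_table.c.status == 2,
    )
    print(stmt)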


@@ -1,5 +1,5 @@
# dialects/sqlite/json.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# dialects/sqlite/provision.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# dialects/sqlite/pysqlcipher.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -39,7 +39,7 @@ Current dialect selection logic is:
e = create_engine(
"sqlite+pysqlcipher://:password@/dbname.db",
module=sqlcipher_compatible_driver
module=sqlcipher_compatible_driver,
)
These drivers make use of the SQLCipher engine. This system essentially
@@ -55,12 +55,12 @@ The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
e = create_engine("sqlite+pysqlcipher://:testing@/foo.db")
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
e = create_engine("sqlite+pysqlcipher://:testing@//path/to/foo.db")
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
@@ -68,7 +68,9 @@ in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``,
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
e = create_engine(
"sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000"
)
.. warning:: Previous versions of SQLAlchemy did not take into consideration
the encryption-related pragmas passed in the URL string, which were silently


@@ -1,5 +1,5 @@
# dialects/sqlite/pysqlite.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -28,7 +28,9 @@ Connect Strings
---------------
The file specification for the SQLite database is taken as the "database"
portion of the URL. Note that the format of a SQLAlchemy url is::
portion of the URL. Note that the format of a SQLAlchemy url is:
.. sourcecode:: text
driver://user:pass@host/database
@@ -37,25 +39,28 @@ the **right** of the third slash. So connecting to a relative filepath
looks like::
# relative path
e = create_engine('sqlite:///path/to/database.db')
e = create_engine("sqlite:///path/to/database.db")
An absolute path, which is denoted by starting with a slash, means you
need **four** slashes::
# absolute path
e = create_engine('sqlite:////path/to/database.db')
e = create_engine("sqlite:////path/to/database.db")
To use a Windows path, regular drive specifications and backslashes can be
used. Double backslashes are probably needed::
# absolute path on Windows
e = create_engine('sqlite:///C:\\path\\to\\database.db')
e = create_engine("sqlite:///C:\\path\\to\\database.db")
The sqlite ``:memory:`` identifier is the default if no filepath is
present. Specify ``sqlite://`` and nothing else::
To use a SQLite ``:memory:`` database, specify it as the filename using
``sqlite:///:memory:``. It's also the default if no filepath is
present; specify only ``sqlite://`` and nothing else::
# in-memory database
e = create_engine('sqlite://')
# in-memory database (note three slashes)
e = create_engine("sqlite:///:memory:")
# also in-memory database
e2 = create_engine("sqlite://")
.. _pysqlite_uri_connections:
@@ -95,7 +100,9 @@ Above, the pysqlite / sqlite3 DBAPI would be passed arguments as::
sqlite3.connect(
"file:path/to/database?mode=ro&nolock=1",
check_same_thread=True, timeout=10, uri=True
check_same_thread=True,
timeout=10,
uri=True,
)
Regarding future parameters added to either the Python or native drivers. new
@@ -141,8 +148,11 @@ as follows::
def regexp(a, b):
return re.search(a, b) is not None
sqlite_connection.create_function(
"regexp", 2, regexp,
"regexp",
2,
regexp,
)
There is currently no support for regular expression flags as a separate
@@ -183,10 +193,12 @@ Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
connect_args={'detect_types':
sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
engine = create_engine(
"sqlite://",
connect_args={
"detect_types": sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES
},
native_datetime=True,
)
With this flag enabled, the DATE and TIMESTAMP types (but note - not the
@@ -241,6 +253,7 @@ Pooling may be disabled for a file based database by specifying the
parameter::
from sqlalchemy import NullPool
engine = create_engine("sqlite:///myfile.db", poolclass=NullPool)
It's been observed that the :class:`.NullPool` implementation incurs an
@@ -260,9 +273,12 @@ globally, and the ``check_same_thread`` flag can be passed to Pysqlite
as ``False``::
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite://',
connect_args={'check_same_thread':False},
poolclass=StaticPool)
engine = create_engine(
"sqlite://",
connect_args={"check_same_thread": False},
poolclass=StaticPool,
)
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
@@ -281,14 +297,14 @@ needed within multiple threads for this case::
# maintain the same connection per thread
from sqlalchemy.pool import SingletonThreadPool
engine = create_engine('sqlite:///mydb.db',
poolclass=SingletonThreadPool)
engine = create_engine("sqlite:///mydb.db", poolclass=SingletonThreadPool)
# maintain the same connection across all threads
from sqlalchemy.pool import StaticPool
engine = create_engine('sqlite:///mydb.db',
poolclass=StaticPool)
engine = create_engine("sqlite:///mydb.db", poolclass=StaticPool)
Note that :class:`.SingletonThreadPool` should be configured for the number
of threads that are to be used; beyond that number, connections will be
@@ -317,13 +333,14 @@ same column, use a custom type that will check each row individually::
from sqlalchemy import String
from sqlalchemy import TypeDecorator
class MixedBinary(TypeDecorator):
impl = String
cache_ok = True
def process_result_value(self, value, dialect):
if isinstance(value, str):
value = bytes(value, 'utf-8')
value = bytes(value, "utf-8")
elif value is not None:
value = bytes(value)
@@ -364,12 +381,14 @@ ourselves. This is achieved using two event listeners::
engine = create_engine("sqlite:///myfile.db")
@event.listens_for(engine, "connect")
def do_connect(dbapi_connection, connection_record):
# disable pysqlite's emitting of the BEGIN statement entirely.
# also stops it from emitting COMMIT before any DDL.
dbapi_connection.isolation_level = None
@event.listens_for(engine, "begin")
def do_begin(conn):
# emit our own BEGIN
@@ -439,7 +458,6 @@ connection when it is created. That is accomplished with an event listener::
with engine.connect() as conn:
print(conn.scalar(text("SELECT UDF()")))
""" # noqa
import math