mirror of https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git
synced 2025-08-14 00:25:46 +02:00
Check 09.02.2025
@@ -1,5 +1,5 @@
# dialects/mssql/__init__.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -1,5 +1,5 @@
# dialects/mssql/aioodbc.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -32,13 +32,12 @@ This dialect should normally be used only with the
styles are otherwise equivalent to those documented in the pyodbc section::

from sqlalchemy.ext.asyncio import create_async_engine

engine = create_async_engine(
    "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
    "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
)

"""

from __future__ import annotations
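As orientation for the dialect documented above, here is a minimal, self-contained usage sketch; it is illustrative only, and the host, credentials and query are placeholder values taken from the example URL::

    import asyncio

    from sqlalchemy import text
    from sqlalchemy.ext.asyncio import create_async_engine


    async def main():
        # same placeholder URL as in the docstring above
        engine = create_async_engine(
            "mssql+aioodbc://scott:tiger@mssql2017:1433/test?"
            "driver=ODBC+Driver+18+for+SQL+Server&TrustServerCertificate=yes"
        )
        async with engine.connect() as conn:
            result = await conn.execute(text("SELECT @@VERSION"))
            print(result.scalar())
        await engine.dispose()


    asyncio.run(main())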
@@ -1,5 +1,5 @@
# dialects/mssql/base.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -9,7 +9,6 @@
"""
.. dialect:: mssql
    :name: Microsoft SQL Server
    :full_support: 2017
    :normal_support: 2012+
    :best_effort: 2005+
@@ -40,9 +39,12 @@ considered to be the identity column - unless it is associated with a
from sqlalchemy import Table, MetaData, Column, Integer

m = MetaData()
-t = Table('t', m,
-    Column('id', Integer, primary_key=True),
-    Column('x', Integer))
+t = Table(
+    "t",
+    m,
+    Column("id", Integer, primary_key=True),
+    Column("x", Integer),
+)
m.create_all(engine)

The above example will generate DDL as:
@@ -60,9 +62,12 @@ specify ``False`` for the :paramref:`_schema.Column.autoincrement` flag,
on the first integer primary key column::

m = MetaData()
-t = Table('t', m,
-    Column('id', Integer, primary_key=True, autoincrement=False),
-    Column('x', Integer))
+t = Table(
+    "t",
+    m,
+    Column("id", Integer, primary_key=True, autoincrement=False),
+    Column("x", Integer),
+)
m.create_all(engine)

To add the ``IDENTITY`` keyword to a non-primary key column, specify
@@ -72,9 +77,12 @@ To add the ``IDENTITY`` keyword to a non-primary key column, specify
is set to ``False`` on any integer primary key column::

m = MetaData()
-t = Table('t', m,
-    Column('id', Integer, primary_key=True, autoincrement=False),
-    Column('x', Integer, autoincrement=True))
+t = Table(
+    "t",
+    m,
+    Column("id", Integer, primary_key=True, autoincrement=False),
+    Column("x", Integer, autoincrement=True),
+)
m.create_all(engine)

.. versionchanged:: 1.4 Added :class:`_schema.Identity` construct
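For readers following along, the IDENTITY behaviour described above can be checked without a live server by compiling the table's DDL against the MSSQL dialect; this sketch is an illustration using standard SQLAlchemy APIs (``CreateTable``, ``mssql.dialect()``), not part of the change itself::

    from sqlalchemy import Column, Integer, MetaData, Table
    from sqlalchemy.dialects import mssql
    from sqlalchemy.schema import CreateTable

    m = MetaData()
    t = Table(
        "t",
        m,
        Column("id", Integer, primary_key=True),
        Column("x", Integer),
    )
    # prints CREATE TABLE with IDENTITY applied to the integer primary key
    print(CreateTable(t).compile(dialect=mssql.dialect()))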
@@ -137,14 +145,12 @@ parameters passed to the :class:`_schema.Identity` object::

from sqlalchemy import Table, Integer, Column, Identity

test = Table(
-    'test', metadata,
+    "test",
+    metadata,
    Column(
-        'id',
-        Integer,
-        primary_key=True,
-        Identity(start=100, increment=10)
+        "id", Integer, primary_key=True, Identity(start=100, increment=10)
    ),
-    Column('name', String(20))
+    Column("name", String(20)),
)

The CREATE TABLE for the above :class:`_schema.Table` object would be:
@@ -154,7 +160,7 @@ The CREATE TABLE for the above :class:`_schema.Table` object would be:
CREATE TABLE test (
    id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY,
    name VARCHAR(20) NULL,
-)
+)

.. note::
@@ -187,6 +193,7 @@ type deployed to the SQL Server database can be specified as ``Numeric`` using

Base = declarative_base()


class TestTable(Base):
    __tablename__ = "test"
    id = Column(
@@ -212,8 +219,9 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::

from sqlalchemy import TypeDecorator


class NumericAsInteger(TypeDecorator):
-    '''normalize floating point return values into ints'''
+    "normalize floating point return values into ints"

    impl = Numeric(10, 0, asdecimal=False)
    cache_ok = True
@@ -223,6 +231,7 @@ integer values in Python 3), use :class:`_types.TypeDecorator` as follows::
        value = int(value)
        return value


class TestTable(Base):
    __tablename__ = "test"
    id = Column(
@@ -271,11 +280,11 @@ The process for fetching this value has several variants:
fetched in order to receive the value. Given a table as::

t = Table(
-    't',
+    "t",
    metadata,
-    Column('id', Integer, primary_key=True),
-    Column('x', Integer),
-    implicit_returning=False
+    Column("id", Integer, primary_key=True),
+    Column("x", Integer),
+    implicit_returning=False,
)

an INSERT will look like:
@@ -301,12 +310,13 @@ statement proceeding, and ``SET IDENTITY_INSERT OFF`` subsequent to the
execution. Given this example::

m = MetaData()
-t = Table('t', m, Column('id', Integer, primary_key=True),
-    Column('x', Integer))
+t = Table(
+    "t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
+)
m.create_all(engine)

with engine.begin() as conn:
-    conn.execute(t.insert(), {'id': 1, 'x':1}, {'id':2, 'x':2})
+    conn.execute(t.insert(), {"id": 1, "x": 1}, {"id": 2, "x": 2})

The above column will be created with IDENTITY, however the INSERT statement
we emit is specifying explicit values. In the echo output we can see
@@ -342,7 +352,11 @@ The :class:`.Sequence` object creates "real" sequences, i.e.,
>>> from sqlalchemy import Sequence
>>> from sqlalchemy.schema import CreateSequence
>>> from sqlalchemy.dialects import mssql
->>> print(CreateSequence(Sequence("my_seq", start=1)).compile(dialect=mssql.dialect()))
+>>> print(
+...     CreateSequence(Sequence("my_seq", start=1)).compile(
+...         dialect=mssql.dialect()
+...     )
+... )
{printsql}CREATE SEQUENCE my_seq START WITH 1

For integer primary key generation, SQL Server's ``IDENTITY`` construct should
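As an aside, a sketch of attaching the :class:`.Sequence` shown above to an integer primary key, so that values are drawn from the sequence rather than ``IDENTITY``; the table and sequence names are hypothetical and this is not part of the change itself::

    from sqlalchemy import Column, Integer, MetaData, Sequence, Table

    m = MetaData()
    t = Table(
        "t",
        m,
        # the default generator for "id" draws from my_seq instead of IDENTITY
        Column("id", Integer, Sequence("my_seq", start=1), primary_key=True),
        Column("x", Integer),
    )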
@@ -376,12 +390,12 @@ more than one backend without using dialect-specific types.
To build a SQL Server VARCHAR or NVARCHAR with MAX length, use None::

my_table = Table(
-    'my_table', metadata,
-    Column('my_data', VARCHAR(None)),
-    Column('my_n_data', NVARCHAR(None))
+    "my_table",
+    metadata,
+    Column("my_data", VARCHAR(None)),
+    Column("my_n_data", NVARCHAR(None)),
)


Collation Support
-----------------
@@ -389,10 +403,13 @@ Character collations are supported by the base string types,
specified by the string argument "collation"::

from sqlalchemy import VARCHAR

-Column('login', VARCHAR(32, collation='Latin1_General_CI_AS'))
+Column("login", VARCHAR(32, collation="Latin1_General_CI_AS"))

When such a column is associated with a :class:`_schema.Table`, the
-CREATE TABLE statement for this column will yield::
+CREATE TABLE statement for this column will yield:
+
+.. sourcecode:: sql

    login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL
@@ -412,7 +429,9 @@ versions when no OFFSET clause is present. A statement such as::

select(some_table).limit(5)

-will render similarly to::
+will render similarly to:
+
+.. sourcecode:: sql

    SELECT TOP 5 col1, col2.. FROM table

@@ -422,7 +441,9 @@ LIMIT and OFFSET, or just OFFSET alone, will be rendered using the

select(some_table).order_by(some_table.c.col3).limit(5).offset(10)

-will render similarly to::
+will render similarly to:
+
+.. sourcecode:: sql

    SELECT anon_1.col1, anon_1.col2 FROM (SELECT col1, col2,
    ROW_NUMBER() OVER (ORDER BY col3) AS
@@ -475,16 +496,13 @@ each new connection.
To set isolation level using :func:`_sa.create_engine`::

engine = create_engine(
-    "mssql+pyodbc://scott:tiger@ms_2008",
-    isolation_level="REPEATABLE READ"
+    "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
)

To set using per-connection execution options::

connection = engine.connect()
-connection = connection.execution_options(
-    isolation_level="READ COMMITTED"
-)
+connection = connection.execution_options(isolation_level="READ COMMITTED")

Valid values for ``isolation_level`` include:
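As an illustrative aside, the level that actually took effect can be read back with :meth:`_engine.Connection.get_isolation_level`; the connection URL is the same placeholder used above, and this sketch is not part of the change itself::

    from sqlalchemy import create_engine

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@ms_2008", isolation_level="REPEATABLE READ"
    )

    with engine.connect() as connection:
        # reports the isolation level active on this connection
        print(connection.get_isolation_level())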
@@ -534,7 +552,6 @@ will remain consistent with the state of the transaction::

mssql_engine = create_engine(
    "mssql+pyodbc://scott:tiger^5HHH@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
    # disable default reset-on-return scheme
    pool_reset_on_return=None,
)
@@ -563,13 +580,17 @@ Nullability
-----------
MSSQL has support for three levels of column nullability. The default
nullability allows nulls and is explicit in the CREATE TABLE
-construct::
+construct:
+
+.. sourcecode:: sql

    name VARCHAR(20) NULL

If ``nullable=None`` is specified then no specification is made. In
other words the database's configured default is used. This will
-render::
+render:
+
+.. sourcecode:: sql

    name VARCHAR(20)
@@ -625,8 +646,9 @@ behavior of this flag is as follows:

* The flag can be set to either ``True`` or ``False`` when the dialect
  is created, typically via :func:`_sa.create_engine`::

-    eng = create_engine("mssql+pymssql://user:pass@host/db",
-        deprecate_large_types=True)
+    eng = create_engine(
+        "mssql+pymssql://user:pass@host/db", deprecate_large_types=True
+    )

* Complete control over whether the "old" or "new" types are rendered is
  available in all SQLAlchemy versions by using the UPPERCASE type objects
@@ -648,9 +670,10 @@ at once using the :paramref:`_schema.Table.schema` argument of
:class:`_schema.Table`::

Table(
-    "some_table", metadata,
+    "some_table",
+    metadata,
    Column("q", String(50)),
-    schema="mydatabase.dbo"
+    schema="mydatabase.dbo",
)

When performing operations such as table or component reflection, a schema
@@ -662,9 +685,10 @@ components will be quoted separately for case sensitive names and other
special characters. Given an argument as below::

Table(
-    "some_table", metadata,
+    "some_table",
+    metadata,
    Column("q", String(50)),
-    schema="MyDataBase.dbo"
+    schema="MyDataBase.dbo",
)

The above schema would be rendered as ``[MyDataBase].dbo``, and also in
@@ -677,21 +701,22 @@ Below, the "owner" will be considered as ``MyDataBase.dbo`` and the
"database" will be None::

Table(
-    "some_table", metadata,
+    "some_table",
+    metadata,
    Column("q", String(50)),
-    schema="[MyDataBase.dbo]"
+    schema="[MyDataBase.dbo]",
)

To individually specify both database and owner name with special characters
or embedded dots, use two sets of brackets::

Table(
-    "some_table", metadata,
+    "some_table",
+    metadata,
    Column("q", String(50)),
-    schema="[MyDataBase.Period].[MyOwner.Dot]"
+    schema="[MyDataBase.Period].[MyOwner.Dot]",
)


.. versionchanged:: 1.2 the SQL Server dialect now treats brackets as
    identifier delimiters splitting the schema into separate database
    and owner tokens, to allow dots within either name itself.
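A brief sketch, for orientation only, of passing such a database/owner value during reflection; ``MetaData.reflect`` and the inspector are standard APIs, and the connection URL is a placeholder rather than part of this change::

    from sqlalchemy import MetaData, create_engine, inspect

    engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")

    # reflect every table owned by mydatabase.dbo
    metadata = MetaData()
    metadata.reflect(bind=engine, schema="mydatabase.dbo")

    # or just list the table names visible in that schema
    print(inspect(engine).get_table_names(schema="mydatabase.dbo"))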
@@ -706,10 +731,11 @@ schema-qualified table would be auto-aliased when used in a
SELECT statement; given a table::

account_table = Table(
-    'account', metadata,
-    Column('id', Integer, primary_key=True),
-    Column('info', String(100)),
-    schema="customer_schema"
+    "account",
+    metadata,
+    Column("id", Integer, primary_key=True),
+    Column("info", String(100)),
+    schema="customer_schema",
)

this legacy mode of rendering would assume that "customer_schema.account"
@@ -752,37 +778,55 @@ which renders the index as ``CREATE CLUSTERED INDEX my_index ON table (x)``.

To generate a clustered primary key use::

-Table('my_table', metadata,
-    Column('x', ...),
-    Column('y', ...),
-    PrimaryKeyConstraint("x", "y", mssql_clustered=True))
+Table(
+    "my_table",
+    metadata,
+    Column("x", ...),
+    Column("y", ...),
+    PrimaryKeyConstraint("x", "y", mssql_clustered=True),
+)

-which will render the table, for example, as::
+which will render the table, for example, as:

-CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
-    PRIMARY KEY CLUSTERED (x, y))
+.. sourcecode:: sql
+
+    CREATE TABLE my_table (
+        x INTEGER NOT NULL,
+        y INTEGER NOT NULL,
+        PRIMARY KEY CLUSTERED (x, y)
+    )

Similarly, we can generate a clustered unique constraint using::

-Table('my_table', metadata,
-    Column('x', ...),
-    Column('y', ...),
-    PrimaryKeyConstraint("x"),
-    UniqueConstraint("y", mssql_clustered=True),
-)
+Table(
+    "my_table",
+    metadata,
+    Column("x", ...),
+    Column("y", ...),
+    PrimaryKeyConstraint("x"),
+    UniqueConstraint("y", mssql_clustered=True),
+)

To explicitly request a non-clustered primary key (for example, when
a separate clustered index is desired), use::

-Table('my_table', metadata,
-    Column('x', ...),
-    Column('y', ...),
-    PrimaryKeyConstraint("x", "y", mssql_clustered=False))
+Table(
+    "my_table",
+    metadata,
+    Column("x", ...),
+    Column("y", ...),
+    PrimaryKeyConstraint("x", "y", mssql_clustered=False),
+)

-which will render the table, for example, as::
+which will render the table, for example, as:

-CREATE TABLE my_table (x INTEGER NOT NULL, y INTEGER NOT NULL,
-    PRIMARY KEY NONCLUSTERED (x, y))
+.. sourcecode:: sql
+
+    CREATE TABLE my_table (
+        x INTEGER NOT NULL,
+        y INTEGER NOT NULL,
+        PRIMARY KEY NONCLUSTERED (x, y)
+    )
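To round out the non-clustered example above, an illustrative sketch (not part of this change) of declaring the clustered index separately, using the same ``mssql_clustered`` option on :class:`.Index`; the index and column names are hypothetical::

    from sqlalchemy import (
        Column,
        Index,
        Integer,
        MetaData,
        PrimaryKeyConstraint,
        Table,
    )

    metadata = MetaData()
    my_table = Table(
        "my_table",
        metadata,
        Column("x", Integer, nullable=False),
        Column("y", Integer, nullable=False),
        PrimaryKeyConstraint("x", "y", mssql_clustered=False),
    )

    # the clustered index lives on a different column than the primary key
    Index("idx_my_table_y", my_table.c.y, mssql_clustered=True)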
Columnstore Index Support
-------------------------
@@ -820,7 +864,7 @@ INCLUDE

The ``mssql_include`` option renders INCLUDE(colname) for the given string
names::

-Index("my_index", table.c.x, mssql_include=['y'])
+Index("my_index", table.c.x, mssql_include=["y"])

would render the index as ``CREATE INDEX my_index ON table (x) INCLUDE (y)``
@@ -875,18 +919,19 @@ To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`_schema.Table`
which has triggers::

-Table('mytable', metadata,
-    Column('id', Integer, primary_key=True),
+Table(
+    "mytable",
+    metadata,
+    Column("id", Integer, primary_key=True),
    # ...,
-    implicit_returning=False
+    implicit_returning=False,
)

Declarative form::

class MyClass(Base):
    # ...
-    __table_args__ = {'implicit_returning':False}
+    __table_args__ = {"implicit_returning": False}

.. _mssql_rowcount_versioning:
@@ -920,7 +965,9 @@ isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
-following ALTER DATABASE commands executed at the SQL prompt::
+following ALTER DATABASE commands executed at the SQL prompt:
+
+.. sourcecode:: sql

    ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
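If the command is issued through SQLAlchemy rather than at a SQL prompt, one possible sketch follows, assuming a login with ALTER DATABASE permission and a placeholder connection URL; ALTER DATABASE cannot run inside a transaction, so an AUTOCOMMIT connection is used::

    from sqlalchemy import create_engine, text

    engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")

    with engine.connect() as conn:
        # run the DDL outside of any transaction block
        conn = conn.execution_options(isolation_level="AUTOCOMMIT")
        conn.execute(
            text("ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON")
        )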
@@ -1555,29 +1602,6 @@ class MSUUid(sqltypes.Uuid):

        return process

-    def _sentinel_value_resolver(self, dialect):
-        if not self.native_uuid:
-            # dealing entirely with strings going in and out of
-            # CHAR(32)
-            return None
-
-        # true if we expect the returned UUID values to be strings
-        # pymssql sends UUID objects back, pyodbc sends strings,
-        # however pyodbc converts them to uppercase coming back, so
-        # need special logic here
-        character_based_uuid = not dialect.supports_native_uuid
-
-        if character_based_uuid:
-            # we sent UUID objects in all cases, see bind_processor()
-            def process(uuid_value):
-                return str(uuid_value).upper()
-
-            return process
-        elif not self.as_uuid:
-            return _python_UUID
-        else:
-            return None
-

class UNIQUEIDENTIFIER(sqltypes.Uuid[sqltypes._UUID_RETURN]):
    __visit_name__ = "UNIQUEIDENTIFIER"
@@ -1841,7 +1865,6 @@ class MSExecutionContext(default.DefaultExecutionContext):
    _enable_identity_insert = False
    _select_lastrowid = False
    _lastrowid = None
-    _rowcount = None

    dialect: MSDialect

@@ -1961,13 +1984,6 @@ class MSExecutionContext(default.DefaultExecutionContext):
    def get_lastrowid(self):
        return self._lastrowid

-    @property
-    def rowcount(self):
-        if self._rowcount is not None:
-            return self._rowcount
-        else:
-            return self.cursor.rowcount
-
    def handle_dbapi_exception(self, e):
        if self._enable_identity_insert:
            try:
@@ -2019,6 +2035,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
        self.tablealiases = {}
        super().__init__(*args, **kwargs)

+    def _format_frame_clause(self, range_, **kw):
+        kw["literal_execute"] = True
+        return super()._format_frame_clause(range_, **kw)
+
    def _with_legacy_schema_aliasing(fn):
        def decorate(self, *arg, **kw):
            if self.dialect.legacy_schema_aliasing:
@@ -1,5 +1,5 @@
# dialects/mssql/information_schema.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -1,5 +1,5 @@
# dialects/mssql/json.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -54,9 +54,7 @@ class JSON(sqltypes.JSON):
dictionary or list, the :meth:`_types.JSON.Comparator.as_json` accessor
should be used::

-stmt = select(
-    data_table.c.data["some key"].as_json()
-).where(
+stmt = select(data_table.c.data["some key"].as_json()).where(
    data_table.c.data["some key"].as_json() == {"sub": "structure"}
)

@@ -67,9 +65,7 @@ class JSON(sqltypes.JSON):
:meth:`_types.JSON.Comparator.as_integer`,
:meth:`_types.JSON.Comparator.as_float`::

-stmt = select(
-    data_table.c.data["some key"].as_string()
-).where(
+stmt = select(data_table.c.data["some key"].as_string()).where(
    data_table.c.data["some key"].as_string() == "some string"
)
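For context, a self-contained sketch of the accessors shown above; the table definition is hypothetical and uses the dialect-specific ``JSON`` type documented in this module, and the sketch is not part of the change itself::

    from sqlalchemy import Column, Integer, MetaData, Table, select
    from sqlalchemy.dialects.mssql import JSON

    metadata = MetaData()
    data_table = Table(
        "data_table",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("data", JSON),
    )

    # typed comparison against a JSON member uses the as_*() accessors
    stmt = select(data_table.c.data["some key"].as_integer()).where(
        data_table.c.data["some key"].as_integer() > 5
    )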
@@ -1,5 +1,5 @@
# dialects/mssql/provision.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -22,10 +22,17 @@ from ...testing.provision import generate_driver_url
from ...testing.provision import get_temp_table_name
from ...testing.provision import log
from ...testing.provision import normalize_sequence
+from ...testing.provision import post_configure_engine
from ...testing.provision import run_reap_dbs
from ...testing.provision import temp_table_keyword_args


+@post_configure_engine.for_db("mssql")
+def post_configure_engine(url, engine, follower_ident):
+    if engine.driver == "pyodbc":
+        engine.dialect.dbapi.pooling = False
+
+
@generate_driver_url.for_db("mssql")
def generate_driver_url(url, driver, query_str):
    backend = url.get_backend_name()
@@ -1,5 +1,5 @@
# dialects/mssql/pymssql.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -103,6 +103,7 @@ class MSDialect_pymssql(MSDialect):
    "message 20006",  # Write to the server failed
    "message 20017",  # Unexpected EOF from the server
    "message 20047",  # DBPROCESS is dead or not enabled
+    "The server failed to resume the transaction",
):
    if msg in str(e):
        return True
@@ -1,5 +1,5 @@
# dialects/mssql/pyodbc.py
-# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
+# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -30,7 +30,9 @@ is configured on the client, a basic DSN-based connection looks like::

engine = create_engine("mssql+pyodbc://scott:tiger@some_dsn")

-Which above, will pass the following connection string to PyODBC::
+Which above, will pass the following connection string to PyODBC:
+
+.. sourcecode:: text

    DSN=some_dsn;UID=scott;PWD=tiger

@@ -49,7 +51,9 @@ When using a hostname connection, the driver name must also be specified in the
query parameters of the URL. As these names usually have spaces in them, the
name must be URL encoded which means using plus signs for spaces::

-engine = create_engine("mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server")
+engine = create_engine(
+    "mssql+pyodbc://scott:tiger@myhost:port/databasename?driver=ODBC+Driver+17+for+SQL+Server"
+)

The ``driver`` keyword is significant to the pyodbc dialect and must be
specified in lowercase.
@@ -69,6 +73,7 @@ internally::
The equivalent URL can be constructed using :class:`_sa.engine.URL`::

from sqlalchemy.engine import URL

connection_url = URL.create(
    "mssql+pyodbc",
    username="scott",
@@ -83,7 +88,6 @@ The equivalent URL can be constructed using :class:`_sa.engine.URL`::
    },
)


Pass through exact Pyodbc string
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

@@ -94,8 +98,11 @@ using the parameter ``odbc_connect``. A :class:`_sa.engine.URL` object
can help make this easier::

from sqlalchemy.engine import URL

connection_string = "DRIVER={SQL Server Native Client 10.0};SERVER=dagger;DATABASE=test;UID=user;PWD=password"
-connection_url = URL.create("mssql+pyodbc", query={"odbc_connect": connection_string})
+connection_url = URL.create(
+    "mssql+pyodbc", query={"odbc_connect": connection_string}
+)

engine = create_engine(connection_url)
@@ -127,7 +134,8 @@ database using Azure credentials::
from sqlalchemy.engine.url import URL
from azure import identity

-SQL_COPT_SS_ACCESS_TOKEN = 1256  # Connection option for access tokens, as defined in msodbcsql.h
+# Connection option for access tokens, as defined in msodbcsql.h
+SQL_COPT_SS_ACCESS_TOKEN = 1256
TOKEN_URL = "https://database.windows.net/"  # The token URL for any Azure SQL database

connection_string = "mssql+pyodbc://@my-server.database.windows.net/myDb?driver=ODBC+Driver+17+for+SQL+Server"
@@ -136,14 +144,19 @@ database using Azure credentials::

azure_credentials = identity.DefaultAzureCredential()


@event.listens_for(engine, "do_connect")
def provide_token(dialect, conn_rec, cargs, cparams):
    # remove the "Trusted_Connection" parameter that SQLAlchemy adds
    cargs[0] = cargs[0].replace(";Trusted_Connection=Yes", "")

    # create token credential
-    raw_token = azure_credentials.get_token(TOKEN_URL).token.encode("utf-16-le")
-    token_struct = struct.pack(f"<I{len(raw_token)}s", len(raw_token), raw_token)
+    raw_token = azure_credentials.get_token(TOKEN_URL).token.encode(
+        "utf-16-le"
+    )
+    token_struct = struct.pack(
+        f"<I{len(raw_token)}s", len(raw_token), raw_token
+    )

    # apply it to keyword arguments
    cparams["attrs_before"] = {SQL_COPT_SS_ACCESS_TOKEN: token_struct}
@@ -176,7 +189,9 @@ emit a ``.rollback()`` after an operation had a failure of some kind.
This specific case can be handled by passing ``ignore_no_transaction_on_rollback=True`` to
the SQL Server dialect via the :func:`_sa.create_engine` function as follows::

-engine = create_engine(connection_url, ignore_no_transaction_on_rollback=True)
+engine = create_engine(
+    connection_url, ignore_no_transaction_on_rollback=True
+)

Using the above parameter, the dialect will catch ``ProgrammingError``
exceptions raised during ``connection.rollback()`` and emit a warning
@@ -236,7 +251,6 @@ behavior and pass long strings as varchar(max)/nvarchar(max) using the
    },
)


Pyodbc Pooling / connection close behavior
------------------------------------------

@@ -301,7 +315,8 @@ Server dialect supports this parameter by passing the

engine = create_engine(
    "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
-    fast_executemany=True)
+    fast_executemany=True,
+)

.. versionchanged:: 2.0.9 - the ``fast_executemany`` parameter now has its
    intended effect of this PyODBC feature taking effect for all INSERT
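As a closing illustration, ``fast_executemany`` pays off on executemany-style INSERTs, i.e. one ``execute()`` call given a list of parameter dictionaries; the table definition and row values below are hypothetical and are not part of this change::

    from sqlalchemy import Column, Integer, MetaData, Table, create_engine

    engine = create_engine(
        "mssql+pyodbc://scott:tiger@mssql2017:1433/test?driver=ODBC+Driver+17+for+SQL+Server",
        fast_executemany=True,
    )

    m = MetaData()
    t = Table(
        "t", m, Column("id", Integer, primary_key=True), Column("x", Integer)
    )
    m.create_all(engine)

    # one execute() with a list of dicts is sent as a single executemany batch
    with engine.begin() as conn:
        conn.execute(t.insert(), [{"x": i} for i in range(1000)])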