mirror of https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git synced 2025-08-14 00:25:46 +02:00

Commit 0aa3ef8fc2 (parent ce52f8a23a) by MoonTestUse1, 2025-02-09 01:11:49 +06:00

Check 09.02.2025

5827 changed files with 14316 additions and 1906434 deletions


@@ -1,5 +1,5 @@
# sql/__init__.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/_dml_constructors.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -24,10 +24,7 @@ def insert(table: _DMLTableArgument) -> Insert:
from sqlalchemy import insert
stmt = (
insert(user_table).
values(name='username', fullname='Full Username')
)
stmt = insert(user_table).values(name="username", fullname="Full Username")
Similar functionality is available via the
:meth:`_expression.TableClause.insert` method on
@@ -78,7 +75,7 @@ def insert(table: _DMLTableArgument) -> Insert:
:ref:`tutorial_core_insert` - in the :ref:`unified_tutorial`
"""
""" # noqa: E501
return Insert(table)
@@ -90,9 +87,7 @@ def update(table: _DMLTableArgument) -> Update:
from sqlalchemy import update
stmt = (
update(user_table).
where(user_table.c.id == 5).
values(name='user #5')
update(user_table).where(user_table.c.id == 5).values(name="user #5")
)
Similar functionality is available via the
@@ -109,7 +104,7 @@ def update(table: _DMLTableArgument) -> Update:
:ref:`tutorial_core_update_delete` - in the :ref:`unified_tutorial`
"""
""" # noqa: E501
return Update(table)
@@ -120,10 +115,7 @@ def delete(table: _DMLTableArgument) -> Delete:
from sqlalchemy import delete
stmt = (
delete(user_table).
where(user_table.c.id == 5)
)
stmt = delete(user_table).where(user_table.c.id == 5)
Similar functionality is available via the
:meth:`_expression.TableClause.delete` method on

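The three reformatted docstrings above cover the insert(), update(), and delete() constructors. As a consolidated reference, here is a minimal sketch that builds all three against an assumed example table; the table definition and column names are illustrative and not part of this commit:

    # Minimal sketch of the 2.0-style DML constructors shown above.
    from sqlalchemy import MetaData, Table, Column, Integer, String
    from sqlalchemy import insert, update, delete

    metadata = MetaData()
    user_table = Table(
        "user_account",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String(30)),
        Column("fullname", String(60)),
    )

    # INSERT INTO user_account (name, fullname) VALUES (...)
    ins_stmt = insert(user_table).values(name="username", fullname="Full Username")

    # UPDATE user_account SET name=... WHERE id = 5
    upd_stmt = update(user_table).where(user_table.c.id == 5).values(name="user #5")

    # DELETE FROM user_account WHERE id = 5
    del_stmt = delete(user_table).where(user_table.c.id == 5)

    print(ins_stmt, upd_stmt, del_stmt, sep="\n")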

@@ -1,5 +1,5 @@
# sql/_elements_constructors.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -125,11 +125,8 @@ def and_( # type: ignore[empty-body]
from sqlalchemy import and_
stmt = select(users_table).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
and_(users_table.c.name == "wendy", users_table.c.enrolled == True)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
@@ -137,9 +134,8 @@ def and_( # type: ignore[empty-body]
operator precedence behavior)::
stmt = select(users_table).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
(users_table.c.name == "wendy") & (users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`_expression.Select.where`
@@ -147,9 +143,11 @@ def and_( # type: ignore[empty-body]
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select(users_table).\
where(users_table.c.name == 'wendy').\
where(users_table.c.enrolled == True)
stmt = (
select(users_table)
.where(users_table.c.name == "wendy")
.where(users_table.c.enrolled == True)
)
The :func:`.and_` construct must be given at least one positional
argument in order to be valid; a :func:`.and_` construct with no
@@ -159,6 +157,7 @@ def and_( # type: ignore[empty-body]
specified::
from sqlalchemy import true
criteria = and_(true(), *expressions)
The above expression will compile to SQL as the expression ``true``
@@ -190,11 +189,8 @@ if not TYPE_CHECKING:
from sqlalchemy import and_
stmt = select(users_table).where(
and_(
users_table.c.name == 'wendy',
users_table.c.enrolled == True
)
)
and_(users_table.c.name == "wendy", users_table.c.enrolled == True)
)
The :func:`.and_` conjunction is also available using the
Python ``&`` operator (though note that compound expressions
@@ -202,9 +198,8 @@ if not TYPE_CHECKING:
operator precedence behavior)::
stmt = select(users_table).where(
(users_table.c.name == 'wendy') &
(users_table.c.enrolled == True)
)
(users_table.c.name == "wendy") & (users_table.c.enrolled == True)
)
The :func:`.and_` operation is also implicit in some cases;
the :meth:`_expression.Select.where`
@@ -212,9 +207,11 @@ if not TYPE_CHECKING:
times against a statement, which will have the effect of each
clause being combined using :func:`.and_`::
stmt = select(users_table).\
where(users_table.c.name == 'wendy').\
where(users_table.c.enrolled == True)
stmt = (
select(users_table)
.where(users_table.c.name == "wendy")
.where(users_table.c.enrolled == True)
)
The :func:`.and_` construct must be given at least one positional
argument in order to be valid; a :func:`.and_` construct with no
@@ -224,6 +221,7 @@ if not TYPE_CHECKING:
specified::
from sqlalchemy import true
criteria = and_(true(), *expressions)
The above expression will compile to SQL as the expression ``true``
@@ -241,7 +239,7 @@ if not TYPE_CHECKING:
:func:`.or_`
"""
""" # noqa: E501
return BooleanClauseList.and_(*clauses)
@@ -307,9 +305,12 @@ def asc(
e.g.::
from sqlalchemy import asc
stmt = select(users_table).order_by(asc(users_table.c.name))
will produce SQL as::
will produce SQL as:
.. sourcecode:: sql
SELECT id, name FROM user ORDER BY name ASC
@@ -346,9 +347,11 @@ def collate(
e.g.::
collate(mycolumn, 'utf8_bin')
collate(mycolumn, "utf8_bin")
produces::
produces:
.. sourcecode:: sql
mycolumn COLLATE utf8_bin
@@ -373,9 +376,12 @@ def between(
E.g.::
from sqlalchemy import between
stmt = select(users_table).where(between(users_table.c.id, 5, 7))
Would produce SQL resembling::
Would produce SQL resembling:
.. sourcecode:: sql
SELECT id, name FROM user WHERE id BETWEEN :id_1 AND :id_2
@@ -493,10 +499,13 @@ def bindparam(
from sqlalchemy import bindparam
stmt = select(users_table).\
where(users_table.c.name == bindparam('username'))
stmt = select(users_table).where(
users_table.c.name == bindparam("username")
)
The above statement, when rendered, will produce SQL similar to::
The above statement, when rendered, will produce SQL similar to:
.. sourcecode:: sql
SELECT id, name FROM user WHERE name = :username
@@ -504,22 +513,25 @@ def bindparam(
would typically be applied at execution time to a method
like :meth:`_engine.Connection.execute`::
result = connection.execute(stmt, username='wendy')
result = connection.execute(stmt, {"username": "wendy"})
Explicit use of :func:`.bindparam` is also common when producing
UPDATE or DELETE statements that are to be invoked multiple times,
where the WHERE criterion of the statement is to change on each
invocation, such as::
stmt = (users_table.update().
where(user_table.c.name == bindparam('username')).
values(fullname=bindparam('fullname'))
)
stmt = (
users_table.update()
.where(user_table.c.name == bindparam("username"))
.values(fullname=bindparam("fullname"))
)
connection.execute(
stmt, [{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
]
stmt,
[
{"username": "wendy", "fullname": "Wendy Smith"},
{"username": "jack", "fullname": "Jack Jones"},
],
)
SQLAlchemy's Core expression system makes wide use of
@@ -528,7 +540,7 @@ def bindparam(
coerced into fixed :func:`.bindparam` constructs. For example, given
a comparison operation such as::
expr = users_table.c.name == 'Wendy'
expr = users_table.c.name == "Wendy"
The above expression will produce a :class:`.BinaryExpression`
construct, where the left side is the :class:`_schema.Column` object
@@ -536,9 +548,11 @@ def bindparam(
:class:`.BindParameter` representing the literal value::
print(repr(expr.right))
BindParameter('%(4327771088 name)s', 'Wendy', type_=String())
BindParameter("%(4327771088 name)s", "Wendy", type_=String())
The expression above will render SQL such as::
The expression above will render SQL such as:
.. sourcecode:: sql
user.name = :name_1
@@ -547,10 +561,12 @@ def bindparam(
along where it is later used within statement execution. If we
invoke a statement like the following::
stmt = select(users_table).where(users_table.c.name == 'Wendy')
stmt = select(users_table).where(users_table.c.name == "Wendy")
result = connection.execute(stmt)
We would see SQL logging output as::
We would see SQL logging output as:
.. sourcecode:: sql
SELECT "user".id, "user".name
FROM "user"
@@ -568,9 +584,11 @@ def bindparam(
bound placeholders based on the arguments passed, as in::
stmt = users_table.insert()
result = connection.execute(stmt, name='Wendy')
result = connection.execute(stmt, {"name": "Wendy"})
The above will produce SQL output as::
The above will produce SQL output as:
.. sourcecode:: sql
INSERT INTO "user" (name) VALUES (%(name)s)
{'name': 'Wendy'}
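The hunks above replace keyword-argument execution (connection.execute(stmt, username="wendy")) with dictionary parameters. A short, hedged sketch of that executemany pattern end to end, assuming an in-memory SQLite engine and a simple users_table purely for demonstration:

    # Hedged sketch: explicit bindparam() placeholders executed with a list of
    # parameter dictionaries, matching the reformatted examples above.
    from sqlalchemy import (
        Column, Integer, MetaData, String, Table,
        bindparam, create_engine, insert,
    )

    engine = create_engine("sqlite+pysqlite:///:memory:")
    metadata = MetaData()
    users_table = Table(
        "user",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String),
        Column("fullname", String),
    )
    metadata.create_all(engine)

    stmt = (
        users_table.update()
        .where(users_table.c.name == bindparam("username"))
        .values(fullname=bindparam("fullname"))
    )

    with engine.begin() as connection:
        connection.execute(
            insert(users_table),
            [{"name": "wendy", "fullname": ""}, {"name": "jack", "fullname": ""}],
        )
        connection.execute(
            stmt,
            [
                {"username": "wendy", "fullname": "Wendy Smith"},
                {"username": "jack", "fullname": "Jack Jones"},
            ],
        )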
@@ -643,12 +661,12 @@ def bindparam(
:param quote:
True if this parameter name requires quoting and is not
currently known as a SQLAlchemy reserved word; this currently
only applies to the Oracle backend, where bound names must
only applies to the Oracle Database backends, where bound names must
sometimes be quoted.
:param isoutparam:
if True, the parameter should be treated like a stored procedure
"OUT" parameter. This applies to backends such as Oracle which
"OUT" parameter. This applies to backends such as Oracle Database which
support OUT parameters.
:param expanding:
@@ -734,16 +752,17 @@ def case(
from sqlalchemy import case
stmt = select(users_table).\
where(
case(
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J'),
else_='E'
)
)
stmt = select(users_table).where(
case(
(users_table.c.name == "wendy", "W"),
(users_table.c.name == "jack", "J"),
else_="E",
)
)
The above statement will produce SQL resembling::
The above statement will produce SQL resembling:
.. sourcecode:: sql
SELECT id, name FROM user
WHERE CASE
@@ -761,14 +780,9 @@ def case(
compared against keyed to result expressions. The statement below is
equivalent to the preceding statement::
stmt = select(users_table).\
where(
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name,
else_='E'
)
)
stmt = select(users_table).where(
case({"wendy": "W", "jack": "J"}, value=users_table.c.name, else_="E")
)
The values which are accepted as result values in
:paramref:`.case.whens` as well as with :paramref:`.case.else_` are
@@ -783,20 +797,16 @@ def case(
from sqlalchemy import case, literal_column
case(
(
orderline.c.qty > 100,
literal_column("'greaterthan100'")
),
(
orderline.c.qty > 10,
literal_column("'greaterthan10'")
),
else_=literal_column("'lessthan10'")
(orderline.c.qty > 100, literal_column("'greaterthan100'")),
(orderline.c.qty > 10, literal_column("'greaterthan10'")),
else_=literal_column("'lessthan10'"),
)
The above will render the given constants without using bound
parameters for the result values (but still for the comparison
values), as in::
values), as in:
.. sourcecode:: sql
CASE
WHEN (orderline.qty > :qty_1) THEN 'greaterthan100'
@@ -817,8 +827,8 @@ def case(
resulting value, e.g.::
case(
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J')
(users_table.c.name == "wendy", "W"),
(users_table.c.name == "jack", "J"),
)
In the second form, it accepts a Python dictionary of comparison
@@ -826,10 +836,7 @@ def case(
:paramref:`.case.value` to be present, and values will be compared
using the ``==`` operator, e.g.::
case(
{"wendy": "W", "jack": "J"},
value=users_table.c.name
)
case({"wendy": "W", "jack": "J"}, value=users_table.c.name)
:param value: An optional SQL expression which will be used as a
fixed "comparison point" for candidate values within a dictionary
@@ -842,7 +849,7 @@ def case(
expressions evaluate to true.
"""
""" # noqa: E501
return Case(*whens, value=value, else_=else_)
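Both case() forms reformatted above, the positional (condition, result) tuples and the dictionary shorthand with value=, are equivalent; a hedged sketch compiling both against an assumed users_table:

    # Hedged sketch: the two equivalent case() forms from the docstring hunks above.
    from sqlalchemy import Column, Integer, MetaData, String, Table, case, select

    metadata = MetaData()
    users_table = Table(
        "user",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String),
    )

    # positional (condition, result) tuples
    initial_by_whens = case(
        (users_table.c.name == "wendy", "W"),
        (users_table.c.name == "jack", "J"),
        else_="E",
    )

    # dictionary shorthand compared against a fixed value
    initial_by_value = case(
        {"wendy": "W", "jack": "J"}, value=users_table.c.name, else_="E"
    )

    print(select(users_table.c.id, initial_by_whens.label("initial")))
    print(select(users_table.c.id, initial_by_value.label("initial")))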
@@ -860,7 +867,9 @@ def cast(
stmt = select(cast(product_table.c.unit_price, Numeric(10, 4)))
The above statement will produce SQL resembling::
The above statement will produce SQL resembling:
.. sourcecode:: sql
SELECT CAST(unit_price AS NUMERIC(10, 4)) FROM product
@@ -929,11 +938,11 @@ def try_cast(
from sqlalchemy import select, try_cast, Numeric
stmt = select(
try_cast(product_table.c.unit_price, Numeric(10, 4))
)
stmt = select(try_cast(product_table.c.unit_price, Numeric(10, 4)))
The above would render on Microsoft SQL Server as::
The above would render on Microsoft SQL Server as:
.. sourcecode:: sql
SELECT TRY_CAST (product_table.unit_price AS NUMERIC(10, 4))
FROM product_table
@@ -964,7 +973,9 @@ def column(
id, name = column("id"), column("name")
stmt = select(id, name).select_from("user")
The above statement would produce SQL like::
The above statement would produce SQL like:
.. sourcecode:: sql
SELECT id, name FROM user
@@ -1000,13 +1011,14 @@ def column(
from sqlalchemy import table, column, select
user = table("user",
column("id"),
column("name"),
column("description"),
user = table(
"user",
column("id"),
column("name"),
column("description"),
)
stmt = select(user.c.description).where(user.c.name == 'wendy')
stmt = select(user.c.description).where(user.c.name == "wendy")
A :func:`_expression.column` / :func:`.table`
construct like that illustrated
@@ -1053,7 +1065,9 @@ def desc(
stmt = select(users_table).order_by(desc(users_table.c.name))
will produce SQL as::
will produce SQL as:
.. sourcecode:: sql
SELECT id, name FROM user ORDER BY name DESC
@@ -1086,16 +1100,26 @@ def desc(
def distinct(expr: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
"""Produce an column-expression-level unary ``DISTINCT`` clause.
This applies the ``DISTINCT`` keyword to an individual column
expression, and is typically contained within an aggregate function,
as in::
This applies the ``DISTINCT`` keyword to an **individual column
expression** (e.g. not the whole statement), and renders **specifically
in that column position**; this is used for containment within
an aggregate function, as in::
from sqlalchemy import distinct, func
stmt = select(func.count(distinct(users_table.c.name)))
The above would produce an expression resembling::
stmt = select(users_table.c.id, func.count(distinct(users_table.c.name)))
SELECT COUNT(DISTINCT name) FROM user
The above would produce a statement resembling:
.. sourcecode:: sql
SELECT user.id, count(DISTINCT user.name) FROM user
.. tip:: The :func:`_sql.distinct` function does **not** apply DISTINCT
to the full SELECT statement, instead applying a DISTINCT modifier
to **individual column expressions**. For general ``SELECT DISTINCT``
support, use the
:meth:`_sql.Select.distinct` method on :class:`_sql.Select`.
The :func:`.distinct` function is also available as a column-level
method, e.g. :meth:`_expression.ColumnElement.distinct`, as in::
@@ -1118,7 +1142,7 @@ def distinct(expr: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
:data:`.func`
"""
""" # noqa: E501
return UnaryExpression._create_distinct(expr)
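The reworded note above stresses that distinct() wraps a single column expression, while statement-level DISTINCT comes from Select.distinct(). A brief hedged sketch contrasting the two, reusing an assumed users_table:

    # Hedged sketch: column-level DISTINCT inside an aggregate vs.
    # statement-level SELECT DISTINCT.
    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy import distinct, func, select

    metadata = MetaData()
    users_table = Table(
        "user",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String),
    )

    # DISTINCT applied to one column expression, inside count()
    agg_stmt = select(func.count(distinct(users_table.c.name)))

    # DISTINCT applied to the whole statement via the Select.distinct() method
    stmt = select(users_table.c.name).distinct()

    print(agg_stmt)  # renders count(DISTINCT ...) in the column list
    print(stmt)      # renders SELECT DISTINCT ...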
@@ -1148,6 +1172,9 @@ def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract:
:param field: The field to extract.
.. warning:: This field is used as a literal SQL string.
**DO NOT PASS UNTRUSTED INPUT TO THIS STRING**.
:param expr: A column or Python scalar expression serving as the
right side of the ``EXTRACT`` expression.
@@ -1156,9 +1183,10 @@ def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract:
from sqlalchemy import extract
from sqlalchemy import table, column
logged_table = table("user",
column("id"),
column("date_created"),
logged_table = table(
"user",
column("id"),
column("date_created"),
)
stmt = select(logged_table.c.id).where(
@@ -1170,9 +1198,9 @@ def extract(field: str, expr: _ColumnExpressionArgument[Any]) -> Extract:
Similarly, one can also select an extracted component::
stmt = select(
extract("YEAR", logged_table.c.date_created)
).where(logged_table.c.id == 1)
stmt = select(extract("YEAR", logged_table.c.date_created)).where(
logged_table.c.id == 1
)
The implementation of ``EXTRACT`` may vary across database backends.
Users are reminded to consult their database documentation.
@@ -1231,7 +1259,8 @@ def funcfilter(
E.g.::
from sqlalchemy import funcfilter
funcfilter(func.count(1), MyClass.name == 'some name')
funcfilter(func.count(1), MyClass.name == "some name")
Would produce "COUNT(1) FILTER (WHERE myclass.name = 'some name')".
@@ -1288,10 +1317,11 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
from sqlalchemy import desc, nulls_first
stmt = select(users_table).order_by(
nulls_first(desc(users_table.c.name)))
stmt = select(users_table).order_by(nulls_first(desc(users_table.c.name)))
The SQL expression from the above would resemble::
The SQL expression from the above would resemble:
.. sourcecode:: sql
SELECT id, name FROM user ORDER BY name DESC NULLS FIRST
@@ -1302,7 +1332,8 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
function version, as in::
stmt = select(users_table).order_by(
users_table.c.name.desc().nulls_first())
users_table.c.name.desc().nulls_first()
)
.. versionchanged:: 1.4 :func:`.nulls_first` is renamed from
:func:`.nullsfirst` in previous releases.
@@ -1318,7 +1349,7 @@ def nulls_first(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
:meth:`_expression.Select.order_by`
"""
""" # noqa: E501
return UnaryExpression._create_nulls_first(column)
@@ -1332,10 +1363,11 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
from sqlalchemy import desc, nulls_last
stmt = select(users_table).order_by(
nulls_last(desc(users_table.c.name)))
stmt = select(users_table).order_by(nulls_last(desc(users_table.c.name)))
The SQL expression from the above would resemble::
The SQL expression from the above would resemble:
.. sourcecode:: sql
SELECT id, name FROM user ORDER BY name DESC NULLS LAST
@@ -1345,8 +1377,7 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
rather than as its standalone
function version, as in::
stmt = select(users_table).order_by(
users_table.c.name.desc().nulls_last())
stmt = select(users_table).order_by(users_table.c.name.desc().nulls_last())
.. versionchanged:: 1.4 :func:`.nulls_last` is renamed from
:func:`.nullslast` in previous releases.
@@ -1362,7 +1393,7 @@ def nulls_last(column: _ColumnExpressionArgument[_T]) -> UnaryExpression[_T]:
:meth:`_expression.Select.order_by`
"""
""" # noqa: E501
return UnaryExpression._create_nulls_last(column)
@@ -1377,11 +1408,8 @@ def or_( # type: ignore[empty-body]
from sqlalchemy import or_
stmt = select(users_table).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
or_(users_table.c.name == "wendy", users_table.c.name == "jack")
)
The :func:`.or_` conjunction is also available using the
Python ``|`` operator (though note that compound expressions
@@ -1389,9 +1417,8 @@ def or_( # type: ignore[empty-body]
operator precedence behavior)::
stmt = select(users_table).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
(users_table.c.name == "wendy") | (users_table.c.name == "jack")
)
The :func:`.or_` construct must be given at least one positional
argument in order to be valid; a :func:`.or_` construct with no
@@ -1401,6 +1428,7 @@ def or_( # type: ignore[empty-body]
specified::
from sqlalchemy import false
or_criteria = or_(false(), *expressions)
The above expression will compile to SQL as the expression ``false``
@@ -1432,11 +1460,8 @@ if not TYPE_CHECKING:
from sqlalchemy import or_
stmt = select(users_table).where(
or_(
users_table.c.name == 'wendy',
users_table.c.name == 'jack'
)
)
or_(users_table.c.name == "wendy", users_table.c.name == "jack")
)
The :func:`.or_` conjunction is also available using the
Python ``|`` operator (though note that compound expressions
@@ -1444,9 +1469,8 @@ if not TYPE_CHECKING:
operator precedence behavior)::
stmt = select(users_table).where(
(users_table.c.name == 'wendy') |
(users_table.c.name == 'jack')
)
(users_table.c.name == "wendy") | (users_table.c.name == "jack")
)
The :func:`.or_` construct must be given at least one positional
argument in order to be valid; a :func:`.or_` construct with no
@@ -1456,6 +1480,7 @@ if not TYPE_CHECKING:
specified::
from sqlalchemy import false
or_criteria = or_(false(), *expressions)
The above expression will compile to SQL as the expression ``false``
@@ -1473,7 +1498,7 @@ if not TYPE_CHECKING:
:func:`.and_`
"""
""" # noqa: E501
return BooleanClauseList.or_(*clauses)
@@ -1494,7 +1519,9 @@ def over(
func.row_number().over(order_by=mytable.c.some_column)
Would produce::
Would produce:
.. sourcecode:: sql
ROW_NUMBER() OVER(ORDER BY some_column)
@@ -1503,10 +1530,11 @@ def over(
mutually-exclusive parameters each accept a 2-tuple, which contains
a combination of integers and None::
func.row_number().over(
order_by=my_table.c.some_column, range_=(None, 0))
func.row_number().over(order_by=my_table.c.some_column, range_=(None, 0))
The above would produce::
The above would produce:
.. sourcecode:: sql
ROW_NUMBER() OVER(ORDER BY some_column
RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW)
@@ -1517,19 +1545,19 @@ def over(
* RANGE BETWEEN 5 PRECEDING AND 10 FOLLOWING::
func.row_number().over(order_by='x', range_=(-5, 10))
func.row_number().over(order_by="x", range_=(-5, 10))
* ROWS BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW::
func.row_number().over(order_by='x', rows=(None, 0))
func.row_number().over(order_by="x", rows=(None, 0))
* RANGE BETWEEN 2 PRECEDING AND UNBOUNDED FOLLOWING::
func.row_number().over(order_by='x', range_=(-2, None))
func.row_number().over(order_by="x", range_=(-2, None))
* RANGE BETWEEN 1 FOLLOWING AND 3 FOLLOWING::
func.row_number().over(order_by='x', range_=(1, 3))
func.row_number().over(order_by="x", range_=(1, 3))
:param element: a :class:`.FunctionElement`, :class:`.WithinGroup`,
or other compatible construct.
@@ -1558,7 +1586,7 @@ def over(
:func:`_expression.within_group`
"""
""" # noqa: E501
return Over(element, partition_by, order_by, range_, rows)
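The range_/rows bullet examples above map 2-tuples of integers and None onto window frame clauses. A minimal hedged sketch that compiles one of those frames, using an assumed table:

    # Hedged sketch: a window function with an explicit RANGE frame, using the
    # 2-tuple convention documented above.  my_table is an assumed example table.
    from sqlalchemy import Column, Integer, MetaData, Table, func, select

    metadata = MetaData()
    my_table = Table(
        "my_table",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("some_column", Integer),
    )

    # RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW
    win = func.row_number().over(
        order_by=my_table.c.some_column, range_=(None, 0)
    )

    print(select(my_table.c.id, win.label("rn")))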
@@ -1589,7 +1617,7 @@ def text(text: str) -> TextClause:
E.g.::
t = text("SELECT * FROM users WHERE id=:user_id")
result = connection.execute(t, user_id=12)
result = connection.execute(t, {"user_id": 12})
For SQL statements where a colon is required verbatim, as within
an inline string, use a backslash to escape::
@@ -1607,9 +1635,11 @@ def text(text: str) -> TextClause:
method allows
specification of return columns including names and types::
t = text("SELECT * FROM users WHERE id=:user_id").\
bindparams(user_id=7).\
columns(id=Integer, name=String)
t = (
text("SELECT * FROM users WHERE id=:user_id")
.bindparams(user_id=7)
.columns(id=Integer, name=String)
)
for id, name in connection.execute(t):
print(id, name)
@@ -1619,7 +1649,7 @@ def text(text: str) -> TextClause:
such as for the WHERE clause of a SELECT statement::
s = select(users.c.id, users.c.name).where(text("id=:user_id"))
result = connection.execute(s, user_id=12)
result = connection.execute(s, {"user_id": 12})
:func:`_expression.text` is also used for the construction
of a full, standalone statement using plain text.
@@ -1691,9 +1721,7 @@ def tuple_(
from sqlalchemy import tuple_
tuple_(table.c.col1, table.c.col2).in_(
[(1, 2), (5, 12), (10, 19)]
)
tuple_(table.c.col1, table.c.col2).in_([(1, 2), (5, 12), (10, 19)])
.. versionchanged:: 1.3.6 Added support for SQLite IN tuples.
@@ -1743,10 +1771,9 @@ def type_coerce(
:meth:`_expression.ColumnElement.label`::
stmt = select(
type_coerce(log_table.date_string, StringDateTime()).label('date')
type_coerce(log_table.date_string, StringDateTime()).label("date")
)
A type that features bound-value handling will also have that behavior
take effect when literal values or :func:`.bindparam` constructs are
passed to :func:`.type_coerce` as targets.
@@ -1807,11 +1834,10 @@ def within_group(
the :meth:`.FunctionElement.within_group` method, e.g.::
from sqlalchemy import within_group
stmt = select(
department.c.id,
func.percentile_cont(0.5).within_group(
department.c.salary.desc()
)
func.percentile_cont(0.5).within_group(department.c.salary.desc()),
)
The above statement would produce SQL similar to


@@ -1,5 +1,5 @@
# sql/_orm_types.py
# Copyright (C) 2022-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2022-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/_py_util.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/_selectable_constructors.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -155,16 +155,16 @@ def exists(
:meth:`_sql.SelectBase.exists` method::
exists_criteria = (
select(table2.c.col2).
where(table1.c.col1 == table2.c.col2).
exists()
select(table2.c.col2).where(table1.c.col1 == table2.c.col2).exists()
)
The EXISTS criteria is then used inside of an enclosing SELECT::
stmt = select(table1.c.col1).where(exists_criteria)
The above statement will then be of the form::
The above statement will then be of the form:
.. sourcecode:: sql
SELECT col1 FROM table1 WHERE EXISTS
(SELECT table2.col2 FROM table2 WHERE table2.col2 = table1.col1)
@@ -225,11 +225,14 @@ def join(
E.g.::
j = join(user_table, address_table,
user_table.c.id == address_table.c.user_id)
j = join(
user_table, address_table, user_table.c.id == address_table.c.user_id
)
stmt = select(user_table).select_from(j)
would emit SQL along the lines of::
would emit SQL along the lines of:
.. sourcecode:: sql
SELECT user.id, user.name FROM user
JOIN address ON user.id = address.user_id
@@ -263,7 +266,7 @@ def join(
:class:`_expression.Join` - the type of object produced.
"""
""" # noqa: E501
return Join(left, right, onclause, isouter, full)
@@ -529,13 +532,14 @@ def tablesample(
from sqlalchemy import func
selectable = people.tablesample(
func.bernoulli(1),
name='alias',
seed=func.random())
func.bernoulli(1), name="alias", seed=func.random()
)
stmt = select(selectable.c.people_id)
Assuming ``people`` with a column ``people_id``, the above
statement would render as::
statement would render as:
.. sourcecode:: sql
SELECT alias.people_id FROM
people AS alias TABLESAMPLE bernoulli(:bernoulli_1)
@@ -613,12 +617,10 @@ def values(
from sqlalchemy import values
value_expr = values(
column('id', Integer),
column('name', String),
name="my_values"
).data(
[(1, 'name1'), (2, 'name2'), (3, 'name3')]
)
column("id", Integer),
column("name", String),
name="my_values",
).data([(1, "name1"), (2, "name2"), (3, "name3")])
:param \*columns: column expressions, typically composed using
:func:`_expression.column` objects.
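For context, a hedged sketch that embeds the reformatted values() construct above into a SELECT; the column access via .c and the WHERE criterion are illustrative assumptions:

    # Hedged sketch: a VALUES construct used as a FROM element in a SELECT.
    from sqlalchemy import Integer, String, column, select, values

    value_expr = values(
        column("id", Integer),
        column("name", String),
        name="my_values",
    ).data([(1, "name1"), (2, "name2"), (3, "name3")])

    stmt = select(value_expr.c.id, value_expr.c.name).where(value_expr.c.id > 1)
    print(stmt)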


@@ -1,5 +1,5 @@
# sql/_typing.py
# Copyright (C) 2022-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2022-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -69,6 +69,7 @@ if TYPE_CHECKING:
from .sqltypes import TableValueType
from .sqltypes import TupleType
from .type_api import TypeEngine
from ..engine import Dialect
from ..util.typing import TypeGuard
_T = TypeVar("_T", bound=Any)
@@ -92,11 +93,21 @@ class _CoreAdapterProto(Protocol):
def __call__(self, obj: _CE) -> _CE: ...
class _HasDialect(Protocol):
"""protocol for Engine/Connection-like objects that have dialect
attribute.
"""
@property
def dialect(self) -> Dialect: ...
# match column types that are not ORM entities
_NOT_ENTITY = TypeVar(
"_NOT_ENTITY",
int,
str,
bool,
"datetime",
"date",
"time",
@@ -106,10 +117,12 @@ _NOT_ENTITY = TypeVar(
"Decimal",
)
_StarOrOne = Literal["*", 1]
_MAYBE_ENTITY = TypeVar(
"_MAYBE_ENTITY",
roles.ColumnsClauseRole,
Literal["*", 1],
_StarOrOne,
Type[Any],
Inspectable[_HasClauseElement[Any]],
_HasClauseElement[Any],
@@ -134,7 +147,7 @@ _ColumnsClauseArgument = Union[
roles.TypedColumnsClauseRole[_T],
roles.ColumnsClauseRole,
"SQLCoreOperations[_T]",
Literal["*", 1],
_StarOrOne,
Type[_T],
Inspectable[_HasClauseElement[_T]],
_HasClauseElement[_T],
@@ -174,6 +187,7 @@ _ColumnExpressionArgument = Union[
_HasClauseElement[_T],
"SQLCoreOperations[_T]",
roles.ExpressionElementRole[_T],
roles.TypedColumnsClauseRole[_T],
Callable[[], "ColumnElement[_T]"],
"LambdaElement",
]
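The _HasDialect protocol added above is structural: any object exposing a dialect property conforms without inheriting from it. A hedged sketch of a minimal conforming stand-in; the FakeBind class and the SQLite dialect choice are illustrative assumptions:

    # Hedged sketch: an Engine/Connection-like object that satisfies a
    # "has a .dialect property" protocol purely structurally.
    from sqlalchemy.dialects import sqlite
    from sqlalchemy.engine import Dialect


    class FakeBind:
        """Stand-in exposing only the .dialect attribute."""

        @property
        def dialect(self) -> Dialect:
            return sqlite.dialect()


    def dialect_name(bind) -> str:
        # any Engine, Connection, or FakeBind works here; only .dialect is used
        return bind.dialect.name


    print(dialect_name(FakeBind()))  # "sqlite"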


@@ -1,5 +1,5 @@
# sql/annotation.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/base.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -72,7 +72,6 @@ if TYPE_CHECKING:
from .elements import ClauseList
from .elements import ColumnClause # noqa
from .elements import ColumnElement
from .elements import KeyedColumnElement
from .elements import NamedColumn
from .elements import SQLCoreOperations
from .elements import TextClause
@@ -481,7 +480,7 @@ class DialectKWArgs:
Index.argument_for("mydialect", "length", None)
some_index = Index('a', 'b', mydialect_length=5)
some_index = Index("a", "b", mydialect_length=5)
The :meth:`.DialectKWArgs.argument_for` method is a per-argument
way of adding extra arguments to the
@@ -570,7 +569,7 @@ class DialectKWArgs:
and ``<argument_name>``. For example, the ``postgresql_where``
argument would be locatable as::
arg = my_object.dialect_options['postgresql']['where']
arg = my_object.dialect_options["postgresql"]["where"]
.. versionadded:: 0.9.2
@@ -918,11 +917,7 @@ class Options(metaclass=_MetaOptions):
execution_options,
) = QueryContext.default_load_options.from_execution_options(
"_sa_orm_load_options",
{
"populate_existing",
"autoflush",
"yield_per"
},
{"populate_existing", "autoflush", "yield_per"},
execution_options,
statement._execution_options,
)
@@ -1029,6 +1024,7 @@ class Executable(roles.StatementRole):
]
is_select = False
is_from_statement = False
is_update = False
is_insert = False
is_text = False
@@ -1167,6 +1163,7 @@ class Executable(roles.StatementRole):
render_nulls: bool = ...,
is_delete_using: bool = ...,
is_update_from: bool = ...,
preserve_rowcount: bool = False,
**opt: Any,
) -> Self: ...
@@ -1223,6 +1220,7 @@ class Executable(roles.StatementRole):
from sqlalchemy import event
@event.listens_for(some_engine, "before_execute")
def _process_opt(conn, statement, multiparams, params, execution_options):
"run a SQL function before invoking a statement"
@@ -1352,7 +1350,7 @@ class _SentinelColumnCharacterization(NamedTuple):
_COLKEY = TypeVar("_COLKEY", Union[None, str], str)
_COL_co = TypeVar("_COL_co", bound="ColumnElement[Any]", covariant=True)
_COL = TypeVar("_COL", bound="KeyedColumnElement[Any]")
_COL = TypeVar("_COL", bound="ColumnElement[Any]")
class _ColumnMetrics(Generic[_COL_co]):
@@ -1474,14 +1472,14 @@ class ColumnCollection(Generic[_COLKEY, _COL_co]):
mean either two columns with the same key, in which case the column
returned by key access is **arbitrary**::
>>> x1, x2 = Column('x', Integer), Column('x', Integer)
>>> x1, x2 = Column("x", Integer), Column("x", Integer)
>>> cc = ColumnCollection(columns=[(x1.name, x1), (x2.name, x2)])
>>> list(cc)
[Column('x', Integer(), table=None),
Column('x', Integer(), table=None)]
>>> cc['x'] is x1
>>> cc["x"] is x1
False
>>> cc['x'] is x2
>>> cc["x"] is x2
True
Or it can also mean the same column multiple times. These cases are
@@ -1640,9 +1638,15 @@ class ColumnCollection(Generic[_COLKEY, _COL_co]):
def __eq__(self, other: Any) -> bool:
return self.compare(other)
@overload
def get(self, key: str, default: None = None) -> Optional[_COL_co]: ...
@overload
def get(self, key: str, default: _COL) -> Union[_COL_co, _COL]: ...
def get(
self, key: str, default: Optional[_COL_co] = None
) -> Optional[_COL_co]:
self, key: str, default: Optional[_COL] = None
) -> Optional[Union[_COL_co, _COL]]:
"""Get a :class:`_sql.ColumnClause` or :class:`_schema.Column` object
based on a string key name from this
:class:`_expression.ColumnCollection`."""
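The new overloads above distinguish get() with and without a default column. A tiny hedged sketch of the runtime behavior, using an assumed table:

    # Hedged sketch: ColumnCollection.get() with and without a default.
    from sqlalchemy import Column, Integer, MetaData, String, Table

    metadata = MetaData()
    t = Table(
        "sometable",
        metadata,
        Column("id", Integer, primary_key=True),
        Column("name", String),
    )

    assert t.c.get("name") is t.c.name            # present key returns the column
    assert t.c.get("missing") is None             # absent key, no default
    assert t.c.get("missing", t.c.id) is t.c.id   # absent key with a default column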
@@ -1923,16 +1927,15 @@ class DedupeColumnCollection(ColumnCollection[str, _NAMEDCOL]):
"""
def add(
self, column: ColumnElement[Any], key: Optional[str] = None
def add( # type: ignore[override]
self, column: _NAMEDCOL, key: Optional[str] = None
) -> None:
named_column = cast(_NAMEDCOL, column)
if key is not None and named_column.key != key:
if key is not None and column.key != key:
raise exc.ArgumentError(
"DedupeColumnCollection requires columns be under "
"the same key as their .key"
)
key = named_column.key
key = column.key
if key is None:
raise exc.ArgumentError(
@@ -1942,17 +1945,17 @@ class DedupeColumnCollection(ColumnCollection[str, _NAMEDCOL]):
if key in self._index:
existing = self._index[key][1]
if existing is named_column:
if existing is column:
return
self.replace(named_column)
self.replace(column)
# pop out memoized proxy_set as this
# operation may very well be occurring
# in a _make_proxy operation
util.memoized_property.reset(named_column, "proxy_set")
util.memoized_property.reset(column, "proxy_set")
else:
self._append_new_column(key, named_column)
self._append_new_column(key, column)
def _append_new_column(self, key: str, named_column: _NAMEDCOL) -> None:
l = len(self._collection)
@@ -2027,8 +2030,8 @@ class DedupeColumnCollection(ColumnCollection[str, _NAMEDCOL]):
e.g.::
t = Table('sometable', metadata, Column('col1', Integer))
t.columns.replace(Column('col1', Integer, key='columnone'))
t = Table("sometable", metadata, Column("col1", Integer))
t.columns.replace(Column("col1", Integer, key="columnone"))
will remove the original 'col1' from the collection, and add
the new column under the name 'columnname'.
@@ -2131,7 +2134,7 @@ class ColumnSet(util.OrderedSet["ColumnClause[Any]"]):
l.append(c == local)
return elements.and_(*l)
def __hash__(self):
def __hash__(self): # type: ignore[override]
return hash(tuple(x for x in self))


@@ -1,5 +1,5 @@
# sql/cache_key.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -37,6 +37,7 @@ from ..util.typing import Protocol
if typing.TYPE_CHECKING:
from .elements import BindParameter
from .elements import ClauseElement
from .elements import ColumnElement
from .visitors import _TraverseInternalsType
from ..engine.interfaces import _CoreSingleExecuteParams
@@ -557,18 +558,17 @@ class CacheKey(NamedTuple):
_anon_map = prefix_anon_map()
return {b.key % _anon_map: b.effective_value for b in self.bindparams}
@util.preload_module("sqlalchemy.sql.elements")
def _apply_params_to_element(
self, original_cache_key: CacheKey, target_element: ClauseElement
) -> ClauseElement:
if target_element._is_immutable:
self, original_cache_key: CacheKey, target_element: ColumnElement[Any]
) -> ColumnElement[Any]:
if target_element._is_immutable or original_cache_key is self:
return target_element
translate = {
k.key: v.value
for k, v in zip(original_cache_key.bindparams, self.bindparams)
}
return target_element.params(translate)
elements = util.preloaded.sql_elements
return elements._OverrideBinds(
target_element, self.bindparams, original_cache_key.bindparams
)
def _ad_hoc_cache_key_from_args(


@@ -1,5 +1,5 @@
# sql/coercions.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -29,7 +29,6 @@ from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from . import operators
from . import roles
from . import visitors
from ._typing import is_from_clause
@@ -58,9 +57,9 @@ if typing.TYPE_CHECKING:
from .elements import ClauseElement
from .elements import ColumnClause
from .elements import ColumnElement
from .elements import DQLDMLClauseElement
from .elements import NamedColumn
from .elements import SQLCoreOperations
from .elements import TextClause
from .schema import Column
from .selectable import _ColumnsClauseElement
from .selectable import _JoinTargetProtocol
@@ -191,7 +190,7 @@ def expect(
role: Type[roles.DDLReferredColumnRole],
element: Any,
**kw: Any,
) -> Column[Any]: ...
) -> Union[Column[Any], str]: ...
@overload
@@ -207,7 +206,7 @@ def expect(
role: Type[roles.StatementOptionRole],
element: Any,
**kw: Any,
) -> DQLDMLClauseElement: ...
) -> Union[ColumnElement[Any], TextClause]: ...
@overload
@@ -493,6 +492,7 @@ class RoleImpl:
element: Any,
argname: Optional[str] = None,
resolved: Optional[Any] = None,
*,
advice: Optional[str] = None,
code: Optional[str] = None,
err: Optional[Exception] = None,
@@ -595,7 +595,7 @@ def _no_text_coercion(
class _NoTextCoercion(RoleImpl):
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
def _literal_coercion(self, element, *, argname=None, **kw):
if isinstance(element, str) and issubclass(
elements.TextClause, self._role_class
):
@@ -613,7 +613,7 @@ class _CoerceLiterals(RoleImpl):
def _text_coercion(self, element, argname=None):
return _no_text_coercion(element, argname)
def _literal_coercion(self, element, argname=None, **kw):
def _literal_coercion(self, element, *, argname=None, **kw):
if isinstance(element, str):
if self._coerce_star and element == "*":
return elements.ColumnClause("*", is_literal=True)
@@ -641,7 +641,8 @@ class LiteralValueImpl(RoleImpl):
self,
element,
resolved,
argname,
argname=None,
*,
type_=None,
literal_execute=False,
**kw,
@@ -659,7 +660,7 @@ class LiteralValueImpl(RoleImpl):
literal_execute=literal_execute,
)
def _literal_coercion(self, element, argname=None, type_=None, **kw):
def _literal_coercion(self, element, **kw):
return element
@@ -671,6 +672,7 @@ class _SelectIsNotFrom(RoleImpl):
element: Any,
argname: Optional[str] = None,
resolved: Optional[Any] = None,
*,
advice: Optional[str] = None,
code: Optional[str] = None,
err: Optional[Exception] = None,
@@ -745,7 +747,7 @@ class ExpressionElementImpl(_ColumnCoercions, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, name=None, type_=None, argname=None, is_crud=False, **kw
self, element, *, name=None, type_=None, is_crud=False, **kw
):
if (
element is None
@@ -787,15 +789,22 @@ class ExpressionElementImpl(_ColumnCoercions, RoleImpl):
class BinaryElementImpl(ExpressionElementImpl, RoleImpl):
__slots__ = ()
def _literal_coercion(
self, element, expr, operator, bindparam_type=None, argname=None, **kw
def _literal_coercion( # type: ignore[override]
self,
element,
*,
expr,
operator,
bindparam_type=None,
argname=None,
**kw,
):
try:
return expr._bind_param(operator, element, type_=bindparam_type)
except exc.ArgumentError as err:
self._raise_for_expected(element, err=err)
def _post_coercion(self, resolved, expr, bindparam_type=None, **kw):
def _post_coercion(self, resolved, *, expr, bindparam_type=None, **kw):
if resolved.type._isnull and not expr.type._isnull:
resolved = resolved._with_binary_element_type(
bindparam_type if bindparam_type is not None else expr.type
@@ -833,22 +842,23 @@ class InElementImpl(RoleImpl):
% (elem.__class__.__name__)
)
def _literal_coercion(self, element, expr, operator, **kw):
@util.preload_module("sqlalchemy.sql.elements")
def _literal_coercion(self, element, *, expr, operator, **kw):
if util.is_non_string_iterable(element):
non_literal_expressions: Dict[
Optional[operators.ColumnOperators],
operators.ColumnOperators,
Optional[_ColumnExpressionArgument[Any]],
_ColumnExpressionArgument[Any],
] = {}
element = list(element)
for o in element:
if not _is_literal(o):
if not isinstance(o, operators.ColumnOperators):
if not isinstance(
o, util.preloaded.sql_elements.ColumnElement
) and not hasattr(o, "__clause_element__"):
self._raise_for_expected(element, **kw)
else:
non_literal_expressions[o] = o
elif o is None:
non_literal_expressions[o] = elements.Null()
if non_literal_expressions:
return elements.ClauseList(
@@ -867,7 +877,7 @@ class InElementImpl(RoleImpl):
else:
self._raise_for_expected(element, **kw)
def _post_coercion(self, element, expr, operator, **kw):
def _post_coercion(self, element, *, expr, operator, **kw):
if element._is_select_base:
# for IN, we are doing scalar_subquery() coercion without
# a warning
@@ -893,12 +903,10 @@ class OnClauseImpl(_ColumnCoercions, RoleImpl):
_coerce_consts = True
def _literal_coercion(
self, element, name=None, type_=None, argname=None, is_crud=False, **kw
):
def _literal_coercion(self, element, **kw):
self._raise_for_expected(element)
def _post_coercion(self, resolved, original_element=None, **kw):
def _post_coercion(self, resolved, *, original_element=None, **kw):
# this is a hack right now as we want to use coercion on an
# ORM InstrumentedAttribute, but we want to return the object
# itself if it is one, not its clause element.
@@ -983,7 +991,7 @@ class GroupByImpl(ByOfImpl, RoleImpl):
class DMLColumnImpl(_ReturnsStringKey, RoleImpl):
__slots__ = ()
def _post_coercion(self, element, as_key=False, **kw):
def _post_coercion(self, element, *, as_key=False, **kw):
if as_key:
return element.key
else:
@@ -993,7 +1001,7 @@ class DMLColumnImpl(_ReturnsStringKey, RoleImpl):
class ConstExprImpl(RoleImpl):
__slots__ = ()
def _literal_coercion(self, element, argname=None, **kw):
def _literal_coercion(self, element, *, argname=None, **kw):
if element is None:
return elements.Null()
elif element is False:
@@ -1019,7 +1027,7 @@ class TruncatedLabelImpl(_StringOnly, RoleImpl):
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, argname=None, **kw):
def _literal_coercion(self, element, **kw):
"""coerce the given value to :class:`._truncated_label`.
Existing :class:`._truncated_label` and
@@ -1069,7 +1077,9 @@ class LimitOffsetImpl(RoleImpl):
else:
self._raise_for_expected(element, argname, resolved)
def _literal_coercion(self, element, name, type_, **kw):
def _literal_coercion( # type: ignore[override]
self, element, *, name, type_, **kw
):
if element is None:
return None
else:
@@ -1111,7 +1121,7 @@ class ColumnsClauseImpl(_SelectIsNotFrom, _CoerceLiterals, RoleImpl):
_guess_straight_column = re.compile(r"^\w\S*$", re.I)
def _raise_for_expected(
self, element, argname=None, resolved=None, advice=None, **kw
self, element, argname=None, resolved=None, *, advice=None, **kw
):
if not advice and isinstance(element, list):
advice = (
@@ -1149,7 +1159,9 @@ class ReturnsRowsImpl(RoleImpl):
class StatementImpl(_CoerceLiterals, RoleImpl):
__slots__ = ()
def _post_coercion(self, resolved, original_element, argname=None, **kw):
def _post_coercion(
self, resolved, *, original_element, argname=None, **kw
):
if resolved is not original_element and not isinstance(
original_element, str
):
@@ -1215,7 +1227,7 @@ class JoinTargetImpl(RoleImpl):
_skip_clauseelement_for_target_match = True
def _literal_coercion(self, element, argname=None, **kw):
def _literal_coercion(self, element, *, argname=None, **kw):
self._raise_for_expected(element, argname)
def _implicit_coercions(
@@ -1223,6 +1235,7 @@ class JoinTargetImpl(RoleImpl):
element: Any,
resolved: Any,
argname: Optional[str] = None,
*,
legacy: bool = False,
**kw: Any,
) -> Any:
@@ -1256,6 +1269,7 @@ class FromClauseImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
element: Any,
resolved: Any,
argname: Optional[str] = None,
*,
explicit_subquery: bool = False,
allow_select: bool = True,
**kw: Any,
@@ -1277,7 +1291,7 @@ class FromClauseImpl(_SelectIsNotFrom, _NoTextCoercion, RoleImpl):
else:
self._raise_for_expected(element, argname, resolved)
def _post_coercion(self, element, deannotate=False, **kw):
def _post_coercion(self, element, *, deannotate=False, **kw):
if deannotate:
return element._deannotate()
else:
@@ -1292,7 +1306,7 @@ class StrictFromClauseImpl(FromClauseImpl):
element: Any,
resolved: Any,
argname: Optional[str] = None,
explicit_subquery: bool = False,
*,
allow_select: bool = False,
**kw: Any,
) -> Any:
@@ -1312,7 +1326,7 @@ class StrictFromClauseImpl(FromClauseImpl):
class AnonymizedFromClauseImpl(StrictFromClauseImpl):
__slots__ = ()
def _post_coercion(self, element, flat=False, name=None, **kw):
def _post_coercion(self, element, *, flat=False, name=None, **kw):
assert name is None
return element._anonymous_fromclause(flat=flat)


@@ -1,5 +1,5 @@
# sql/compiler.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -29,6 +29,7 @@ import collections
import collections.abc as collections_abc
import contextlib
from enum import IntEnum
import functools
import itertools
import operator
import re
@@ -114,7 +115,6 @@ if typing.TYPE_CHECKING:
from .selectable import Select
from .selectable import SelectState
from .type_api import _BindProcessorType
from .type_api import _SentinelProcessorType
from ..engine.cursor import CursorResultMetaData
from ..engine.interfaces import _CoreSingleExecuteParams
from ..engine.interfaces import _DBAPIAnyExecuteParams
@@ -545,8 +545,8 @@ class _InsertManyValues(NamedTuple):
"""
sentinel_param_keys: Optional[Sequence[Union[str, int]]] = None
"""parameter str keys / int indexes in each param dictionary / tuple
sentinel_param_keys: Optional[Sequence[str]] = None
"""parameter str keys in each param dictionary / tuple
that would link to the client side "sentinel" values for that row, which
we can use to match up parameter sets to result rows.
@@ -556,6 +556,10 @@ class _InsertManyValues(NamedTuple):
.. versionadded:: 2.0.10
.. versionchanged:: 2.0.29 - the sequence is now string dictionary keys
only, used against the "compiled parameters" collection before
the parameters were converted by bound parameter processors
"""
implicit_sentinel: bool = False
@@ -600,7 +604,8 @@ class _InsertManyValuesBatch(NamedTuple):
replaced_parameters: _DBAPIAnyExecuteParams
processed_setinputsizes: Optional[_GenericSetInputSizesType]
batch: Sequence[_DBAPISingleExecuteParams]
batch_size: int
sentinel_values: Sequence[Tuple[Any, ...]]
current_batch_size: int
batchnum: int
total_batches: int
rows_sorted: bool
@@ -1675,19 +1680,9 @@ class SQLCompiler(Compiled):
for v in self._insertmanyvalues.insert_crud_params
]
sentinel_param_int_idxs = (
[
self.positiontup.index(cast(str, _param_key))
for _param_key in self._insertmanyvalues.sentinel_param_keys # noqa: E501
]
if self._insertmanyvalues.sentinel_param_keys is not None
else None
)
self._insertmanyvalues = self._insertmanyvalues._replace(
single_values_expr=single_values_expr,
insert_crud_params=insert_crud_params,
sentinel_param_keys=sentinel_param_int_idxs,
)
def _process_numeric(self):
@@ -1756,21 +1751,11 @@ class SQLCompiler(Compiled):
for v in self._insertmanyvalues.insert_crud_params
]
sentinel_param_int_idxs = (
[
self.positiontup.index(cast(str, _param_key))
for _param_key in self._insertmanyvalues.sentinel_param_keys # noqa: E501
]
if self._insertmanyvalues.sentinel_param_keys is not None
else None
)
self._insertmanyvalues = self._insertmanyvalues._replace(
# This has the numbers (:1, :2)
single_values_expr=single_values_expr,
# The single binds are instead %s so they can be formatted
insert_crud_params=insert_crud_params,
sentinel_param_keys=sentinel_param_int_idxs,
)
@util.memoized_property
@@ -1802,23 +1787,6 @@ class SQLCompiler(Compiled):
if value is not None
}
@util.memoized_property
def _imv_sentinel_value_resolvers(
self,
) -> Optional[Sequence[Optional[_SentinelProcessorType[Any]]]]:
imv = self._insertmanyvalues
if imv is None or imv.sentinel_columns is None:
return None
sentinel_value_resolvers = [
_scol.type._cached_sentinel_value_processor(self.dialect)
for _scol in imv.sentinel_columns
]
if util.NONE_SET.issuperset(sentinel_value_resolvers):
return None
else:
return sentinel_value_resolvers
def is_subquery(self):
return len(self.stack) > 1
@@ -2378,11 +2346,76 @@ class SQLCompiler(Compiled):
"""Called when a SELECT statement has no froms, and no FROM clause is
to be appended.
Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output.
Gives Oracle Database a chance to tack on a ``FROM DUAL`` to the string
output.
"""
return ""
def visit_override_binds(self, override_binds, **kw):
"""SQL compile the nested element of an _OverrideBinds with
bindparams swapped out.
The _OverrideBinds is not normally expected to be compiled; it
is meant to be used when an already cached statement is to be used,
the compilation was already performed, and only the bound params should
be swapped in at execution time.
However, there are test cases that exercise this object, and
additionally the ORM subquery loader is known to feed in expressions
which include this construct into new queries (discovered in #11173),
so it has to do the right thing at compile time as well.
"""
# get SQL text first
sqltext = override_binds.element._compiler_dispatch(self, **kw)
# for a test compile that is not for caching, change binds after the
# fact. note that we don't try to
# swap the bindparam as we compile, because our element may be
# elsewhere in the statement already (e.g. a subquery or perhaps a
# CTE) and was already visited / compiled. See
# test_relationship_criteria.py ->
# test_selectinload_local_criteria_subquery
for k in override_binds.translate:
if k not in self.binds:
continue
bp = self.binds[k]
# so this would work, just change the value of bp in place.
# but we don't want to mutate things outside.
# bp.value = override_binds.translate[bp.key]
# continue
# instead, need to replace bp with new_bp or otherwise accommodate
# in all internal collections
new_bp = bp._with_value(
override_binds.translate[bp.key],
maintain_key=True,
required=False,
)
name = self.bind_names[bp]
self.binds[k] = self.binds[name] = new_bp
self.bind_names[new_bp] = name
self.bind_names.pop(bp, None)
if bp in self.post_compile_params:
self.post_compile_params |= {new_bp}
if bp in self.literal_execute_params:
self.literal_execute_params |= {new_bp}
ckbm_tuple = self._cache_key_bind_match
if ckbm_tuple:
ckbm, cksm = ckbm_tuple
for bp in bp._cloned_set:
if bp.key in cksm:
cb = cksm[bp.key]
ckbm[cb].append(new_bp)
return sqltext
def visit_grouping(self, grouping, asfrom=False, **kwargs):
return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")"
@@ -2904,7 +2937,7 @@ class SQLCompiler(Compiled):
**kwargs: Any,
) -> str:
if add_to_result_map is not None:
add_to_result_map(func.name, func.name, (), func.type)
add_to_result_map(func.name, func.name, (func.name,), func.type)
disp = getattr(self, "visit_%s_func" % func.name.lower(), None)
@@ -3614,6 +3647,7 @@ class SQLCompiler(Compiled):
render_postcompile=False,
**kwargs,
):
if not skip_bind_expression:
impl = bindparam.type.dialect_impl(self.dialect)
if impl._has_bind_expression:
@@ -4353,6 +4387,11 @@ class SQLCompiler(Compiled):
objects: Tuple[Any, ...],
type_: TypeEngine[Any],
) -> None:
# note objects must be non-empty for cursor.py to handle the
# collection properly
assert objects
if keyname is None or keyname == "*":
self._ordered_columns = False
self._ad_hoc_textual = True
@@ -4426,7 +4465,7 @@ class SQLCompiler(Compiled):
_add_to_result_map = add_to_result_map
def add_to_result_map(keyname, name, objects, type_):
_add_to_result_map(keyname, name, (), type_)
_add_to_result_map(keyname, name, (keyname,), type_)
# if we redefined col_expr for type expressions, wrap the
# callable with one that adds the original column to the targets
@@ -5358,13 +5397,22 @@ class SQLCompiler(Compiled):
self,
statement: str,
parameters: _DBAPIMultiExecuteParams,
compiled_parameters: List[_MutableCoreSingleExecuteParams],
generic_setinputsizes: Optional[_GenericSetInputSizesType],
batch_size: int,
sort_by_parameter_order: bool,
schema_translate_map: Optional[SchemaTranslateMapType],
) -> Iterator[_InsertManyValuesBatch]:
imv = self._insertmanyvalues
assert imv is not None
if not imv.sentinel_param_keys:
_sentinel_from_params = None
else:
_sentinel_from_params = operator.itemgetter(
*imv.sentinel_param_keys
)
lenparams = len(parameters)
if imv.is_default_expr and not self.dialect.supports_default_metavalue:
# backend doesn't support
@@ -5396,15 +5444,24 @@ class SQLCompiler(Compiled):
downgraded = False
if use_row_at_a_time:
for batchnum, param in enumerate(
cast("Sequence[_DBAPISingleExecuteParams]", parameters), 1
for batchnum, (param, compiled_param) in enumerate(
cast(
"Sequence[Tuple[_DBAPISingleExecuteParams, _MutableCoreSingleExecuteParams]]", # noqa: E501
zip(parameters, compiled_parameters),
),
1,
):
yield _InsertManyValuesBatch(
statement,
param,
generic_setinputsizes,
[param],
batch_size,
(
[_sentinel_from_params(compiled_param)]
if _sentinel_from_params
else []
),
1,
batchnum,
lenparams,
sort_by_parameter_order,
@@ -5412,7 +5469,19 @@ class SQLCompiler(Compiled):
)
return
executemany_values = f"({imv.single_values_expr})"
if schema_translate_map:
rst = functools.partial(
self.preparer._render_schema_translates,
schema_translate_map=schema_translate_map,
)
else:
rst = None
imv_single_values_expr = imv.single_values_expr
if rst:
imv_single_values_expr = rst(imv_single_values_expr)
executemany_values = f"({imv_single_values_expr})"
statement = statement.replace(executemany_values, "__EXECMANY_TOKEN__")
# Use optional insertmanyvalues_max_parameters
@@ -5435,7 +5504,10 @@ class SQLCompiler(Compiled):
),
)
batches = list(parameters)
batches = cast("List[Sequence[Any]]", list(parameters))
compiled_batches = cast(
"List[Sequence[Any]]", list(compiled_parameters)
)
processed_setinputsizes: Optional[_GenericSetInputSizesType] = None
batchnum = 1
@@ -5446,6 +5518,12 @@ class SQLCompiler(Compiled):
insert_crud_params = imv.insert_crud_params
assert insert_crud_params is not None
if rst:
insert_crud_params = [
(col, key, rst(expr), st)
for col, key, expr, st in insert_crud_params
]
escaped_bind_names: Mapping[str, str]
expand_pos_lower_index = expand_pos_upper_index = 0
@@ -5493,10 +5571,10 @@ class SQLCompiler(Compiled):
if imv.embed_values_counter:
executemany_values_w_comma = (
f"({imv.single_values_expr}, _IMV_VALUES_COUNTER), "
f"({imv_single_values_expr}, _IMV_VALUES_COUNTER), "
)
else:
executemany_values_w_comma = f"({imv.single_values_expr}), "
executemany_values_w_comma = f"({imv_single_values_expr}), "
all_names_we_will_expand: Set[str] = set()
for elem in imv.insert_crud_params:
@@ -5529,8 +5607,16 @@ class SQLCompiler(Compiled):
)
while batches:
batch = cast("Sequence[Any]", batches[0:batch_size])
batch = batches[0:batch_size]
compiled_batch = compiled_batches[0:batch_size]
batches[0:batch_size] = []
compiled_batches[0:batch_size] = []
if batches:
current_batch_size = batch_size
else:
current_batch_size = len(batch)
if generic_setinputsizes:
# if setinputsizes is present, expand this collection to
@@ -5540,7 +5626,7 @@ class SQLCompiler(Compiled):
(new_key, len_, typ)
for new_key, len_, typ in (
(f"{key}_{index}", len_, typ)
for index in range(len(batch))
for index in range(current_batch_size)
for key, len_, typ in generic_setinputsizes
)
]
@@ -5550,6 +5636,9 @@ class SQLCompiler(Compiled):
num_ins_params = imv.num_positional_params_counted
batch_iterator: Iterable[Sequence[Any]]
extra_params_left: Sequence[Any]
extra_params_right: Sequence[Any]
if num_ins_params == len(batch[0]):
extra_params_left = extra_params_right = ()
batch_iterator = batch
@@ -5572,7 +5661,7 @@ class SQLCompiler(Compiled):
)[:-2]
else:
expanded_values_string = (
(executemany_values_w_comma * len(batch))
(executemany_values_w_comma * current_batch_size)
)[:-2]
if self._numeric_binds and num_ins_params > 0:
@@ -5588,7 +5677,7 @@ class SQLCompiler(Compiled):
assert not extra_params_right
start = expand_pos_lower_index + 1
end = num_ins_params * (len(batch)) + start
end = num_ins_params * (current_batch_size) + start
# need to format here, since statement may contain
# unescaped %, while values_string contains just (%s, %s)
@@ -5638,7 +5727,12 @@ class SQLCompiler(Compiled):
replaced_parameters,
processed_setinputsizes,
batch,
batch_size,
(
[_sentinel_from_params(cb) for cb in compiled_batch]
if _sentinel_from_params
else []
),
current_batch_size,
batchnum,
total_batches,
sort_by_parameter_order,
@@ -6020,6 +6114,10 @@ class SQLCompiler(Compiled):
"""Provide a hook for MySQL to add LIMIT to the UPDATE"""
return None
def delete_limit_clause(self, delete_stmt):
"""Provide a hook for MySQL to add LIMIT to the DELETE"""
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
"""Provide a hook to override the initial table clause
in an UPDATE statement.
@@ -6312,6 +6410,10 @@ class SQLCompiler(Compiled):
if t:
text += " WHERE " + t
limit_clause = self.delete_limit_clause(delete_stmt)
if limit_clause:
text += " " + limit_clause
if (
self.implicit_returning or delete_stmt._returning
) and not self.returning_precedes_values:
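The new ``delete_limit_clause()`` hook mirrors the existing UPDATE hook: whatever non-empty string it returns is appended to the rendered DELETE, as the second hunk shows. A hedged sketch of a compiler subclass overriding it (the class name is invented; the real MySQL implementation derives the limit from its own dialect options rather than hard-coding one)::

    from sqlalchemy.sql.compiler import SQLCompiler

    class LimitingDeleteCompiler(SQLCompiler):
        """Illustration only: append LIMIT 10 to every compiled DELETE."""

        def delete_limit_clause(self, delete_stmt):
            # a truthy return value is appended to the statement text, after
            # the WHERE clause when one is present; returning None leaves the
            # DELETE untouched
            return "LIMIT 10"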
@@ -6396,8 +6498,10 @@ class StrSQLCompiler(SQLCompiler):
def visit_json_path_getitem_op_binary(self, binary, operator, **kw):
return self.visit_getitem_binary(binary, operator, **kw)
def visit_sequence(self, seq, **kw):
return "<next sequence value: %s>" % self.preparer.format_sequence(seq)
def visit_sequence(self, sequence, **kw):
return (
f"<next sequence value: {self.preparer.format_sequence(sequence)}>"
)
def returning_clause(
self,
@@ -6431,7 +6535,7 @@ class StrSQLCompiler(SQLCompiler):
for t in extra_froms
)
def visit_empty_set_expr(self, type_, **kw):
def visit_empty_set_expr(self, element_types, **kw):
return "SELECT 1 WHERE 1!=1"
def get_from_hint_text(self, table, text):
@@ -7202,7 +7306,7 @@ class StrSQLTypeCompiler(GenericTypeCompiler):
class _SchemaForObjectCallable(Protocol):
def __call__(self, obj: Any) -> str: ...
def __call__(self, __obj: Any) -> str: ...
class _BindNameForColProtocol(Protocol):
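Renaming the protocol parameter to ``__obj`` makes it positional-only for type checkers, so implementations may use whatever parameter name they like. A standalone illustration of that typing rule (not SQLAlchemy code; names are invented)::

    from typing import Any, Protocol

    class SchemaForObjectCallable(Protocol):
        def __call__(self, __obj: Any) -> str: ...

    def schema_for(obj: Any) -> str:
        # the parameter name differs from __obj, which is acceptable because
        # a dunder-prefixed protocol parameter is treated as positional-only
        return "main"

    fn: SchemaForObjectCallable = schema_for
    print(fn(object()))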


@@ -1,5 +1,5 @@
# sql/crud.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -1286,7 +1286,7 @@ class _multiparam_column(elements.ColumnElement[Any]):
def compare(self, other, **kw):
raise NotImplementedError()
def _copy_internals(self, other, **kw):
def _copy_internals(self, **kw):
raise NotImplementedError()
def __eq__(self, other):


@@ -1,5 +1,5 @@
# sql/ddl.py
# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -155,8 +155,8 @@ class ExecutableDDLElement(roles.DDLRole, Executable, BaseDDLElement):
event.listen(
users,
'after_create',
AddConstraint(constraint).execute_if(dialect='postgresql')
"after_create",
AddConstraint(constraint).execute_if(dialect="postgresql"),
)
.. seealso::
@@ -231,20 +231,20 @@ class ExecutableDDLElement(roles.DDLRole, Executable, BaseDDLElement):
Used to provide a wrapper for event listening::
event.listen(
metadata,
'before_create',
DDL("my_ddl").execute_if(dialect='postgresql')
)
metadata,
"before_create",
DDL("my_ddl").execute_if(dialect="postgresql"),
)
:param dialect: May be a string or tuple of strings.
If a string, it will be compared to the name of the
executing database dialect::
DDL('something').execute_if(dialect='postgresql')
DDL("something").execute_if(dialect="postgresql")
If a tuple, specifies multiple dialect names::
DDL('something').execute_if(dialect=('postgresql', 'mysql'))
DDL("something").execute_if(dialect=("postgresql", "mysql"))
:param callable\_: A callable, which will be invoked with
three positional arguments as well as optional keyword
@@ -342,17 +342,19 @@ class DDL(ExecutableDDLElement):
from sqlalchemy import event, DDL
tbl = Table('users', metadata, Column('uid', Integer))
event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger'))
tbl = Table("users", metadata, Column("uid", Integer))
event.listen(tbl, "before_create", DDL("DROP TRIGGER users_trigger"))
spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE')
event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb'))
spow = DDL("ALTER TABLE %(table)s SET secretpowers TRUE")
event.listen(tbl, "after_create", spow.execute_if(dialect="somedb"))
drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE')
drop_spow = DDL("ALTER TABLE users SET secretpowers FALSE")
connection.execute(drop_spow)
When operating on Table events, the following ``statement``
string substitutions are available::
string substitutions are available:
.. sourcecode:: text
%(table)s - the Table name, with any required quoting applied
%(schema)s - the schema name, with any required quoting applied
@@ -470,8 +472,8 @@ class CreateSchema(_CreateBase):
def __init__(
self,
name,
if_not_exists=False,
name: str,
if_not_exists: bool = False,
):
"""Create a new :class:`.CreateSchema` construct."""
@@ -491,9 +493,9 @@ class DropSchema(_DropBase):
def __init__(
self,
name,
cascade=False,
if_exists=False,
name: str,
cascade: bool = False,
if_exists: bool = False,
):
"""Create a new :class:`.DropSchema` construct."""
@@ -568,6 +570,7 @@ class CreateColumn(BaseDDLElement):
from sqlalchemy import schema
from sqlalchemy.ext.compiler import compiles
@compiles(schema.CreateColumn)
def compile(element, compiler, **kw):
column = element.element
@@ -576,9 +579,9 @@ class CreateColumn(BaseDDLElement):
return compiler.visit_create_column(element, **kw)
text = "%s SPECIAL DIRECTIVE %s" % (
column.name,
compiler.type_compiler.process(column.type)
)
column.name,
compiler.type_compiler.process(column.type),
)
default = compiler.get_column_default_string(column)
if default is not None:
text += " DEFAULT " + default
@@ -588,8 +591,8 @@ class CreateColumn(BaseDDLElement):
if column.constraints:
text += " ".join(
compiler.process(const)
for const in column.constraints)
compiler.process(const) for const in column.constraints
)
return text
The above construct can be applied to a :class:`_schema.Table`
@@ -600,17 +603,21 @@ class CreateColumn(BaseDDLElement):
metadata = MetaData()
table = Table('mytable', MetaData(),
Column('x', Integer, info={"special":True}, primary_key=True),
Column('y', String(50)),
Column('z', String(20), info={"special":True})
)
table = Table(
"mytable",
MetaData(),
Column("x", Integer, info={"special": True}, primary_key=True),
Column("y", String(50)),
Column("z", String(20), info={"special": True}),
)
metadata.create_all(conn)
Above, the directives we've added to the :attr:`_schema.Column.info`
collection
will be detected by our custom compilation scheme::
will be detected by our custom compilation scheme:
.. sourcecode:: sql
CREATE TABLE mytable (
x SPECIAL DIRECTIVE INTEGER NOT NULL,
@@ -635,18 +642,21 @@ class CreateColumn(BaseDDLElement):
from sqlalchemy.schema import CreateColumn
@compiles(CreateColumn, "postgresql")
def skip_xmin(element, compiler, **kw):
if element.element.name == 'xmin':
if element.element.name == "xmin":
return None
else:
return compiler.visit_create_column(element, **kw)
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('xmin', Integer)
)
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("xmin", Integer),
)
Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE``
which only includes the ``id`` column in the string; the ``xmin`` column


@@ -1,5 +1,5 @@
# sql/default_comparator.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/dml.py
# Copyright (C) 2009-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2009-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -23,6 +23,7 @@ from typing import NoReturn
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
@@ -42,6 +43,7 @@ from .base import _from_objects
from .base import _generative
from .base import _select_iterables
from .base import ColumnCollection
from .base import ColumnSet
from .base import CompileState
from .base import DialectKWArgs
from .base import Executable
@@ -418,10 +420,16 @@ class UpdateBase(
is_dml = True
def _generate_fromclause_column_proxies(
self, fromclause: FromClause
self,
fromclause: FromClause,
columns: ColumnCollection[str, KeyedColumnElement[Any]],
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
) -> None:
fromclause._columns._populate_separate_keys(
col._make_proxy(fromclause)
columns._populate_separate_keys(
col._make_proxy(
fromclause, primary_key=primary_key, foreign_keys=foreign_keys
)
for col in self._all_selected_columns
if is_column_element(col)
)
@@ -525,11 +533,11 @@ class UpdateBase(
E.g.::
stmt = table.insert().values(data='newdata').return_defaults()
stmt = table.insert().values(data="newdata").return_defaults()
result = connection.execute(stmt)
server_created_at = result.returned_defaults['created_at']
server_created_at = result.returned_defaults["created_at"]
When used against an UPDATE statement
:meth:`.UpdateBase.return_defaults` instead looks for columns that
@@ -1032,7 +1040,7 @@ class ValuesBase(UpdateBase):
users.insert().values(name="some name")
users.update().where(users.c.id==5).values(name="some name")
users.update().where(users.c.id == 5).values(name="some name")
:param \*args: As an alternative to passing key/value parameters,
a dictionary, tuple, or list of dictionaries or tuples can be passed
@@ -1062,13 +1070,17 @@ class ValuesBase(UpdateBase):
this syntax is supported on backends such as SQLite, PostgreSQL,
MySQL, but not necessarily others::
users.insert().values([
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
])
users.insert().values(
[
{"name": "some name"},
{"name": "some other name"},
{"name": "yet another name"},
]
)
The above form would render a multiple VALUES statement similar to::
The above form would render a multiple VALUES statement similar to:
.. sourcecode:: sql
INSERT INTO users (name) VALUES
(:name_1),
@@ -1246,7 +1258,7 @@ class Insert(ValuesBase):
e.g.::
sel = select(table1.c.a, table1.c.b).where(table1.c.c > 5)
ins = table2.insert().from_select(['a', 'b'], sel)
ins = table2.insert().from_select(["a", "b"], sel)
:param names: a sequence of string column names or
:class:`_schema.Column`
@@ -1535,9 +1547,7 @@ class Update(DMLWhereBase, ValuesBase):
E.g.::
stmt = table.update().ordered_values(
("name", "ed"), ("ident", "foo")
)
stmt = table.update().ordered_values(("name", "ed"), ("ident", "foo"))
.. seealso::
@@ -1550,7 +1560,7 @@ class Update(DMLWhereBase, ValuesBase):
:paramref:`_expression.update.preserve_parameter_order`
parameter, which will be removed in SQLAlchemy 2.0.
"""
""" # noqa: E501
if self._values:
raise exc.ArgumentError(
"This statement already has values present"


@@ -1,5 +1,5 @@
# sql/elements.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -14,7 +14,7 @@
from __future__ import annotations
from decimal import Decimal
from enum import IntEnum
from enum import Enum
import itertools
import operator
import re
@@ -77,15 +77,18 @@ from .. import util
from ..util import HasMemoized_ro_memoized_attribute
from ..util import TypingOnly
from ..util.typing import Literal
from ..util.typing import ParamSpec
from ..util.typing import Self
if typing.TYPE_CHECKING:
from ._typing import _ByArgument
from ._typing import _ColumnExpressionArgument
from ._typing import _ColumnExpressionOrStrLabelArgument
from ._typing import _HasDialect
from ._typing import _InfoType
from ._typing import _PropagateAttrsType
from ._typing import _TypeEngineArgument
from .base import ColumnSet
from .cache_key import _CacheKeyTraversalType
from .cache_key import CacheKey
from .compiler import Compiled
@@ -104,9 +107,9 @@ if typing.TYPE_CHECKING:
from .type_api import TypeEngine
from .visitors import _CloneCallableType
from .visitors import _TraverseInternalsType
from .visitors import anon_map
from ..engine import Connection
from ..engine import Dialect
from ..engine import Engine
from ..engine.interfaces import _CoreMultiExecuteParams
from ..engine.interfaces import CacheStats
from ..engine.interfaces import CompiledCacheType
@@ -243,7 +246,7 @@ class CompilerElement(Visitable):
@util.preload_module("sqlalchemy.engine.url")
def compile(
self,
bind: Optional[Union[Engine, Connection]] = None,
bind: Optional[_HasDialect] = None,
dialect: Optional[Dialect] = None,
**kw: Any,
) -> Compiled:
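The ``bind`` parameter of ``compile()`` is now annotated as ``_HasDialect``, i.e. anything exposing a ``.dialect`` attribute such as an ``Engine`` or ``Connection``; the change is typing-only. A small sketch of the existing public usage::

    from sqlalchemy import column, select
    from sqlalchemy.dialects import postgresql

    stmt = select(column("x")).where(column("x") == 5)
    # pass a dialect directly, or bind=some_engine / bind=some_connection;
    # either way the compiler is chosen from the resulting .dialect
    print(stmt.compile(dialect=postgresql.dialect()))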
@@ -279,7 +282,7 @@ class CompilerElement(Visitable):
from sqlalchemy.sql import table, column, select
t = table('t', column('x'))
t = table("t", column("x"))
s = select(t).where(t.c.x == 5)
@@ -582,10 +585,10 @@ class ClauseElement(
:func:`_expression.bindparam`
elements replaced with values taken from the given dictionary::
>>> clause = column('x') + bindparam('foo')
>>> clause = column("x") + bindparam("foo")
>>> print(clause.compile().params)
{'foo':None}
>>> print(clause.params({'foo':7}).compile().params)
>>> print(clause.params({"foo": 7}).compile().params)
{'foo':7}
"""
@@ -775,7 +778,7 @@ class DQLDMLClauseElement(ClauseElement):
def compile( # noqa: A001
self,
bind: Optional[Union[Engine, Connection]] = None,
bind: Optional[_HasDialect] = None,
dialect: Optional[Dialect] = None,
**kw: Any,
) -> SQLCompiler: ...
@@ -1064,6 +1067,9 @@ class SQLCoreOperations(Generic[_T_co], ColumnOperators, TypingOnly):
other: Any,
) -> ColumnElement[str]: ...
@overload
def __add__(self, other: Any) -> ColumnElement[Any]: ...
def __add__(self, other: Any) -> ColumnElement[Any]: ...
@overload
@@ -1281,9 +1287,9 @@ class ColumnElement(
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
>>> column("a") + column("b")
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
>>> print(column("a") + column("b"))
{printsql}a + b
.. seealso::
@@ -1372,7 +1378,9 @@ class ColumnElement(
SQL.
Concretely, this is the "name" of a column or a label in a
SELECT statement; ``<columnname>`` and ``<labelname>`` below::
SELECT statement; ``<columnname>`` and ``<labelname>`` below:
.. sourcecode:: sql
SELECT <columnname> FROM table
@@ -1425,13 +1433,11 @@ class ColumnElement(
_alt_names: Sequence[str] = ()
@overload
def self_group(
self: ColumnElement[_T], against: Optional[OperatorType] = None
) -> ColumnElement[_T]: ...
def self_group(self, against: None = None) -> ColumnElement[_T]: ...
@overload
def self_group(
self: ColumnElement[Any], against: Optional[OperatorType] = None
self, against: Optional[OperatorType] = None
) -> ColumnElement[Any]: ...
def self_group(
@@ -1634,6 +1640,8 @@ class ColumnElement(
self,
selectable: FromClause,
*,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
@@ -1910,8 +1918,9 @@ class BindParameter(roles.InElementRole, KeyedColumnElement[_T]):
from sqlalchemy import bindparam
stmt = select(users_table).\
where(users_table.c.name == bindparam('username'))
stmt = select(users_table).where(
users_table.c.name == bindparam("username")
)
Detailed discussion of how :class:`.BindParameter` is used is
at :func:`.bindparam`.
@@ -2228,7 +2237,6 @@ class TextClause(
t = text("SELECT * FROM users")
result = connection.execute(t)
The :class:`_expression.TextClause` construct is produced using the
:func:`_expression.text`
function; see that function for full documentation.
@@ -2305,16 +2313,19 @@ class TextClause(
Given a text construct such as::
from sqlalchemy import text
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = text(
"SELECT id, name FROM user WHERE name=:name AND timestamp=:timestamp"
)
the :meth:`_expression.TextClause.bindparams`
method can be used to establish
the initial value of ``:name`` and ``:timestamp``,
using simple keyword arguments::
stmt = stmt.bindparams(name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
stmt = stmt.bindparams(
name="jack", timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
Where above, new :class:`.BindParameter` objects
will be generated with the names ``name`` and ``timestamp``, and
@@ -2329,10 +2340,11 @@ class TextClause(
argument, then an optional value and type::
from sqlalchemy import bindparam
stmt = stmt.bindparams(
bindparam('name', value='jack', type_=String),
bindparam('timestamp', type_=DateTime)
)
bindparam("name", value="jack", type_=String),
bindparam("timestamp", type_=DateTime),
)
Above, we specified the type of :class:`.DateTime` for the
``timestamp`` bind, and the type of :class:`.String` for the ``name``
@@ -2342,8 +2354,9 @@ class TextClause(
Additional bound parameters can be supplied at statement execution
time, e.g.::
result = connection.execute(stmt,
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5))
result = connection.execute(
stmt, timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
The :meth:`_expression.TextClause.bindparams`
method can be called repeatedly,
@@ -2353,15 +2366,15 @@ class TextClause(
first with typing information, and a
second time with value information, and it will be combined::
stmt = text("SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp")
stmt = stmt.bindparams(
bindparam('name', type_=String),
bindparam('timestamp', type_=DateTime)
stmt = text(
"SELECT id, name FROM user WHERE name=:name "
"AND timestamp=:timestamp"
)
stmt = stmt.bindparams(
name='jack',
timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
bindparam("name", type_=String), bindparam("timestamp", type_=DateTime)
)
stmt = stmt.bindparams(
name="jack", timestamp=datetime.datetime(2012, 10, 8, 15, 12, 5)
)
The :meth:`_expression.TextClause.bindparams`
@@ -2375,18 +2388,17 @@ class TextClause(
object::
stmt1 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name1', unique=True)
bindparam("name", value="name1", unique=True)
)
stmt2 = text("select id from table where name=:name").bindparams(
bindparam("name", value='name2', unique=True)
bindparam("name", value="name2", unique=True)
)
union = union_all(
stmt1.columns(column("id")),
stmt2.columns(column("id"))
)
union = union_all(stmt1.columns(column("id")), stmt2.columns(column("id")))
The above statement will render as::
The above statement will render as:
.. sourcecode:: sql
select id from table where name=:name_1
UNION ALL select id from table where name=:name_2
@@ -2396,7 +2408,7 @@ class TextClause(
:func:`_expression.text`
constructs.
"""
""" # noqa: E501
self._bindparams = new_params = self._bindparams.copy()
for bind in binds:
@@ -2427,7 +2439,9 @@ class TextClause(
@util.preload_module("sqlalchemy.sql.selectable")
def columns(
self, *cols: _ColumnExpressionArgument[Any], **types: TypeEngine[Any]
self,
*cols: _ColumnExpressionArgument[Any],
**types: _TypeEngineArgument[Any],
) -> TextualSelect:
r"""Turn this :class:`_expression.TextClause` object into a
:class:`_expression.TextualSelect`
@@ -2448,12 +2462,13 @@ class TextClause(
from sqlalchemy.sql import column, text
stmt = text("SELECT id, name FROM some_table")
stmt = stmt.columns(column('id'), column('name')).subquery('st')
stmt = stmt.columns(column("id"), column("name")).subquery("st")
stmt = select(mytable).\
select_from(
mytable.join(stmt, mytable.c.name == stmt.c.name)
).where(stmt.c.id > 5)
stmt = (
select(mytable)
.select_from(mytable.join(stmt, mytable.c.name == stmt.c.name))
.where(stmt.c.id > 5)
)
Above, we pass a series of :func:`_expression.column` elements to the
:meth:`_expression.TextClause.columns` method positionally. These
@@ -2474,10 +2489,10 @@ class TextClause(
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
column('id', Integer),
column('name', Unicode),
column('timestamp', DateTime)
)
column("id", Integer),
column("name", Unicode),
column("timestamp", DateTime),
)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
@@ -2486,11 +2501,7 @@ class TextClause(
types alone may be used, if only type conversion is needed::
stmt = text("SELECT id, name, timestamp FROM some_table")
stmt = stmt.columns(
id=Integer,
name=Unicode,
timestamp=DateTime
)
stmt = stmt.columns(id=Integer, name=Unicode, timestamp=DateTime)
for id, name, timestamp in connection.execute(stmt):
print(id, name, timestamp)
@@ -2504,26 +2515,31 @@ class TextClause(
the result set will match to those columns positionally, meaning the
name or origin of the column in the textual SQL doesn't matter::
stmt = text("SELECT users.id, addresses.id, users.id, "
"users.name, addresses.email_address AS email "
"FROM users JOIN addresses ON users.id=addresses.user_id "
"WHERE users.id = 1").columns(
User.id,
Address.id,
Address.user_id,
User.name,
Address.email_address
)
stmt = text(
"SELECT users.id, addresses.id, users.id, "
"users.name, addresses.email_address AS email "
"FROM users JOIN addresses ON users.id=addresses.user_id "
"WHERE users.id = 1"
).columns(
User.id,
Address.id,
Address.user_id,
User.name,
Address.email_address,
)
query = session.query(User).from_statement(stmt).options(
contains_eager(User.addresses))
query = (
session.query(User)
.from_statement(stmt)
.options(contains_eager(User.addresses))
)
The :meth:`_expression.TextClause.columns` method provides a direct
route to calling :meth:`_expression.FromClause.subquery` as well as
:meth:`_expression.SelectBase.cte`
against a textual SELECT statement::
stmt = stmt.columns(id=Integer, name=String).cte('st')
stmt = stmt.columns(id=Integer, name=String).cte("st")
stmt = select(sometable).where(sometable.c.id == stmt.c.id)
@@ -2576,7 +2592,9 @@ class TextClause(
# be using this method.
return self.type.comparator_factory(self) # type: ignore
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[Any]]:
if against is operators.in_op:
return Grouping(self)
else:
@@ -2781,7 +2799,9 @@ class ClauseList(
def _from_objects(self) -> List[FromClause]:
return list(itertools.chain(*[c._from_objects for c in self.clauses]))
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[Any]]:
if self.group and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
@@ -2804,7 +2824,9 @@ class OperatorExpression(ColumnElement[_T]):
def is_comparison(self):
return operators.is_comparison(self.operator)
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[_T]]:
if (
self.group
and operators.is_precedent(self.operator, against)
@@ -3164,7 +3186,9 @@ class BooleanClauseList(ExpressionClauseList[bool]):
def _select_iterable(self) -> _SelectIterable:
return (self,)
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[bool]]:
if not self.clauses:
return self
else:
@@ -3247,7 +3271,7 @@ class Tuple(ClauseList, ColumnElement[typing_Tuple[Any, ...]]):
]
)
def self_group(self, against=None):
def self_group(self, against: Optional[OperatorType] = None) -> Self:
# Tuple is parenthesized by definition.
return self
@@ -3260,14 +3284,13 @@ class Case(ColumnElement[_T]):
from sqlalchemy import case
stmt = select(users_table).\
where(
case(
(users_table.c.name == 'wendy', 'W'),
(users_table.c.name == 'jack', 'J'),
else_='E'
)
)
stmt = select(users_table).where(
case(
(users_table.c.name == "wendy", "W"),
(users_table.c.name == "jack", "J"),
else_="E",
)
)
Details on :class:`.Case` usage is at :func:`.case`.
@@ -3480,7 +3503,9 @@ class TypeCoerce(WrapsColumnExpression[_T]):
def wrapped_column_expression(self):
return self.clause
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> TypeCoerce[_T]:
grouped = self.clause.self_group(against=against)
if grouped is not self.clause:
return TypeCoerce(grouped, self.type)
@@ -3675,7 +3700,7 @@ class UnaryExpression(ColumnElement[_T]):
@property
def _order_by_label_element(self) -> Optional[Label[Any]]:
if self.modifier in (operators.desc_op, operators.asc_op):
if operators.is_order_by_modifier(self.modifier):
return self.element._order_by_label_element
else:
return None
@@ -3695,7 +3720,9 @@ class UnaryExpression(ColumnElement[_T]):
else:
return ClauseElement._negate(self)
def self_group(self, against=None):
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[_T]]:
if self.operator and operators.is_precedent(self.operator, against):
return Grouping(self)
else:
@@ -3782,7 +3809,7 @@ class AsBoolean(WrapsColumnExpression[bool], UnaryExpression[bool]):
def wrapped_column_expression(self):
return self.element
def self_group(self, against=None):
def self_group(self, against: Optional[OperatorType] = None) -> Self:
return self
def _negate(self):
@@ -3801,9 +3828,9 @@ class BinaryExpression(OperatorExpression[_T]):
.. sourcecode:: pycon+sql
>>> from sqlalchemy.sql import column
>>> column('a') + column('b')
>>> column("a") + column("b")
<sqlalchemy.sql.expression.BinaryExpression object at 0x101029dd0>
>>> print(column('a') + column('b'))
>>> print(column("a") + column("b"))
{printsql}a + b
"""
@@ -3892,7 +3919,7 @@ class BinaryExpression(OperatorExpression[_T]):
The rationale here is so that ColumnElement objects can be hashable.
What? Well, suppose you do this::
c1, c2 = column('x'), column('y')
c1, c2 = column("x"), column("y")
s1 = set([c1, c2])
We do that **a lot**, columns inside of sets is an extremely basic
@@ -3982,7 +4009,7 @@ class Slice(ColumnElement[Any]):
)
self.type = type_api.NULLTYPE
def self_group(self, against=None):
def self_group(self, against: Optional[OperatorType] = None) -> Self:
assert against is operator.getitem
return self
@@ -4001,7 +4028,7 @@ class GroupedElement(DQLDMLClauseElement):
element: ClauseElement
def self_group(self, against=None):
def self_group(self, against: Optional[OperatorType] = None) -> Self:
return self
def _ungroup(self):
@@ -4065,8 +4092,65 @@ class Grouping(GroupedElement, ColumnElement[_T]):
self.element = state["element"]
self.type = state["type"]
if TYPE_CHECKING:
class _OverRange(IntEnum):
def self_group(
self, against: Optional[OperatorType] = None
) -> Self: ...
class _OverrideBinds(Grouping[_T]):
"""used by cache_key->_apply_params_to_element to allow compilation /
execution of a SQL element that's been cached, using an alternate set of
bound parameter values.
This is used by the ORM to swap new parameter values into expressions
that are embedded into loader options like with_expression(),
selectinload(). Previously, this task was accomplished using the
.params() method which would perform a deep-copy instead. This deep
copy proved to be too expensive for more complex expressions.
See #11085
"""
__visit_name__ = "override_binds"
def __init__(
self,
element: ColumnElement[_T],
bindparams: Sequence[BindParameter[Any]],
replaces_params: Sequence[BindParameter[Any]],
):
self.element = element
self.translate = {
k.key: v.value for k, v in zip(replaces_params, bindparams)
}
def _gen_cache_key(
self, anon_map: anon_map, bindparams: List[BindParameter[Any]]
) -> Optional[typing_Tuple[Any, ...]]:
"""generate a cache key for the given element, substituting its bind
values for the translation values present."""
existing_bps: List[BindParameter[Any]] = []
ck = self.element._gen_cache_key(anon_map, existing_bps)
bindparams.extend(
(
bp._with_value(
self.translate[bp.key], maintain_key=True, required=False
)
if bp.key in self.translate
else bp
)
for bp in existing_bps
)
return ck
class _OverRange(Enum):
RANGE_UNBOUNDED = 0
RANGE_CURRENT = 1
@@ -4074,6 +4158,8 @@ class _OverRange(IntEnum):
RANGE_UNBOUNDED = _OverRange.RANGE_UNBOUNDED
RANGE_CURRENT = _OverRange.RANGE_CURRENT
_IntOrRange = Union[int, _OverRange]
class Over(ColumnElement[_T]):
"""Represent an OVER clause.
@@ -4102,7 +4188,8 @@ class Over(ColumnElement[_T]):
"""The underlying expression object to which this :class:`.Over`
object refers."""
range_: Optional[typing_Tuple[int, int]]
range_: Optional[typing_Tuple[_IntOrRange, _IntOrRange]]
rows: Optional[typing_Tuple[_IntOrRange, _IntOrRange]]
def __init__(
self,
@@ -4147,19 +4234,24 @@ class Over(ColumnElement[_T]):
)
def _interpret_range(
self, range_: typing_Tuple[Optional[int], Optional[int]]
) -> typing_Tuple[int, int]:
self,
range_: typing_Tuple[Optional[_IntOrRange], Optional[_IntOrRange]],
) -> typing_Tuple[_IntOrRange, _IntOrRange]:
if not isinstance(range_, tuple) or len(range_) != 2:
raise exc.ArgumentError("2-tuple expected for range/rows")
lower: int
upper: int
r0, r1 = range_
if range_[0] is None:
lower: _IntOrRange
upper: _IntOrRange
if r0 is None:
lower = RANGE_UNBOUNDED
elif isinstance(r0, _OverRange):
lower = r0
else:
try:
lower = int(range_[0])
lower = int(r0)
except ValueError as err:
raise exc.ArgumentError(
"Integer or None expected for range value"
@@ -4168,11 +4260,13 @@ class Over(ColumnElement[_T]):
if lower == 0:
lower = RANGE_CURRENT
if range_[1] is None:
if r1 is None:
upper = RANGE_UNBOUNDED
elif isinstance(r1, _OverRange):
upper = r1
else:
try:
upper = int(range_[1])
upper = int(r1)
except ValueError as err:
raise exc.ArgumentError(
"Integer or None expected for range value"
@@ -4211,7 +4305,7 @@ class WithinGroup(ColumnElement[_T]):
``rank()``, ``dense_rank()``, etc.
It's supported only by certain database backends, such as PostgreSQL,
Oracle and MS SQL Server.
Oracle Database and MS SQL Server.
The :class:`.WithinGroup` construct extracts its type from the
method :meth:`.FunctionElement.within_group_type`. If this returns
@@ -4230,7 +4324,7 @@ class WithinGroup(ColumnElement[_T]):
def __init__(
self,
element: FunctionElement[_T],
element: Union[FunctionElement[_T], FunctionFilter[_T]],
*order_by: _ColumnExpressionArgument[Any],
):
self.element = element
@@ -4244,7 +4338,14 @@ class WithinGroup(ColumnElement[_T]):
tuple(self.order_by) if self.order_by is not None else ()
)
def over(self, partition_by=None, order_by=None, range_=None, rows=None):
def over(
self,
*,
partition_by: Optional[_ByArgument] = None,
order_by: Optional[_ByArgument] = None,
rows: Optional[typing_Tuple[Optional[int], Optional[int]]] = None,
range_: Optional[typing_Tuple[Optional[int], Optional[int]]] = None,
) -> Over[_T]:
"""Produce an OVER clause against this :class:`.WithinGroup`
construct.
@@ -4260,6 +4361,24 @@ class WithinGroup(ColumnElement[_T]):
rows=rows,
)
@overload
def filter(self) -> Self: ...
@overload
def filter(
self,
__criterion0: _ColumnExpressionArgument[bool],
*criterion: _ColumnExpressionArgument[bool],
) -> FunctionFilter[_T]: ...
def filter(
self, *criterion: _ColumnExpressionArgument[bool]
) -> Union[Self, FunctionFilter[_T]]:
"""Produce a FILTER clause against this function."""
if not criterion:
return self
return FunctionFilter(self, *criterion)
if not TYPE_CHECKING:
@util.memoized_property
@@ -4283,7 +4402,7 @@ class WithinGroup(ColumnElement[_T]):
)
class FunctionFilter(ColumnElement[_T]):
class FunctionFilter(Generative, ColumnElement[_T]):
"""Represent a function FILTER clause.
This is a special operator against aggregate and window functions,
@@ -4312,12 +4431,13 @@ class FunctionFilter(ColumnElement[_T]):
def __init__(
self,
func: FunctionElement[_T],
func: Union[FunctionElement[_T], WithinGroup[_T]],
*criterion: _ColumnExpressionArgument[bool],
):
self.func = func
self.filter(*criterion)
self.filter.non_generative(self, *criterion) # type: ignore
@_generative
def filter(self, *criterion: _ColumnExpressionArgument[bool]) -> Self:
"""Produce an additional FILTER against the function.
@@ -4364,12 +4484,13 @@ class FunctionFilter(ColumnElement[_T]):
The expression::
func.rank().filter(MyClass.y > 5).over(order_by='x')
func.rank().filter(MyClass.y > 5).over(order_by="x")
is shorthand for::
from sqlalchemy import over, funcfilter
over(funcfilter(func.rank(), MyClass.y > 5), order_by='x')
over(funcfilter(func.rank(), MyClass.y > 5), order_by="x")
See :func:`_expression.over` for a full description.
@@ -4382,6 +4503,19 @@ class FunctionFilter(ColumnElement[_T]):
rows=rows,
)
def within_group(
self, *order_by: _ColumnExpressionArgument[Any]
) -> WithinGroup[_T]:
"""Produce a WITHIN GROUP (ORDER BY expr) clause against
this function.
"""
return WithinGroup(self, *order_by)
def within_group_type(
self, within_group: WithinGroup[_T]
) -> Optional[TypeEngine[_T]]:
return None
def self_group(
self, against: Optional[OperatorType] = None
) -> Union[Self, Grouping[_T]]:
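Together, ``WithinGroup.filter()`` and ``FunctionFilter.within_group()`` allow FILTER and WITHIN GROUP to be combined in either order. A hedged sketch assuming a SQLAlchemy build that includes these hunks (acceptance of the combination is backend-specific; PostgreSQL supports it)::

    from sqlalchemy import column, func, select

    median_eu_income = (
        func.percentile_cont(0.5)
        .within_group(column("income"))
        .filter(column("region") == "EU")
    )
    # expected to render roughly:
    #   percentile_cont(:percentile_cont_1)
    #   WITHIN GROUP (ORDER BY income) FILTER (WHERE region = :region_1)
    print(select(median_eu_income))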
@@ -4425,7 +4559,7 @@ class NamedColumn(KeyedColumnElement[_T]):
return self.name
@HasMemoized.memoized_attribute
def _tq_key_label(self):
def _tq_key_label(self) -> Optional[str]:
"""table qualified label based on column key.
for table-bound columns this is <tablename>_<column key/proxy key>;
@@ -4483,6 +4617,8 @@ class NamedColumn(KeyedColumnElement[_T]):
self,
selectable: FromClause,
*,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
@@ -4514,6 +4650,9 @@ class NamedColumn(KeyedColumnElement[_T]):
return c.key, c
_PS = ParamSpec("_PS")
class Label(roles.LabeledColumnExprRole[_T], NamedColumn[_T]):
"""Represents a column label (AS).
@@ -4611,13 +4750,18 @@ class Label(roles.LabeledColumnExprRole[_T], NamedColumn[_T]):
def element(self) -> ColumnElement[_T]:
return self._element.self_group(against=operators.as_)
def self_group(self, against=None):
def self_group(self, against: Optional[OperatorType] = None) -> Label[_T]:
return self._apply_to_inner(self._element.self_group, against=against)
def _negate(self):
return self._apply_to_inner(self._element._negate)
def _apply_to_inner(self, fn, *arg, **kw):
def _apply_to_inner(
self,
fn: Callable[_PS, ColumnElement[_T]],
*arg: _PS.args,
**kw: _PS.kwargs,
) -> Label[_T]:
sub_element = fn(*arg, **kw)
if sub_element is not self._element:
return Label(self.name, sub_element, type_=self.type)
@@ -4655,6 +4799,8 @@ class Label(roles.LabeledColumnExprRole[_T], NamedColumn[_T]):
self,
selectable: FromClause,
*,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
compound_select_cols: Optional[Sequence[ColumnElement[Any]]] = None,
**kw: Any,
@@ -4667,6 +4813,8 @@ class Label(roles.LabeledColumnExprRole[_T], NamedColumn[_T]):
disallow_is_literal=True,
name_is_truncatable=isinstance(name, _truncated_label),
compound_select_cols=compound_select_cols,
primary_key=primary_key,
foreign_keys=foreign_keys,
)
# there was a note here to remove this assertion, which was here
@@ -4710,7 +4858,9 @@ class ColumnClause(
id, name = column("id"), column("name")
stmt = select(id, name).select_from("user")
The above statement would produce SQL like::
The above statement would produce SQL like:
.. sourcecode:: sql
SELECT id, name FROM user
@@ -4899,6 +5049,8 @@ class ColumnClause(
self,
selectable: FromClause,
*,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
@@ -4978,15 +5130,25 @@ class CollationClause(ColumnElement[str]):
]
@classmethod
@util.preload_module("sqlalchemy.sql.sqltypes")
def _create_collation_expression(
cls, expression: _ColumnExpressionArgument[str], collation: str
) -> BinaryExpression[str]:
sqltypes = util.preloaded.sql_sqltypes
expr = coercions.expect(roles.ExpressionElementRole[str], expression)
if expr.type._type_affinity is sqltypes.String:
collate_type = expr.type._with_collation(collation)
else:
collate_type = expr.type
return BinaryExpression(
expr,
CollationClause(collation),
operators.collate,
type_=expr.type,
type_=collate_type,
)
def __init__(self, collation):
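With ``_create_collation_expression()`` above, a ``collate()`` expression against a string column is expected to carry a collated copy of the column's type rather than the plain type. A sketch using the public ``collate()`` function (table and collation names are arbitrary)::

    from sqlalchemy import Column, MetaData, String, Table, collate, select

    t = Table("names", MetaData(), Column("name", String(50)))
    expr = collate(t.c.name, "utf8mb4_bin")
    print(select(t).order_by(expr))
    # with this change the expression's String type carries the collation;
    # previously expr.type.collation would remain None
    print(expr.type.collation)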
@@ -5030,7 +5192,7 @@ class quoted_name(util.MemoizedSlots, str):
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle, Firebird, and DB2 "normalize" case-insensitive names
Oracle Database, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
@@ -5051,11 +5213,11 @@ class quoted_name(util.MemoizedSlots, str):
from sqlalchemy import inspect
from sqlalchemy.sql import quoted_name
engine = create_engine("oracle+cx_oracle://some_dsn")
engine = create_engine("oracle+oracledb://some_dsn")
print(inspect(engine).has_table(quoted_name("some_table", True)))
The above logic will run the "has table" logic against the Oracle backend,
passing the name exactly as ``"some_table"`` without converting to
The above logic will run the "has table" logic against the Oracle Database
backend, passing the name exactly as ``"some_table"`` without converting to
upper case.
.. versionchanged:: 1.2 The :class:`.quoted_name` construct is now
@@ -5175,7 +5337,14 @@ class AnnotatedColumnElement(Annotated):
def _with_annotations(self, values):
clone = super()._with_annotations(values)
clone.__dict__.pop("comparator", None)
for attr in (
"comparator",
"_proxy_key",
"_tq_key_label",
"_tq_label",
"_non_anon_label",
):
clone.__dict__.pop(attr, None)
return clone
@util.memoized_property
@@ -5246,11 +5415,12 @@ class conv(_truncated_label):
E.g. when we create a :class:`.Constraint` using a naming convention
as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name='x5'))
m = MetaData(
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
t = Table(
"t", m, Column("x", Integer), CheckConstraint("x > 5", name="x5")
)
The name of the above constraint will be rendered as ``"ck_t_x5"``.
That is, the existing name ``x5`` is used in the naming convention as the
@@ -5263,11 +5433,15 @@ class conv(_truncated_label):
use this explicitly as follows::
m = MetaData(naming_convention={
"ck": "ck_%(table_name)s_%(constraint_name)s"
})
t = Table('t', m, Column('x', Integer),
CheckConstraint('x > 5', name=conv('ck_t_x5')))
m = MetaData(
naming_convention={"ck": "ck_%(table_name)s_%(constraint_name)s"}
)
t = Table(
"t",
m,
Column("x", Integer),
CheckConstraint("x > 5", name=conv("ck_t_x5")),
)
Where above, the :func:`_schema.conv` marker indicates that the constraint
name here is final, and the name will render as ``"ck_t_x5"`` and not


@@ -1,5 +1,5 @@
# sql/events.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -63,13 +63,14 @@ class DDLEvents(event.Events[SchemaEventTarget]):
from sqlalchemy import Table, Column, MetaData, Integer
m = MetaData()
some_table = Table('some_table', m, Column('data', Integer))
some_table = Table("some_table", m, Column("data", Integer))
@event.listens_for(some_table, "after_create")
def after_create(target, connection, **kw):
connection.execute(text(
"ALTER TABLE %s SET name=foo_%s" % (target.name, target.name)
))
connection.execute(
text("ALTER TABLE %s SET name=foo_%s" % (target.name, target.name))
)
some_engine = create_engine("postgresql://scott:tiger@host/test")
@@ -127,10 +128,11 @@ class DDLEvents(event.Events[SchemaEventTarget]):
as listener callables::
from sqlalchemy import DDL
event.listen(
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s")
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
)
**Event Propagation to MetaData Copies**
@@ -149,7 +151,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
some_table,
"after_create",
DDL("ALTER TABLE %(table)s SET name=foo_%(table)s"),
propagate=True
propagate=True,
)
new_metadata = MetaData()
@@ -169,7 +171,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
:ref:`schema_ddl_sequences`
"""
""" # noqa: E501
_target_class_doc = "SomeSchemaClassOrObject"
_dispatch_target = SchemaEventTarget
@@ -358,16 +360,17 @@ class DDLEvents(event.Events[SchemaEventTarget]):
metadata = MetaData()
@event.listens_for(metadata, 'column_reflect')
@event.listens_for(metadata, "column_reflect")
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
# under this MetaData
...
# will use the above event hook
my_table = Table("my_table", metadata, autoload_with=some_engine)
.. versionadded:: 1.4.0b2 The :meth:`_events.DDLEvents.column_reflect`
hook may now be applied to a :class:`_schema.MetaData` object as
well as the :class:`_schema.MetaData` class itself where it will
@@ -379,9 +382,11 @@ class DDLEvents(event.Events[SchemaEventTarget]):
from sqlalchemy import Table
@event.listens_for(Table, 'column_reflect')
@event.listens_for(Table, "column_reflect")
def receive_column_reflect(inspector, table, column_info):
# receives for all Table objects that are reflected
...
It can also be applied to a specific :class:`_schema.Table` at the
point that one is being reflected using the
@@ -390,9 +395,7 @@ class DDLEvents(event.Events[SchemaEventTarget]):
t1 = Table(
"my_table",
autoload_with=some_engine,
listeners=[
('column_reflect', receive_column_reflect)
]
listeners=[("column_reflect", receive_column_reflect)],
)
The dictionary of column information as returned by the


@@ -1,5 +1,5 @@
# sql/expression.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/functions.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -69,6 +69,7 @@ if TYPE_CHECKING:
from ._typing import _ColumnExpressionArgument
from ._typing import _ColumnExpressionOrLiteralArgument
from ._typing import _ColumnExpressionOrStrLabelArgument
from ._typing import _StarOrOne
from ._typing import _TypeEngineArgument
from .base import _EntityNamespace
from .elements import ClauseElement
@@ -245,9 +246,8 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
.. sourcecode:: pycon+sql
>>> fn = (
... func.generate_series(1, 5).
... table_valued("value", "start", "stop", "step")
>>> fn = func.generate_series(1, 5).table_valued(
... "value", "start", "stop", "step"
... )
>>> print(select(fn))
@@ -264,7 +264,9 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
.. sourcecode:: pycon+sql
>>> fn = func.generate_series(4, 1, -1).table_valued("gen", with_ordinality="ordinality")
>>> fn = func.generate_series(4, 1, -1).table_valued(
... "gen", with_ordinality="ordinality"
... )
>>> print(select(fn))
{printsql}SELECT anon_1.gen, anon_1.ordinality
FROM generate_series(:generate_series_1, :generate_series_2, :generate_series_3) WITH ORDINALITY AS anon_1
@@ -376,7 +378,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
.. sourcecode:: pycon+sql
>>> from sqlalchemy import column, select, func
>>> stmt = select(column('x'), column('y')).select_from(func.myfunction())
>>> stmt = select(column("x"), column("y")).select_from(func.myfunction())
>>> print(stmt)
{printsql}SELECT x, y FROM myfunction()
@@ -441,12 +443,13 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
The expression::
func.row_number().over(order_by='x')
func.row_number().over(order_by="x")
is shorthand for::
from sqlalchemy import over
over(func.row_number(), order_by='x')
over(func.row_number(), order_by="x")
See :func:`_expression.over` for a full description.
@@ -510,6 +513,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
is shorthand for::
from sqlalchemy import funcfilter
funcfilter(func.count(1), True)
.. seealso::
@@ -566,7 +570,7 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
An ORM example is as follows::
class Venue(Base):
__tablename__ = 'venue'
__tablename__ = "venue"
id = Column(Integer, primary_key=True)
name = Column(String)
@@ -574,9 +578,10 @@ class FunctionElement(Executable, ColumnElement[_T], FromClause, Generative):
"Venue",
primaryjoin=func.instr(
remote(foreign(name)), name + "/"
).as_comparison(1, 2) == 1,
).as_comparison(1, 2)
== 1,
viewonly=True,
order_by=name
order_by=name,
)
Above, the "Venue" class can load descendant "Venue" objects by
@@ -880,8 +885,11 @@ class _FunctionGenerator:
.. sourcecode:: pycon+sql
>>> print(func.my_string(u'hi', type_=Unicode) + ' ' +
... func.my_string(u'there', type_=Unicode))
>>> print(
... func.my_string("hi", type_=Unicode)
... + " "
... + func.my_string("there", type_=Unicode)
... )
{printsql}my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3)
The object returned by a :data:`.func` call is usually an instance of
@@ -1366,10 +1374,12 @@ class GenericFunction(Function[_T]):
from sqlalchemy.sql.functions import GenericFunction
from sqlalchemy.types import DateTime
class as_utc(GenericFunction):
type = DateTime()
inherit_cache = True
print(select(func.as_utc()))
User-defined generic functions can be organized into
@@ -1417,6 +1427,7 @@ class GenericFunction(Function[_T]):
from sqlalchemy.sql import quoted_name
class GeoBuffer(GenericFunction):
type = Geometry()
package = "geo"
@@ -1656,7 +1667,7 @@ class concat(GenericFunction[str]):
.. sourcecode:: pycon+sql
>>> print(select(func.concat('a', 'b')))
>>> print(select(func.concat("a", "b")))
{printsql}SELECT concat(:concat_2, :concat_3) AS concat_1
String concatenation in SQLAlchemy is more commonly available using the
@@ -1704,11 +1715,13 @@ class count(GenericFunction[int]):
from sqlalchemy import select
from sqlalchemy import table, column
my_table = table('some_table', column('id'))
my_table = table("some_table", column("id"))
stmt = select(func.count()).select_from(my_table)
Executing ``stmt`` would emit::
Executing ``stmt`` would emit:
.. sourcecode:: sql
SELECT count(*) AS count_1
FROM some_table
@@ -1721,7 +1734,9 @@ class count(GenericFunction[int]):
def __init__(
self,
expression: Optional[_ColumnExpressionArgument[Any]] = None,
expression: Union[
_ColumnExpressionArgument[Any], _StarOrOne, None
] = None,
**kwargs: Any,
):
if expression is None:
@@ -2006,9 +2021,7 @@ class grouping_sets(GenericFunction[_T]):
from sqlalchemy import tuple_
stmt = select(
func.sum(table.c.value),
table.c.col_1, table.c.col_2,
table.c.col_3
func.sum(table.c.value), table.c.col_1, table.c.col_2, table.c.col_3
).group_by(
func.grouping_sets(
tuple_(table.c.col_1, table.c.col_2),
@@ -2016,10 +2029,9 @@ class grouping_sets(GenericFunction[_T]):
)
)
.. versionadded:: 1.2
"""
""" # noqa: E501
_has_args = True
inherit_cache = True


@@ -1,5 +1,5 @@
# sql/lambdas.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -278,7 +278,7 @@ class LambdaElement(elements.ClauseElement):
rec = AnalyzedFunction(
tracker, self, apply_propagate_attrs, fn
)
rec.closure_bindparams = bindparams
rec.closure_bindparams = list(bindparams)
lambda_cache[key] = rec
else:
rec = lambda_cache[key]
@@ -437,7 +437,7 @@ class DeferredLambdaElement(LambdaElement):
def __init__(
self,
fn: _LambdaType,
fn: _AnyLambdaType,
role: Type[roles.SQLRole],
opts: Union[Type[LambdaOptions], LambdaOptions] = LambdaOptions,
lambda_args: Tuple[Any, ...] = (),
@@ -518,7 +518,6 @@ class StatementLambdaElement(
stmt += lambda s: s.where(table.c.col == parameter)
.. versionadded:: 1.4
.. seealso::
@@ -558,9 +557,7 @@ class StatementLambdaElement(
... stmt = lambda_stmt(
... lambda: select(table.c.x, table.c.y),
... )
... stmt = stmt.add_criteria(
... lambda: table.c.x > parameter
... )
... stmt = stmt.add_criteria(lambda: table.c.x > parameter)
... return stmt
The :meth:`_sql.StatementLambdaElement.add_criteria` method is
@@ -571,18 +568,15 @@ class StatementLambdaElement(
>>> def my_stmt(self, foo):
... stmt = lambda_stmt(
... lambda: select(func.max(foo.x, foo.y)),
... track_closure_variables=False
... )
... stmt = stmt.add_criteria(
... lambda: self.where_criteria,
... track_on=[self]
... track_closure_variables=False,
... )
... stmt = stmt.add_criteria(lambda: self.where_criteria, track_on=[self])
... return stmt
See :func:`_sql.lambda_stmt` for a description of the parameters
accepted.
"""
""" # noqa: E501
opts = self.opts + dict(
enable_tracking=enable_tracking,


@@ -1,5 +1,5 @@
# sql/naming.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under


@@ -1,5 +1,5 @@
# sql/operators.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -148,6 +148,7 @@ class Operators:
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
@@ -172,6 +173,7 @@ class Operators:
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
@@ -196,6 +198,7 @@ class Operators:
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
@@ -224,7 +227,7 @@ class Operators:
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
somecolumn.op("&")(0xFF)
is a bitwise AND of the value in ``somecolumn``.
@@ -275,7 +278,7 @@ class Operators:
e.g.::
>>> expr = column('x').op('+', python_impl=lambda a, b: a + b)('y')
>>> expr = column("x").op("+", python_impl=lambda a, b: a + b)("y")
The operator for the above expression will also work for non-SQL
left and right objects::
@@ -389,10 +392,9 @@ class custom_op(OperatorType, Generic[_T]):
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
unary = UnaryExpression(
table.c.somecolumn, modifier=operators.custom_op("!"), type_=Numeric
)
.. seealso::
@@ -400,7 +402,7 @@ class custom_op(OperatorType, Generic[_T]):
:meth:`.Operators.bool_op`
"""
""" # noqa: E501
__name__ = "custom_op"
@@ -698,14 +700,15 @@ class ColumnOperators(Operators):
) -> ColumnOperators:
r"""Implement the ``like`` operator.
In a column context, produces the expression::
In a column context, produces the expression:
.. sourcecode:: sql
a LIKE other
E.g.::
stmt = select(sometable).\
where(sometable.c.column.like("%foobar%"))
stmt = select(sometable).where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
@@ -725,18 +728,21 @@ class ColumnOperators(Operators):
) -> ColumnOperators:
r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
In a column context, produces an expression either of the form::
In a column context, produces an expression either of the form:
.. sourcecode:: sql
lower(a) LIKE lower(other)
Or on backends that support the ILIKE operator::
Or on backends that support the ILIKE operator:
.. sourcecode:: sql
a ILIKE other
E.g.::
stmt = select(sometable).\
where(sometable.c.column.ilike("%foobar%"))
stmt = select(sometable).where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
@@ -748,7 +754,7 @@ class ColumnOperators(Operators):
:meth:`.ColumnOperators.like`
"""
""" # noqa: E501
return self.operate(ilike_op, other, escape=escape)
def bitwise_xor(self, other: Any) -> ColumnOperators:
@@ -842,12 +848,15 @@ class ColumnOperators(Operators):
The given parameter ``other`` may be:
* A list of literal values, e.g.::
* A list of literal values,
e.g.::
stmt.where(column.in_([1, 2, 3]))
In this calling form, the list of items is converted to a set of
bound parameters the same length as the list given::
bound parameters the same length as the list given:
.. sourcecode:: sql
WHERE COL IN (?, ?, ?)
@@ -855,16 +864,20 @@ class ColumnOperators(Operators):
:func:`.tuple_` containing multiple expressions::
from sqlalchemy import tuple_
stmt.where(tuple_(col1, col2).in_([(1, 10), (2, 20), (3, 30)]))
* An empty list, e.g.::
* An empty list,
e.g.::
stmt.where(column.in_([]))
In this calling form, the expression renders an "empty set"
expression. These expressions are tailored to individual backends
and are generally trying to get an empty SELECT statement as a
subquery. Such as on SQLite, the expression is::
subquery. Such as on SQLite, the expression is:
.. sourcecode:: sql
WHERE col IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
@@ -874,10 +887,12 @@ class ColumnOperators(Operators):
* A bound parameter, e.g. :func:`.bindparam`, may be used if it
includes the :paramref:`.bindparam.expanding` flag::
stmt.where(column.in_(bindparam('value', expanding=True)))
stmt.where(column.in_(bindparam("value", expanding=True)))
In this calling form, the expression renders a special non-SQL
placeholder expression that looks like::
placeholder expression that looks like:
.. sourcecode:: sql
WHERE COL IN ([EXPANDING_value])
@@ -887,7 +902,9 @@ class ColumnOperators(Operators):
connection.execute(stmt, {"value": [1, 2, 3]})
The database would be passed a bound parameter for each value::
The database would be passed a bound parameter for each value:
.. sourcecode:: sql
WHERE COL IN (?, ?, ?)
@@ -895,7 +912,9 @@ class ColumnOperators(Operators):
If an empty list is passed, a special "empty list" expression,
which is specific to the database in use, is rendered. On
SQLite this would be::
SQLite this would be:
.. sourcecode:: sql
WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
@@ -906,13 +925,12 @@ class ColumnOperators(Operators):
correlated scalar select::
stmt.where(
column.in_(
select(othertable.c.y).
where(table.c.x == othertable.c.x)
)
column.in_(select(othertable.c.y).where(table.c.x == othertable.c.x))
)
In this calling form, :meth:`.ColumnOperators.in_` renders as given::
In this calling form, :meth:`.ColumnOperators.in_` renders as given:
.. sourcecode:: sql
WHERE COL IN (SELECT othertable.y
FROM othertable WHERE othertable.x = table.x)
@@ -921,7 +939,7 @@ class ColumnOperators(Operators):
construct, or a :func:`.bindparam` construct that includes the
:paramref:`.bindparam.expanding` flag set to True.
"""
""" # noqa: E501
return self.operate(in_op, other)
def not_in(self, other: Any) -> ColumnOperators:
@@ -1065,14 +1083,15 @@ class ColumnOperators(Operators):
r"""Implement the ``startswith`` operator.
Produces a LIKE expression that tests against a match for the start
of a string value::
of a string value:
.. sourcecode:: sql
column LIKE <other> || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.startswith("foobar"))
stmt = select(sometable).where(sometable.c.column.startswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1101,7 +1120,9 @@ class ColumnOperators(Operators):
somecolumn.startswith("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE :param || '%' ESCAPE '/'
@@ -1117,7 +1138,9 @@ class ColumnOperators(Operators):
somecolumn.startswith("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE :param || '%' ESCAPE '^'
@@ -1137,7 +1160,7 @@ class ColumnOperators(Operators):
:meth:`.ColumnOperators.like`
"""
""" # noqa: E501
return self.operate(
startswith_op, other, escape=escape, autoescape=autoescape
)
@@ -1152,14 +1175,15 @@ class ColumnOperators(Operators):
version of :meth:`.ColumnOperators.startswith`.
Produces a LIKE expression that tests against an insensitive
match for the start of a string value::
match for the start of a string value:
.. sourcecode:: sql
lower(column) LIKE lower(<other>) || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.istartswith("foobar"))
stmt = select(sometable).where(sometable.c.column.istartswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1188,7 +1212,9 @@ class ColumnOperators(Operators):
somecolumn.istartswith("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE lower(:param) || '%' ESCAPE '/'
@@ -1204,7 +1230,9 @@ class ColumnOperators(Operators):
somecolumn.istartswith("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE lower(:param) || '%' ESCAPE '^'
@@ -1219,7 +1247,7 @@ class ColumnOperators(Operators):
.. seealso::
:meth:`.ColumnOperators.startswith`
"""
""" # noqa: E501
return self.operate(
istartswith_op, other, escape=escape, autoescape=autoescape
)
@@ -1233,14 +1261,15 @@ class ColumnOperators(Operators):
r"""Implement the 'endswith' operator.
Produces a LIKE expression that tests against a match for the end
of a string value::
of a string value:
.. sourcecode:: sql
column LIKE '%' || <other>
E.g.::
stmt = select(sometable).\
where(sometable.c.column.endswith("foobar"))
stmt = select(sometable).where(sometable.c.column.endswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1269,7 +1298,9 @@ class ColumnOperators(Operators):
somecolumn.endswith("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE '%' || :param ESCAPE '/'
@@ -1285,7 +1316,9 @@ class ColumnOperators(Operators):
somecolumn.endswith("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE '%' || :param ESCAPE '^'
@@ -1305,7 +1338,7 @@ class ColumnOperators(Operators):
:meth:`.ColumnOperators.like`
"""
""" # noqa: E501
return self.operate(
endswith_op, other, escape=escape, autoescape=autoescape
)
@@ -1320,14 +1353,15 @@ class ColumnOperators(Operators):
version of :meth:`.ColumnOperators.endswith`.
Produces a LIKE expression that tests against an insensitive match
for the end of a string value::
for the end of a string value:
.. sourcecode:: sql
lower(column) LIKE '%' || lower(<other>)
E.g.::
stmt = select(sometable).\
where(sometable.c.column.iendswith("foobar"))
stmt = select(sometable).where(sometable.c.column.iendswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1356,7 +1390,9 @@ class ColumnOperators(Operators):
somecolumn.iendswith("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE '%' || lower(:param) ESCAPE '/'
@@ -1372,7 +1408,9 @@ class ColumnOperators(Operators):
somecolumn.iendswith("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE '%' || lower(:param) ESCAPE '^'
@@ -1387,7 +1425,7 @@ class ColumnOperators(Operators):
.. seealso::
:meth:`.ColumnOperators.endswith`
"""
""" # noqa: E501
return self.operate(
iendswith_op, other, escape=escape, autoescape=autoescape
)
@@ -1396,14 +1434,15 @@ class ColumnOperators(Operators):
r"""Implement the 'contains' operator.
Produces a LIKE expression that tests against a match for the middle
of a string value::
of a string value:
.. sourcecode:: sql
column LIKE '%' || <other> || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.contains("foobar"))
stmt = select(sometable).where(sometable.c.column.contains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1432,7 +1471,9 @@ class ColumnOperators(Operators):
somecolumn.contains("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE '%' || :param || '%' ESCAPE '/'
@@ -1448,7 +1489,9 @@ class ColumnOperators(Operators):
somecolumn.contains("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
somecolumn LIKE '%' || :param || '%' ESCAPE '^'
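For contrast with ``autoescape``, a short sketch of supplying an explicit ``escape`` character to :meth:`.ColumnOperators.contains`; the table name is illustrative and the SQL in the comment is approximate::

    from sqlalchemy import Column, MetaData, String, Table

    metadata = MetaData()
    note = Table("note", metadata, Column("body", String))

    # the caller has already placed "^" before the literal "%" in the value,
    # and tells the database to treat "^" as the escape character
    expr = note.c.body.contains("100^% complete", escape="^")

    # renders roughly: note.body LIKE '%' || :body_1 || '%' ESCAPE '^'
    print(expr)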
@@ -1469,7 +1512,7 @@ class ColumnOperators(Operators):
:meth:`.ColumnOperators.like`
"""
""" # noqa: E501
return self.operate(contains_op, other, **kw)
def icontains(self, other: Any, **kw: Any) -> ColumnOperators:
@@ -1477,14 +1520,15 @@ class ColumnOperators(Operators):
version of :meth:`.ColumnOperators.contains`.
Produces a LIKE expression that tests against an insensitive match
for the middle of a string value::
for the middle of a string value:
.. sourcecode:: sql
lower(column) LIKE '%' || lower(<other>) || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.icontains("foobar"))
stmt = select(sometable).where(sometable.c.column.icontains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
@@ -1513,7 +1557,9 @@ class ColumnOperators(Operators):
somecolumn.icontains("foo%bar", autoescape=True)
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE '%' || lower(:param) || '%' ESCAPE '/'
@@ -1529,7 +1575,9 @@ class ColumnOperators(Operators):
somecolumn.icontains("foo/%bar", escape="^")
Will render as::
Will render as:
.. sourcecode:: sql
lower(somecolumn) LIKE '%' || lower(:param) || '%' ESCAPE '^'
@@ -1545,7 +1593,7 @@ class ColumnOperators(Operators):
:meth:`.ColumnOperators.contains`
"""
""" # noqa: E501
return self.operate(icontains_op, other, **kw)
def match(self, other: Any, **kwargs: Any) -> ColumnOperators:
@@ -1569,7 +1617,7 @@ class ColumnOperators(Operators):
:class:`_mysql.match` - MySQL specific construct with
additional features.
* Oracle - renders ``CONTAINS(x, y)``
* Oracle Database - renders ``CONTAINS(x, y)``
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "MATCH". This is compatible with SQLite, for
@@ -1586,7 +1634,7 @@ class ColumnOperators(Operators):
E.g.::
stmt = select(table.c.some_column).where(
table.c.some_column.regexp_match('^(b|c)')
table.c.some_column.regexp_match("^(b|c)")
)
:meth:`_sql.ColumnOperators.regexp_match` attempts to resolve to
@@ -1597,7 +1645,7 @@ class ColumnOperators(Operators):
Examples include:
* PostgreSQL - renders ``x ~ y`` or ``x !~ y`` when negated.
* Oracle - renders ``REGEXP_LIKE(x, y)``
* Oracle Database - renders ``REGEXP_LIKE(x, y)``
* SQLite - uses SQLite's ``REGEXP`` placeholder operator and calls into
the Python ``re.match()`` builtin.
* other backends may provide special implementations.
@@ -1605,9 +1653,9 @@ class ColumnOperators(Operators):
the operator as "REGEXP" or "NOT REGEXP". This is compatible with
SQLite and MySQL, for example.
Regular expression support is currently implemented for Oracle,
PostgreSQL, MySQL and MariaDB. Partial support is available for
SQLite. Support among third-party dialects may vary.
Regular expression support is currently implemented for Oracle
Database, PostgreSQL, MySQL and MariaDB. Partial support is available
for SQLite. Support among third-party dialects may vary.
:param pattern: The regular expression pattern string or column
clause.
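To see the dialect-specific rendering described above, the same expression can be compiled against different dialects; a rough sketch using an illustrative table::

    from sqlalchemy import Column, MetaData, String, Table, select
    from sqlalchemy.dialects import mysql, postgresql

    metadata = MetaData()
    t = Table("some_table", metadata, Column("some_column", String))

    stmt = select(t.c.some_column).where(t.c.some_column.regexp_match("^(b|c)"))

    # PostgreSQL renders the "~" operator ...
    print(stmt.compile(dialect=postgresql.dialect()))

    # ... while MySQL renders the REGEXP keyword
    print(stmt.compile(dialect=mysql.dialect()))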
@@ -1644,11 +1692,7 @@ class ColumnOperators(Operators):
E.g.::
stmt = select(
table.c.some_column.regexp_replace(
'b(..)',
'X\1Y',
flags='g'
)
table.c.some_column.regexp_replace("b(..)", "X\1Y", flags="g")
)
:meth:`_sql.ColumnOperators.regexp_replace` attempts to resolve to
@@ -1658,8 +1702,8 @@ class ColumnOperators(Operators):
**not backend agnostic**.
Regular expression replacement support is currently implemented for
Oracle, PostgreSQL, MySQL 8 or greater and MariaDB. Support among
third-party dialects may vary.
Oracle Database, PostgreSQL, MySQL 8 or greater and MariaDB. Support
among third-party dialects may vary.
:param pattern: The regular expression pattern string or column
clause.
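A similarly hedged sketch for :meth:`_sql.ColumnOperators.regexp_replace`, compiled against PostgreSQL where it resolves to ``REGEXP_REPLACE``; the table is illustrative::

    from sqlalchemy import Column, MetaData, String, Table, select
    from sqlalchemy.dialects import postgresql

    metadata = MetaData()
    t = Table("some_table", metadata, Column("some_column", String))

    # replace every "b(..)" match with "X..Y"; flags are passed through to
    # the backend's regular expression function
    stmt = select(t.c.some_column.regexp_replace("b(..)", r"X\1Y", flags="g"))

    print(stmt.compile(dialect=postgresql.dialect()))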
@@ -2474,6 +2518,12 @@ def is_associative(op: OperatorType) -> bool:
return op in _associative
def is_order_by_modifier(op: Optional[OperatorType]) -> bool:
return op in _order_by_modifier
_order_by_modifier = {desc_op, asc_op, nulls_first_op, nulls_last_op}
_natural_self_precedent = _associative.union(
[getitem, json_getitem_op, json_path_getitem_op]
)
View File
@@ -1,5 +1,5 @@
# sql/roles.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
View File
@@ -1,5 +1,5 @@
# sql/schema.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -60,6 +60,7 @@ from . import roles
from . import type_api
from . import visitors
from .base import _DefaultDescriptionTuple
from .base import _NoArg
from .base import _NoneName
from .base import _SentinelColumnCharacterization
from .base import _SentinelDefaultCharacterization
@@ -76,7 +77,6 @@ from .elements import TextClause
from .selectable import TableClause
from .type_api import to_instance
from .visitors import ExternallyTraversible
from .visitors import InternalTraversal
from .. import event
from .. import exc
from .. import inspection
@@ -95,12 +95,13 @@ if typing.TYPE_CHECKING:
from ._typing import _InfoType
from ._typing import _TextCoercedExpressionArgument
from ._typing import _TypeEngineArgument
from .base import ColumnSet
from .base import ReadOnlyColumnCollection
from .compiler import DDLCompiler
from .elements import BindParameter
from .elements import KeyedColumnElement
from .functions import Function
from .type_api import TypeEngine
from .visitors import _TraverseInternalsType
from .visitors import anon_map
from ..engine import Connection
from ..engine import Engine
@@ -124,6 +125,8 @@ _ServerDefaultArgument = Union[
"FetchedValue", str, TextClause, ColumnElement[Any]
]
_ServerOnUpdateArgument = _ServerDefaultArgument
class SchemaConst(Enum):
RETAIN_SCHEMA = 1
@@ -319,9 +322,10 @@ class Table(
e.g.::
mytable = Table(
"mytable", metadata,
Column('mytable_id', Integer, primary_key=True),
Column('value', String(50))
"mytable",
metadata,
Column("mytable_id", Integer, primary_key=True),
Column("value", String(50)),
)
The :class:`_schema.Table`
@@ -391,11 +395,6 @@ class Table(
"""
_traverse_internals: _TraverseInternalsType = (
TableClause._traverse_internals
+ [("schema", InternalTraversal.dp_string)]
)
if TYPE_CHECKING:
@util.ro_non_memoized_property
@@ -636,11 +635,13 @@ class Table(
:class:`_schema.Column`
named "y"::
Table("mytable", metadata,
Column('y', Integer),
extend_existing=True,
autoload_with=engine
)
Table(
"mytable",
metadata,
Column("y", Integer),
extend_existing=True,
autoload_with=engine,
)
.. seealso::
@@ -737,12 +738,12 @@ class Table(
"handle the column reflection event"
# ...
t = Table(
'sometable',
"sometable",
autoload_with=engine,
listeners=[
('column_reflect', listen_for_reflect)
])
listeners=[("column_reflect", listen_for_reflect)],
)
.. seealso::
@@ -1349,7 +1350,7 @@ class Table(
m1 = MetaData()
user = Table('user', m1, Column('id', Integer, primary_key=True))
user = Table("user", m1, Column("id", Integer, primary_key=True))
m2 = MetaData()
user_copy = user.to_metadata(m2)
@@ -1373,7 +1374,7 @@ class Table(
unless
set explicitly::
m2 = MetaData(schema='newschema')
m2 = MetaData(schema="newschema")
# user_copy_one will have "newschema" as the schema name
user_copy_one = user.to_metadata(m2, schema=None)
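A compact sketch that follows the behavior just described, passing ``schema=None`` so the copy takes the schema of the target :class:`_schema.MetaData`; names are illustrative::

    from sqlalchemy import Column, Integer, MetaData, String, Table

    m1 = MetaData()
    user = Table(
        "user",
        m1,
        Column("id", Integer, primary_key=True),
        Column("name", String(50)),
    )

    m2 = MetaData(schema="newschema")
    user_copy = user.to_metadata(m2, schema=None)

    # the copy is a distinct Table bound to the second MetaData
    assert user_copy is not user
    assert user_copy.metadata is m2
    assert user_copy.schema == "newschema"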
@@ -1400,15 +1401,16 @@ class Table(
E.g.::
def referred_schema_fn(table, to_schema,
constraint, referred_schema):
if referred_schema == 'base_tables':
def referred_schema_fn(table, to_schema, constraint, referred_schema):
if referred_schema == "base_tables":
return referred_schema
else:
return to_schema
new_table = table.to_metadata(m2, schema="alt_schema",
referred_schema_fn=referred_schema_fn)
new_table = table.to_metadata(
m2, schema="alt_schema", referred_schema_fn=referred_schema_fn
)
:param name: optional string name indicating the target table name.
If not specified or None, the table name is retained. This allows
@@ -1416,7 +1418,7 @@ class Table(
:class:`_schema.MetaData` target
with a new name.
"""
""" # noqa: E501
if name is None:
name = self.name
@@ -1438,7 +1440,7 @@ class Table(
args = []
for col in self.columns:
args.append(col._copy(schema=actual_schema))
args.append(col._copy(schema=actual_schema, _to_metadata=metadata))
table = Table(
name,
metadata,
@@ -1514,7 +1516,8 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
name: Optional[str] = None,
type_: Optional[_TypeEngineArgument[_T]] = None,
autoincrement: _AutoIncrementType = "auto",
default: Optional[Any] = None,
default: Optional[Any] = _NoArg.NO_ARG,
insert_default: Optional[Any] = _NoArg.NO_ARG,
doc: Optional[str] = None,
key: Optional[str] = None,
index: Optional[bool] = None,
@@ -1526,7 +1529,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
onupdate: Optional[Any] = None,
primary_key: bool = False,
server_default: Optional[_ServerDefaultArgument] = None,
server_onupdate: Optional[FetchedValue] = None,
server_onupdate: Optional[_ServerOnUpdateArgument] = None,
quote: Optional[bool] = None,
system: bool = False,
comment: Optional[str] = None,
@@ -1547,7 +1550,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
unless they are a reserved word. Names with any number of upper
case characters will be quoted and sent exactly. Note that this
behavior applies even for databases which standardize upper
case names as case insensitive such as Oracle.
case names as case insensitive such as Oracle Database.
The name field may be omitted at construction time and applied
later, at any time before the Column is associated with a
@@ -1560,10 +1563,10 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
as well, e.g.::
# use a type with arguments
Column('data', String(50))
Column("data", String(50))
# use no arguments
Column('level', Integer)
Column("level", Integer)
The ``type`` argument may be the second positional argument
or specified by keyword.
@@ -1619,8 +1622,8 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
will imply that database-specific keywords such as PostgreSQL
``SERIAL``, MySQL ``AUTO_INCREMENT``, or ``IDENTITY`` on SQL Server
should also be rendered. Not every database backend has an
"implied" default generator available; for example the Oracle
backend always needs an explicit construct such as
"implied" default generator available; for example the Oracle Database
backend always needs an explicit construct such as
:class:`.Identity` to be included with a :class:`.Column` in order
for the DDL rendered to include auto-generating constructs to also
be produced in the database.
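As a hedged sketch of the point above, an explicit :class:`.Identity` construct can be attached to the integer primary key so that backends such as Oracle Database include an identity clause in the emitted DDL; table and column names are illustrative::

    from sqlalchemy import Column, Identity, Integer, MetaData, String, Table
    from sqlalchemy.dialects import oracle
    from sqlalchemy.schema import CreateTable

    metadata = MetaData()
    data = Table(
        "data",
        metadata,
        # Identity() makes the auto-generating behavior explicit
        Column("id", Integer, Identity(start=1), primary_key=True),
        Column("value", String(50)),
    )

    # the Oracle Database dialect renders a GENERATED ... AS IDENTITY clause
    print(CreateTable(data).compile(dialect=oracle.dialect()))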
@@ -1665,8 +1668,12 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
# turn on autoincrement for this column despite
# the ForeignKey()
Column('id', ForeignKey('other.id'),
primary_key=True, autoincrement='ignore_fk')
Column(
"id",
ForeignKey("other.id"),
primary_key=True,
autoincrement="ignore_fk",
)
It is typically not desirable to have "autoincrement" enabled on a
column that refers to another via foreign key, as such a column is
@@ -1694,7 +1701,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
is not included as this is unnecessary and not recommended
by the database vendor. See the section
:ref:`sqlite_autoincrement` for more background.
* Oracle - The Oracle dialect has no default "autoincrement"
* Oracle Database - The Oracle Database dialects have no default "autoincrement"
feature available at this time, instead the :class:`.Identity`
construct is recommended to achieve this (the :class:`.Sequence`
construct may also be used).
@@ -1711,10 +1718,10 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
(see
`https://www.python.org/dev/peps/pep-0249/#lastrowid
<https://www.python.org/dev/peps/pep-0249/#lastrowid>`_)
* PostgreSQL, SQL Server, Oracle - use RETURNING or an equivalent
* PostgreSQL, SQL Server, Oracle Database - use RETURNING or an equivalent
construct when rendering an INSERT statement, and then retrieving
the newly generated primary key values after execution
* PostgreSQL, Oracle for :class:`_schema.Table` objects that
* PostgreSQL, Oracle Database for :class:`_schema.Table` objects that
set :paramref:`_schema.Table.implicit_returning` to False -
for a :class:`.Sequence` only, the :class:`.Sequence` is invoked
explicitly before the INSERT statement takes place so that the
@@ -1751,6 +1758,11 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
:ref:`metadata_defaults_toplevel`
:param insert_default: An alias of :paramref:`.Column.default`
for compatibility with :func:`_orm.mapped_column`.
.. versionadded:: 2.0.31
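A small illustrative sketch of the alias described above; ``insert_default`` here behaves the same as :paramref:`.Column.default` (table and column names are not from the original)::

    import datetime

    from sqlalchemy import Column, DateTime, Integer, MetaData, Table

    metadata = MetaData()
    event = Table(
        "event",
        metadata,
        Column("id", Integer, primary_key=True),
        # invoked for each INSERT that omits a value for this column,
        # equivalent to default=datetime.datetime.utcnow
        Column("created_at", DateTime, insert_default=datetime.datetime.utcnow),
    )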
:param doc: optional String that can be used by the ORM or similar
to document attributes on the Python side. This attribute does
**not** render SQL comments; use the
@@ -1778,7 +1790,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
"some_table",
metadata,
Column("x", Integer),
Index("ix_some_table_x", "x")
Index("ix_some_table_x", "x"),
)
To add the :paramref:`_schema.Index.unique` flag to the
@@ -1860,14 +1872,22 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
String types will be emitted as-is, surrounded by single quotes::
Column('x', Text, server_default="val")
Column("x", Text, server_default="val")
will render:
.. sourcecode:: sql
x TEXT DEFAULT 'val'
A :func:`~sqlalchemy.sql.expression.text` expression will be
rendered as-is, without quotes::
Column('y', DateTime, server_default=text('NOW()'))
Column("y", DateTime, server_default=text("NOW()"))
will render:
.. sourcecode:: sql
y DATETIME DEFAULT NOW()
@@ -1882,20 +1902,21 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
from sqlalchemy.dialects.postgresql import array
engine = create_engine(
'postgresql+psycopg2://scott:tiger@localhost/mydatabase'
"postgresql+psycopg2://scott:tiger@localhost/mydatabase"
)
metadata_obj = MetaData()
tbl = Table(
"foo",
metadata_obj,
Column("bar",
ARRAY(Text),
server_default=array(["biz", "bang", "bash"])
)
"foo",
metadata_obj,
Column(
"bar", ARRAY(Text), server_default=array(["biz", "bang", "bash"])
),
)
metadata_obj.create_all(engine)
The above results in a table created with the following SQL::
The above results in a table created with the following SQL:
.. sourcecode:: sql
CREATE TABLE foo (
bar TEXT[] DEFAULT ARRAY['biz', 'bang', 'bash']
@@ -1960,12 +1981,7 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
:class:`_schema.UniqueConstraint` construct explicitly at the
level of the :class:`_schema.Table` construct itself::
Table(
"some_table",
metadata,
Column("x", Integer),
UniqueConstraint("x")
)
Table("some_table", metadata, Column("x", Integer), UniqueConstraint("x"))
The :paramref:`_schema.UniqueConstraint.name` parameter
of the unique constraint object is left at its default value
@@ -2104,12 +2120,19 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
# otherwise, add DDL-related events
self._set_type(self.type)
if default is not None:
if not isinstance(default, (ColumnDefault, Sequence)):
default = ColumnDefault(default)
if insert_default is not _NoArg.NO_ARG:
resolved_default = insert_default
elif default is not _NoArg.NO_ARG:
resolved_default = default
else:
resolved_default = None
self.default = default
l_args.append(default)
if resolved_default is not None:
if not isinstance(resolved_default, (ColumnDefault, Sequence)):
resolved_default = ColumnDefault(resolved_default)
self.default = resolved_default
l_args.append(resolved_default)
else:
self.default = None
@@ -2466,6 +2489,8 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
server_onupdate = self.server_onupdate
if isinstance(server_default, (Computed, Identity)):
# TODO: likely should be copied in all cases
# TODO: if a Sequence, we would need to transfer the Sequence
# .metadata as well
args.append(server_default._copy(**kw))
server_default = server_onupdate = None
@@ -2569,8 +2594,11 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
new_onupdate = self.onupdate._copy()
new_onupdate._set_parent(other)
if self.index and not other.index:
other.index = True
if self.index in (True, False) and other.index is None:
other.index = self.index
if self.unique in (True, False) and other.unique is None:
other.unique = self.unique
if self.doc and other.doc is None:
other.doc = self.doc
@@ -2578,9 +2606,6 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
if self.comment and other.comment is None:
other.comment = self.comment
if self.unique and not other.unique:
other.unique = True
for const in self.constraints:
if not const._type_bound:
new_const = const._copy()
@@ -2594,6 +2619,8 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
def _make_proxy(
self,
selectable: FromClause,
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
name: Optional[str] = None,
key: Optional[str] = None,
name_is_truncatable: bool = False,
@@ -2663,10 +2690,13 @@ class Column(DialectKWArgs, SchemaItem, ColumnClause[_T]):
c._propagate_attrs = selectable._propagate_attrs
if selectable._is_clone_of is not None:
c._is_clone_of = selectable._is_clone_of.columns.get(c.key)
if self.primary_key:
selectable.primary_key.add(c) # type: ignore
primary_key.add(c)
if fk:
selectable.foreign_keys.update(fk) # type: ignore
foreign_keys.update(fk) # type: ignore
return c.key, c
@@ -2727,8 +2757,10 @@ class ForeignKey(DialectKWArgs, SchemaItem):
object,
e.g.::
t = Table("remote_table", metadata,
Column("remote_id", ForeignKey("main_table.id"))
t = Table(
"remote_table",
metadata,
Column("remote_id", ForeignKey("main_table.id")),
)
Note that ``ForeignKey`` is only a marker object that defines
@@ -3377,12 +3409,11 @@ class ColumnDefault(DefaultGenerator, ABC):
For example, the following::
Column('foo', Integer, default=50)
Column("foo", Integer, default=50)
Is equivalent to::
Column('foo', Integer, ColumnDefault(50))
Column("foo", Integer, ColumnDefault(50))
"""
@@ -3669,9 +3700,14 @@ class Sequence(HasSchemaAttr, IdentityOptions, DefaultGenerator):
The :class:`.Sequence` is typically associated with a primary key column::
some_table = Table(
'some_table', metadata,
Column('id', Integer, Sequence('some_table_seq', start=1),
primary_key=True)
"some_table",
metadata,
Column(
"id",
Integer,
Sequence("some_table_seq", start=1),
primary_key=True,
),
)
When CREATE TABLE is emitted for the above :class:`_schema.Table`, if the
@@ -3782,11 +3818,11 @@ class Sequence(HasSchemaAttr, IdentityOptions, DefaultGenerator):
:param cache: optional integer value; number of future values in the
sequence which are calculated in advance. Renders the CACHE keyword
understood by Oracle and PostgreSQL.
understood by Oracle Database and PostgreSQL.
:param order: optional boolean value; if ``True``, renders the
ORDER keyword, understood by Oracle, indicating the sequence is
definitively ordered. May be necessary to provide deterministic
ORDER keyword, understood by Oracle Database, indicating the sequence
is definitively ordered. May be necessary to provide deterministic
ordering using Oracle RAC.
:param data_type: The type to be returned by the sequence, for
@@ -3947,7 +3983,7 @@ class FetchedValue(SchemaEventTarget):
E.g.::
Column('foo', Integer, FetchedValue())
Column("foo", Integer, FetchedValue())
Would indicate that some trigger or default generator
will create a new value for the ``foo`` column during an
@@ -4013,11 +4049,11 @@ class DefaultClause(FetchedValue):
For example, the following::
Column('foo', Integer, server_default="50")
Column("foo", Integer, server_default="50")
Is equivalent to::
Column('foo', Integer, DefaultClause("50"))
Column("foo", Integer, DefaultClause("50"))
"""
@@ -4196,6 +4232,10 @@ class ColumnCollectionMixin:
] = _gather_expressions
if processed_expressions is not None:
# this is expected to be an empty list
assert not processed_expressions
self._pending_colargs = []
for (
expr,
@@ -4846,11 +4886,13 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
:class:`_schema.Column` objects corresponding to those marked with
the :paramref:`_schema.Column.primary_key` flag::
>>> my_table = Table('mytable', metadata,
... Column('id', Integer, primary_key=True),
... Column('version_id', Integer, primary_key=True),
... Column('data', String(50))
... )
>>> my_table = Table(
... "mytable",
... metadata,
... Column("id", Integer, primary_key=True),
... Column("version_id", Integer, primary_key=True),
... Column("data", String(50)),
... )
>>> my_table.primary_key
PrimaryKeyConstraint(
Column('id', Integer(), table=<mytable>,
@@ -4864,13 +4906,14 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
the "name" of the constraint can also be specified, as well as other
options which may be recognized by dialects::
my_table = Table('mytable', metadata,
Column('id', Integer),
Column('version_id', Integer),
Column('data', String(50)),
PrimaryKeyConstraint('id', 'version_id',
name='mytable_pk')
)
my_table = Table(
"mytable",
metadata,
Column("id", Integer),
Column("version_id", Integer),
Column("data", String(50)),
PrimaryKeyConstraint("id", "version_id", name="mytable_pk"),
)
The two styles of column-specification should generally not be mixed.
A warning is emitted if the columns present in the
@@ -4888,13 +4931,14 @@ class PrimaryKeyConstraint(ColumnCollectionConstraint):
primary key column collection from the :class:`_schema.Table` based on the
flags::
my_table = Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Column('version_id', Integer, primary_key=True),
Column('data', String(50)),
PrimaryKeyConstraint(name='mytable_pk',
mssql_clustered=True)
)
my_table = Table(
"mytable",
metadata,
Column("id", Integer, primary_key=True),
Column("version_id", Integer, primary_key=True),
Column("data", String(50)),
PrimaryKeyConstraint(name="mytable_pk", mssql_clustered=True),
)
"""
@@ -5091,19 +5135,21 @@ class Index(
E.g.::
sometable = Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100))
)
sometable = Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
)
Index("some_index", sometable.c.name)
For a no-frills, single column index, adding
:class:`_schema.Column` also supports ``index=True``::
sometable = Table("sometable", metadata,
Column("name", String(50), index=True)
)
sometable = Table(
"sometable", metadata, Column("name", String(50), index=True)
)
For a composite index, multiple columns can be specified::
@@ -5122,22 +5168,26 @@ class Index(
the names
of the indexed columns can be specified as strings::
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address")
)
Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", "name", "address"),
)
To support functional or expression-based indexes in this form, the
:func:`_expression.text` construct may be used::
from sqlalchemy import text
Table("sometable", metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)"))
)
Table(
"sometable",
metadata,
Column("name", String(50)),
Column("address", String(100)),
Index("some_index", text("lower(name)")),
)
.. seealso::
@@ -5616,6 +5666,38 @@ class MetaData(HasSchemaAttr):
sorted(self.tables.values(), key=lambda t: t.key) # type: ignore
)
# overload needed to work around this mypy issue
# https://github.com/python/mypy/issues/17093
@overload
def reflect(
self,
bind: Engine,
schema: Optional[str] = ...,
views: bool = ...,
only: Union[
_typing_Sequence[str], Callable[[str, MetaData], bool], None
] = ...,
extend_existing: bool = ...,
autoload_replace: bool = ...,
resolve_fks: bool = ...,
**dialect_kwargs: Any,
) -> None: ...
@overload
def reflect(
self,
bind: Connection,
schema: Optional[str] = ...,
views: bool = ...,
only: Union[
_typing_Sequence[str], Callable[[str, MetaData], bool], None
] = ...,
extend_existing: bool = ...,
autoload_replace: bool = ...,
resolve_fks: bool = ...,
**dialect_kwargs: Any,
) -> None: ...
@util.preload_module("sqlalchemy.engine.reflection")
def reflect(
self,
@@ -5863,9 +5945,11 @@ class Computed(FetchedValue, SchemaItem):
from sqlalchemy import Computed
Table('square', metadata_obj,
Column('side', Float, nullable=False),
Column('area', Float, Computed('side * side'))
Table(
"square",
metadata_obj,
Column("side", Float, nullable=False),
Column("area", Float, Computed("side * side")),
)
See the linked documentation below for complete details.
@@ -5970,9 +6054,11 @@ class Identity(IdentityOptions, FetchedValue, SchemaItem):
from sqlalchemy import Identity
Table('foo', metadata_obj,
Column('id', Integer, Identity())
Column('description', Text),
Table(
"foo",
metadata_obj,
Column("id", Integer, Identity()),
Column("description", Text),
)
See the linked documentation below for complete details.
@@ -6032,7 +6118,7 @@ class Identity(IdentityOptions, FetchedValue, SchemaItem):
:param on_null:
Set to ``True`` to specify ON NULL in conjunction with a
``always=False`` identity column. This option is only supported on
some backends, like Oracle.
some backends, like Oracle Database.
:param start: the starting index of the sequence.
:param increment: the increment value of the sequence.
File diff suppressed because it is too large.
View File
@@ -1,5 +1,5 @@
# sql/sqltypes.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -59,9 +59,11 @@ from .. import util
from ..engine import processors
from ..util import langhelpers
from ..util import OrderedDict
from ..util import warn_deprecated
from ..util.typing import get_args
from ..util.typing import is_literal
from ..util.typing import is_pep695
from ..util.typing import Literal
from ..util.typing import typing_get_args
if TYPE_CHECKING:
from ._typing import _ColumnExpressionArgument
@@ -202,7 +204,7 @@ class String(Concatenable, TypeEngine[str]):
.. sourcecode:: pycon+sql
>>> from sqlalchemy import cast, select, String
>>> print(select(cast('some string', String(collation='utf8'))))
>>> print(select(cast("some string", String(collation="utf8"))))
{printsql}SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
.. note::
@@ -217,6 +219,11 @@ class String(Concatenable, TypeEngine[str]):
self.length = length
self.collation = collation
def _with_collation(self, collation):
new_type = self.copy()
new_type.collation = collation
return new_type
def _resolve_for_literal(self, value):
# I was SO PROUD of my regex trick, but we dont need it.
# re.search(r"[^\u0000-\u007F]", value)
@@ -269,8 +276,8 @@ class Unicode(String):
The :class:`.Unicode` type is a :class:`.String` subclass that assumes
input and output strings that may contain non-ASCII characters, and for
some backends implies an underlying column type that is explicitly
supporting of non-ASCII data, such as ``NVARCHAR`` on Oracle and SQL
Server. This will impact the output of ``CREATE TABLE`` statements and
supporting of non-ASCII data, such as ``NVARCHAR`` on Oracle Database and
SQL Server. This will impact the output of ``CREATE TABLE`` statements and
``CAST`` functions at the dialect level.
The character encoding used by the :class:`.Unicode` type that is used to
@@ -301,7 +308,6 @@ class Unicode(String):
:meth:`.DialectEvents.do_setinputsizes`
"""
__visit_name__ = "unicode"
@@ -629,16 +635,16 @@ class Float(Numeric[_N]):
indicates a number of digits for the generic
:class:`_sqltypes.Float` datatype.
.. note:: For the Oracle backend, the
.. note:: For the Oracle Database backend, the
:paramref:`_sqltypes.Float.precision` parameter is not accepted
when rendering DDL, as Oracle does not support float precision
when rendering DDL, as Oracle Database does not support float precision
specified as a number of decimal places. Instead, use the
Oracle-specific :class:`_oracle.FLOAT` datatype and specify the
Oracle Database-specific :class:`_oracle.FLOAT` datatype and specify the
:paramref:`_oracle.FLOAT.binary_precision` parameter. This is new
in version 2.0 of SQLAlchemy.
To create a database agnostic :class:`_types.Float` that
separately specifies binary precision for Oracle, use
separately specifies binary precision for Oracle Database, use
:meth:`_types.TypeEngine.with_variant` as follows::
from sqlalchemy import Column
@@ -647,7 +653,7 @@ class Float(Numeric[_N]):
Column(
"float_data",
Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle")
Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle"),
)
:param asdecimal: the same flag as that of :class:`.Numeric`, but
@@ -749,7 +755,7 @@ class DateTime(
to make use of the :class:`_types.TIMESTAMP` datatype directly when
using this flag, as some databases include separate generic
date/time-holding types distinct from the timezone-capable
TIMESTAMP datatype, such as Oracle.
TIMESTAMP datatype, such as Oracle Database.
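A short sketch of the recommendation above, showing the generic flag alongside the :class:`_types.TIMESTAMP` type, compiled against PostgreSQL; the table is illustrative and exact DDL varies by backend::

    from sqlalchemy import TIMESTAMP, Column, DateTime, MetaData, Table
    from sqlalchemy.dialects import postgresql
    from sqlalchemy.schema import CreateTable

    metadata = MetaData()
    log = Table(
        "log",
        metadata,
        # generic flag; rendering depends on the backend
        Column("created_at", DateTime(timezone=True)),
        # explicit TIMESTAMP type, "WITH TIME ZONE" where supported
        Column("recorded_at", TIMESTAMP(timezone=True)),
    )

    print(CreateTable(log).compile(dialect=postgresql.dialect()))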
"""
@@ -865,6 +871,12 @@ class _Binary(TypeEngine[bytes]):
def __init__(self, length: Optional[int] = None):
self.length = length
@util.ro_memoized_property
def _generic_type_affinity(
self,
) -> Type[TypeEngine[bytes]]:
return LargeBinary
def literal_processor(self, dialect):
def process(value):
# TODO: this is useless for real world scenarios; implement
@@ -1006,7 +1018,7 @@ class SchemaType(SchemaEventTarget, TypeEngineMixin):
if _adapted_from:
self.dispatch = self.dispatch._join(_adapted_from.dispatch)
def _set_parent(self, column, **kw):
def _set_parent(self, parent, **kw):
# set parent hook is when this type is associated with a column.
# Column calls it for all SchemaEventTarget instances, either the
# base type and/or variants in _variant_mapping.
@@ -1020,7 +1032,7 @@ class SchemaType(SchemaEventTarget, TypeEngineMixin):
# on_table/metadata_create/drop in this method, which is used by
# "native" types with a separate CREATE/DROP e.g. Postgresql.ENUM
column._on_table_attach(util.portable_instancemethod(self._set_table))
parent._on_table_attach(util.portable_instancemethod(self._set_table))
def _variant_mapping_for_set_table(self, column):
if column.type._variant_mapping:
@@ -1080,6 +1092,11 @@ class SchemaType(SchemaEventTarget, TypeEngineMixin):
return self.adapt(
cast("Type[TypeEngine[Any]]", self.__class__),
_create_events=True,
metadata=(
kw.get("_to_metadata", self.metadata)
if self.metadata is not None
else None
),
)
@overload
@@ -1211,15 +1228,14 @@ class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
import enum
from sqlalchemy import Enum
class MyEnum(enum.Enum):
one = 1
two = 2
three = 3
t = Table(
'data', MetaData(),
Column('value', Enum(MyEnum))
)
t = Table("data", MetaData(), Column("value", Enum(MyEnum)))
connection.execute(t.insert(), {"value": MyEnum.two})
assert connection.scalar(t.select()) is MyEnum.two
@@ -1497,16 +1513,9 @@ class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
native_enum = None
if not we_are_generic_form and python_type is matched_on:
# if we have enumerated values, and the incoming python
# type is exactly the one that matched in the type map,
# then we use these enumerated values and dont try to parse
# what's incoming
enum_args = self._enums_argument
elif is_literal(python_type):
def process_literal(pt):
# for a literal, where we need to get its contents, parse it out.
enum_args = typing_get_args(python_type)
enum_args = get_args(pt)
bad_args = [arg for arg in enum_args if not isinstance(arg, str)]
if bad_args:
raise exc.ArgumentError(
@@ -1515,6 +1524,42 @@ class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
f"provide an explicit Enum datatype for this Python type"
)
native_enum = False
return enum_args, native_enum
if not we_are_generic_form and python_type is matched_on:
# if we have enumerated values, and the incoming python
# type is exactly the one that matched in the type map,
# then we use these enumerated values and dont try to parse
# what's incoming
enum_args = self._enums_argument
elif is_literal(python_type):
enum_args, native_enum = process_literal(python_type)
elif is_pep695(python_type):
value = python_type.__value__
if is_pep695(value):
new_value = value
while is_pep695(new_value):
new_value = new_value.__value__
if is_literal(new_value):
value = new_value
warn_deprecated(
f"Mapping recursive TypeAliasType '{python_type}' "
"that resolve to literal to generate an Enum is "
"deprecated. SQLAlchemy 2.1 will not support this "
"use case. Please avoid using recursing "
"TypeAliasType.",
"2.0",
)
if not is_literal(value):
raise exc.ArgumentError(
f"Can't associate TypeAliasType '{python_type}' to an "
"Enum since it's not a direct alias of a Literal. Only "
"aliases in this form `type my_alias = Literal['a', "
"'b']` are supported when generating Enums."
)
enum_args, native_enum = process_literal(value)
elif isinstance(python_type, type) and issubclass(
python_type, enum.Enum
):
@@ -1664,10 +1709,10 @@ class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
assert "_enums" in kw
return impltype(**kw)
def adapt(self, impltype, **kw):
def adapt(self, cls, **kw):
kw["_enums"] = self._enums_argument
kw["_disable_warnings"] = True
return super().adapt(impltype, **kw)
return super().adapt(cls, **kw)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
@@ -1903,6 +1948,13 @@ class Boolean(SchemaType, Emulated, TypeEngine[bool]):
if _adapted_from:
self.dispatch = self.dispatch._join(_adapted_from.dispatch)
def copy(self, **kw):
# override SchemaType.copy() to not include to_metadata logic
return self.adapt(
cast("Type[TypeEngine[Any]]", self.__class__),
_create_events=True,
)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
@@ -2007,10 +2059,9 @@ class _AbstractInterval(HasExpressionLookup, TypeEngine[dt.timedelta]):
class Interval(Emulated, _AbstractInterval, TypeDecorator[dt.timedelta]):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL and Oracle, the native ``INTERVAL`` type is used; for others,
the value is stored as a date which is relative to the "epoch"
(Jan. 1, 1970).
The Interval type deals with ``datetime.timedelta`` objects. In PostgreSQL
and Oracle Database, the native ``INTERVAL`` type is used; for others, the
value is stored as a date which is relative to the "epoch" (Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
@@ -2035,16 +2086,16 @@ class Interval(Emulated, _AbstractInterval, TypeDecorator[dt.timedelta]):
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently PostgreSQL, Oracle).
supported (currently PostgreSQL, Oracle Database).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and PostgreSQL
i.e. Oracle Database and PostgreSQL
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
support a "day precision" parameter, i.e. Oracle Database.
"""
super().__init__()
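A brief, illustrative sketch of declaring and round-tripping the type described above; the table name is not from the original, and SQLite is used only as a convenient in-memory backend where the value is emulated::

    import datetime

    from sqlalchemy import (
        Column, Integer, Interval, MetaData, Table, create_engine, select
    )

    metadata = MetaData()
    task = Table(
        "task",
        metadata,
        Column("id", Integer, primary_key=True),
        # native INTERVAL on PostgreSQL / Oracle Database, emulated elsewhere
        Column("duration", Interval),
    )

    engine = create_engine("sqlite://")
    metadata.create_all(engine)

    with engine.begin() as conn:
        conn.execute(
            task.insert(),
            {"id": 1, "duration": datetime.timedelta(hours=2, minutes=30)},
        )
        value = conn.execute(select(task.c.duration)).scalar_one()
        assert value == datetime.timedelta(hours=2, minutes=30)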
@@ -2154,15 +2205,16 @@ class JSON(Indexable, TypeEngine[Any]):
The :class:`_types.JSON` type stores arbitrary JSON format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSON)
data_table = Table(
"data_table",
metadata,
Column("id", Integer, primary_key=True),
Column("data", JSON),
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
{"data": {"key1": "value1", "key2": "value2"}}
data_table.insert(), {"data": {"key1": "value1", "key2": "value2"}}
)
**JSON-Specific Expression Operators**
@@ -2172,7 +2224,7 @@ class JSON(Indexable, TypeEngine[Any]):
* Keyed index operations::
data_table.c.data['some key']
data_table.c.data["some key"]
* Integer index operations::
@@ -2180,7 +2232,7 @@ class JSON(Indexable, TypeEngine[Any]):
* Path index operations::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
data_table.c.data[("key_1", "key_2", 5, ..., "key_n")]
* Data casters for specific JSON element types, subsequent to an index
or path operation being invoked::
@@ -2235,13 +2287,12 @@ class JSON(Indexable, TypeEngine[Any]):
from sqlalchemy import cast, type_coerce
from sqlalchemy import String, JSON
cast(
data_table.c.data['some_key'], String
) == type_coerce(55, JSON)
cast(data_table.c.data["some_key"], String) == type_coerce(55, JSON)
The above case now works directly as::
data_table.c.data['some_key'].as_integer() == 5
data_table.c.data["some_key"].as_integer() == 5
For details on the previous comparison approach within the 1.3.x
series, see the documentation for SQLAlchemy 1.2 or the included HTML
@@ -2272,6 +2323,7 @@ class JSON(Indexable, TypeEngine[Any]):
should be SQL NULL as opposed to JSON ``"null"``::
from sqlalchemy import null
conn.execute(table.insert(), {"json_value": null()})
To insert or select against a value that is JSON ``"null"``, use the
@@ -2304,7 +2356,8 @@ class JSON(Indexable, TypeEngine[Any]):
engine = create_engine(
"sqlite://",
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False))
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False),
)
.. versionchanged:: 1.3.7
@@ -2322,7 +2375,7 @@ class JSON(Indexable, TypeEngine[Any]):
:class:`sqlalchemy.dialects.sqlite.JSON`
"""
""" # noqa: E501
__visit_name__ = "JSON"
@@ -2356,8 +2409,7 @@ class JSON(Indexable, TypeEngine[Any]):
transparent method is to use :func:`_expression.text`::
Table(
'my_table', metadata,
Column('json_data', JSON, default=text("'null'"))
"my_table", metadata, Column("json_data", JSON, default=text("'null'"))
)
While it is possible to use :attr:`_types.JSON.NULL` in this context, the
@@ -2369,7 +2421,7 @@ class JSON(Indexable, TypeEngine[Any]):
generated defaults.
"""
""" # noqa: E501
def __init__(self, none_as_null: bool = False):
"""Construct a :class:`_types.JSON` type.
@@ -2382,6 +2434,7 @@ class JSON(Indexable, TypeEngine[Any]):
as SQL NULL::
from sqlalchemy import null
conn.execute(table.insert(), {"data": null()})
.. note::
@@ -2516,95 +2569,101 @@ class JSON(Indexable, TypeEngine[Any]):
return operator, index, self.type
def as_boolean(self):
"""Cast an indexed value as boolean.
"""Consider an indexed value as boolean.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_boolean()
).where(
mytable.c.json_column['some_data'].as_boolean() == True
stmt = select(mytable.c.json_column["some_data"].as_boolean()).where(
mytable.c.json_column["some_data"].as_boolean() == True
)
.. versionadded:: 1.3.11
"""
""" # noqa: E501
return self._binary_w_type(Boolean(), "as_boolean")
def as_string(self):
"""Cast an indexed value as string.
"""Consider an indexed value as string.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_string()
).where(
mytable.c.json_column['some_data'].as_string() ==
'some string'
stmt = select(mytable.c.json_column["some_data"].as_string()).where(
mytable.c.json_column["some_data"].as_string() == "some string"
)
.. versionadded:: 1.3.11
"""
""" # noqa: E501
return self._binary_w_type(Unicode(), "as_string")
def as_integer(self):
"""Cast an indexed value as integer.
"""Consider an indexed value as integer.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_integer()
).where(
mytable.c.json_column['some_data'].as_integer() == 5
stmt = select(mytable.c.json_column["some_data"].as_integer()).where(
mytable.c.json_column["some_data"].as_integer() == 5
)
.. versionadded:: 1.3.11
"""
""" # noqa: E501
return self._binary_w_type(Integer(), "as_integer")
def as_float(self):
"""Cast an indexed value as float.
"""Consider an indexed value as float.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_float()
).where(
mytable.c.json_column['some_data'].as_float() == 29.75
stmt = select(mytable.c.json_column["some_data"].as_float()).where(
mytable.c.json_column["some_data"].as_float() == 29.75
)
.. versionadded:: 1.3.11
"""
""" # noqa: E501
return self._binary_w_type(Float(), "as_float")
def as_numeric(self, precision, scale, asdecimal=True):
"""Cast an indexed value as numeric/decimal.
"""Consider an indexed value as numeric/decimal.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_numeric(10, 6)
).where(
mytable.c.
json_column['some_data'].as_numeric(10, 6) == 29.75
stmt = select(mytable.c.json_column["some_data"].as_numeric(10, 6)).where(
mytable.c.json_column["some_data"].as_numeric(10, 6) == 29.75
)
.. versionadded:: 1.4.0b2
"""
""" # noqa: E501
return self._binary_w_type(
Numeric(precision, scale, asdecimal=asdecimal), "as_numeric"
)
def as_json(self):
"""Cast an indexed value as JSON.
"""Consider an indexed value as JSON.
This is similar to using :class:`_sql.type_coerce`, and will
usually not apply a ``CAST()``.
e.g.::
stmt = select(mytable.c.json_column['some_data'].as_json())
stmt = select(mytable.c.json_column["some_data"].as_json())
This is typically the default behavior of indexed elements in any
case.
@@ -2722,26 +2781,21 @@ class ARRAY(
An :class:`_types.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer))
)
mytable = Table("mytable", metadata, Column("data", ARRAY(Integer)))
The above type represents an N-dimensional array,
meaning a supporting backend such as PostgreSQL will interpret values
with any number of dimensions automatically. To produce an INSERT
construct that passes in a 1-dimensional array of integers::
connection.execute(
mytable.insert(),
{"data": [1,2,3]}
)
connection.execute(mytable.insert(), {"data": [1, 2, 3]})
The :class:`_types.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, dimensions=2))
)
mytable = Table(
"mytable", metadata, Column("data", ARRAY(Integer, dimensions=2))
)
Sending a number of dimensions is optional, but recommended if the
datatype is to represent arrays of more than one dimension. This number
@@ -2766,22 +2820,21 @@ class ARRAY(
dimension parameter will generally assume single-dimensional behaviors.
SQL expressions of type :class:`_types.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Arrays default to 1-based indexing.
The operator produces binary expression
"slice" behavior. The ``[]`` operator produces expression
constructs which will produce the appropriate SQL, both for
SELECT statements::
select(mytable.c.data[5], mytable.c.data[2:7])
as well as UPDATE statements when the :meth:`_expression.Update.values`
method
is used::
method is used::
mytable.update().values({
mytable.c.data[5]: 7,
mytable.c.data[2:7]: [1, 2, 3]
})
mytable.update().values(
{mytable.c.data[5]: 7, mytable.c.data[2:7]: [1, 2, 3]}
)
Indexed access is one-based by default;
for zero-based index conversion, set :paramref:`_types.ARRAY.zero_indexes`.
The :class:`_types.ARRAY` type also provides for the operators
:meth:`.types.ARRAY.Comparator.any` and
@@ -2800,6 +2853,7 @@ class ARRAY(
from sqlalchemy import ARRAY
from sqlalchemy.ext.mutable import MutableList
class SomeOrmClass(Base):
# ...
@@ -2827,6 +2881,56 @@ class ARRAY(
"""If True, Python zero-based indexes should be interpreted as one-based
on the SQL expression side."""
def __init__(
self,
item_type: _TypeEngineArgument[Any],
as_tuple: bool = False,
dimensions: Optional[int] = None,
zero_indexes: bool = False,
):
"""Construct an :class:`_types.ARRAY`.
E.g.::
Column("myarray", ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
not generally needed as a Python list corresponds well
to a SQL array.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This impacts how the array is declared
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
:class:`_types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
class Comparator(
Indexable.Comparator[Sequence[Any]],
Concatenable.Comparator[Sequence[Any]],
@@ -2897,9 +3001,7 @@ class ARRAY(
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.any(7, operator=operators.lt)
)
select(table.c.data).where(table.c.data.any(7, operator=operators.lt))
)
:param other: expression to be compared
@@ -2913,7 +3015,7 @@ class ARRAY(
:meth:`.types.ARRAY.Comparator.all`
"""
""" # noqa: E501
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
@@ -2946,9 +3048,7 @@ class ARRAY(
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.all(7, operator=operators.lt)
)
select(table.c.data).where(table.c.data.all(7, operator=operators.lt))
)
:param other: expression to be compared
@@ -2962,7 +3062,7 @@ class ARRAY(
:meth:`.types.ARRAY.Comparator.any`
"""
""" # noqa: E501
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
@@ -2981,56 +3081,6 @@ class ARRAY(
comparator_factory = Comparator
def __init__(
self,
item_type: _TypeEngineArgument[Any],
as_tuple: bool = False,
dimensions: Optional[int] = None,
zero_indexes: bool = False,
):
"""Construct an :class:`_types.ARRAY`.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
not generally needed as a Python list corresponds well
to a SQL array.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This impacts how the array is declared
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
:class:`_types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@@ -3042,13 +3092,13 @@ class ARRAY(
def compare_values(self, x, y):
return x == y
def _set_parent(self, column, outer=False, **kw):
def _set_parent(self, parent, outer=False, **kw):
"""Support SchemaEventTarget"""
if not outer and isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column, **kw)
self.item_type._set_parent(parent, **kw)
def _set_parent_with_dispatch(self, parent):
def _set_parent_with_dispatch(self, parent, **kw):
"""Support SchemaEventTarget"""
super()._set_parent_with_dispatch(parent, outer=True)
@@ -3282,8 +3332,8 @@ class BIGINT(BigInteger):
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type.
:class:`_types.TIMESTAMP` datatypes have support for timezone
storage on some backends, such as PostgreSQL and Oracle. Use the
:class:`_types.TIMESTAMP` datatypes have support for timezone storage on
some backends, such as PostgreSQL and Oracle Database. Use the
:paramref:`~types.TIMESTAMP.timezone` argument in order to enable
"TIMESTAMP WITH TIMEZONE" for these backends.
@@ -3335,7 +3385,7 @@ class TEXT(Text):
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
This type is found in Oracle Database and Informix.
"""
__visit_name__ = "CLOB"
@@ -3501,14 +3551,13 @@ class Uuid(Emulated, TypeEngine[_UUID_RETURN]):
t = Table(
"t",
metadata_obj,
Column('uuid_data', Uuid, primary_key=True),
Column("other_data", String)
Column("uuid_data", Uuid, primary_key=True),
Column("other_data", String),
)
with engine.begin() as conn:
conn.execute(
t.insert(),
{"uuid_data": uuid.uuid4(), "other_data", "some data"}
t.insert(), {"uuid_data": uuid.uuid4(), "other_data": "some data"}
)
To have the :class:`_sqltypes.Uuid` datatype work with string-based
@@ -3522,7 +3571,7 @@ class Uuid(Emulated, TypeEngine[_UUID_RETURN]):
:class:`_sqltypes.UUID` - represents exactly the ``UUID`` datatype
without any backend-agnostic behaviors.
"""
""" # noqa: E501
__visit_name__ = "uuid"
@@ -3661,31 +3710,6 @@ class Uuid(Emulated, TypeEngine[_UUID_RETURN]):
return process
def _sentinel_value_resolver(self, dialect):
"""For the "insertmanyvalues" feature only, return a callable that
will receive the uuid object or string
as it is normally passed to the DB in the parameter set, after
bind_processor() is called. Convert this value to match
what it would be as coming back from a RETURNING or similar
statement for the given backend.
Individual dialects and drivers may need their own implementations
based on how their UUID types send data and how the drivers behave
(e.g. pyodbc)
"""
if not self.native_uuid or not dialect.supports_native_uuid:
# dealing entirely with strings going in and out of
# CHAR(32)
return None
elif self.as_uuid:
# we sent UUID objects and we are getting UUID objects back
return None
else:
# we sent strings and we are getting UUID objects back
return _python_UUID
class UUID(Uuid[_UUID_RETURN], type_api.NativeForEmulated):
"""Represent the SQL UUID type.
View File
@@ -1,5 +1,5 @@
# sql/traversals.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -562,6 +562,8 @@ class TraversalComparatorStrategy(HasTraversalDispatch, util.MemoizedSlots):
return False
else:
continue
elif right_child is None:
return False
comparison = dispatch(
left_attrname, left, left_child, right, right_child, **kw
View File
@@ -1,5 +1,5 @@
# sql/type_api.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -183,6 +183,9 @@ class TypeEngine(Visitable, Generic[_T]):
self.expr = expr
self.type = expr.type
def __reduce__(self) -> Any:
return self.__class__, (self.expr,)
@util.preload_module("sqlalchemy.sql.default_comparator")
def operate(
self, op: OperatorType, *other: Any, **kwargs: Any
@@ -308,11 +311,13 @@ class TypeEngine(Visitable, Generic[_T]):
E.g.::
Table(
'some_table', metadata,
"some_table",
metadata,
Column(
String(50).evaluates_none(),
nullable=True,
server_default='no value')
server_default="no value",
),
)
The ORM uses this flag to indicate that a positive value of ``None``
@@ -574,18 +579,6 @@ class TypeEngine(Visitable, Generic[_T]):
"""
return None
def _sentinel_value_resolver(
self, dialect: Dialect
) -> Optional[_SentinelProcessorType[_T]]:
"""Return an optional callable that will match parameter values
(post-bind processing) to result values
(pre-result-processing), for use in the "sentinel" feature.
.. versionadded:: 2.0.10
"""
return None
@util.memoized_property
def _has_bind_expression(self) -> bool:
"""memoized boolean, check if bind_expression is implemented.
@@ -650,7 +643,7 @@ class TypeEngine(Visitable, Generic[_T]):
string_type = String()
string_type = string_type.with_variant(
mysql.VARCHAR(collation='foo'), 'mysql', 'mariadb'
mysql.VARCHAR(collation="foo"), "mysql", "mariadb"
)
The variant mapping indicates that when this type is
@@ -767,6 +760,10 @@ class TypeEngine(Visitable, Generic[_T]):
return self
def _with_collation(self, collation: str) -> Self:
"""set up error handling for the collate expression"""
raise NotImplementedError("this datatype does not support collation")
@util.ro_memoized_property
def _type_affinity(self) -> Optional[Type[TypeEngine[_T]]]:
"""Return a rudimental 'affinity' value expressing the general class
@@ -933,18 +930,6 @@ class TypeEngine(Visitable, Generic[_T]):
d["result"][coltype] = rp
return rp
def _cached_sentinel_value_processor(
self, dialect: Dialect
) -> Optional[_SentinelProcessorType[_T]]:
try:
return dialect._type_memos[self]["sentinel"]
except KeyError:
pass
d = self._dialect_info(dialect)
d["sentinel"] = bp = d["impl"]._sentinel_value_resolver(dialect)
return bp
def _cached_custom_processor(
self, dialect: Dialect, key: str, fn: Callable[[TypeEngine[_T]], _O]
) -> _O:
@@ -1029,9 +1014,11 @@ class TypeEngine(Visitable, Generic[_T]):
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(
typ = util.constructor_copy(
self, cast(Type[TypeEngine[Any]], cls), **kw
)
typ._variant_mapping = self._variant_mapping
return typ
def coerce_compared_value(
self, op: Optional[OperatorType], value: Any
@@ -1143,7 +1130,7 @@ class ExternalType(TypeEngineMixin):
"""
cache_ok: Optional[bool] = None
"""Indicate if statements using this :class:`.ExternalType` are "safe to
'''Indicate if statements using this :class:`.ExternalType` are "safe to
cache".
The default value ``None`` will emit a warning and then not allow caching
@@ -1184,12 +1171,12 @@ class ExternalType(TypeEngineMixin):
series of tuples. Given a previously un-cacheable type as::
class LookupType(UserDefinedType):
'''a custom type that accepts a dictionary as a parameter.
"""a custom type that accepts a dictionary as a parameter.
this is the non-cacheable version, as "self.lookup" is not
hashable.
'''
"""
def __init__(self, lookup):
self.lookup = lookup
@@ -1197,8 +1184,7 @@ class ExternalType(TypeEngineMixin):
def get_col_spec(self, **kw):
return "VARCHAR(255)"
def bind_processor(self, dialect):
# ... works with "self.lookup" ...
def bind_processor(self, dialect): ... # works with "self.lookup" ...
Where "lookup" is a dictionary. The type will not be able to generate
a cache key::
@@ -1234,7 +1220,7 @@ class ExternalType(TypeEngineMixin):
to the ".lookup" attribute::
class LookupType(UserDefinedType):
'''a custom type that accepts a dictionary as a parameter.
"""a custom type that accepts a dictionary as a parameter.
The dictionary is stored both as itself in a private variable,
and published in a public variable as a sorted tuple of tuples,
@@ -1242,7 +1228,7 @@ class ExternalType(TypeEngineMixin):
two equivalent dictionaries. Note it assumes the keys and
values of the dictionary are themselves hashable.
'''
"""
cache_ok = True
@@ -1251,15 +1237,12 @@ class ExternalType(TypeEngineMixin):
# assume keys/values of "lookup" are hashable; otherwise
# they would also need to be converted in some way here
self.lookup = tuple(
(key, lookup[key]) for key in sorted(lookup)
)
self.lookup = tuple((key, lookup[key]) for key in sorted(lookup))
def get_col_spec(self, **kw):
return "VARCHAR(255)"
def bind_processor(self, dialect):
# ... works with "self._lookup" ...
def bind_processor(self, dialect): ... # works with "self._lookup" ...
Where above, the cache key for ``LookupType({"a": 10, "b": 20})`` will be::
@@ -1277,7 +1260,7 @@ class ExternalType(TypeEngineMixin):
:ref:`sql_caching`
""" # noqa: E501
''' # noqa: E501
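    Putting the cacheable pattern together, a runnable sketch (the lookup
    contents and the bind-processing behavior are arbitrary illustrations)::

        from sqlalchemy.types import UserDefinedType


        class LookupType(UserDefinedType):
            """a custom type that accepts a dictionary as a parameter and
            stores it in hashable form so that the type is safely cacheable"""

            cache_ok = True

            def __init__(self, lookup):
                self._lookup = dict(lookup)
                # published as a sorted tuple of tuples; this is what the
                # cache key will include
                self.lookup = tuple(
                    (key, self._lookup[key]) for key in sorted(self._lookup)
                )

            def get_col_spec(self, **kw):
                return "VARCHAR(255)"

            def bind_processor(self, dialect):
                lookup = self._lookup

                def process(value):
                    # translate values on the way in using the lookup
                    return lookup.get(value, value)

                return process


        # the (private) cache key now reflects the lookup contents
        print(LookupType({"a": 10, "b": 20})._static_cache_key)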
@util.non_memoized_property
def _static_cache_key(
@@ -1319,10 +1302,11 @@ class UserDefinedType(
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
cache_ok = True
def __init__(self, precision = 8):
def __init__(self, precision=8):
self.precision = precision
def get_col_spec(self, **kw):
@@ -1331,19 +1315,23 @@ class UserDefinedType(
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', metadata_obj,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
table = Table(
"foo",
metadata_obj,
Column("id", Integer, primary_key=True),
Column("data", MyType(16)),
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
@@ -1508,7 +1496,7 @@ class NativeForEmulated(TypeEngineMixin):
class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
"""Allows the creation of types which add additional functionality
'''Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
@@ -1519,10 +1507,11 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
"""Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
"""
impl = types.Unicode
@@ -1575,6 +1564,8 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
class MyEpochType(types.TypeDecorator):
impl = types.Integer
cache_ok = True
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
@@ -1612,6 +1603,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
from sqlalchemy import JSON
from sqlalchemy import TypeDecorator
class MyJsonType(TypeDecorator):
impl = JSON
@@ -1632,6 +1624,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
from sqlalchemy import ARRAY
from sqlalchemy import TypeDecorator
class MyArrayType(TypeDecorator):
impl = ARRAY
@@ -1640,8 +1633,7 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
"""
'''
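    A consolidated, runnable version of the epoch example referenced above
    (a sketch that mirrors the documented recipe)::

        import datetime

        from sqlalchemy import types


        class MyEpochType(types.TypeDecorator):
            impl = types.Integer

            cache_ok = True

            epoch = datetime.date(1970, 1, 1)

            def process_bind_param(self, value, dialect):
                # store dates as integer days since the epoch
                return (value - self.epoch).days

            def process_result_value(self, value, dialect):
                # convert integer days back into a date on the way out
                return self.epoch + datetime.timedelta(days=value)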
__visit_name__ = "type_decorator"
@@ -1737,20 +1729,48 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
kwargs["_python_is_types"] = self.expr.type.coerce_to_is_types
return super().reverse_operate(op, other, **kwargs)
@staticmethod
def _reduce_td_comparator(
impl: TypeEngine[Any], expr: ColumnElement[_T]
) -> Any:
return TypeDecorator._create_td_comparator_type(impl)(expr)
@staticmethod
def _create_td_comparator_type(
impl: TypeEngine[Any],
) -> _ComparatorFactory[Any]:
def __reduce__(self: TypeDecorator.Comparator[Any]) -> Any:
return (TypeDecorator._reduce_td_comparator, (impl, self.expr))
return type(
"TDComparator",
(TypeDecorator.Comparator, impl.comparator_factory), # type: ignore # noqa: E501
{"__reduce__": __reduce__},
)
@property
def comparator_factory( # type: ignore # mypy properties bug
self,
) -> _ComparatorFactory[Any]:
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__: # type: ignore # noqa: E501
return self.impl.comparator_factory
return self.impl_instance.comparator_factory
else:
# reconcile the Comparator class on the impl with that
# of TypeDecorator
return type(
"TDComparator",
(TypeDecorator.Comparator, self.impl.comparator_factory), # type: ignore # noqa: E501
{},
# of TypeDecorator.
# the use of multiple staticmethods is to support repeated
# pickling of the Comparator itself
return TypeDecorator._create_td_comparator_type(self.impl_instance)
def _copy_with_check(self) -> Self:
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError(
"Type object %s does not properly "
"implement the copy() method, it must "
"return an object of type %s" % (self, self.__class__)
)
return tt
def _gen_dialect_impl(self, dialect: Dialect) -> TypeEngine[_T]:
if dialect.name in self._variant_mapping:
@@ -1766,16 +1786,17 @@ class TypeDecorator(SchemaEventTarget, ExternalType, TypeEngine[_T]):
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError(
"Type object %s does not properly "
"implement the copy() method, it must "
"return an object of type %s" % (self, self.__class__)
)
tt = self._copy_with_check()
tt.impl = tt.impl_instance = typedesc
return tt
def _with_collation(self, collation: str) -> Self:
tt = self._copy_with_check()
tt.impl = tt.impl_instance = self.impl_instance._with_collation(
collation
)
return tt
@util.ro_non_memoized_property
def _type_affinity(self) -> Optional[Type[TypeEngine[Any]]]:
return self.impl_instance._type_affinity
@@ -2299,11 +2320,10 @@ def to_instance(
def adapt_type(
typeobj: TypeEngine[Any],
typeobj: _TypeEngineArgument[Any],
colspecs: Mapping[Type[Any], Type[TypeEngine[Any]]],
) -> TypeEngine[Any]:
if isinstance(typeobj, type):
typeobj = typeobj()
typeobj = to_instance(typeobj)
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]

View File

@@ -1,5 +1,5 @@
# sql/util.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -106,7 +106,7 @@ def join_condition(
would produce an expression along the lines of::
tablea.c.id==tableb.c.tablea_id
tablea.c.id == tableb.c.tablea_id
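    A runnable sketch of that derivation; ``join_condition`` is a private
    helper in ``sqlalchemy.sql.util``, and the table layout is assumed::

        from sqlalchemy import Column, ForeignKey, Integer, MetaData, Table
        from sqlalchemy.sql.util import join_condition

        metadata = MetaData()
        tablea = Table("tablea", metadata, Column("id", Integer, primary_key=True))
        tableb = Table(
            "tableb",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("tablea_id", Integer, ForeignKey("tablea.id")),
        )

        print(join_condition(tablea, tableb))  # tablea.id = tableb.tablea_id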
The join is determined based on the foreign key relationships
between the two selectables. If there are multiple ways
@@ -268,7 +268,7 @@ def visit_binary_product(
The function is of the form::
def my_fn(binary, left, right)
def my_fn(binary, left, right): ...
For each binary expression located which has a
comparison operator, the product of "left" and
@@ -277,12 +277,11 @@ def visit_binary_product(
Hence an expression like::
and_(
(a + b) == q + func.sum(e + f),
j == r
)
and_((a + b) == q + func.sum(e + f), j == r)
would have the traversal::
would have the traversal:
.. sourcecode:: text
a <eq> q
a <eq> e
@@ -528,9 +527,7 @@ def bind_values(clause):
E.g.::
>>> expr = and_(
... table.c.foo==5, table.c.foo==7
... )
>>> expr = and_(table.c.foo == 5, table.c.foo == 7)
>>> bind_values(expr)
[5, 7]
"""
@@ -1041,20 +1038,24 @@ class ClauseAdapter(visitors.ReplacingExternalTraversal):
E.g.::
table1 = Table('sometable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table2 = Table('someothertable', metadata,
Column('col1', Integer),
Column('col2', Integer)
)
table1 = Table(
"sometable",
metadata,
Column("col1", Integer),
Column("col2", Integer),
)
table2 = Table(
"someothertable",
metadata,
Column("col1", Integer),
Column("col2", Integer),
)
condition = table1.c.col1 == table2.c.col1
make an alias of table1::
s = table1.alias('foo')
s = table1.alias("foo")
calling ``ClauseAdapter(s).traverse(condition)`` converts
condition to read::
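        s.c.col1 == table2.c.col1

    A runnable sketch of that adaptation; the rendered SQL in the trailing
    comment is what would be expected, not captured output::

        from sqlalchemy import Column, Integer, MetaData, Table
        from sqlalchemy.sql.util import ClauseAdapter

        metadata = MetaData()
        table1 = Table(
            "sometable",
            metadata,
            Column("col1", Integer),
            Column("col2", Integer),
        )
        table2 = Table(
            "someothertable",
            metadata,
            Column("col1", Integer),
            Column("col2", Integer),
        )

        condition = table1.c.col1 == table2.c.col1

        s = table1.alias("foo")
        adapted = ClauseAdapter(s).traverse(condition)
        print(adapted)  # foo.col1 = someothertable.col1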

View File

@@ -1,5 +1,5 @@
# sql/visitors.py
# Copyright (C) 2005-2024 the SQLAlchemy authors and contributors
# Copyright (C) 2005-2025 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
@@ -934,11 +934,13 @@ def traverse(
from sqlalchemy.sql import visitors
stmt = select(some_table).where(some_table.c.foo == 'bar')
stmt = select(some_table).where(some_table.c.foo == "bar")
def visit_bindparam(bind_param):
print("found bound value: %s" % bind_param.value)
visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})
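    A self-contained version of this example, with an assumed table
    definition so it can be run as-is::

        from sqlalchemy import Column, Integer, MetaData, String, Table, select
        from sqlalchemy.sql import visitors

        metadata = MetaData()
        some_table = Table(
            "some_table",
            metadata,
            Column("id", Integer, primary_key=True),
            Column("foo", String),
        )

        stmt = select(some_table).where(some_table.c.foo == "bar")


        def visit_bindparam(bind_param):
            print("found bound value: %s" % bind_param.value)


        visitors.traverse(stmt, {}, {"bindparam": visit_bindparam})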
The iteration of objects uses the :func:`.visitors.iterate` function,