Mirror of https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git
Synced 2025-08-14 00:25:46 +02:00

Initial commit

@@ -1,53 +0,0 @@
/*
  # Create support requests table

  1. New Tables
    - `support_requests`
      - `id` (uuid, primary key)
      - `user_id` (text)
      - `department` (text)
      - `request_type` (text) - for storing request type
      - `priority` (text)
      - `description` (text)
      - `status` (text)
      - `created_at` (timestamptz)

  2. Security
    - Enable RLS
    - Add policies for users and admins
*/

-- Create support requests table
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id text NOT NULL,
  department text NOT NULL,
  request_type text NOT NULL CHECK (request_type IN ('hardware', 'software', 'network', 'access', 'other')),
  priority text NOT NULL CHECK (priority IN ('low', 'medium', 'high', 'critical')),
  description text,
  status text DEFAULT 'new' CHECK (status IN ('new', 'in_progress', 'resolved', 'closed')),
  created_at timestamptz DEFAULT now()
);

-- Enable Row Level Security
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (auth.uid()::text = user_id);

CREATE POLICY "Users can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = user_id OR auth.role() = 'admin');

CREATE POLICY "Admins can update requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (auth.role() = 'admin')
  WITH CHECK (auth.role() = 'admin');
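
With these policies active, an authenticated session can only insert rows whose user_id matches its own uid, while admins can read everything. A minimal sketch of the intended usage (the literal values are hypothetical):

-- Passes WITH CHECK (auth.uid()::text = user_id) because user_id
-- is derived from the caller's own uid.
INSERT INTO support_requests (user_id, department, request_type, priority, description)
VALUES (auth.uid()::text, 'it', 'hardware', 'high', 'Monitor flickers');

-- Returns only the caller's rows, unless auth.role() = 'admin'.
SELECT id, status, created_at FROM support_requests;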

@@ -1,80 +0,0 @@
/*
  # Initial schema setup

  1. New Tables
    - `users`
      - `id` (uuid, primary key)
      - `first_name` (text)
      - `last_name` (text)
      - `department` (text)
      - `password` (text)
      - `created_at` (timestamp)

    - `requests`
      - `id` (uuid, primary key)
      - `user_id` (uuid, foreign key)
      - `first_name` (text)
      - `last_name` (text)
      - `department` (text)
      - `urgency` (text)
      - `description` (text)
      - `status` (text)
      - `created_at` (timestamp)

  2. Security
    - Enable RLS on both tables
    - Add policies for authenticated users
*/

-- Create users table
CREATE TABLE IF NOT EXISTS users (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  first_name text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  password text NOT NULL,
  created_at timestamptz DEFAULT now()
);

-- Create requests table (the inline REFERENCES clause already declares
-- the foreign key, so no separate table-level FOREIGN KEY is needed)
CREATE TABLE IF NOT EXISTS requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id uuid NOT NULL REFERENCES users(id),
  first_name text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  urgency text NOT NULL,
  description text,
  status text NOT NULL DEFAULT 'new',
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE users ENABLE ROW LEVEL SECURITY;
ALTER TABLE requests ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can read own data"
  ON users
  FOR SELECT
  TO authenticated
  USING (auth.uid() = id);

CREATE POLICY "Users can read all requests"
  ON requests
  FOR SELECT
  TO authenticated
  USING (true);

CREATE POLICY "Users can create requests"
  ON requests
  FOR INSERT
  TO authenticated
  WITH CHECK (auth.uid() = user_id);

CREATE POLICY "Users can update own requests"
  ON requests
  FOR UPDATE
  TO authenticated
  USING (auth.uid() = user_id);
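
Note the asymmetry these policies create: any authenticated user can read every row in requests (USING (true)), while inserts must stamp the caller's own uid. A short sketch (the names are made-up sample data):

-- Visible to any authenticated session:
SELECT id, urgency, status FROM requests;

-- Satisfies WITH CHECK (auth.uid() = user_id) by construction:
INSERT INTO requests (user_id, first_name, last_name, department, urgency)
VALUES (auth.uid(), 'Anna', 'Smith', 'it', 'high');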

@@ -1,56 +0,0 @@
/*
  # Support System Database Schema

  1. New Tables
    - `support_requests`
      - `id` (uuid, primary key)
      - `user_id` (text)
      - `department` (text)
      - `request_type` (text)
      - `priority` (text)
      - `description` (text)
      - `status` (text)
      - `created_at` (timestamp)

  2. Security
    - Enable RLS
    - Add policies for users and admins
*/

-- Drop existing table if it exists
DROP TABLE IF EXISTS support_requests CASCADE;

-- Create support requests table
CREATE TABLE support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id text NOT NULL,
  department text NOT NULL,
  request_type text NOT NULL CHECK (request_type IN ('hardware', 'software', 'network', 'access', 'other')),
  priority text NOT NULL CHECK (priority IN ('low', 'medium', 'high', 'critical')),
  description text,
  status text DEFAULT 'new' CHECK (status IN ('new', 'in_progress', 'resolved', 'closed')),
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (auth.uid()::text = user_id);

CREATE POLICY "Users can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = user_id OR auth.role() = 'admin');

CREATE POLICY "Admins can update requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (auth.role() = 'admin')
  WITH CHECK (auth.role() = 'admin');

@@ -1,66 +0,0 @@
/*
  # Support Requests Table

  1. New Table
    - `support_requests`
      - `id` (uuid, primary key) - Unique identifier
      - `user_id` (text) - ID of the user who created the request
      - `department` (text) - Department the request is from
      - `request_type` (text) - Type of request (hardware/software/etc)
      - `priority` (text) - Request priority level
      - `description` (text) - Detailed description of the request
      - `status` (text) - Current status of the request
      - `created_at` (timestamp) - When the request was created

  2. Security
    - Enable RLS
    - Add policies for users and admins
*/

-- Create support requests table
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  user_id text NOT NULL,
  department text NOT NULL,
  request_type text NOT NULL CHECK (request_type IN ('hardware', 'software', 'network', 'access', 'other')),
  priority text NOT NULL CHECK (priority IN ('low', 'medium', 'high', 'critical')),
  description text,
  status text DEFAULT 'new' CHECK (status IN ('new', 'in_progress', 'resolved', 'closed')),
  created_at timestamptz DEFAULT now()
);

-- Enable Row Level Security
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create policies
DO $$
BEGIN
  IF EXISTS (
    SELECT 1 FROM pg_policies
    WHERE schemaname = 'public'
    AND tablename = 'support_requests'
  ) THEN
    DROP POLICY IF EXISTS "Users can create their own requests" ON support_requests;
    DROP POLICY IF EXISTS "Users can view their own requests" ON support_requests;
    DROP POLICY IF EXISTS "Admins can update requests" ON support_requests;
  END IF;
END $$;

CREATE POLICY "Users can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (auth.uid()::text = user_id);

CREATE POLICY "Users can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = user_id OR auth.role() = 'admin');

CREATE POLICY "Admins can update requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (auth.role() = 'admin')
  WITH CHECK (auth.role() = 'admin');

@@ -1,53 +0,0 @@
/*
  # Fix Authentication Table Structure

  1. Changes
    - Recreate employees table with correct structure
    - Add proper indexes
    - Insert admin user

  2. Security
    - Enable RLS
    - Add proper policies
*/

-- Recreate the employees table with correct structure
CREATE TABLE IF NOT EXISTS employees (
  username text PRIMARY KEY,
  last_name text NOT NULL,
  department text NOT NULL,
  last_login_timestamp timestamptz,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can read own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = username);

CREATE POLICY "Users can update their own data"
  ON employees
  FOR UPDATE
  TO authenticated
  USING (auth.uid()::text = username)
  WITH CHECK (auth.uid()::text = username);

-- Add indexes
CREATE INDEX IF NOT EXISTS idx_employees_username
  ON employees(username);

CREATE INDEX IF NOT EXISTS idx_employees_last_login
  ON employees(last_login_timestamp);

-- Insert admin user (if not exists)
INSERT INTO employees (username, last_name, department)
VALUES (
  'admin',
  'Administrator',
  'IT'
) ON CONFLICT (username) DO NOTHING;

@@ -1,63 +0,0 @@
/*
  # Employee Management System Schema

  1. Table Structure
    - Creates `employees` table with:
      - `username` (text, primary key)
      - `last_name` (text, not null)
      - `department` (text, not null)
      - `last_login_timestamp` (timestamptz)
      - `created_at` (timestamptz with default)

  2. Security
    - Enables Row Level Security (RLS)
    - Adds policies for:
      - Reading own data
      - Updating own data

  3. Performance
    - Adds indexes on frequently queried columns
*/

-- Drop existing table and dependencies if they exist
DROP TABLE IF EXISTS employees CASCADE;

-- Create employees table
CREATE TABLE employees (
  username text PRIMARY KEY,
  last_name text NOT NULL,
  department text NOT NULL,
  last_login_timestamp timestamptz,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can read own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = username);

CREATE POLICY "Users can update their own data"
  ON employees
  FOR UPDATE
  TO authenticated
  USING (auth.uid()::text = username)
  WITH CHECK (auth.uid()::text = username);

-- Add performance indexes
CREATE INDEX idx_employees_username ON employees(username);
CREATE INDEX idx_employees_last_login ON employees(last_login_timestamp);

-- Insert default admin user
INSERT INTO employees (username, last_name, department)
VALUES (
  'admin',
  'Administrator',
  'IT'
) ON CONFLICT (username) DO UPDATE SET
  last_name = EXCLUDED.last_name,
  department = EXCLUDED.department;

@@ -1,67 +0,0 @@
/*
  # Authentication System Setup

  1. Changes
    - Create employees table with all required fields
    - Add password field with NOT NULL constraint
    - Add timestamp fields for auditing

  2. Security
    - Enable RLS
    - Set up read and update policies
    - Add performance indexes
*/

-- Clean up and recreate employees table
CREATE TABLE IF NOT EXISTS employees (
  username text PRIMARY KEY,
  password text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  last_login_timestamp timestamptz,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Safely handle existing policies
DO $$
BEGIN
  DROP POLICY IF EXISTS "Users can read own data" ON employees;
  DROP POLICY IF EXISTS "Users can update their own data" ON employees;
END $$;

-- Create policies
CREATE POLICY "Users can read own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = username);

CREATE POLICY "Users can update their own data"
  ON employees
  FOR UPDATE
  TO authenticated
  USING (auth.uid()::text = username)
  WITH CHECK (auth.uid()::text = username);

-- Safely handle existing indexes
DO $$
BEGIN
  DROP INDEX IF EXISTS idx_employees_username;
  DROP INDEX IF EXISTS idx_employees_last_login;
END $$;

-- Add indexes
CREATE INDEX idx_employees_username ON employees(username);
CREATE INDEX idx_employees_last_login ON employees(last_login_timestamp);

-- Insert admin user with hashed password for 'admin66'
INSERT INTO employees (username, password, last_name, department)
VALUES (
  'admin',
  '$2a$10$xJ7Yt1UqZKhVkk2mFXgQe.UuB3YH3QQMkj8AfzF8fxMjGlZZYf.Hy',
  'Administrator',
  'IT'
) ON CONFLICT (username) DO NOTHING;
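
Assuming the verify_password helper defined later in this commit, the seeded hash can be checked directly; this should return t only if the stored value really is bcrypt('admin66'):

SELECT verify_password(password, 'admin66') AS password_ok
FROM employees
WHERE username = 'admin';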

@@ -1,46 +0,0 @@
/*
  # Update employees table and policies

  1. Changes
    - Ensures employees table exists with required fields
    - Safely handles existing RLS policy
    - Updates admin user with correct password hash
*/

-- Create table if it doesn't exist
CREATE TABLE IF NOT EXISTS employees (
  username text PRIMARY KEY,
  password text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS (idempotent operation)
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Drop existing policy if it exists
DO $$
BEGIN
  DROP POLICY IF EXISTS "Users can read own data" ON employees;
END $$;

-- Create policy
CREATE POLICY "Users can read own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = username);

-- Update or insert admin user
INSERT INTO employees (username, password, last_name, department)
VALUES (
  'admin',
  '$2a$10$X4kv7j5ZcG39WgkdqhzJXO2/ZZJHNNxt0Bz4Y8DzxfBqL0Q1erqJS', -- hashed 'admin'
  'Administrator',
  'IT'
) ON CONFLICT (username)
DO UPDATE SET
  password = EXCLUDED.password,
  last_name = EXCLUDED.last_name,
  department = EXCLUDED.department;

@@ -1,46 +0,0 @@
/*
  # Update employees table and policies

  1. Changes
    - Ensures employees table exists with required fields
    - Safely handles existing RLS policy
    - Updates admin user with correct password hash for 'admin66'
*/

-- Create table if it doesn't exist
CREATE TABLE IF NOT EXISTS employees (
  username text PRIMARY KEY,
  password text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS (idempotent operation)
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Safely handle existing policy
DO $$
BEGIN
  DROP POLICY IF EXISTS "Users can read own data" ON employees;
END $$;

-- Create policy
CREATE POLICY "Users can read own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (auth.uid()::text = username);

-- Update or insert admin user with hashed password for 'admin66'
INSERT INTO employees (username, password, last_name, department)
VALUES (
  'admin',
  '$2a$10$xJ7Yt1UqZKhVkk2mFXgQe.UuB3YH3QQMkj8AfzF8fxMjGlZZYf.Hy', -- hashed 'admin66'
  'Administrator',
  'IT'
) ON CONFLICT (username)
DO UPDATE SET
  password = EXCLUDED.password,
  last_name = EXCLUDED.last_name,
  department = EXCLUDED.department;

@@ -1,37 +0,0 @@
/*
  # Create Users Table

  1. New Tables
    - `users`
      - Basic user information and password storage
      - Includes RLS policies for security
*/

-- Create extension for password hashing
CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- Create users table
CREATE TABLE IF NOT EXISTS users (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  first_name text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  password_hash text NOT NULL,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE users ENABLE ROW LEVEL SECURITY;

-- Create basic security policies
CREATE POLICY "Users can view own data"
  ON users
  FOR SELECT
  TO authenticated
  USING (id = auth.uid());

CREATE POLICY "Admins can manage all users"
  ON users
  FOR ALL
  TO authenticated
  USING (auth.jwt() ->> 'email' = 'admin@example.com');

@@ -1,166 +0,0 @@
/*
  # Support Request System Tables

  1. New Tables
    - `support_requests`
      - `id` (uuid, primary key)
      - `employee_id` (uuid, references employees)
      - `department` (text)
      - `request_type` (enum)
      - `priority` (enum)
      - `status` (enum)
      - `description` (text)
      - `created_at` (timestamptz)
      - `last_status_change` (timestamptz)

    - `status_history`
      - `id` (uuid, primary key)
      - `request_id` (uuid, references support_requests)
      - `old_status` (enum)
      - `new_status` (enum)
      - `changed_by` (uuid, references employees)
      - `changed_at` (timestamptz)

  2. Security
    - Enable RLS on all tables
    - Add policies for employees and admins
*/

-- Create enum types
DO $$ BEGIN
  CREATE TYPE request_type AS ENUM ('hardware', 'software', 'network', 'access', 'other');
  CREATE TYPE request_priority AS ENUM ('low', 'medium', 'high', 'critical');
  CREATE TYPE request_status AS ENUM ('new', 'in_progress', 'resolved', 'closed');
EXCEPTION
  WHEN duplicate_object THEN null;
END $$;

-- Create support requests table
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  employee_id uuid REFERENCES employees(id) NOT NULL,
  department text NOT NULL,
  request_type request_type NOT NULL,
  priority request_priority NOT NULL,
  status request_status NOT NULL DEFAULT 'new',
  description text,
  created_at timestamptz NOT NULL DEFAULT now(),
  last_status_change timestamptz DEFAULT now()
);

-- Create status history table
CREATE TABLE IF NOT EXISTS status_history (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  request_id uuid REFERENCES support_requests(id) ON DELETE CASCADE,
  old_status request_status,
  new_status request_status NOT NULL,
  changed_by uuid REFERENCES employees(id) NOT NULL,
  changed_at timestamptz NOT NULL DEFAULT now()
);

-- Enable RLS
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;
ALTER TABLE status_history ENABLE ROW LEVEL SECURITY;

-- Create policies for support_requests
DO $$ BEGIN
  IF NOT EXISTS (
    SELECT FROM pg_policies
    WHERE tablename = 'support_requests'
    AND policyname = 'Employees can view their own requests'
  ) THEN
    CREATE POLICY "Employees can view their own requests"
      ON support_requests
      FOR SELECT
      TO authenticated
      USING (employee_id = auth.uid());
  END IF;

  IF NOT EXISTS (
    SELECT FROM pg_policies
    WHERE tablename = 'support_requests'
    AND policyname = 'Employees can create their own requests'
  ) THEN
    CREATE POLICY "Employees can create their own requests"
      ON support_requests
      FOR INSERT
      TO authenticated
      WITH CHECK (employee_id = auth.uid());
  END IF;

  IF NOT EXISTS (
    SELECT FROM pg_policies
    WHERE tablename = 'support_requests'
    AND policyname = 'Admins can view all requests'
  ) THEN
    CREATE POLICY "Admins can view all requests"
      ON support_requests
      FOR ALL
      TO authenticated
      USING (auth.jwt() ->> 'role' = 'admin');
  END IF;
END $$;

-- Create policies for status_history
DO $$ BEGIN
  IF NOT EXISTS (
    SELECT FROM pg_policies
    WHERE tablename = 'status_history'
    AND policyname = 'Employees can view status history of their requests'
  ) THEN
    CREATE POLICY "Employees can view status history of their requests"
      ON status_history
      FOR SELECT
      TO authenticated
      USING (
        EXISTS (
          SELECT 1 FROM support_requests
          WHERE id = status_history.request_id
          AND employee_id = auth.uid()
        )
      );
  END IF;

  IF NOT EXISTS (
    SELECT FROM pg_policies
    WHERE tablename = 'status_history'
    AND policyname = 'Admins can view all status history'
  ) THEN
    CREATE POLICY "Admins can view all status history"
      ON status_history
      FOR ALL
      TO authenticated
      USING (auth.jwt() ->> 'role' = 'admin');
  END IF;
END $$;

-- Create status update trigger
CREATE OR REPLACE FUNCTION update_request_status_history()
RETURNS TRIGGER AS $$
BEGIN
  IF (TG_OP = 'UPDATE' AND OLD.status IS DISTINCT FROM NEW.status) THEN
    INSERT INTO status_history (
      request_id,
      old_status,
      new_status,
      changed_by
    ) VALUES (
      NEW.id,
      OLD.status,
      NEW.status,
      auth.uid()
    );

    NEW.last_status_change = now();
  END IF;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql SECURITY DEFINER;

-- Create trigger
DROP TRIGGER IF EXISTS track_request_status_changes ON support_requests;
CREATE TRIGGER track_request_status_changes
  BEFORE UPDATE ON support_requests
  FOR EACH ROW
  EXECUTE FUNCTION update_request_status_history();
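
Once the trigger is installed, a status change writes its own audit row. A sketch of the effect (the uuid is a hypothetical placeholder):

-- Fires the BEFORE UPDATE trigger: a status_history row is inserted
-- ('new' -> 'in_progress') and last_status_change is refreshed.
UPDATE support_requests
SET status = 'in_progress'
WHERE id = '00000000-0000-0000-0000-000000000001';

SELECT old_status, new_status, changed_at
FROM status_history
WHERE request_id = '00000000-0000-0000-0000-000000000001';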

@@ -1,51 +0,0 @@
/*
  # Add employee details to support requests

  1. Changes
    - Add employee_last_name and employee_department columns to support_requests
    - Add trigger to automatically populate employee details on insert
    - Update existing records with employee details

  2. Security
    - Maintain existing RLS policies
    - No changes to security policies required as these are derived fields
*/

-- Add new columns for employee details
ALTER TABLE support_requests
ADD COLUMN IF NOT EXISTS employee_last_name text,
ADD COLUMN IF NOT EXISTS employee_department text;

-- Create function to populate employee details
CREATE OR REPLACE FUNCTION populate_employee_details()
RETURNS TRIGGER AS $$
BEGIN
  SELECT
    last_name,
    department
  INTO
    NEW.employee_last_name,
    NEW.employee_department
  FROM employees
  WHERE id = NEW.employee_id;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Drop trigger if exists to avoid conflicts
DROP TRIGGER IF EXISTS set_employee_details ON support_requests;

-- Create trigger to automatically populate employee details
CREATE TRIGGER set_employee_details
  BEFORE INSERT ON support_requests
  FOR EACH ROW
  EXECUTE FUNCTION populate_employee_details();

-- Update existing records with employee details
UPDATE support_requests sr
SET
  employee_last_name = e.last_name,
  employee_department = e.department
FROM employees e
WHERE sr.employee_id = e.id;
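
Because the trigger runs BEFORE INSERT, callers never supply the derived columns themselves. A sketch (the uuid is a hypothetical placeholder):

-- employee_last_name and employee_department are omitted; the trigger
-- copies them from the matching employees row before the insert lands.
INSERT INTO support_requests (employee_id, department, request_type, priority, description)
VALUES ('00000000-0000-0000-0000-000000000001', 'it', 'software', 'low', 'License renewal');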

@@ -1,66 +0,0 @@
/*
  # Add create_user function

  1. Changes
    - Add function to create new users in employees table
    - Function handles first name, last name, department and password
    - Returns the created employee record

  2. Details
    - Creates a stored procedure for consistent user creation
    - Validates input parameters
    - Returns the full employee record after creation

  3. Security
    - Function is SECURITY DEFINER to ensure proper access control
    - Input validation to prevent invalid data
*/

-- Create function to handle user creation
CREATE OR REPLACE FUNCTION create_user(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate inputs
  IF p_first_name IS NULL OR p_first_name = '' THEN
    RAISE EXCEPTION 'First name cannot be empty';
  END IF;

  IF p_last_name IS NULL OR p_last_name = '' THEN
    RAISE EXCEPTION 'Last name cannot be empty';
  END IF;

  IF p_department IS NULL OR p_department = '' THEN
    RAISE EXCEPTION 'Department cannot be empty';
  END IF;

  IF p_password IS NULL OR p_password = '' THEN
    RAISE EXCEPTION 'Password cannot be empty';
  END IF;

  -- Insert new employee
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    email -- Generate email from name
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    lower(p_last_name || '@example.com')
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
END;
$$;

@@ -1,38 +0,0 @@
/*
  # Update Employee Table RLS Policies

  1. Changes
    - Drop existing RLS policies
    - Create new policies for admin access
    - Add policy for employee self-access

  2. Security
    - Enable RLS on employees table
    - Admin can manage all employees
    - Employees can view their own data
*/

-- Drop existing policies if they exist
DROP POLICY IF EXISTS "Admins can manage employees" ON employees;
DROP POLICY IF EXISTS "Employees can view own data" ON employees;

-- Enable RLS
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Create admin policy for full access
CREATE POLICY "Admins can manage employees"
  ON employees
  FOR ALL
  TO authenticated
  USING (
    auth.jwt() ->> 'email' = 'admin@example.com'
  );

-- Create policy for employees to view their own data
CREATE POLICY "Employees can view own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (
    id = auth.uid()
  );

@@ -1,68 +0,0 @@
/*
  # Update support requests schema

  1. Changes
    - Change foreign key reference from auth.users to employees table
    - Update RLS policies to use employee_id instead of user_id
    - Add indexes for better query performance

  2. Security
    - Enable RLS
    - Add policies for employees to manage their requests
*/

-- First drop the foreign key constraint
ALTER TABLE support_requests
DROP CONSTRAINT IF EXISTS support_requests_user_id_fkey;

-- Then rename the column
ALTER TABLE support_requests
RENAME COLUMN user_id TO employee_id;

-- Add new foreign key constraint
ALTER TABLE support_requests
ADD CONSTRAINT support_requests_employee_id_fkey
FOREIGN KEY (employee_id) REFERENCES employees(id);

-- Create index for better performance
CREATE INDEX IF NOT EXISTS idx_support_requests_employee_id
ON support_requests(employee_id);

-- Update RLS policies
DROP POLICY IF EXISTS "Users can create requests" ON support_requests;
DROP POLICY IF EXISTS "Users can view their own requests" ON support_requests;
DROP POLICY IF EXISTS "Users can update their own requests" ON support_requests;
DROP POLICY IF EXISTS "IT department can manage all requests" ON support_requests;

-- Tie each policy directly to the caller's uid; the original predicates
-- only checked that the referenced employee row existed, which would have
-- let any authenticated user see or update any request.
CREATE POLICY "Employees can create requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (employee_id = auth.uid());

CREATE POLICY "Employees can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (employee_id = auth.uid());

CREATE POLICY "Employees can update their own requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (employee_id = auth.uid());

@@ -1,112 +0,0 @@
/*
  # Create employees and support requests tables

  1. New Tables
    - `employees`
      - `id` (uuid, primary key)
      - `first_name` (text)
      - `last_name` (text)
      - `department` (text)
      - `created_at` (timestamptz)
    - `support_requests`
      - `id` (uuid, primary key)
      - `employee_id` (uuid, foreign key)
      - `department` (text)
      - `request_type` (enum)
      - `priority` (enum)
      - `status` (enum)
      - `description` (text)
      - `created_at` (timestamptz)
      - `last_status_change` (timestamptz)

  2. Security
    - Enable RLS on both tables
    - Add appropriate policies for employees and admins
*/

-- Create enum types if they don't exist
DO $$ BEGIN
  CREATE TYPE request_type AS ENUM ('hardware', 'software', 'network', 'access', 'other');
EXCEPTION
  WHEN duplicate_object THEN null;
END $$;

DO $$ BEGIN
  CREATE TYPE request_priority AS ENUM ('low', 'medium', 'high', 'critical');
EXCEPTION
  WHEN duplicate_object THEN null;
END $$;

DO $$ BEGIN
  CREATE TYPE request_status AS ENUM ('new', 'in_progress', 'resolved', 'closed');
EXCEPTION
  WHEN duplicate_object THEN null;
END $$;

-- Create employees table
CREATE TABLE IF NOT EXISTS employees (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  first_name text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  created_at timestamptz NOT NULL DEFAULT now()
);

-- Enable RLS for employees
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Create policies for employees
CREATE POLICY "Employees can view their own data"
  ON employees
  FOR SELECT
  TO authenticated
  USING (id = auth.uid());

CREATE POLICY "Admins can manage all employees"
  ON employees
  FOR ALL
  TO authenticated
  USING (auth.jwt() ->> 'role' = 'admin');

-- Create support requests table
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  employee_id uuid REFERENCES employees(id) NOT NULL,
  department text NOT NULL,
  request_type request_type NOT NULL,
  priority request_priority NOT NULL,
  status request_status NOT NULL DEFAULT 'new',
  description text,
  created_at timestamptz NOT NULL DEFAULT now(),
  last_status_change timestamptz DEFAULT now()
);

-- Enable RLS for support requests
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create policies for support requests
CREATE POLICY "Employees can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (employee_id = auth.uid());

CREATE POLICY "Employees can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (employee_id = auth.uid());

CREATE POLICY "Admins can manage all requests"
  ON support_requests
  FOR ALL
  TO authenticated
  USING (auth.jwt() ->> 'role' = 'admin');

-- Add initial test data for employees
INSERT INTO employees (first_name, last_name, department)
VALUES
  ('Иван', 'Иванов', 'aho'),
  ('Петр', 'Петров', 'gkh'),
  ('Сергей', 'Сергеев', 'general')
ON CONFLICT (id) DO NOTHING;
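
The admin policies here key off a role claim in the JWT rather than a table lookup. A rough way to exercise that from plain SQL, assuming Supabase's convention that auth.jwt() reads the request.jwt.claims setting (test/debug use only):

-- Pretend the current transaction carries an admin JWT; with
-- role = 'admin', "Admins can manage all requests" grants full access.
SELECT set_config('request.jwt.claims', '{"role": "admin"}', true);
SELECT count(*) FROM support_requests;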

@@ -1,57 +0,0 @@
/*
  # Add employee details to support requests

  1. Changes
    - Add employee_last_name and employee_department columns to support_requests
    - Create trigger to automatically populate employee details on insert
    - Update existing records with employee details

  2. Details
    - Adds columns to store employee information directly in support_requests
    - Creates trigger to automatically populate these fields on insert
    - Updates existing records with employee information
    - Uses user_id to link with employees table

  3. Security
    - Maintains existing RLS policies
    - No additional security changes needed as these are derived fields
*/

-- Add new columns for employee details
ALTER TABLE support_requests
ADD COLUMN IF NOT EXISTS employee_last_name text,
ADD COLUMN IF NOT EXISTS employee_department text;

-- Create function to populate employee details
CREATE OR REPLACE FUNCTION populate_employee_details()
RETURNS TRIGGER AS $$
BEGIN
  SELECT
    last_name,
    department
  INTO
    NEW.employee_last_name,
    NEW.employee_department
  FROM employees
  WHERE id = NEW.user_id;

  RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- Drop trigger if exists to avoid conflicts
DROP TRIGGER IF EXISTS set_employee_details ON support_requests;

-- Create trigger to automatically populate employee details
CREATE TRIGGER set_employee_details
  BEFORE INSERT ON support_requests
  FOR EACH ROW
  EXECUTE FUNCTION populate_employee_details();

-- Update existing records with employee details
UPDATE support_requests sr
SET
  employee_last_name = e.last_name,
  employee_department = e.department
FROM employees e
WHERE sr.user_id = e.id;

@@ -1,68 +0,0 @@
/*
  # Update support requests policies

  1. Changes
    - Add RLS policies for support requests table
    - Allow authenticated users to create and view their requests
    - Allow admins to manage all requests

  2. Security
    - Enable RLS on support_requests table
    - Add policies for authenticated users
    - Add admin policies
*/

-- Drop existing policies if they exist
DROP POLICY IF EXISTS "Users can create their own requests" ON support_requests;
DROP POLICY IF EXISTS "Users can view their own requests" ON support_requests;
DROP POLICY IF EXISTS "Users can update their own requests" ON support_requests;
DROP POLICY IF EXISTS "Admins can view all requests" ON support_requests;

-- Create new policies
CREATE POLICY "Users can create requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (true);

CREATE POLICY "Users can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (
    auth.uid() = user_id OR
    EXISTS (
      SELECT 1 FROM users
      WHERE users.id = auth.uid() AND department = 'it'
    )
  );

CREATE POLICY "Users can update their own requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (
    auth.uid() = user_id OR
    EXISTS (
      SELECT 1 FROM users
      WHERE users.id = auth.uid() AND department = 'it'
    )
  )
  WITH CHECK (
    auth.uid() = user_id OR
    EXISTS (
      SELECT 1 FROM users
      WHERE users.id = auth.uid() AND department = 'it'
    )
  );

CREATE POLICY "IT department can manage all requests"
  ON support_requests
  FOR ALL
  TO authenticated
  USING (
    EXISTS (
      SELECT 1 FROM users
      WHERE users.id = auth.uid() AND department = 'it'
    )
  );

@@ -1,50 +0,0 @@
/*
  # Add Employee Creation Function

  1. Changes
    - Add secure function to create new employees with password hashing

  2. Security
    - Uses hash_password function for secure password storage
    - SECURITY DEFINER to ensure proper access control
    - Returns employee data without password hash
*/

-- Create function to create employee with password
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate input
  IF p_password IS NULL OR length(p_password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;

  -- Create employee with hashed password
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  -- Return employee data without password hash
  v_employee.password_hash := NULL;
  RETURN v_employee;
END;
$$;
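
A quick sketch of calling the function (the argument values are made-up sample data):

-- Returns the new employees row with password_hash blanked out.
SELECT first_name, last_name, department
FROM create_employee('Anna', 'Smith', 'it', 'secret123');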

@@ -1,56 +0,0 @@
/*
  # Fix employee creation process

  1. Changes
    - Add trigger to create auth user and employee synchronously
    - Update create_employee function to handle auth user creation
    - Add proper error handling

  2. Security
    - Maintain RLS policies
    - Ensure secure password handling
*/

-- Function to create auth user and employee
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
  v_auth_user uuid;
BEGIN
  -- Validate password
  PERFORM validate_password(p_password);

  -- Reuse the caller's auth uid as the employee id (note: auth.uid()
  -- only reads the current session; no auth user is created here)
  v_auth_user := auth.uid();

  -- Create employee record
  INSERT INTO employees (
    id,
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    v_auth_user,
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
EXCEPTION
  WHEN others THEN
    RAISE EXCEPTION 'Failed to create employee: %', SQLERRM;
END;
$$;

@@ -1,67 +0,0 @@
/*
  # Remove email dependency from employees table

  1. Changes
    - Remove email column from employees table
    - Update create_employee function to work without email
    - Preserve existing data integrity

  2. Security
    - Maintain existing RLS policies
*/

-- Remove email column and its constraint
ALTER TABLE employees
DROP COLUMN IF EXISTS email;

-- Update create_employee function
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Input validation
  IF p_first_name IS NULL OR p_first_name = '' THEN
    RAISE EXCEPTION 'First name is required';
  END IF;

  IF p_last_name IS NULL OR p_last_name = '' THEN
    RAISE EXCEPTION 'Last name is required';
  END IF;

  IF p_department IS NULL OR p_department = '' THEN
    RAISE EXCEPTION 'Department is required';
  END IF;

  IF p_password IS NULL OR length(p_password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;

  -- Create employee record
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
EXCEPTION
  WHEN others THEN
    RAISE EXCEPTION 'Failed to create employee: %', SQLERRM;
END;
$$;

@@ -1,53 +0,0 @@
/*
  # Create status history table

  1. New Tables
    - `status_history`
      - `id` (uuid, primary key)
      - `request_id` (uuid, foreign key to support_requests.id)
      - `old_status` (request_status)
      - `new_status` (request_status)
      - `changed_by` (uuid, foreign key to employees.id)
      - `changed_at` (timestamptz)

  2. Security
    - Enable RLS
    - Add policies for status history access
*/

CREATE TABLE IF NOT EXISTS status_history (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  request_id uuid REFERENCES support_requests(id) ON DELETE CASCADE NOT NULL,
  old_status request_status,
  new_status request_status NOT NULL,
  changed_by uuid REFERENCES employees(id) ON DELETE CASCADE NOT NULL,
  changed_at timestamptz NOT NULL DEFAULT now()
);

-- Enable RLS
ALTER TABLE status_history ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can view status history of their requests"
  ON status_history
  FOR SELECT
  TO authenticated
  USING (
    EXISTS (
      SELECT 1 FROM support_requests sr
      WHERE sr.id = status_history.request_id
      AND sr.employee_id = auth.uid()
    )
  );

CREATE POLICY "Admins can view all status history"
  ON status_history
  FOR SELECT
  TO authenticated
  USING (
    EXISTS (
      SELECT 1 FROM employees e
      WHERE e.id = auth.uid()
      AND e.is_admin = true
    )
  );

@@ -1,57 +0,0 @@
/*
  # Create support requests table

  1. New Table
    - support_requests
      - id (uuid, primary key)
      - employee_id (uuid, foreign key to employees)
      - department (text)
      - request_type (request_type enum)
      - priority (request_priority enum)
      - status (request_status enum)
      - description (text)
      - created_at (timestamptz)

  2. Security
    - Enable RLS
    - Add request viewing, creation, and update policies
    - Add performance index for employee lookups
*/

-- Create support requests table
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  employee_id uuid REFERENCES employees(id),
  department text NOT NULL,
  request_type request_type NOT NULL,
  priority request_priority NOT NULL,
  description text,
  status request_status NOT NULL DEFAULT 'new',
  created_at timestamptz NOT NULL DEFAULT now()
);

-- Enable RLS for support requests
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create index for better query performance
CREATE INDEX IF NOT EXISTS idx_support_requests_employee_id
ON support_requests(employee_id);

-- Create RLS policies for support requests
CREATE POLICY "Employees can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (employee_id = auth.uid());

CREATE POLICY "Employees can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (employee_id = auth.uid());

CREATE POLICY "Employees can update their own requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (employee_id = auth.uid());

@@ -1,80 +0,0 @@
/*
  # Fix employee creation functions

  1. Changes
    - Drop and recreate validate_password function with correct return type
    - Update create_employee function

  2. Security
    - Maintain SECURITY DEFINER
    - Secure password handling
*/

-- Drop existing functions
DROP FUNCTION IF EXISTS validate_password(text);
DROP FUNCTION IF EXISTS create_employee(text, text, text, text);

-- Recreate validate_password function
CREATE OR REPLACE FUNCTION validate_password(password text)
RETURNS void
LANGUAGE plpgsql
AS $$
BEGIN
  IF length(password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;
END;
$$;

-- Create employee function with auth integration
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate input
  IF p_first_name IS NULL OR p_first_name = '' THEN
    RAISE EXCEPTION 'First name is required';
  END IF;

  IF p_last_name IS NULL OR p_last_name = '' THEN
    RAISE EXCEPTION 'Last name is required';
  END IF;

  IF p_department IS NULL OR p_department = '' THEN
    RAISE EXCEPTION 'Department is required';
  END IF;

  -- Validate password
  PERFORM validate_password(p_password);

  -- Create employee record
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    email,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    lower(p_last_name) || '@example.com',
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
EXCEPTION
  WHEN others THEN
    RAISE EXCEPTION 'Failed to create employee: %', SQLERRM;
END;
$$;

@@ -1,85 +0,0 @@
/*
  # Fix employee management functions

  1. Changes
    - Drop and recreate validate_password function with proper return type
    - Create improved create_employee function with better validation

  2. Security
    - Maintain SECURITY DEFINER for sensitive operations
    - Secure password validation and hashing
    - Input validation for all fields
*/

-- Drop existing validate_password function if it exists
DROP FUNCTION IF EXISTS validate_password(text);

-- Recreate validate_password function with better validation
CREATE OR REPLACE FUNCTION validate_password(password text)
RETURNS boolean
LANGUAGE plpgsql
AS $$
BEGIN
  -- Check password length
  IF length(password) < 6 THEN
    RETURN false;
  END IF;

  RETURN true;
END;
$$;

-- Create employee function with improved validation
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate input
  IF p_first_name IS NULL OR p_first_name = '' THEN
    RAISE EXCEPTION 'First name is required';
  END IF;

  IF p_last_name IS NULL OR p_last_name = '' THEN
    RAISE EXCEPTION 'Last name is required';
  END IF;

  IF p_department IS NULL OR p_department = '' THEN
    RAISE EXCEPTION 'Department is required';
  END IF;

  -- Validate password
  IF NOT validate_password(p_password) THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;

  -- Create employee record
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    email,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    lower(p_last_name) || '@example.com',
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
EXCEPTION
  WHEN others THEN
    RAISE EXCEPTION 'Failed to create employee: %', SQLERRM;
END;
$$;

@@ -1,93 +0,0 @@
/*
  # Fix employee creation process

  1. Changes
    - Add proper error handling for auth user creation
    - Ensure atomic transaction for employee creation
    - Add better validation for employee data
    - Fix duplicate email handling

  2. Security
    - Maintain RLS policies
    - Add proper role checks
*/

-- Drop existing function if exists
DROP FUNCTION IF EXISTS create_employee(text, text, text, text);

-- Create improved employee creation function
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
  v_email text;
BEGIN
  -- Input validation
  IF p_first_name IS NULL OR p_first_name = '' THEN
    RAISE EXCEPTION 'First name is required';
  END IF;

  IF p_last_name IS NULL OR p_last_name = '' THEN
    RAISE EXCEPTION 'Last name is required';
  END IF;

  IF p_department IS NULL OR p_department = '' THEN
    RAISE EXCEPTION 'Department is required';
  END IF;

  IF p_password IS NULL OR length(p_password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;

  -- Derive email from last name (duplicates surface as unique_violation below)
  v_email := lower(p_last_name) || '@example.com';

  -- Create employee record
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    email,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    v_email,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  -- Create auth user
  INSERT INTO auth.users (
    email,
    encrypted_password,
    email_confirmed_at,
    raw_user_meta_data
  ) VALUES (
    v_email,
    hash_password(p_password),
    now(),
    jsonb_build_object(
      'first_name', p_first_name,
      'last_name', p_last_name,
      'department', p_department
    )
  );

  RETURN v_employee;
EXCEPTION
  WHEN unique_violation THEN
    RAISE EXCEPTION 'Employee with this email already exists';
  WHEN others THEN
    RAISE EXCEPTION 'Failed to create employee: %', SQLERRM;
END;
$$;

@@ -1,89 +0,0 @@
/*
  # Fix database constraints and password validation

  1. Changes
    - Add password validation function
    - Add employee creation function with validation
    - Add indexes for performance optimization
    - Update RLS policies

  2. Security
    - Maintain RLS policies
    - Add proper validation for passwords
*/

-- Update password validation function
CREATE OR REPLACE FUNCTION validate_password(password text)
RETURNS boolean
LANGUAGE plpgsql
AS $$
BEGIN
  IF length(password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;
  RETURN true;
END;
$$;

-- Update create_employee function to use password validation
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate password
  PERFORM validate_password(p_password);

  -- Create employee
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  RETURN v_employee;
END;
$$;

-- Add indexes for better performance if they don't exist
CREATE INDEX IF NOT EXISTS idx_employees_last_name ON employees(last_name);
CREATE INDEX IF NOT EXISTS idx_support_requests_created_at ON support_requests(created_at DESC);

-- Update RLS policies for support_requests
DROP POLICY IF EXISTS "Users can create their own requests" ON support_requests;
DROP POLICY IF EXISTS "Users can view their own requests" ON support_requests;
DROP POLICY IF EXISTS "Users can update their own requests" ON support_requests;

CREATE POLICY "Employees can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (user_id = auth.uid());

CREATE POLICY "Employees can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (user_id = auth.uid());

CREATE POLICY "Employees can update their own requests"
  ON support_requests
  FOR UPDATE
  TO authenticated
  USING (user_id = auth.uid())
  WITH CHECK (user_id = auth.uid());

@@ -1,43 +0,0 @@
/*
  # Update employees table and policies

  1. Table Creation
    - employees
      - id (uuid, primary key)
      - first_name (text)
      - last_name (text)
      - department (text)
      - created_at (timestamptz)

  2. Security
    - Enable RLS
    - Add employee viewing policy (if not exists)
*/

-- Create employees table
CREATE TABLE IF NOT EXISTS employees (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  first_name text NOT NULL,
  last_name text NOT NULL,
  department text NOT NULL,
  created_at timestamptz DEFAULT now()
);

-- Enable RLS for employees
ALTER TABLE employees ENABLE ROW LEVEL SECURITY;

-- Safely create policy if it doesn't exist
DO $$
BEGIN
  IF NOT EXISTS (
    SELECT 1 FROM pg_policies
    WHERE tablename = 'employees'
    AND policyname = 'Employees can view their own profile'
  ) THEN
    CREATE POLICY "Employees can view their own profile"
      ON employees
      FOR SELECT
      TO authenticated
      USING (id = auth.uid());
  END IF;
END $$;
@@ -1,50 +0,0 @@
/*
  # Add Employee Creation Function

  1. Changes
    - Add secure function to create new employees with password hashing

  2. Security
    - Uses hash_password function for secure password storage
    - SECURITY DEFINER to ensure proper access control
    - Returns employee data without password hash
*/

-- Create function to create employee with password
CREATE OR REPLACE FUNCTION create_employee(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS employees
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_employee employees;
BEGIN
  -- Validate input
  IF p_password IS NULL OR length(p_password) < 6 THEN
    RAISE EXCEPTION 'Password must be at least 6 characters long';
  END IF;

  -- Create employee with hashed password
  INSERT INTO employees (
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_employee;

  -- Return employee data without password hash
  v_employee.password_hash := NULL;
  RETURN v_employee;
END;
$$;
@@ -1,27 +0,0 @@
/*
  # Password Management Functions

  1. New Functions
    - Password hashing and verification utilities
    - Secure password management
*/

-- crypt() and gen_salt() are provided by the pgcrypto extension,
-- so make sure it is installed before defining the functions below
CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- Create password hashing function
CREATE OR REPLACE FUNCTION hash_password(password text)
RETURNS text
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN crypt(password, gen_salt('bf'));
END;
$$;

-- Create password verification function
CREATE OR REPLACE FUNCTION verify_password(stored_hash text, password text)
RETURNS boolean
LANGUAGE plpgsql
AS $$
BEGIN
  RETURN stored_hash = crypt(password, stored_hash);
END;
$$;
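-- Illustrative usage sketch (not part of the original migration).
-- hash_password() produces a salted bcrypt hash; verify_password() re-hashes
-- the candidate password with the salt embedded in the stored hash and
-- compares the two results:
WITH h AS (SELECT hash_password('s3cret!') AS stored_hash)
SELECT verify_password(stored_hash, 's3cret!') AS ok,       -- true
       verify_password(stored_hash, 'wrong')   AS bad_try   -- false
FROM h;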
@@ -1,58 +0,0 @@
/*
  # Create the request feedback system

  1. New Tables
    - request_feedback (feedback on support requests)
      - id (uuid, primary key)
      - request_id (id of the request)
      - rating (score)
      - comment (comment text)
      - created_by (author)
      - created_at (creation date)

  2. Security
    - Enable RLS
    - Access policies for managing feedback
*/

-- Create the feedback table
CREATE TABLE IF NOT EXISTS request_feedback (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  request_id uuid REFERENCES support_requests(id) ON DELETE CASCADE,
  rating integer NOT NULL CHECK (rating >= 1 AND rating <= 5),
  comment text,
  created_by uuid REFERENCES employees(id),
  created_at timestamptz NOT NULL DEFAULT now()
);

-- Create index
CREATE INDEX IF NOT EXISTS idx_request_feedback_request_id
  ON request_feedback(request_id);

-- Enable RLS
ALTER TABLE request_feedback ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Employees can leave feedback on their own requests"
  ON request_feedback
  FOR INSERT
  TO authenticated
  WITH CHECK (
    EXISTS (
      SELECT 1 FROM support_requests
      WHERE id = request_id
      AND employee_id = auth.uid()
    )
  );

CREATE POLICY "Employees can view feedback on their own requests"
  ON request_feedback
  FOR SELECT
  TO authenticated
  USING (
    EXISTS (
      SELECT 1 FROM support_requests
      WHERE id = request_feedback.request_id
      AND employee_id = auth.uid()
    )
  );
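-- Illustrative usage sketch (not part of the original migration); the request
-- id below is a hypothetical placeholder. Under the policies above, the INSERT
-- only succeeds when the referenced support request belongs to the
-- authenticated employee (auth.uid()):
INSERT INTO request_feedback (request_id, rating, comment, created_by)
VALUES ('00000000-0000-0000-0000-000000000001', 5, 'Resolved quickly', auth.uid());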
@@ -1,56 +0,0 @@
/*
  # Request priority system

  1. New Tables
    - `request_priorities`
      - `id` (uuid, primary key)
      - `name` (text, unique)
      - `description` (text)
      - `color` (text)
      - `sla_hours` (integer)
      - `created_at` (timestamptz)

  2. Security
    - Enable RLS
    - Policies for reading and management
*/

DO $$ BEGIN
  -- Create the priorities table if it does not exist
  CREATE TABLE IF NOT EXISTS request_priorities (
    id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
    name text NOT NULL UNIQUE,
    description text,
    color text NOT NULL,
    sla_hours integer NOT NULL,
    created_at timestamptz NOT NULL DEFAULT now()
  );

  -- Enable RLS
  ALTER TABLE request_priorities ENABLE ROW LEVEL SECURITY;

  -- Safely create the policies, checking for existence first
  IF NOT EXISTS (
    SELECT 1 FROM pg_policies
    WHERE tablename = 'request_priorities'
    AND policyname = 'Everyone can view priorities'
  ) THEN
    CREATE POLICY "Everyone can view priorities"
      ON request_priorities
      FOR SELECT
      TO authenticated
      USING (true);
  END IF;

  IF NOT EXISTS (
    SELECT 1 FROM pg_policies
    WHERE tablename = 'request_priorities'
    AND policyname = 'Only administrators can manage priorities'
  ) THEN
    CREATE POLICY "Only administrators can manage priorities"
      ON request_priorities
      FOR ALL
      TO authenticated
      USING (auth.jwt() ->> 'role' = 'admin');
  END IF;
END $$;
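-- Illustrative seed data sketch (not part of the original migration); the
-- names, colors, and SLA values are hypothetical examples of how the
-- sla_hours column encodes a response deadline per priority:
INSERT INTO request_priorities (name, description, color, sla_hours)
VALUES
  ('low',      'Can wait for the next maintenance window', '#2ecc71', 72),
  ('critical', 'Work is blocked, respond immediately',     '#e74c3c', 4)
ON CONFLICT (name) DO NOTHING;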
@@ -1,73 +0,0 @@
/*
  # User Management Functions

  1. New Functions
    - User creation with password validation
    - User authentication
*/

-- Create user management function
CREATE OR REPLACE FUNCTION create_user(
  p_first_name text,
  p_last_name text,
  p_department text,
  p_password text
)
RETURNS users
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
DECLARE
  v_user users;
BEGIN
  -- Validate password length
  IF length(p_password) < 4 THEN
    RAISE EXCEPTION 'Password must be at least 4 characters long';
  END IF;

  -- Create user
  INSERT INTO users (
    first_name,
    last_name,
    department,
    password_hash
  ) VALUES (
    p_first_name,
    p_last_name,
    p_department,
    hash_password(p_password)
  )
  RETURNING * INTO v_user;

  RETURN v_user;
END;
$$;

-- Create authentication function
CREATE OR REPLACE FUNCTION authenticate_user(
  p_last_name text,
  p_password text
)
RETURNS TABLE (
  id uuid,
  first_name text,
  last_name text,
  department text,
  created_at timestamptz
)
LANGUAGE plpgsql
SECURITY DEFINER
AS $$
BEGIN
  RETURN QUERY
  SELECT
    u.id,
    u.first_name,
    u.last_name,
    u.department,
    u.created_at
  FROM users u
  WHERE u.last_name = p_last_name
  AND verify_password(u.password_hash, p_password);
END;
$$;
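-- Illustrative usage sketch (not part of the original migration); the names
-- are hypothetical and it assumes users has the password_hash column this
-- migration expects. authenticate_user() returns matching profile rows
-- (never the hash) only when verify_password() accepts the password:
SELECT id, first_name FROM create_user('Ivan', 'Ivanov', 'IT', '1234');
SELECT id, first_name, department FROM authenticate_user('Ivanov', '1234');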
@@ -1,50 +0,0 @@
/*
  # Add the request category system

  1. New Tables
    - `request_categories`
      - `id` (uuid, primary key)
      - `name` (text, unique)
      - `description` (text)
      - `is_active` (boolean)
      - `created_at` (timestamp)

  2. Security
    - Enable RLS on the request_categories table
    - Read policy for all authenticated users
    - Management policy restricted to administrators
*/

DO $$ BEGIN
  -- Check whether the table exists before creating it
  IF NOT EXISTS (
    SELECT FROM pg_tables
    WHERE schemaname = 'public'
    AND tablename = 'request_categories'
  ) THEN
    -- Create the categories table
    CREATE TABLE request_categories (
      id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
      name text NOT NULL UNIQUE,
      description text,
      is_active boolean DEFAULT true,
      created_at timestamptz NOT NULL DEFAULT now()
    );

    -- Enable RLS
    ALTER TABLE request_categories ENABLE ROW LEVEL SECURITY;

    -- Create policies
    CREATE POLICY "Everyone can view categories"
      ON request_categories
      FOR SELECT
      TO authenticated
      USING (true);

    CREATE POLICY "Only administrators can manage categories"
      ON request_categories
      FOR ALL
      TO authenticated
      USING (auth.jwt() ->> 'role' = 'admin');
  END IF;
END $$;
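-- Illustrative seed data sketch (not part of the original migration); the
-- category names and descriptions are hypothetical examples:
INSERT INTO request_categories (name, description)
VALUES
  ('hardware', 'Workstations, printers, and peripherals'),
  ('software', 'Application installs and licensing')
ON CONFLICT (name) DO NOTHING;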
@@ -1,17 +0,0 @@
/*
  # Update employees table

  1. Changes
    - Add `is_admin` column for admin access control
    - Update existing records
*/

-- Add admin flag
ALTER TABLE employees
ADD COLUMN IF NOT EXISTS is_admin boolean NOT NULL DEFAULT false;

-- Update existing admin users
UPDATE employees
SET is_admin = true
WHERE email LIKE '%admin%';
@@ -1,72 +0,0 @@
/*
  # Create support requests table with employee relationship

  1. Changes
    - Create support_requests table with proper foreign keys
    - Add RLS policies for access control
    - Handle existing enum types safely

  2. Security
    - Enable RLS on support_requests table
    - Add policies for authenticated users and admins
*/

-- Safely create enum types if they don't exist
DO $$ BEGIN
  CREATE TYPE request_type AS ENUM ('hardware', 'software', 'network', 'access', 'other');
EXCEPTION
  WHEN duplicate_object THEN NULL;
END $$;

DO $$ BEGIN
  CREATE TYPE request_priority AS ENUM ('low', 'medium', 'high', 'critical');
EXCEPTION
  WHEN duplicate_object THEN NULL;
END $$;

DO $$ BEGIN
  CREATE TYPE request_status AS ENUM ('new', 'in_progress', 'resolved', 'closed');
EXCEPTION
  WHEN duplicate_object THEN NULL;
END $$;

-- Create support requests table with proper foreign key
CREATE TABLE IF NOT EXISTS support_requests (
  id uuid PRIMARY KEY DEFAULT gen_random_uuid(),
  employee_id uuid REFERENCES employees(id) ON DELETE CASCADE NOT NULL,
  department text NOT NULL,
  request_type request_type NOT NULL,
  priority request_priority NOT NULL,
  status request_status NOT NULL DEFAULT 'new',
  description text NOT NULL DEFAULT '',
  created_at timestamptz NOT NULL DEFAULT now(),
  last_status_change timestamptz DEFAULT now()
);

-- Enable RLS
ALTER TABLE support_requests ENABLE ROW LEVEL SECURITY;

-- Create policies
CREATE POLICY "Users can view their own requests"
  ON support_requests
  FOR SELECT
  TO authenticated
  USING (employee_id = auth.uid());

CREATE POLICY "Users can create their own requests"
  ON support_requests
  FOR INSERT
  TO authenticated
  WITH CHECK (employee_id = auth.uid());

-- FOR ALL so that admins can view and update every request
CREATE POLICY "Admins can manage all requests"
  ON support_requests
  FOR ALL
  TO authenticated
  USING (
    EXISTS (
      SELECT 1 FROM employees e
      WHERE e.id = auth.uid()
      AND e.is_admin = true
    )
  );
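-- Illustrative usage sketch (not part of the original migration). An employee
-- files a request as themselves; the INSERT policy rejects any other
-- employee_id value:
INSERT INTO support_requests (employee_id, department, request_type, priority, description)
VALUES (auth.uid(), 'IT', 'network', 'high', 'VPN drops every few minutes');

-- Admins (employees.is_admin = true) see every row; everyone else sees only
-- their own requests.
SELECT id, status, created_at FROM support_requests ORDER BY created_at DESC;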
@@ -1,164 +0,0 @@
/* -*- indent-tabs-mode: nil; tab-width: 4; -*- */

/* Greenlet object interface */

#ifndef Py_GREENLETOBJECT_H
#define Py_GREENLETOBJECT_H


#include <Python.h>

#ifdef __cplusplus
extern "C" {
#endif

/* This is deprecated and undocumented. It does not change. */
#define GREENLET_VERSION "1.0.0"

#ifndef GREENLET_MODULE
#define implementation_ptr_t void*
#endif

typedef struct _greenlet {
    PyObject_HEAD
    PyObject* weakreflist;
    PyObject* dict;
    implementation_ptr_t pimpl;
} PyGreenlet;

#define PyGreenlet_Check(op) (op && PyObject_TypeCheck(op, &PyGreenlet_Type))


/* C API functions */

/* Total number of symbols that are exported */
#define PyGreenlet_API_pointers 12

#define PyGreenlet_Type_NUM 0
#define PyExc_GreenletError_NUM 1
#define PyExc_GreenletExit_NUM 2

#define PyGreenlet_New_NUM 3
#define PyGreenlet_GetCurrent_NUM 4
#define PyGreenlet_Throw_NUM 5
#define PyGreenlet_Switch_NUM 6
#define PyGreenlet_SetParent_NUM 7

#define PyGreenlet_MAIN_NUM 8
#define PyGreenlet_STARTED_NUM 9
#define PyGreenlet_ACTIVE_NUM 10
#define PyGreenlet_GET_PARENT_NUM 11

#ifndef GREENLET_MODULE
/* This section is used by modules that use the greenlet C API */
static void** _PyGreenlet_API = NULL;

#    define PyGreenlet_Type \
        (*(PyTypeObject*)_PyGreenlet_API[PyGreenlet_Type_NUM])

#    define PyExc_GreenletError \
        ((PyObject*)_PyGreenlet_API[PyExc_GreenletError_NUM])

#    define PyExc_GreenletExit \
        ((PyObject*)_PyGreenlet_API[PyExc_GreenletExit_NUM])

/*
 * PyGreenlet_New(PyObject *args)
 *
 * greenlet.greenlet(run, parent=None)
 */
#    define PyGreenlet_New \
        (*(PyGreenlet * (*)(PyObject * run, PyGreenlet * parent)) \
             _PyGreenlet_API[PyGreenlet_New_NUM])

/*
 * PyGreenlet_GetCurrent(void)
 *
 * greenlet.getcurrent()
 */
#    define PyGreenlet_GetCurrent \
        (*(PyGreenlet * (*)(void)) _PyGreenlet_API[PyGreenlet_GetCurrent_NUM])

/*
 * PyGreenlet_Throw(
 *     PyGreenlet *greenlet,
 *     PyObject *typ,
 *     PyObject *val,
 *     PyObject *tb)
 *
 * g.throw(...)
 */
#    define PyGreenlet_Throw \
        (*(PyObject * (*)(PyGreenlet * self, \
                          PyObject * typ, \
                          PyObject * val, \
                          PyObject * tb)) \
             _PyGreenlet_API[PyGreenlet_Throw_NUM])

/*
 * PyGreenlet_Switch(PyGreenlet *greenlet, PyObject *args)
 *
 * g.switch(*args, **kwargs)
 */
#    define PyGreenlet_Switch \
        (*(PyObject * \
           (*)(PyGreenlet * greenlet, PyObject * args, PyObject * kwargs)) \
             _PyGreenlet_API[PyGreenlet_Switch_NUM])

/*
 * PyGreenlet_SetParent(PyObject *greenlet, PyObject *new_parent)
 *
 * g.parent = new_parent
 */
#    define PyGreenlet_SetParent \
        (*(int (*)(PyGreenlet * greenlet, PyGreenlet * nparent)) \
             _PyGreenlet_API[PyGreenlet_SetParent_NUM])

/*
 * PyGreenlet_GetParent(PyObject* greenlet)
 *
 * return greenlet.parent;
 *
 * This could return NULL even if there is no exception active.
 * If it does not return NULL, you are responsible for decrementing the
 * reference count.
 */
#    define PyGreenlet_GetParent \
        (*(PyGreenlet* (*)(PyGreenlet*)) \
             _PyGreenlet_API[PyGreenlet_GET_PARENT_NUM])

/*
 * deprecated, undocumented alias.
 */
#    define PyGreenlet_GET_PARENT PyGreenlet_GetParent

#    define PyGreenlet_MAIN \
        (*(int (*)(PyGreenlet*)) \
             _PyGreenlet_API[PyGreenlet_MAIN_NUM])

#    define PyGreenlet_STARTED \
        (*(int (*)(PyGreenlet*)) \
             _PyGreenlet_API[PyGreenlet_STARTED_NUM])

#    define PyGreenlet_ACTIVE \
        (*(int (*)(PyGreenlet*)) \
             _PyGreenlet_API[PyGreenlet_ACTIVE_NUM])


/* Macro that imports greenlet and initializes C API */
/* NOTE: This has actually moved to ``greenlet._greenlet._C_API``, but we
   keep the older definition to be sure older code that might have a copy of
   the header still works. */
#    define PyGreenlet_Import() \
        { \
            _PyGreenlet_API = (void**)PyCapsule_Import("greenlet._C_API", 0); \
        }

#endif /* GREENLET_MODULE */

#ifdef __cplusplus
}
#endif
#endif /* !Py_GREENLETOBJECT_H */
@@ -1 +0,0 @@
pip
@@ -1,19 +0,0 @@
Copyright 2005-2024 SQLAlchemy authors and contributors <see AUTHORS file>.

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,242 +0,0 @@
Metadata-Version: 2.1
Name: SQLAlchemy
Version: 2.0.27
Summary: Database Abstraction Library
Home-page: https://www.sqlalchemy.org
Author: Mike Bayer
Author-email: mike_mp@zzzcomputing.com
License: MIT
Project-URL: Documentation, https://docs.sqlalchemy.org
Project-URL: Issue Tracker, https://github.com/sqlalchemy/sqlalchemy/
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Database :: Front-Ends
Requires-Python: >=3.7
Description-Content-Type: text/x-rst
License-File: LICENSE
Requires-Dist: typing-extensions >=4.6.0
Requires-Dist: greenlet !=0.4.17 ; platform_machine == "aarch64" or (platform_machine == "ppc64le" or (platform_machine == "x86_64" or (platform_machine == "amd64" or (platform_machine == "AMD64" or (platform_machine == "win32" or platform_machine == "WIN32")))))
Requires-Dist: importlib-metadata ; python_version < "3.8"
Provides-Extra: aiomysql
Requires-Dist: greenlet !=0.4.17 ; extra == 'aiomysql'
Requires-Dist: aiomysql >=0.2.0 ; extra == 'aiomysql'
Provides-Extra: aioodbc
Requires-Dist: greenlet !=0.4.17 ; extra == 'aioodbc'
Requires-Dist: aioodbc ; extra == 'aioodbc'
Provides-Extra: aiosqlite
Requires-Dist: greenlet !=0.4.17 ; extra == 'aiosqlite'
Requires-Dist: aiosqlite ; extra == 'aiosqlite'
Requires-Dist: typing-extensions !=3.10.0.1 ; extra == 'aiosqlite'
Provides-Extra: asyncio
Requires-Dist: greenlet !=0.4.17 ; extra == 'asyncio'
Provides-Extra: asyncmy
Requires-Dist: greenlet !=0.4.17 ; extra == 'asyncmy'
Requires-Dist: asyncmy !=0.2.4,!=0.2.6,>=0.2.3 ; extra == 'asyncmy'
Provides-Extra: mariadb_connector
Requires-Dist: mariadb !=1.1.2,!=1.1.5,>=1.0.1 ; extra == 'mariadb_connector'
Provides-Extra: mssql
Requires-Dist: pyodbc ; extra == 'mssql'
Provides-Extra: mssql_pymssql
Requires-Dist: pymssql ; extra == 'mssql_pymssql'
Provides-Extra: mssql_pyodbc
Requires-Dist: pyodbc ; extra == 'mssql_pyodbc'
Provides-Extra: mypy
Requires-Dist: mypy >=0.910 ; extra == 'mypy'
Provides-Extra: mysql
Requires-Dist: mysqlclient >=1.4.0 ; extra == 'mysql'
Provides-Extra: mysql_connector
Requires-Dist: mysql-connector-python ; extra == 'mysql_connector'
Provides-Extra: oracle
Requires-Dist: cx-oracle >=8 ; extra == 'oracle'
Provides-Extra: oracle_oracledb
Requires-Dist: oracledb >=1.0.1 ; extra == 'oracle_oracledb'
Provides-Extra: postgresql
Requires-Dist: psycopg2 >=2.7 ; extra == 'postgresql'
Provides-Extra: postgresql_asyncpg
Requires-Dist: greenlet !=0.4.17 ; extra == 'postgresql_asyncpg'
Requires-Dist: asyncpg ; extra == 'postgresql_asyncpg'
Provides-Extra: postgresql_pg8000
Requires-Dist: pg8000 >=1.29.1 ; extra == 'postgresql_pg8000'
Provides-Extra: postgresql_psycopg
Requires-Dist: psycopg >=3.0.7 ; extra == 'postgresql_psycopg'
Provides-Extra: postgresql_psycopg2binary
Requires-Dist: psycopg2-binary ; extra == 'postgresql_psycopg2binary'
Provides-Extra: postgresql_psycopg2cffi
Requires-Dist: psycopg2cffi ; extra == 'postgresql_psycopg2cffi'
Provides-Extra: postgresql_psycopgbinary
Requires-Dist: psycopg[binary] >=3.0.7 ; extra == 'postgresql_psycopgbinary'
Provides-Extra: pymysql
Requires-Dist: pymysql ; extra == 'pymysql'
Provides-Extra: sqlcipher
Requires-Dist: sqlcipher3-binary ; extra == 'sqlcipher'

SQLAlchemy
==========

|PyPI| |Python| |Downloads|

.. |PyPI| image:: https://img.shields.io/pypi/v/sqlalchemy
    :target: https://pypi.org/project/sqlalchemy
    :alt: PyPI

.. |Python| image:: https://img.shields.io/pypi/pyversions/sqlalchemy
    :target: https://pypi.org/project/sqlalchemy
    :alt: PyPI - Python Version

.. |Downloads| image:: https://static.pepy.tech/badge/sqlalchemy/month
    :target: https://pepy.tech/project/sqlalchemy
    :alt: PyPI - Downloads


The Python SQL Toolkit and Object Relational Mapper

Introduction
-------------

SQLAlchemy is the Python SQL toolkit and Object Relational Mapper
that gives application developers the full power and
flexibility of SQL. SQLAlchemy provides a full suite
of well known enterprise-level persistence patterns,
designed for efficient and high-performing database
access, adapted into a simple and Pythonic domain
language.

Major SQLAlchemy features include:

* An industrial strength ORM, built
  from the core on the identity map, unit of work,
  and data mapper patterns. These patterns
  allow transparent persistence of objects
  using a declarative configuration system.
  Domain models
  can be constructed and manipulated naturally,
  and changes are synchronized with the
  current transaction automatically.
* A relationally-oriented query system, exposing
  the full range of SQL's capabilities
  explicitly, including joins, subqueries,
  correlation, and most everything else,
  in terms of the object model.
  Writing queries with the ORM uses the same
  techniques of relational composition you use
  when writing SQL. While you can drop into
  literal SQL at any time, it's virtually never
  needed.
* A comprehensive and flexible system
  of eager loading for related collections and objects.
  Collections are cached within a session,
  and can be loaded on individual access, all
  at once using joins, or by query per collection
  across the full result set.
* A Core SQL construction system and DBAPI
  interaction layer. The SQLAlchemy Core is
  separate from the ORM and is a full database
  abstraction layer in its own right, and includes
  an extensible Python-based SQL expression
  language, schema metadata, connection pooling,
  type coercion, and custom types.
* All primary and foreign key constraints are
  assumed to be composite and natural. Surrogate
  integer primary keys are of course still the
  norm, but SQLAlchemy never assumes or hardcodes
  to this model.
* Database introspection and generation. Database
  schemas can be "reflected" in one step into
  Python structures representing database metadata;
  those same structures can then generate
  CREATE statements right back out - all within
  the Core, independent of the ORM.

SQLAlchemy's philosophy:

* SQL databases behave less and less like object
  collections the more size and performance start to
  matter; object collections behave less and less like
  tables and rows the more abstraction starts to matter.
  SQLAlchemy aims to accommodate both of these
  principles.
* An ORM doesn't need to hide the "R". A relational
  database provides rich, set-based functionality
  that should be fully exposed. SQLAlchemy's
  ORM provides an open-ended set of patterns
  that allow a developer to construct a custom
  mediation layer between a domain model and
  a relational schema, turning the so-called
  "object relational impedance" issue into
  a distant memory.
* The developer, in all cases, makes all decisions
  regarding the design, structure, and naming conventions
  of both the object model as well as the relational
  schema. SQLAlchemy only provides the means
  to automate the execution of these decisions.
* With SQLAlchemy, there's no such thing as
  "the ORM generated a bad query" - you
  retain full control over the structure of
  queries, including how joins are organized,
  how subqueries and correlation is used, what
  columns are requested. Everything SQLAlchemy
  does is ultimately the result of a developer-initiated
  decision.
* Don't use an ORM if the problem doesn't need one.
  SQLAlchemy consists of a Core and separate ORM
  component. The Core offers a full SQL expression
  language that allows Pythonic construction
  of SQL constructs that render directly to SQL
  strings for a target database, returning
  result sets that are essentially enhanced DBAPI
  cursors.
* Transactions should be the norm. With SQLAlchemy's
  ORM, nothing goes to permanent storage until
  commit() is called. SQLAlchemy encourages applications
  to create a consistent means of delineating
  the start and end of a series of operations.
* Never render a literal value in a SQL statement.
  Bound parameters are used to the greatest degree
  possible, allowing query optimizers to cache
  query plans effectively and making SQL injection
  attacks a non-issue.

Documentation
-------------

Latest documentation is at:

https://www.sqlalchemy.org/docs/

Installation / Requirements
---------------------------

Full documentation for installation is at
`Installation <https://www.sqlalchemy.org/docs/intro.html#installation>`_.

Getting Help / Development / Bug reporting
------------------------------------------

Please refer to the `SQLAlchemy Community Guide <https://www.sqlalchemy.org/support.html>`_.

Code of Conduct
---------------

Above all, SQLAlchemy places great emphasis on polite, thoughtful, and
constructive communication between users and developers.
Please see our current Code of Conduct at
`Code of Conduct <https://www.sqlalchemy.org/codeofconduct.html>`_.

License
-------

SQLAlchemy is distributed under the `MIT license
<https://www.opensource.org/licenses/mit-license.php>`_.
@@ -1,530 +0,0 @@
SQLAlchemy-2.0.27.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
SQLAlchemy-2.0.27.dist-info/LICENSE,sha256=eYQKk6tEYK_iQW6ePf95YIdsg66dK-JwXoOhBNSXQOs,1119
SQLAlchemy-2.0.27.dist-info/METADATA,sha256=ySjjHZ2y0chER0KC_8dWxR1tJNJ6ZFKSsKn5NpUGQ4c,9844
SQLAlchemy-2.0.27.dist-info/RECORD,,
SQLAlchemy-2.0.27.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
SQLAlchemy-2.0.27.dist-info/WHEEL,sha256=ircjsfhzblqgSzO8ow7-0pXK-RVqDqNRGQ8F650AUNM,102
SQLAlchemy-2.0.27.dist-info/top_level.txt,sha256=rp-ZgB7D8G11ivXON5VGPjupT1voYmWqkciDt5Uaw_Q,11
sqlalchemy/__init__.py,sha256=hU_2Jtxg7KtVE1AdYJaSNwubWJozVVoKzMBzda_lewc,13327
sqlalchemy/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/__pycache__/events.cpython-311.pyc,,
sqlalchemy/__pycache__/exc.cpython-311.pyc,,
sqlalchemy/__pycache__/inspection.cpython-311.pyc,,
sqlalchemy/__pycache__/log.cpython-311.pyc,,
sqlalchemy/__pycache__/schema.cpython-311.pyc,,
sqlalchemy/__pycache__/types.cpython-311.pyc,,
sqlalchemy/connectors/__init__.py,sha256=A2AI8p63aT0jT5CsVX33xlTfiGWliOcGahlK0RyTLXg,494
sqlalchemy/connectors/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/connectors/__pycache__/aioodbc.cpython-311.pyc,,
sqlalchemy/connectors/__pycache__/asyncio.cpython-311.pyc,,
sqlalchemy/connectors/__pycache__/pyodbc.cpython-311.pyc,,
sqlalchemy/connectors/aioodbc.py,sha256=fg3xfG-5gLsy-DSyVonNNKYhOf0_lzHmixRFa5edtWI,5462
sqlalchemy/connectors/asyncio.py,sha256=DOy84rX4l0U5Nfn9dYLY8ETFE2tRiuSAZTJVPZpEl0E,6163
sqlalchemy/connectors/pyodbc.py,sha256=IG5lLCyFbnv1wB85HQuMO3S5piWHaB660OBWvBIQhbg,8750
sqlalchemy/cyextension/__init__.py,sha256=Hlfk91RinbOuNF_fybR5R2UtiIcTeUOXS66QOfSSCV0,250
sqlalchemy/cyextension/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/cyextension/collections.cp311-win_amd64.pyd,sha256=sMUnGsAsxmktVQxumYz7FORlt4p0fkcXpNvtdQl0Bfw,175104
sqlalchemy/cyextension/collections.pyx,sha256=GXPkr9cHRLW3Vcu-ik3dVBZMR-zf0Q5_K4J-_8yV-gk,12980
sqlalchemy/cyextension/immutabledict.cp311-win_amd64.pyd,sha256=Md9KeQlBLAJ9k671YhqKNg0oxyY-gGH_izUWkMV0ciE,72704
sqlalchemy/cyextension/immutabledict.pxd,sha256=5iGndSbJCgCkNmRbJ_z14RANs2dSSnAzyiRPUTBk58Y,299
sqlalchemy/cyextension/immutabledict.pyx,sha256=IhB2pR49CrORXQ3LXMFpuCIRc6I08QNvIylE1cPQA5o,3668
sqlalchemy/cyextension/processors.cp311-win_amd64.pyd,sha256=ocBdpnIueLk1-UrvAt1ieovUjd7c94wCy-eh5Bsd384,58368
sqlalchemy/cyextension/processors.pyx,sha256=V9gzqXiNHWsa5DBgYl-3KzclFHY8kXGF_TD1xHFE7eM,1860
sqlalchemy/cyextension/resultproxy.cp311-win_amd64.pyd,sha256=b8Sw6akLIwneSdTRYvga8VuWhc-zz7aX1pa3EACVuwE,60928
sqlalchemy/cyextension/resultproxy.pyx,sha256=h_RrKasbLtKK3LqUh6UiWtkumBlKtcN5eeB_1bZROMA,2827
sqlalchemy/cyextension/util.cp311-win_amd64.pyd,sha256=HZ220KIKcvLl-qn23YNUtRGL6kLMKMPOnKhtyRzR7qA,72192
sqlalchemy/cyextension/util.pyx,sha256=50QYpSAKgLSUfhFEQgSN2e1qHWCMh_b6ZNlErDUS7ec,2621
sqlalchemy/dialects/__init__.py,sha256=SJfQyxMhOL58EB-S6GQv_0jf2oP7MMfmVdlV2UxGWQo,1831
sqlalchemy/dialects/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/__pycache__/_typing.cpython-311.pyc,,
sqlalchemy/dialects/_typing.py,sha256=mN2r8mU8z-mRh4YS3VeK8Nv_IKJmE0Mb1CrJ-ptILas,913
sqlalchemy/dialects/mssql/__init__.py,sha256=r3oTfX2LLbJAGhM57wdPLWxaZBzunkcmyaTbW0FjLuY,1968
sqlalchemy/dialects/mssql/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/aioodbc.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/base.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/information_schema.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/json.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/provision.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/pymssql.cpython-311.pyc,,
sqlalchemy/dialects/mssql/__pycache__/pyodbc.cpython-311.pyc,,
sqlalchemy/dialects/mssql/aioodbc.py,sha256=b9bhUKcVj4NzoqJIDfECeE_Rmt51sRy8OOUFz_R3vpg,2086
sqlalchemy/dialects/mssql/base.py,sha256=ERGOPdJHLCF4n9L9-nBeiVQ3X-nl8ryt1FEFLxUwj1o,137336
sqlalchemy/dialects/mssql/information_schema.py,sha256=A1UJAoFb3UtE8YCY3heBgeTMkzWq3j7C2caZ3gcMGZk,8338
sqlalchemy/dialects/mssql/json.py,sha256=nZVVsgmR4Z4dNn9cv5Gucq596gsQ0MvASPuEEtz-Gek,4949
sqlalchemy/dialects/mssql/provision.py,sha256=pa-b74Xr2qsto3BFG1O0I_B25TUT3TOecg6cAKuRcf8,5517
sqlalchemy/dialects/mssql/pymssql.py,sha256=f7xqRif9Dp64de9G8yuC4OyWArwXy_oVq5X0oiwIX4E,4163
sqlalchemy/dialects/mssql/pyodbc.py,sha256=YVI19AnrqxPCBwDqcjrO_rqUUWbV2re7E8iLuV1ilqE,27801
sqlalchemy/dialects/mysql/__init__.py,sha256=PPQDwNqcpxWMt3nFQ66KefX9T9iz7d8lybEwKlfXB1U,2254
sqlalchemy/dialects/mysql/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/aiomysql.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/asyncmy.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/base.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/cymysql.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/dml.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/enumerated.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/expression.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/json.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/mariadb.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/mariadbconnector.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/mysqlconnector.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/mysqldb.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/provision.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/pymysql.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/pyodbc.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/reflection.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/reserved_words.cpython-311.pyc,,
sqlalchemy/dialects/mysql/__pycache__/types.cpython-311.pyc,,
sqlalchemy/dialects/mysql/aiomysql.py,sha256=-YFqFQEx0M2pww3xvsOlaVKflTeoUfIsM8BgfhP1MP0,10296
sqlalchemy/dialects/mysql/asyncmy.py,sha256=h9BBhGsqPG2LWdoh0lPieRLhaQ_BJrhxwsR_G5yPLDQ,10370
sqlalchemy/dialects/mysql/base.py,sha256=LQ-nvj7HIQSntwAycof93th3kpMBatHl402HNQ8z9hc,124297
sqlalchemy/dialects/mysql/cymysql.py,sha256=0mRP3gFe2t7iJYQqJz1Os_TztFwMAF34w2MmXe-4B_w,2384
sqlalchemy/dialects/mysql/dml.py,sha256=n31-m4vfOIL0MdHpUdIfTLgaMzusfQ-yHYoJWO_ndEc,7864
sqlalchemy/dialects/mysql/enumerated.py,sha256=Nz9Sv3ENX-1T18aEoOY8QfZlAcwRf65lIOse7vwjil8,8692
sqlalchemy/dialects/mysql/expression.py,sha256=uxD1fICubfGh8BhAn6WoeS8AF6hAVEvreDShXqRZTqM,4238
sqlalchemy/dialects/mysql/json.py,sha256=i0Lrd_7VKTd3fNm6kQKzrtPERuW0JeSw7XSUWnl1HQI,2350
sqlalchemy/dialects/mysql/mariadb.py,sha256=WoNxkjiPfIbWAkrVEU9MTM7mePeLHZ2uiJsyfvcpv1s,885
sqlalchemy/dialects/mysql/mariadbconnector.py,sha256=_XZ60dSn8-iQP-qyn1Utk-lswGciuoDgpW7m7GguSVA,9016
sqlalchemy/dialects/mysql/mysqlconnector.py,sha256=1ga6IV7lVLH9BKsMh2M2wSz78l5a82BZnyRqJMaS5Qw,5854
sqlalchemy/dialects/mysql/mysqldb.py,sha256=KdSkQXgCTHfOWzaM9dlQnmb77FR9X8Io6PVWTYRb1XQ,9966
sqlalchemy/dialects/mysql/provision.py,sha256=2ecdVRnZSXy5GF3hpLtQp3T8QW-oFjtTSQgbEePDH1k,3581
sqlalchemy/dialects/mysql/pymysql.py,sha256=Kxi_A34-nbQ5UEFSmy14TXc1v43-1SZ8gE628REGTFo,4220
sqlalchemy/dialects/mysql/pyodbc.py,sha256=CZCEnhyLIgbuiAW32Cw7N1m1aiQv1eBB34pV-txOs70,4435
sqlalchemy/dialects/mysql/reflection.py,sha256=wn8qKHxDb9Dnr8zC_uEgAVjk2lVuObvqPOEiad8568c,23499
sqlalchemy/dialects/mysql/reserved_words.py,sha256=s59cfdGTmlXn3GFCnevugDsc3qiiZn8tL31lief0tvo,9721
sqlalchemy/dialects/mysql/types.py,sha256=2du4p4PnuJgLHvdofFYP5dtkicIDJYeEdrFBhe7uotQ,25040
sqlalchemy/dialects/oracle/__init__.py,sha256=_yFT_k0R6yc7MKQG-Al9QZt8wYZsiCtpkhNlba5xqn8,1560
sqlalchemy/dialects/oracle/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/base.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/cx_oracle.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/dictionary.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/oracledb.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/provision.cpython-311.pyc,,
sqlalchemy/dialects/oracle/__pycache__/types.cpython-311.pyc,,
sqlalchemy/dialects/oracle/base.py,sha256=haztCHFbuhnhpcBUr8h1PDLXAavdwtpIjgOg_PSDI_A,121486
sqlalchemy/dialects/oracle/cx_oracle.py,sha256=3Tx3DKvqcCKyXupBuCiCL4B8D5TDO934Q7LYsLJjlkk,57058
sqlalchemy/dialects/oracle/dictionary.py,sha256=tmAZLEACqBAPBE0SEV2jr1R4aPcpNOrbomJl-UmgiR4,20026
sqlalchemy/dialects/oracle/oracledb.py,sha256=kuw08rp-tXKPOtGGutqcs5o2gvRptQXAzNBqPVZoLxg,9798
sqlalchemy/dialects/oracle/provision.py,sha256=KKlXDQnC8n6BjLJWA7AJg3lwXluH1OyStqfP2Uf9rq0,8524
sqlalchemy/dialects/oracle/types.py,sha256=U9EReFRcr0PiwOxT9vg2cA7WOix8LQ2sVp0gRkMHcPo,8518
sqlalchemy/dialects/postgresql/__init__.py,sha256=nfP-oId_1wzvRKo5TM6f2xrXry-IxoMkVd4Dga7LDg8,4062
sqlalchemy/dialects/postgresql/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/_psycopg_common.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/array.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/asyncpg.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/base.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/dml.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/ext.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/hstore.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/json.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/named_types.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/operators.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/pg8000.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/pg_catalog.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/provision.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/psycopg.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/psycopg2.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/psycopg2cffi.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/ranges.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/__pycache__/types.cpython-311.pyc,,
sqlalchemy/dialects/postgresql/_psycopg_common.py,sha256=fYFqLVxNxAqh3nOvzGOv3Pfpm2BsclHrk71MJZrpJKo,5883
sqlalchemy/dialects/postgresql/array.py,sha256=E69zMO2h7NQFjnWEeFX4fGYDymTbqHAI-laLfy7WrEA,14137
sqlalchemy/dialects/postgresql/asyncpg.py,sha256=CpFNMxOqJUvviWxbEyMiJKnBXTMcvN0IFwaka2gH4R0,41445
sqlalchemy/dialects/postgresql/base.py,sha256=x3xx5650ZKYAq9ShqDGQNCkj5nt3HNQXv0-Xq0ZT1Ro,181406
sqlalchemy/dialects/postgresql/dml.py,sha256=uMiqxEkji-UXqk8gO1ramQEvEfCugYmy8Cv1cnG7DQs,11522
sqlalchemy/dialects/postgresql/ext.py,sha256=ct6NQfMAfBnLYhybpF2wPEq-p8-U0tEpy-aq8NwqJLw,16758
sqlalchemy/dialects/postgresql/hstore.py,sha256=4jAZQMPWl3VE4weDRZrgrbVDRZJTM3X0Xj4twr5znYQ,11938
sqlalchemy/dialects/postgresql/json.py,sha256=TCw4qYTeLhxjDrF1sq3k-yk7UFEhryweKEj8AelBfRc,11537
sqlalchemy/dialects/postgresql/named_types.py,sha256=aaH5fNMZp8ZdmLI1ag9g0UgDGVvX0dpE0PbTu3XVUYc,17595
sqlalchemy/dialects/postgresql/operators.py,sha256=iyZuyx_daRyJjiS5rw-XnZlaWj1bmRiHdy5MXzBrFZw,2937
sqlalchemy/dialects/postgresql/pg8000.py,sha256=TPJXX078vW0FSwZ-DlWNkEOXg7Z4xk8IFwi1droMhPw,19302
sqlalchemy/dialects/postgresql/pg_catalog.py,sha256=rNWjbOOC3SB2jmFAwz5VkbdZub7BCTp60YUmJt3eeRI,9178
sqlalchemy/dialects/postgresql/provision.py,sha256=Uo_6vxVzFryFjLqsrvesRO55VqHrnsAs_pBH_8JtFcA,5937
sqlalchemy/dialects/postgresql/psycopg.py,sha256=zRoaG1ggAKnMBo6fxe6xJa52BxyU2OrOGX-_jDrH2Zk,23113
sqlalchemy/dialects/postgresql/psycopg2.py,sha256=xau5nXatjvPNbzqTF8GF_wrwU4Q5T2zkIMGZkhHvrI0,32483
sqlalchemy/dialects/postgresql/psycopg2cffi.py,sha256=hFg-9GH08ApPy3foVPUdJKwCEzNSv2zD5l4nH97AqgI,1817
sqlalchemy/dialects/postgresql/ranges.py,sha256=oiTmnZ-hd5WqqGNsXbuOJfoNxpbso_M_49gky8dlCrE,33978
sqlalchemy/dialects/postgresql/types.py,sha256=pd1QmuGwJFLqpY2tK-Ql3FNjtT1Ha-lVvfaR9dimvHc,7603
sqlalchemy/dialects/sqlite/__init__.py,sha256=MmQfjHun1U_4q-Dq_yhs9RzAX0VLixSwWeY5xWiDwag,1239
sqlalchemy/dialects/sqlite/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/aiosqlite.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/base.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/dml.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/json.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/provision.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/pysqlcipher.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/__pycache__/pysqlite.cpython-311.pyc,,
sqlalchemy/dialects/sqlite/aiosqlite.py,sha256=TgobCILLu2mjGvDgMTxX3-CPxkj_c5LDYRDJHo5W5qg,12701
sqlalchemy/dialects/sqlite/base.py,sha256=8Ft5tZeT1lHiWliTwoJaNslf8PRITVKKWhjhhRCeVDk,99576
sqlalchemy/dialects/sqlite/dml.py,sha256=8JV6Ise7WtmFniy590X5b19AYZcE51M6N5hef7d9JoA,8683
sqlalchemy/dialects/sqlite/json.py,sha256=-9afZnBt07vInCX20CKzjlTG85wHTO5_cxhcYU4phDc,2869
sqlalchemy/dialects/sqlite/provision.py,sha256=nAXZPEjXFrb6a1LxXZMqKmkQoXgl3MPsSHuMyBQ76NU,5830
sqlalchemy/dialects/sqlite/pysqlcipher.py,sha256=p0KfzHBwANDMwKTKEJCjR5RxMYqQwS4E8KXjl3Bx6Fw,5511
sqlalchemy/dialects/sqlite/pysqlite.py,sha256=s1eL04d6fqPeUNSTnpxXSjWR8OdCuYcy9ilk7716wzU,28653
sqlalchemy/dialects/type_migration_guidelines.txt,sha256=gyh3JCauAIFi_9XEfqm3vYv_jb2Eqcz2HjpmC9ZEPMM,8384
sqlalchemy/engine/__init__.py,sha256=93FWhb62dLCidc6e4FE65wq_P8GeoWQG1OG6RZMBqhM,2880
sqlalchemy/engine/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/_py_processors.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/_py_row.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/_py_util.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/base.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/characteristics.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/create.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/cursor.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/default.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/events.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/interfaces.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/mock.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/processors.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/reflection.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/result.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/row.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/strategies.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/url.cpython-311.pyc,,
sqlalchemy/engine/__pycache__/util.cpython-311.pyc,,
sqlalchemy/engine/_py_processors.py,sha256=-jlAYPM6etmuKeViiI7BD41kqY0Pr8nzaox22TPqCCQ,3880
sqlalchemy/engine/_py_row.py,sha256=UEGCjAeRsggcUn0QB0PdFC82kuykrOiOZ1KGq_Gf_qQ,3915
sqlalchemy/engine/_py_util.py,sha256=nh1XoVq1b-eGgkdzbqFqzje0RNSmVWotoa6yaB7J5Sw,2558
sqlalchemy/engine/base.py,sha256=0VnqL5IK8rTjs7wanSKgOjuzbKdNjs_8TvzJgUqfNCw,125393
sqlalchemy/engine/characteristics.py,sha256=q_l1-KybkM5Y-5hG74NuQBxHRkidBstqpRBlFLxJPqs,2671
sqlalchemy/engine/create.py,sha256=IC_D9X-t-MN78Ro3shJl4RL0fP9eTvskl4j1bAjagco,33736
sqlalchemy/engine/cursor.py,sha256=nCEzaLslgar67EDE4mqowuJsuiBaCbs5ix7sgD9iWTI,76593
sqlalchemy/engine/default.py,sha256=oB9T6i1k7rMxat1HkDGCf-_i_3eAOymV1yPU4k0tBeU,87011
sqlalchemy/engine/events.py,sha256=e0VHj69fH20sB7gocBhr5Rs2FjR8ioY4iE8VQt70oJg,38332
sqlalchemy/engine/interfaces.py,sha256=G29dfsTCNayoIiRnCQ64y9y9w9wtrGwxnm5t102ApvA,116079
sqlalchemy/engine/mock.py,sha256=wInBRiHwydTc5ELQLivdezDd1ikbSMVXgLVzZrSC0iQ,4310
sqlalchemy/engine/processors.py,sha256=w4MiVMlU6VvfhIW49nygbHcwX8FteGpz7g3IGEqtZb8,2440
sqlalchemy/engine/reflection.py,sha256=TO-tymk7BsfAzc6Fi0GmwtYyOWjfMGkyvytJyMVz_oY,77216
sqlalchemy/engine/result.py,sha256=U245Q3kGUOugqjmv-qSkx8eyDn27fLYV5agIoBHXQCA,79985
sqlalchemy/engine/row.py,sha256=g7ZqmsqX_BtRUzY-zfXoZZ4-5xZ_KJEVbvqKHUIlqRg,12433
sqlalchemy/engine/strategies.py,sha256=fD4DJn0AD371wlUa7s5Sy4j7QtgGyP7gMy_kUyqCLDQ,461
sqlalchemy/engine/url.py,sha256=tOCRmKkqrpsIfNeSDoy6KKTLtQAMtoIn9xa5kmJQebk,31694
sqlalchemy/engine/util.py,sha256=GDoT1xzoCL_CaBvWpBigzXIpUsFAFtjHH8v2e_au7qo,5833
sqlalchemy/event/__init__.py,sha256=09qZzHwt0PkIDsPwuPUVJvNakjtCBjuUJeY0AEJ9j7k,1022
sqlalchemy/event/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/event/__pycache__/api.cpython-311.pyc,,
sqlalchemy/event/__pycache__/attr.cpython-311.pyc,,
sqlalchemy/event/__pycache__/base.cpython-311.pyc,,
sqlalchemy/event/__pycache__/legacy.cpython-311.pyc,,
sqlalchemy/event/__pycache__/registry.cpython-311.pyc,,
sqlalchemy/event/api.py,sha256=74Z-EXtNsv8fvsf8m8bgCkVzJf7QzhMaVXJhCylegLo,8452
sqlalchemy/event/attr.py,sha256=-SHjzXMOs7IICPSgNwpgRS3FIEeLIpB5PyvVlpw8Gp8,21406
sqlalchemy/event/base.py,sha256=haAsH-KuvvY52A6OjwATfvCXy3hdYGgZxkDZiOqbMvI,15416
sqlalchemy/event/legacy.py,sha256=a8VEvS83PvgbomNnaSa3okZmTkxl_buZ7Lfilechjh8,8473
sqlalchemy/event/registry.py,sha256=f31k0FLqIlWpOK9tksiYXnv-yuZPPz9iLQqvKEYV7ko,11221
sqlalchemy/events.py,sha256=OAy8TK21lWzSe8bDUnAbmsP82bsBYy0LL19hR6y3BrM,542
sqlalchemy/exc.py,sha256=k01TD2xp2BM3DrXdo2U5r8yuRfsoqBND4kwvtD1SVN0,24806
sqlalchemy/ext/__init__.py,sha256=YbMQmRS_9HxRyWM-KA_F76WOss1_Em1ZcrnQDIDXoOc,333
sqlalchemy/ext/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/associationproxy.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/automap.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/baked.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/compiler.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/horizontal_shard.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/hybrid.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/indexable.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/instrumentation.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/mutable.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/orderinglist.cpython-311.pyc,,
sqlalchemy/ext/__pycache__/serializer.cpython-311.pyc,,
sqlalchemy/ext/associationproxy.py,sha256=MBtGwISA4wwT9i6op8jfY6u9lCUYn_4JCqtxZpjggok,67776
sqlalchemy/ext/asyncio/__init__.py,sha256=tKYIrERYf8hov9m8DuKWRO_53qhrvj2jRmIYjSGQ2Po,1342
sqlalchemy/ext/asyncio/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/base.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/engine.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/exc.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/result.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/scoping.cpython-311.pyc,,
sqlalchemy/ext/asyncio/__pycache__/session.cpython-311.pyc,,
sqlalchemy/ext/asyncio/base.py,sha256=slWQTFdgQQlkzrnx3m5a9xT8IRg4iM0gkEbypXr_YXQ,9184
sqlalchemy/ext/asyncio/engine.py,sha256=VKp3nRnmzl7cEIWSxJNJQMK8CbQbigmrvyqpIiVUdgY,49398
sqlalchemy/ext/asyncio/exc.py,sha256=0awLfUB4PhEPVVTKYluyor1tW91GPZZnvdQ-GGSOmJY,660
sqlalchemy/ext/asyncio/result.py,sha256=MtKAqA7hwYIdkpRxlCgHNYYzlB7dvqCtEp-aoDdFjDA,31370
sqlalchemy/ext/asyncio/scoping.py,sha256=CiMQ7ewPNsyEtl9aGOiEZOrUaNYOTP_LrR0_xkdV3r8,54211
sqlalchemy/ext/asyncio/session.py,sha256=gVdecaNFy8fo4YXhfhgwmMpezGaNKsIKL5kSI_j4GoI,64821
sqlalchemy/ext/automap.py,sha256=8mu3_-s4DUnWfmOAZsWFFaADZALSIe1J3NQS2BvUGi8,63041
sqlalchemy/ext/baked.py,sha256=jc6vPocoXXsvdZsOsqgT4kG6guWSZD1TdPjoRBmkbRU,18381
sqlalchemy/ext/compiler.py,sha256=PbvelWqZdzL6y1C6rEc8ledF79t_04MtYV26RUwNhik,20946
sqlalchemy/ext/declarative/__init__.py,sha256=MHSOffOS4MWcqshAuLNQv0vDXpK_Z3lpGXTm1riyLls,1883
sqlalchemy/ext/declarative/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/ext/declarative/__pycache__/extensions.cpython-311.pyc,,
sqlalchemy/ext/declarative/extensions.py,sha256=aPpW0PvTKH3CoSMhsOY5GcUMZOVq-OFsV1hflxmb3Lw,20095
sqlalchemy/ext/horizontal_shard.py,sha256=V8vXEt5ZQb_PM39agZD2IyoQNGSqVI1MhY-6mNV5MRY,17231
sqlalchemy/ext/hybrid.py,sha256=Fc73iUTCJuHcz3McvD3FBbjEvDag1Jw8THY6rL-phA8,53946
sqlalchemy/ext/indexable.py,sha256=aDlVpN4rilRrer9qKg3kO7fqnqB5NX4M5qzYuYM8pvw,11373
sqlalchemy/ext/instrumentation.py,sha256=lFsJECWlN1oc1E0r9TaQDZcxAx4VOz6PSHYrl5fLk9Y,16157
sqlalchemy/ext/mutable.py,sha256=nAz3_lF2xkYSARt7GAWQh-OUMcnpe6s1ocjvQGxCPkc,38428
sqlalchemy/ext/mypy/__init__.py,sha256=aqT8_9sNwzC8PIaEZ4zkCYGBvYPaDD3eCgJtJuk3g6A,247
sqlalchemy/ext/mypy/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/apply.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/decl_class.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/infer.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/names.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/plugin.cpython-311.pyc,,
sqlalchemy/ext/mypy/__pycache__/util.cpython-311.pyc,,
sqlalchemy/ext/mypy/apply.py,sha256=1Qb-_FpQ_0LVB2KFA5hVjfPv6DDMIcxXe86Ts1X9GBk,10870
sqlalchemy/ext/mypy/decl_class.py,sha256=f2iWiFVlDFqGb_IoGGotI3IEOUErh25sLT7B_cMfx0g,17899
sqlalchemy/ext/mypy/infer.py,sha256=O-3IjELDSBEAwGGxRM7lr0NWwGD0HMK4vda_iY6iwjs,19959
sqlalchemy/ext/mypy/names.py,sha256=TWsrp5ftD5p0NeyAipgYIir9SUbA4U0BAk0W2FXA3VA,10972
sqlalchemy/ext/mypy/plugin.py,sha256=TDTziLsYFRqyX8UcQMtBBa6TFR4z9N-XNO8wRkHlEOI,10053
sqlalchemy/ext/mypy/util.py,sha256=3iQ1zVpXSUoj2aHa-Kkg4O83JOzqVd8TDEwpZj3SWWs,9786
sqlalchemy/ext/orderinglist.py,sha256=r7La_3nZlGevIgsBL1IB30FvWO_tZHlTKo_FWwid-aY,14800
sqlalchemy/ext/serializer.py,sha256=_7gottqRCI-qkW4Go4o2EnOSnieKDCQ8jQ6muHXw-RM,6363
sqlalchemy/future/__init__.py,sha256=6-qPdjMHX-V-kAPjTQgNuHztmYiwKlJhKhhljuETvoQ,528
sqlalchemy/future/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/future/__pycache__/engine.cpython-311.pyc,,
sqlalchemy/future/engine.py,sha256=N_5W2ab5-ueedWzqNdgLPzTW9audT1IbxF6FCDLRZOc,510
sqlalchemy/inspection.py,sha256=GpmMuSAZ53u4W__iGpvzQKCBMFnTxnHt4Lo7Nq1FSKM,5237
sqlalchemy/log.py,sha256=Sg6PGR_wmseiCCpJfRDEkaMs08XTPPsf0X_iYJLvzS0,8895
sqlalchemy/orm/__init__.py,sha256=I-XesvuyjkAAwnsiF5FnXRLNV6W2nW70EnGAIt2GAjU,8633
sqlalchemy/orm/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/_orm_constructors.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/_typing.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/attributes.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/base.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/bulk_persistence.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/clsregistry.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/collections.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/context.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/decl_api.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/decl_base.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/dependency.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/descriptor_props.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/dynamic.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/evaluator.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/events.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/exc.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/identity.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/instrumentation.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/interfaces.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/loading.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/mapped_collection.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/mapper.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/path_registry.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/persistence.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/properties.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/query.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/relationships.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/scoping.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/session.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/state.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/state_changes.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/strategies.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/strategy_options.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/sync.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/unitofwork.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/util.cpython-311.pyc,,
sqlalchemy/orm/__pycache__/writeonly.cpython-311.pyc,,
sqlalchemy/orm/_orm_constructors.py,sha256=qOQLU_Rly-AYjGDotwZv65PCjjLgdAwqHGPUeZrTYfE,101822
sqlalchemy/orm/_typing.py,sha256=Z9GZT8Vb-wFwvHeOeVE37dvmCWdItLZnqI_pLin4cMc,5152
sqlalchemy/orm/attributes.py,sha256=devlqbjcICNLnG0HEg4wSP2sgm2m8sW9RpoQZuaU0KM,95355
sqlalchemy/orm/base.py,sha256=JWzk2w1k-xT5BW8T7K0DRhMg7lDKFf4T6KrFey_6c4A,28394
sqlalchemy/orm/bulk_persistence.py,sha256=z7yTdor_Nea7R0UMu8kloKvN-t0z2AZ-P-q5FE-oabc,72070
sqlalchemy/orm/clsregistry.py,sha256=ZumBI7I2O-l93LbA4eyMKm0w6al-nNS2QV1VDcJxGko,18528
sqlalchemy/orm/collections.py,sha256=lHjP6uDz0WdwedTqyh_8R2_nzRAK_5ONCIoisHrsb94,53797
sqlalchemy/orm/context.py,sha256=bBieTIPsM10lt5z5Feq4tDjBqpznbsssaiXh7OrTh18,115244
sqlalchemy/orm/decl_api.py,sha256=Xd9s8A7V_jlypP-u-tnpQ5o6Oq-v1A4H1f3hSEj4-Bc,65549
sqlalchemy/orm/decl_base.py,sha256=g6bhTg5M9hsf3JVnMaqn1NS7MTdBJY--L-qf9aQabgs,83753
sqlalchemy/orm/dependency.py,sha256=glstmbB4t-PIRA47u9NgTyyxbENfyQuG9Uzj2iezB_s,48935
sqlalchemy/orm/descriptor_props.py,sha256=PpDt83EX72AhCbBjixQDpYG1P1MqWqGdA-bJpXMSxSw,38254
sqlalchemy/orm/dynamic.py,sha256=m7V2GPS5__4y_hP7BQjD66b6BVEre1pzPwaj2bmRFRM,10084
sqlalchemy/orm/evaluator.py,sha256=gQIDxuoB5Ut2hiFdN768U67JusVkbFt-Bdna_T8npPA,12293
sqlalchemy/orm/events.py,sha256=EeIAGfdSQX0XQLZyURz94KYOlsjwI-ANWjPfyN_-jcQ,130956
sqlalchemy/orm/exc.py,sha256=90xWOIIAmzPguaVH6mxr2xUSGW44aGIPz5WytJSwmR4,7583
sqlalchemy/orm/identity.py,sha256=fOpANTf73r12F_w9DhVoyjkAdh8ldgJcNnwxx0GY8YM,9551
sqlalchemy/orm/instrumentation.py,sha256=a8vi3qEAyO7Z9PYksLkFi_YzxqQhzB-anblegiAtsFw,25075
sqlalchemy/orm/interfaces.py,sha256=IKCWZFHamXIkpBCaEh7YIYy-Jz2hV4-SQt6kGbfOuE8,49842
sqlalchemy/orm/loading.py,sha256=sp7VaIoc9gzrtAOekU_2EWYN3L_9lKuiKkfn6f4VlQQ,59202
sqlalchemy/orm/mapped_collection.py,sha256=AeSzQwj56cLr1tVMC0B-3JsC74IAP7_gbr-EPc4_2uw,20250
sqlalchemy/orm/mapper.py,sha256=13goncHEJueS73Z7EWovVdp1W-xBKSWB5cjqtpyIk5s,175479
sqlalchemy/orm/path_registry.py,sha256=KqS4yYe__beUSpdEjL8qxzL_z7V-FJkolm4AMCBHhGg,26658
sqlalchemy/orm/persistence.py,sha256=MKb7TuSLJUQpyqnHxf6uNmGXSznmZgkkFTD04nHbNUQ,63483
sqlalchemy/orm/properties.py,sha256=xtSDAAeUgDAmZF_OTBd4F18q9h3h7JWnPtQnWU0-aU4,30007
sqlalchemy/orm/query.py,sha256=d2aENAsXkiZuXOxZ5WNKMqsrNHT7_d-_BgXqNa2BtsA,120948
sqlalchemy/orm/relationships.py,sha256=jtIA6Y7jhlSzee-MGu_0YMmtH8Kr4lwanjCBfB_eV_I,131177
sqlalchemy/orm/scoping.py,sha256=aAQMIAAZ-M_m6UGndmkUiDazcphE-klw6wZjFT2Az7E,80842
sqlalchemy/orm/session.py,sha256=QRC0WQSjNFIhyInWnZD-picMoKWS79oIlQAVAcRon_4,198419
sqlalchemy/orm/state.py,sha256=9opH8AR6LnbCRmW1lN2RxEQyxnEi1rcDXlySqrDeUiw,38656
sqlalchemy/orm/state_changes.py,sha256=4i90vDgBGvVGUzhlonlBkZBAZFOWaAXij2X8OEA3-BA,7013
sqlalchemy/orm/strategies.py,sha256=GFppPikNxZJdsi4DW1HcU6pv-EvIeFvTI8K_LDY5nmk,117550
sqlalchemy/orm/strategy_options.py,sha256=cBzotMlredMZ8vM6T04mTnskiS0qSoRkHEQKSLZNARA,86734
sqlalchemy/orm/sync.py,sha256=aMEMhYTj2rtJZJvjqm-cUx2CoQxYl8P6YddCLpLelhM,5943
sqlalchemy/orm/unitofwork.py,sha256=THggzzAaqmYh5PBDob5dHTP_YyHXYdscs3fIxtRV-gE,27829
sqlalchemy/orm/util.py,sha256=xIaltctFxy-u4xdLk1mqdHYsq_bvg4xsaTRQTev7I80,82756
sqlalchemy/orm/writeonly.py,sha256=j5DcpZKOv1tLGQLhKfk-Uw-B0yEG7LezwJWNTq0FtWQ,22983
sqlalchemy/pool/__init__.py,sha256=ZKUPMKdBU57mhu677UsvRs5Aq9s9BwIbMmSNRoTRPoY,1848
sqlalchemy/pool/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/pool/__pycache__/base.cpython-311.pyc,,
sqlalchemy/pool/__pycache__/events.cpython-311.pyc,,
sqlalchemy/pool/__pycache__/impl.cpython-311.pyc,,
sqlalchemy/pool/base.py,sha256=D0sKTRla6wpIFbELyGY2JEHUHR324rveIl93qjjmYr8,53751
sqlalchemy/pool/events.py,sha256=ysyFh0mNDpL4N4rQ-o_BC6tpo_zt0_au_QLBgJqaKY8,13517
sqlalchemy/pool/impl.py,sha256=8VcM4JSUnu4FcSrC5TUzTWT0FYFxfNouKyuXCKnD6KM,18264
sqlalchemy/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
sqlalchemy/schema.py,sha256=UFhZjGmYoqN3zkId7M4CbVCd8KaeZUfKUjdlk0sHQ_E,3264
sqlalchemy/sql/__init__.py,sha256=T16ZB3Za0Tq1LQGXeJuuxDkyu2t-XHR2t-8QH1mE1Uw,5965
sqlalchemy/sql/__pycache__/__init__.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_dml_constructors.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_elements_constructors.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_orm_types.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_py_util.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_selectable_constructors.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/_typing.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/annotation.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/base.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/cache_key.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/coercions.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/compiler.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/crud.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/ddl.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/default_comparator.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/dml.cpython-311.pyc,,
sqlalchemy/sql/__pycache__/elements.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/events.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/expression.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/functions.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/lambdas.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/naming.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/operators.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/roles.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/schema.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/selectable.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/sqltypes.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/traversals.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/type_api.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/util.cpython-311.pyc,,
|
||||
sqlalchemy/sql/__pycache__/visitors.cpython-311.pyc,,
|
||||
sqlalchemy/sql/_dml_constructors.py,sha256=1xMH5Kd6SLhlFwfIs_lOXGC8GTrqW8mQM7Kc3cKyLuw,4007
|
||||
sqlalchemy/sql/_elements_constructors.py,sha256=DN5B84YTp9K5cfp1qjEpg_2d5WpHujlNBJ-pG7bsKoI,64386
|
||||
sqlalchemy/sql/_orm_types.py,sha256=_bzlAh3-vTIZoLvAM2ry1SF7rsYRM3-jupfhGWZZn5Y,645
|
||||
sqlalchemy/sql/_py_util.py,sha256=VzThcXk7fKqT9_mZmXrkxePdwyyl_wIciCftzl2Z_-g,2248
|
||||
sqlalchemy/sql/_selectable_constructors.py,sha256=mRgtlGyctlb1LMBqFxgn0eGzIXMbyZtQafjUuJWhYjs,19415
|
||||
sqlalchemy/sql/_typing.py,sha256=i4COgky9Gv0ArXdOpp9xfxpvKqP3lj0I_vJqinvWEck,12940
|
||||
sqlalchemy/sql/annotation.py,sha256=PslN1KQV9hN8Ji4k8I3-W-cDuRMCCLwMmJcg-n86Yy4,18830
|
||||
sqlalchemy/sql/base.py,sha256=Twa1DYB1fBGGzU0kIVS0h30-0j67gfduuw9GU6RxGao,75935
|
||||
sqlalchemy/sql/cache_key.py,sha256=vUWB-pqAtgt8SBRMHEkzo8BQ_1yXjMeGt-aaDB1ieek,34605
|
||||
sqlalchemy/sql/coercions.py,sha256=O6PA7Gzhr9XQedJs3zIngCivN1vcrNyEhFueN5CqriI,41882
|
||||
sqlalchemy/sql/compiler.py,sha256=PslsNi8ND2FXVPcFIS3UBMpVthfM47MAu4c_rpBpMHE,278910
|
||||
sqlalchemy/sql/crud.py,sha256=I5nPPnujtNKHC5C2v1vW4A0mbyomwChT21IYOX3z5fw,58190
|
||||
sqlalchemy/sql/ddl.py,sha256=NbW8F3UT4BTnda5w5TNPGxXPtv0wHSNB51hhr4gBSJM,46980
|
||||
sqlalchemy/sql/default_comparator.py,sha256=lXmd8yAUzfyeP5w4vebrQG99oC0bTrmdGc0crBq1GKw,17259
|
||||
sqlalchemy/sql/dml.py,sha256=lt5FC6BbJNotE65U-fmvEovBxkADfKBnVcnkVYYQxUM,67431
|
||||
sqlalchemy/sql/elements.py,sha256=tIgio7vC-0gftEddMtUu35cXIWwPqqomat0ufAIFrMM,177377
|
||||
sqlalchemy/sql/events.py,sha256=pG3jqJbPX18N9Amp47aXiQYMYD_HL_lOXHk-0m8m7Hw,18745
|
||||
sqlalchemy/sql/expression.py,sha256=T-AgCPp30tgKQYLKeSyqQg_VoJFE69m2yDTz6fn-u1E,7748
|
||||
sqlalchemy/sql/functions.py,sha256=vxYsWwzQpYhfQ_EwfdA-lGlbh2pkQ30AXGjvHEvVBWo,65741
|
||||
sqlalchemy/sql/lambdas.py,sha256=i3F6TZEAHSPqTV704LAybfNOMyUJ52x2YE2eCHTlYi4,50738
|
||||
sqlalchemy/sql/naming.py,sha256=ERVjqo6fBHBw2BwNgpbb5cvsCkq1jjdztczP9BKzVt8,7070
|
||||
sqlalchemy/sql/operators.py,sha256=6rpSbuFon7iIUCT4SDowYctDyOmFpe2-FdLu2HIX3x8,78508
|
||||
sqlalchemy/sql/roles.py,sha256=8nO4y1hbP1cA8IzeOn6uPgNZNVILb3E-IMeJWOIScu8,7985
|
||||
sqlalchemy/sql/schema.py,sha256=iIurzYqmZNRi_wBN-tXFKIM-jt07DIvVuzV_IVsfsTo,234377
|
||||
sqlalchemy/sql/selectable.py,sha256=fWcddtd9UM3QMcS-3Pg6E98mK9uAZGkhmyOLWlNEigI,239761
|
||||
sqlalchemy/sql/sqltypes.py,sha256=AJvAe9Nt3Bweic9eC__NVnkVAbIgb_exoajEfTij1R4,130912
|
||||
sqlalchemy/sql/traversals.py,sha256=p2iXAQc0FvV-l1Q3NNMxIhRYTm8U3Ul630jG3Ys6qCI,34611
|
||||
sqlalchemy/sql/type_api.py,sha256=zRtzrf5sLjDWnSUvl_vAnG6X8fhY8vuln4jG_Jx4zKY,86164
|
||||
sqlalchemy/sql/util.py,sha256=ftTiyNGeJK0MIRMqWMV7Xf8iZuiRGocoJRp3MIO3F3Y,49563
|
||||
sqlalchemy/sql/visitors.py,sha256=oudlabsf9qleuC78GFe_iflRSAD8H-HjaM7T8Frc538,37482
|
||||
sqlalchemy/testing/__init__.py,sha256=8iT66v5k4J9RmquaH4GLI2DjEA7c_JZSTVig-uuBNw8,3221
|
||||
sqlalchemy/testing/__pycache__/__init__.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/assertions.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/assertsql.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/asyncio.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/config.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/engines.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/entities.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/exclusions.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/pickleable.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/profiling.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/provision.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/requirements.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/schema.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/util.cpython-311.pyc,,
|
||||
sqlalchemy/testing/__pycache__/warnings.cpython-311.pyc,,
|
||||
sqlalchemy/testing/assertions.py,sha256=bBn2Ep89FF-WBmzh0VkvnJ9gNMKuqk8OXq7ALpUwar4,32428
|
||||
sqlalchemy/testing/assertsql.py,sha256=gj4YRBR9cjOtS1WgR3nsyIze1tmqctsNs1uCV8N2Q4w,17333
|
||||
sqlalchemy/testing/asyncio.py,sha256=GvWrQFrL3xz7rub61oGOS2PXVvw7D9Id3gtkXQjZJLY,3858
|
||||
sqlalchemy/testing/config.py,sha256=jfFVUiAOm8im6SlqyAdZVSaA51kmADgfBDqrHnngH7c,12517
|
||||
sqlalchemy/testing/engines.py,sha256=U3FkWECbghiK2_Yv5uKMjco377xoFsi75WZgRZroGWA,13814
|
||||
sqlalchemy/testing/entities.py,sha256=Um-DFSz81p06DhTK899ZRUOZRw3FtUDeNMVHcIg3eLc,3471
|
||||
sqlalchemy/testing/exclusions.py,sha256=8kjsaFfjCvPlLsQLD_LIDwuqvVlIVbD5qTWBlKdtNkM,12895
|
||||
sqlalchemy/testing/fixtures/__init__.py,sha256=B1IFCzEVdCqhEvFrLmgxZ_Fr08jDus5FddSA-lnnAAU,1226
|
||||
sqlalchemy/testing/fixtures/__pycache__/__init__.cpython-311.pyc,,
|
||||
sqlalchemy/testing/fixtures/__pycache__/base.cpython-311.pyc,,
|
||||
sqlalchemy/testing/fixtures/__pycache__/mypy.cpython-311.pyc,,
|
||||
sqlalchemy/testing/fixtures/__pycache__/orm.cpython-311.pyc,,
|
||||
sqlalchemy/testing/fixtures/__pycache__/sql.cpython-311.pyc,,
|
||||
sqlalchemy/testing/fixtures/base.py,sha256=S0ODuph0jA2Za4GN3NNhYVIqN9jAa3Q9Vd1N4O4rcTc,12622
|
||||
sqlalchemy/testing/fixtures/mypy.py,sha256=2H8QxvGvwsb_Z3alRtvCvfXeqGjOb8aemfoYxQiuGMc,12285
|
||||
sqlalchemy/testing/fixtures/orm.py,sha256=6JvQpIfmgmSTH3Hie4nhmUFfvH0pseujIFA9Lup2Dzw,6322
|
||||
sqlalchemy/testing/fixtures/sql.py,sha256=Joqh4q1vE3wCaE3eDZUnSobeLNUE-pabIy58ZMURFto,16196
|
||||
sqlalchemy/testing/pickleable.py,sha256=uYLl557iNep6jSOVl0vK1GwaLHUKidALoPJc-QIrC08,2988
|
||||
sqlalchemy/testing/plugin/__init__.py,sha256=bbtVIt7LzVnUCcVxHWRH2owOQD067bQwwhyMf_whqHs,253
|
||||
sqlalchemy/testing/plugin/__pycache__/__init__.cpython-311.pyc,,
|
||||
sqlalchemy/testing/plugin/__pycache__/bootstrap.cpython-311.pyc,,
|
||||
sqlalchemy/testing/plugin/__pycache__/plugin_base.cpython-311.pyc,,
|
||||
sqlalchemy/testing/plugin/__pycache__/pytestplugin.cpython-311.pyc,,
|
||||
sqlalchemy/testing/plugin/bootstrap.py,sha256=USn6pE-JcE5pSmnEd2wad3goKLx2hdJS3AUUFpXHm-I,1736
|
||||
sqlalchemy/testing/plugin/plugin_base.py,sha256=CgrNj2wj9KNALu9YfnGSaHX2fXfTtiim_cfx0CPVoy8,22357
|
||||
sqlalchemy/testing/plugin/pytestplugin.py,sha256=xFbgBkv92U7_nYSyq87MG6OZSg_NR2HOo7CG7IC1cpY,28416
|
||||
sqlalchemy/testing/profiling.py,sha256=o8_V3TpF_WytudMQQLm1UxlfNDrLCWxUvkH-Kd0unKU,10472
|
||||
sqlalchemy/testing/provision.py,sha256=ciWoXf3P9ql4hh1yBp0RNEtPr5vyvPbd8RD_DYxNG9U,15115
|
||||
sqlalchemy/testing/requirements.py,sha256=L_DKVqVxVMbB3JveC_6UhD5oVry2KjBHPxfQd35hrWQ,53600
|
||||
sqlalchemy/testing/schema.py,sha256=z2Z5rm3iJ1-vgifUxwzxEjt1qu7QOyr3TeDnQdCHlWE,6737
|
||||
sqlalchemy/testing/suite/__init__.py,sha256=YvTEqUNHaBlgLgWDAWn79mQrUR4VBGUHtprywJlmDT8,741
|
||||
sqlalchemy/testing/suite/__pycache__/__init__.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_cte.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_ddl.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_deprecations.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_dialect.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_insert.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_reflection.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_results.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_rowcount.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_select.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_sequence.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_types.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_unicode_ddl.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/__pycache__/test_update_delete.cpython-311.pyc,,
|
||||
sqlalchemy/testing/suite/test_cte.py,sha256=C_viXJKClFAm91rtPb42tiAA7gYJwKkqGYVJYap0cLM,6662
|
||||
sqlalchemy/testing/suite/test_ddl.py,sha256=k6D6RreLkDSSpRUM2hQz-_CA48qV2PYx_2LNyUSoZzE,12420
|
||||
sqlalchemy/testing/suite/test_deprecations.py,sha256=SKRFZDteBO1rw9-BQjDic5nh7fdyw2ypVOewR2pj7-Q,5490
|
||||
sqlalchemy/testing/suite/test_dialect.py,sha256=ftOWRXWOotB2_jMJJqwoH9f3X2ucc1HwwOiXp573GwM,23663
|
||||
sqlalchemy/testing/suite/test_insert.py,sha256=v3zrUZaGlke3cI4vabHg7xaI4gNqcHhtMPgYuf0mOxc,19454
|
||||
sqlalchemy/testing/suite/test_reflection.py,sha256=C6P9ccG5Eog5uiIHO4s6M7ThnBbEUZKh83CmOMn-KSo,109594
|
||||
sqlalchemy/testing/suite/test_results.py,sha256=1SlvhdioM1_ZrkQX2IJbJgXNHuleizwAge6-XvHtA0s,16405
|
||||
sqlalchemy/testing/suite/test_rowcount.py,sha256=DCEGxorDcrT5JCLd3_SNQeZmxT6sKIcuKxX1r6vK4Mg,8158
|
||||
sqlalchemy/testing/suite/test_select.py,sha256=NwHUSVc4UptVYMGjp3QVLr0OpGxpz2qJG4cNWZW8vTo,60462
|
||||
sqlalchemy/testing/suite/test_sequence.py,sha256=sIqkfgVqPIgl4lm75EPdag9gK-rTHfUm3pWX-JijPy4,10240
|
||||
sqlalchemy/testing/suite/test_types.py,sha256=i1fCIXERdtGABdp_T3l1vaPH9AhQ80DJvbjOPbeng1c,67748
|
||||
sqlalchemy/testing/suite/test_unicode_ddl.py,sha256=juF_KTK1nGrSlsL8z0Ky0rFSNkPGheLB3e0Kq3yRqss,6330
|
||||
sqlalchemy/testing/suite/test_update_delete.py,sha256=TnJI5U_ZEuu3bni4sH-S6CENxvSZwDgZL-FKSV45bAo,4133
|
||||
sqlalchemy/testing/util.py,sha256=jX9jlUHSH-7_2OCypZUvitP8JkJbNdr5_ZxU6Aa8DPY,14599
|
||||
sqlalchemy/testing/warnings.py,sha256=3EhbTlPe4gJnoydj-OKueNOOtGwIRF2kV4XvlFwFYOA,1598
|
||||
sqlalchemy/types.py,sha256=unCm_O8qKxU3LjLbqeqSNQSsK5k5R5POsyEx2gH6CF4,3244
|
||||
sqlalchemy/util/__init__.py,sha256=3-O9j9qPk-gTx6hlyLsISc_JOW5MhjV0J_L5nV19qI8,8436
|
||||
sqlalchemy/util/__pycache__/__init__.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/_collections.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/_concurrency_py3k.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/_has_cy.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/_py_collections.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/compat.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/concurrency.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/deprecations.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/langhelpers.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/preloaded.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/queue.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/tool_support.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/topological.cpython-311.pyc,,
|
||||
sqlalchemy/util/__pycache__/typing.cpython-311.pyc,,
|
||||
sqlalchemy/util/_collections.py,sha256=O3iqq0R9TbcXNyFk8nG4QLwkUzdWkCFmqBYcDrajvl8,20778
|
||||
sqlalchemy/util/_concurrency_py3k.py,sha256=HQ5tLleQd5cR4BOoXKFWVTK7p4fCDW105QxRLW52_ko,8841
|
||||
sqlalchemy/util/_has_cy.py,sha256=IHGc5hUFbXQuv1a1z2P8yVwz0yGbCYXyQM2qsdcBTyg,1287
|
||||
sqlalchemy/util/_py_collections.py,sha256=2PUqiKIsF8d-gNDAAqYI8WE6XPyRf1flRLkVsJeXuOo,17255
|
||||
sqlalchemy/util/compat.py,sha256=ojCAtKHlkqNdYB33PXbAP0zTH1ZXYdTZkJl32cqGnMQ,9014
|
||||
sqlalchemy/util/concurrency.py,sha256=AOLQUBm9Hm4jDArP8vBYL39FzckyH9S4NsKRvWaYzEE,2500
|
||||
sqlalchemy/util/deprecations.py,sha256=AnHpDWHi7g2gv_QUTGStQTnr0J94lIF-3aFLOsv9yzg,12372
|
||||
sqlalchemy/util/langhelpers.py,sha256=1meF9IffDMmz50uxNdUO15FUL0TARzwFcPjwbpOQRX8,67115
|
||||
sqlalchemy/util/preloaded.py,sha256=78Sl7VjzTOPajbovvARxNeuZb-iYRpEvL5k8m5Bz4vQ,6054
|
||||
sqlalchemy/util/queue.py,sha256=4SbSbVamUECjCDpMPR035N1ooVHt9W5GjbqkxfZmH5k,10507
|
||||
sqlalchemy/util/tool_support.py,sha256=DuurikYgDUIIxk3gubUKl6rs-etXt3eeHaZ4ZkIyJXQ,6336
|
||||
sqlalchemy/util/topological.py,sha256=_NdtAghZjhZ4e2fwWHmn25erP5cvtGgOUMplsCa_VCE,3578
|
||||
sqlalchemy/util/typing.py,sha256=DG9V94Mh63cqObr_G5X19wH4H3hhWMqZXufVEZ2wtiw,17221
|
@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.42.0)
Root-Is-Purelib: false
Tag: cp311-cp311-win_amd64

@@ -1 +0,0 @@
sqlalchemy
Binary file not shown.
@@ -1,222 +0,0 @@
# don't import any costly modules
import sys
import os


is_pypy = '__pypy__' in sys.builtin_module_names


def warn_distutils_present():
    if 'distutils' not in sys.modules:
        return
    if is_pypy and sys.version_info < (3, 7):
        # PyPy for 3.6 unconditionally imports distutils, so bypass the warning
        # https://foss.heptapod.net/pypy/pypy/-/blob/be829135bc0d758997b3566062999ee8b23872b4/lib-python/3/site.py#L250
        return
    import warnings

    warnings.warn(
        "Distutils was imported before Setuptools, but importing Setuptools "
        "also replaces the `distutils` module in `sys.modules`. This may lead "
        "to undesirable behaviors or errors. To avoid these issues, avoid "
        "using distutils directly, ensure that setuptools is installed in the "
        "traditional way (e.g. not an editable install), and/or make sure "
        "that setuptools is always imported before distutils."
    )


def clear_distutils():
    if 'distutils' not in sys.modules:
        return
    import warnings

    warnings.warn("Setuptools is replacing distutils.")
    mods = [
        name
        for name in sys.modules
        if name == "distutils" or name.startswith("distutils.")
    ]
    for name in mods:
        del sys.modules[name]


def enabled():
    """
    Allow selection of distutils by environment variable.
    """
    which = os.environ.get('SETUPTOOLS_USE_DISTUTILS', 'local')
    return which == 'local'


def ensure_local_distutils():
    import importlib

    clear_distutils()

    # With the DistutilsMetaFinder in place,
    # perform an import to cause distutils to be
    # loaded from setuptools._distutils. Ref #2906.
    with shim():
        importlib.import_module('distutils')

    # check that submodules load as expected
    core = importlib.import_module('distutils.core')
    assert '_distutils' in core.__file__, core.__file__
    assert 'setuptools._distutils.log' not in sys.modules


def do_override():
    """
    Ensure that the local copy of distutils is preferred over stdlib.

    See https://github.com/pypa/setuptools/issues/417#issuecomment-392298401
    for more motivation.
    """
    if enabled():
        warn_distutils_present()
        ensure_local_distutils()


class _TrivialRe:
    def __init__(self, *patterns):
        self._patterns = patterns

    def match(self, string):
        return all(pat in string for pat in self._patterns)


class DistutilsMetaFinder:
    def find_spec(self, fullname, path, target=None):
        # optimization: only consider top level modules and those
        # found in the CPython test suite.
        if path is not None and not fullname.startswith('test.'):
            return

        method_name = 'spec_for_{fullname}'.format(**locals())
        method = getattr(self, method_name, lambda: None)
        return method()

    def spec_for_distutils(self):
        if self.is_cpython():
            return

        import importlib
        import importlib.abc
        import importlib.util

        try:
            mod = importlib.import_module('setuptools._distutils')
        except Exception:
            # There are a couple of cases where setuptools._distutils
            # may not be present:
            # - An older Setuptools without a local distutils is
            #   taking precedence. Ref #2957.
            # - Path manipulation during sitecustomize removes
            #   setuptools from the path but only after the hook
            #   has been loaded. Ref #2980.
            # In either case, fall back to stdlib behavior.
            return

        class DistutilsLoader(importlib.abc.Loader):
            def create_module(self, spec):
                mod.__name__ = 'distutils'
                return mod

            def exec_module(self, module):
                pass

        return importlib.util.spec_from_loader(
            'distutils', DistutilsLoader(), origin=mod.__file__
        )

    @staticmethod
    def is_cpython():
        """
        Suppress supplying distutils for CPython (build and tests).
        Ref #2965 and #3007.
        """
        return os.path.isfile('pybuilddir.txt')

    def spec_for_pip(self):
        """
        Ensure stdlib distutils when running under pip.
        See pypa/pip#8761 for rationale.
        """
        if self.pip_imported_during_build():
            return
        clear_distutils()
        self.spec_for_distutils = lambda: None

    @classmethod
    def pip_imported_during_build(cls):
        """
        Detect if pip is being imported in a build script. Ref #2355.
        """
        import traceback

        return any(
            cls.frame_file_is_setup(frame) for frame, line in traceback.walk_stack(None)
        )

    @staticmethod
    def frame_file_is_setup(frame):
        """
        Return True if the indicated frame suggests a setup.py file.
        """
        # some frames may not have __file__ (#2940)
        return frame.f_globals.get('__file__', '').endswith('setup.py')

    def spec_for_sensitive_tests(self):
        """
        Ensure stdlib distutils when running select tests under CPython.

        python/cpython#91169
        """
        clear_distutils()
        self.spec_for_distutils = lambda: None

    sensitive_tests = (
        [
            'test.test_distutils',
            'test.test_peg_generator',
            'test.test_importlib',
        ]
        if sys.version_info < (3, 10)
        else [
            'test.test_distutils',
        ]
    )


for name in DistutilsMetaFinder.sensitive_tests:
    setattr(
        DistutilsMetaFinder,
        f'spec_for_{name}',
        DistutilsMetaFinder.spec_for_sensitive_tests,
    )


DISTUTILS_FINDER = DistutilsMetaFinder()


def add_shim():
    # idempotent: only insert the finder if it is not already on sys.meta_path
    DISTUTILS_FINDER in sys.meta_path or insert_shim()


class shim:
    def __enter__(self):
        insert_shim()

    def __exit__(self, exc, value, tb):
        remove_shim()


def insert_shim():
    sys.meta_path.insert(0, DISTUTILS_FINDER)


def remove_shim():
    try:
        sys.meta_path.remove(DISTUTILS_FINDER)
    except ValueError:
        pass
@@ -1 +0,0 @@
__import__('_distutils_hack').do_override()
@@ -1 +0,0 @@
pip
@@ -1,295 +0,0 @@
Metadata-Version: 2.3
Name: annotated-types
Version: 0.7.0
Summary: Reusable constraint types to use with typing.Annotated
Project-URL: Homepage, https://github.com/annotated-types/annotated-types
Project-URL: Source, https://github.com/annotated-types/annotated-types
Project-URL: Changelog, https://github.com/annotated-types/annotated-types/releases
Author-email: Adrian Garcia Badaracco <1755071+adriangb@users.noreply.github.com>, Samuel Colvin <s@muelcolvin.com>, Zac Hatfield-Dodds <zac@zhd.dev>
License-File: LICENSE
Classifier: Development Status :: 4 - Beta
Classifier: Environment :: Console
Classifier: Environment :: MacOS X
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: Unix
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Typing :: Typed
Requires-Python: >=3.8
Requires-Dist: typing-extensions>=4.0.0; python_version < '3.9'
Description-Content-Type: text/markdown

# annotated-types

[](https://github.com/annotated-types/annotated-types/actions?query=event%3Apush+branch%3Amain+workflow%3ACI)
[](https://pypi.python.org/pypi/annotated-types)
[](https://github.com/annotated-types/annotated-types)
[](https://github.com/annotated-types/annotated-types/blob/main/LICENSE)

[PEP-593](https://peps.python.org/pep-0593/) added `typing.Annotated` as a way of
adding context-specific metadata to existing types, and specifies that
`Annotated[T, x]` _should_ be treated as `T` by any tool or library without special
logic for `x`.

This package provides metadata objects which can be used to represent common
constraints such as upper and lower bounds on scalar values and collection sizes,
a `Predicate` marker for runtime checks, and
descriptions of how we intend these metadata to be interpreted. In some cases,
we also note alternative representations which do not require this package.

## Install

```bash
pip install annotated-types
```

## Examples

```python
from typing import Annotated
from annotated_types import Gt, Len, Predicate

class MyClass:
    age: Annotated[int, Gt(18)]                         # Valid: 19, 20, ...
                                                        # Invalid: 17, 18, "19", 19.0, ...
    factors: list[Annotated[int, Predicate(is_prime)]]  # Valid: 2, 3, 5, 7, 11, ...
                                                        # Invalid: 4, 8, -2, 5.0, "prime", ...

my_list: Annotated[list[int], Len(0, 10)]  # Valid: [], [10, 20, 30, 40, 50]
                                           # Invalid: (1, 2), ["abc"], [0] * 20
```
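
The example above references an `is_prime` predicate that it never defines. A minimal sketch of a suitable definition (our assumption; the package does not ship one) so the snippet runs as written:

```python
# Hypothetical helper assumed by the example above; annotated-types does not provide it.
def is_prime(n: int) -> bool:
    """Return True if n is a prime number."""
    if n < 2:
        return False
    # trial division up to the integer square root
    return all(n % d for d in range(2, int(n**0.5) + 1))
```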

## Documentation

_While `annotated-types` avoids runtime checks for performance, users should not
construct invalid combinations such as `MultipleOf("non-numeric")` or `Annotated[int, Len(3)]`.
Downstream implementors may choose to raise an error, emit a warning, silently ignore
a metadata item, etc., if the metadata objects described below are used with an
incompatible type - or for any other reason!_

### Gt, Ge, Lt, Le

Express inclusive and/or exclusive bounds on orderable values - which may be numbers,
dates, times, strings, sets, etc. Note that the boundary value need not be of the
same type that was annotated, so long as they can be compared: `Annotated[int, Gt(1.5)]`
is fine, for example, and implies that the value is an integer x such that `x > 1.5`.

We suggest that implementors may also interpret `functools.partial(operator.lt, 1.5)`
as being equivalent to `Gt(1.5)`, for users who wish to avoid a runtime dependency on
the `annotated-types` package.

To be explicit, these types have the following meanings:

* `Gt(x)` - value must be "Greater Than" `x` - equivalent to exclusive minimum
* `Ge(x)` - value must be "Greater than or Equal" to `x` - equivalent to inclusive minimum
* `Lt(x)` - value must be "Less Than" `x` - equivalent to exclusive maximum
* `Le(x)` - value must be "Less than or Equal" to `x` - equivalent to inclusive maximum
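
To make the intended semantics concrete, here is a minimal consumer-side sketch (not part of `annotated-types` itself, which performs no runtime checking) of how these four bounds could be enforced:

```python
# A minimal sketch: enforce any Gt/Ge/Lt/Le found in an Annotated type.
from typing import Annotated, get_args, get_origin

from annotated_types import Ge, Gt, Le, Lt


def check_bounds(tp, value) -> bool:
    """Return True if value satisfies every Gt/Ge/Lt/Le constraint on tp."""
    if get_origin(tp) is not Annotated:
        return True
    checks = {
        Gt: lambda v, bound: v > bound,
        Ge: lambda v, bound: v >= bound,
        Lt: lambda v, bound: v < bound,
        Le: lambda v, bound: v <= bound,
    }
    for meta in get_args(tp)[1:]:
        for cls, ok in checks.items():
            if isinstance(meta, cls):
                # each bound class stores its value in a same-named field: gt, ge, lt, le
                if not ok(value, getattr(meta, cls.__name__.lower())):
                    return False
    return True


Age = Annotated[int, Gt(18), Le(120)]
assert check_bounds(Age, 30)
assert not check_bounds(Age, 18)  # Gt(18) is exclusive
```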

### Interval

`Interval(gt, ge, lt, le)` allows you to specify an upper and lower bound with a single
metadata object. `None` attributes should be ignored, and non-`None` attributes
treated as per the single bounds above.
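
Because `Interval` is a `GroupedMetadata` (see below), iterating over it yields the equivalent single-bound objects:

```python
# Interval unpacks itself into the equivalent single bounds.
from annotated_types import Gt, Interval, Le

assert list(Interval(gt=0, le=10)) == [Gt(0), Le(10)]
```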

### MultipleOf

`MultipleOf(multiple_of=x)` might be interpreted in two ways:

1. Python semantics, implying `value % multiple_of == 0`, or
2. [JSONschema semantics](https://json-schema.org/draft/2020-12/json-schema-validation.html#rfc.section.6.2.1),
   where `int(value / multiple_of) == value / multiple_of`.

We encourage users to be aware of these two common interpretations and their
distinct behaviours, especially since very large or non-integer numbers make
it easy to cause silent data corruption due to floating-point imprecision.

We encourage libraries to carefully document which interpretation they implement.
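
The following sketch contrasts the two readings, and shows how binary floating point can defeat both of them - the imprecision hazard mentioned above:

```python
# Sketch of the two interpretations of MultipleOf described above.
def is_multiple_python(value: float, multiple_of: float) -> bool:
    return value % multiple_of == 0  # Python semantics


def is_multiple_jsonschema(value: float, multiple_of: float) -> bool:
    return int(value / multiple_of) == value / multiple_of  # JSONschema semantics


# Mathematically 0.3 is a multiple of 0.1, but binary floats disagree under
# either interpretation:
print(0.3 % 0.1)                         # 0.09999999999999998, not 0.0
print(is_multiple_python(0.3, 0.1))      # False
print(0.3 / 0.1)                         # 2.9999999999999996
print(is_multiple_jsonschema(0.3, 0.1))  # False
```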

### MinLen, MaxLen, Len

`Len()` implies that `min_length <= len(value) <= max_length` - lower and upper bounds are inclusive.

As well as `Len()` which can optionally include upper and lower bounds, we also
provide `MinLen(x)` and `MaxLen(y)` which are equivalent to `Len(min_length=x)`
and `Len(max_length=y)` respectively.

`Len`, `MinLen`, and `MaxLen` may be used with any type which supports `len(value)`.

Examples of usage:

* `Annotated[list, MaxLen(10)]` (or `Annotated[list, Len(max_length=10)]`) - list must have a length of 10 or less
* `Annotated[str, MaxLen(10)]` - string must have a length of 10 or less
* `Annotated[list, MinLen(3)]` (or `Annotated[list, Len(min_length=3)]`) - list must have a length of 3 or more
* `Annotated[list, Len(4, 6)]` - list must have a length of 4, 5, or 6
* `Annotated[list, Len(8, 8)]` - list must have a length of exactly 8

#### Changed in v0.4.0

* `min_inclusive` has been renamed to `min_length`, no change in meaning
* `max_exclusive` has been renamed to `max_length`, upper bound is now **inclusive** instead of **exclusive**
* The recommendation that slices are interpreted as `Len` has been removed due to ambiguity and the different semantic
  meaning of the upper bound in slices vs. `Len`

See [issue #23](https://github.com/annotated-types/annotated-types/issues/23) for discussion.

### Timezone

`Timezone` can be used with a `datetime` or a `time` to express which timezones
are allowed. `Annotated[datetime, Timezone(None)]` must be a naive datetime.
`Timezone(...)` ([literal ellipsis](https://docs.python.org/3/library/constants.html#Ellipsis))
expresses that any timezone-aware datetime is allowed. You may also pass a specific
timezone string or [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects)
object such as `Timezone(timezone.utc)` or `Timezone("Africa/Abidjan")` to express that you only
allow a specific timezone, though we note that this is often a symptom of fragile design.
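
A short illustration of the three forms described above (the annotation itself performs no checking; the `is_aware` helper is a hypothetical consumer-side check, following the `datetime` documentation):

```python
from datetime import datetime, timezone
from typing import Annotated

from annotated_types import Timezone

NaiveDt = Annotated[datetime, Timezone(None)]        # must be naive
AwareDt = Annotated[datetime, Timezone(...)]         # any tz-aware datetime
UtcDt = Annotated[datetime, Timezone(timezone.utc)]  # only UTC


# Hypothetical consumer-side awareness check:
def is_aware(dt: datetime) -> bool:
    return dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None
```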

#### Changed in v0.x.x

* `Timezone` accepts [`tzinfo`](https://docs.python.org/3/library/datetime.html#tzinfo-objects) objects instead of
  `timezone`, extending compatibility to [`zoneinfo`](https://docs.python.org/3/library/zoneinfo.html) and third party libraries.

### Unit

`Unit(unit: str)` expresses that the annotated numeric value is the magnitude of
a quantity with the specified unit. For example, `Annotated[float, Unit("m/s")]`
would be a float representing a velocity in meters per second.

Please note that `annotated_types` itself makes no attempt to parse or validate
the unit string in any way. That is left entirely to downstream libraries,
such as [`pint`](https://pint.readthedocs.io) or
[`astropy.units`](https://docs.astropy.org/en/stable/units/).

An example of how a library might use this metadata:

```python
from annotated_types import Unit
from typing import Annotated, TypeVar, Callable, Any, get_origin, get_args

# given a type annotated with a unit:
Meters = Annotated[float, Unit("m")]


# you can cast the annotation to a specific unit type with any
# callable that accepts a string and returns the desired type
T = TypeVar("T")


def cast_unit(tp: Any, unit_cls: Callable[[str], T]) -> T | None:
    if get_origin(tp) is Annotated:
        for arg in get_args(tp):
            if isinstance(arg, Unit):
                return unit_cls(arg.unit)
    return None


# using `pint`
import pint
pint_unit = cast_unit(Meters, pint.Unit)


# using `astropy.units`
import astropy.units as u
astropy_unit = cast_unit(Meters, u.Unit)
```

### Predicate

`Predicate(func: Callable)` expresses that `func(value)` is truthy for valid values.
Users should prefer the statically inspectable metadata above, but if you need
the full power and flexibility of arbitrary runtime predicates... here it is.

For some common constraints, we provide generic types:

* `IsLower = Annotated[T, Predicate(str.islower)]`
* `IsUpper = Annotated[T, Predicate(str.isupper)]`
* `IsDigit = Annotated[T, Predicate(str.isdigit)]`
* `IsFinite = Annotated[T, Predicate(math.isfinite)]`
* `IsNotFinite = Annotated[T, Predicate(Not(math.isfinite))]`
* `IsNan = Annotated[T, Predicate(math.isnan)]`
* `IsNotNan = Annotated[T, Predicate(Not(math.isnan))]`
* `IsInfinite = Annotated[T, Predicate(math.isinf)]`
* `IsNotInfinite = Annotated[T, Predicate(Not(math.isinf))]`

so that you can write e.g. `x: IsFinite[float] = 2.0` instead of the longer
(but exactly equivalent) `x: Annotated[float, Predicate(math.isfinite)] = 2.0`.

Some libraries might have special logic to handle known or understandable predicates,
for example by checking for `str.isdigit` and using its presence to both call custom
logic to enforce digit-only strings, and customise some generated external schema.
Users are therefore encouraged to avoid indirection like `lambda s: s.lower()`, in
favor of introspectable methods such as `str.lower` or `re.compile("pattern").search`.

To enable basic negation of commonly used predicates like `math.isnan` without
introducing indirection that makes it impossible for implementers to introspect
the predicate, we provide a `Not` wrapper that simply negates the predicate in an
introspectable manner. Several of the predicates listed above are created in this
manner; a short sketch follows at the end of this section.

We do not specify what behaviour should be expected for predicates that raise
an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
skip invalid constraints, or statically raise an error; or it might try calling it
and then propagate or discard the resulting
`TypeError: descriptor 'isdigit' for 'str' objects doesn't apply to a 'int' object`
exception. We encourage libraries to document the behaviour they choose.
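
As a small sketch of the `Not` wrapper mentioned above, it negates the call while keeping the wrapped callable discoverable:

```python
import math

from annotated_types import Not, Predicate

is_not_nan = Predicate(Not(math.isnan))

assert is_not_nan.func(1.0) is True        # the call is negated: not math.isnan(1.0)
assert is_not_nan.func.func is math.isnan  # the original predicate stays introspectable
```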

### Doc

`doc()` can be used to add documentation information in `Annotated`, for function and method parameters, variables, class attributes, return types, and any place where `Annotated` can be used.

It expects a value that can be statically analyzed, as the main use case is for static analysis, editors, documentation generators, and similar tools.

It returns a `DocInfo` class with a single attribute `documentation` containing the value passed to `doc()`.

This is the early adopter's alternative form of the [`typing-doc` proposal](https://github.com/tiangolo/fastapi/blob/typing-doc/typing_doc.md).

### Integrating downstream types with `GroupedMetadata`

Implementers may choose to provide a convenience wrapper that groups multiple pieces of metadata.
This can help reduce verbosity and cognitive overhead for users.
For example, an implementer like Pydantic might provide a `Field` or `Meta` type that accepts keyword arguments and transforms these into low-level metadata:

```python
from dataclasses import dataclass
from typing import Iterator
from annotated_types import GroupedMetadata, Ge


@dataclass
class Field(GroupedMetadata):
    ge: int | None = None
    description: str | None = None

    def __iter__(self) -> Iterator[object]:
        # Iterating over a GroupedMetadata object should yield annotated-types
        # constraint metadata objects which describe it as fully as possible,
        # and may include other unknown objects too.
        if self.ge is not None:
            yield Ge(self.ge)
        if self.description is not None:
            # (Description is an illustrative, hypothetical metadata object)
            yield Description(self.description)
```

Libraries consuming annotated-types constraints should check for `GroupedMetadata` and unpack it by iterating over the object and treating the results as if they had been "unpacked" in the `Annotated` type. The same logic should be applied to the [PEP 646 `Unpack` type](https://peps.python.org/pep-0646/), so that `Annotated[T, Field(...)]`, `Annotated[T, Unpack[Field(...)]]` and `Annotated[T, *Field(...)]` are all treated consistently.

Libraries consuming annotated-types should also ignore any metadata they do not recognize that came from unpacking a `GroupedMetadata`, just like they ignore unrecognized metadata in `Annotated` itself.

Our own `annotated_types.Interval` class is a `GroupedMetadata` which unpacks itself into `Gt`, `Lt`, etc., so this is not an abstract concern. Similarly, `annotated_types.Len` is a `GroupedMetadata` which unpacks itself into `MinLen` (optionally) and `MaxLen`.

### Consuming metadata

We do not intend to be prescriptive as to _how_ the metadata and constraints are used, but as an example of how one might parse constraints from type annotations see our [implementation in `test_main.py`](https://github.com/annotated-types/annotated-types/blob/f59cf6d1b5255a0fe359b93896759a180bec30ae/tests/test_main.py#L94-L103).

It is up to the implementer to determine how this metadata is used.
You could use the metadata for runtime type checking, for generating schemas or to generate example data, amongst other use cases.
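
As one concrete, non-normative sketch of the consumption rules above, a consumer could flatten an annotation into a list of constraint objects, unpacking any `GroupedMetadata` it finds (this sketch ignores `Unpack` and nested annotations):

```python
# Sketch: collect flat constraint metadata from an Annotated type,
# unpacking GroupedMetadata (e.g. Interval, Len) as described above.
from typing import Annotated, get_args, get_origin

from annotated_types import GroupedMetadata, Gt, Interval, Le, MultipleOf


def collect_metadata(tp) -> list:
    if get_origin(tp) is not Annotated:
        return []
    flat = []
    for meta in get_args(tp)[1:]:
        if isinstance(meta, GroupedMetadata):
            flat.extend(meta)  # unpack e.g. Interval into Gt/Ge/Lt/Le
        else:
            flat.append(meta)
    return flat


tp = Annotated[int, Interval(gt=0, le=100), MultipleOf(5)]
assert collect_metadata(tp) == [Gt(0), Le(100), MultipleOf(5)]
```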

## Design & History

This package was designed at the PyCon 2022 sprints by the maintainers of Pydantic
and Hypothesis, with the goal of making it as easy as possible for end-users to
provide more informative annotations for use by runtime libraries.

It is deliberately minimal, and following PEP-593 allows considerable discretion
to downstream implementers in what (if anything!) they choose to support. Nonetheless, we expect
that staying simple and covering _only_ the most common use-cases will give users
and maintainers the best experience we can. If you'd like more constraints for your
types - follow our lead, by defining them and documenting them downstream!
@@ -1,10 +0,0 @@
annotated_types-0.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
annotated_types-0.7.0.dist-info/METADATA,sha256=7ltqxksJJ0wCYFGBNIQCWTlWQGeAH0hRFdnK3CB895E,15046
annotated_types-0.7.0.dist-info/RECORD,,
annotated_types-0.7.0.dist-info/WHEEL,sha256=zEMcRr9Kr03x1ozGwg5v9NQBKn3kndp6LSoSlVg-jhU,87
annotated_types-0.7.0.dist-info/licenses/LICENSE,sha256=_hBJiEsaDZNCkB6I4H8ykl0ksxIdmXK2poBfuYJLCV0,1083
annotated_types/__init__.py,sha256=RynLsRKUEGI0KimXydlD1fZEfEzWwDo0Uon3zOKhG1Q,13819
annotated_types/__pycache__/__init__.cpython-311.pyc,,
annotated_types/__pycache__/test_cases.cpython-311.pyc,,
annotated_types/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
annotated_types/test_cases.py,sha256=zHFX6EpcMbGJ8FzBYDbO56bPwx_DYIVSKbZM-4B3_lg,6421
@@ -1,4 +0,0 @@
Wheel-Version: 1.0
Generator: hatchling 1.24.2
Root-Is-Purelib: true
Tag: py3-none-any
@@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2022 the contributors

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,432 +0,0 @@
|
||||
import math
|
||||
import sys
|
||||
import types
|
||||
from dataclasses import dataclass
|
||||
from datetime import tzinfo
|
||||
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, SupportsFloat, SupportsIndex, TypeVar, Union
|
||||
|
||||
if sys.version_info < (3, 8):
|
||||
from typing_extensions import Protocol, runtime_checkable
|
||||
else:
|
||||
from typing import Protocol, runtime_checkable
|
||||
|
||||
if sys.version_info < (3, 9):
|
||||
from typing_extensions import Annotated, Literal
|
||||
else:
|
||||
from typing import Annotated, Literal
|
||||
|
||||
if sys.version_info < (3, 10):
|
||||
EllipsisType = type(Ellipsis)
|
||||
KW_ONLY = {}
|
||||
SLOTS = {}
|
||||
else:
|
||||
from types import EllipsisType
|
||||
|
||||
KW_ONLY = {"kw_only": True}
|
||||
SLOTS = {"slots": True}
|
||||
|
||||
|
||||
__all__ = (
|
||||
'BaseMetadata',
|
||||
'GroupedMetadata',
|
||||
'Gt',
|
||||
'Ge',
|
||||
'Lt',
|
||||
'Le',
|
||||
'Interval',
|
||||
'MultipleOf',
|
||||
'MinLen',
|
||||
'MaxLen',
|
||||
'Len',
|
||||
'Timezone',
|
||||
'Predicate',
|
||||
'LowerCase',
|
||||
'UpperCase',
|
||||
'IsDigits',
|
||||
'IsFinite',
|
||||
'IsNotFinite',
|
||||
'IsNan',
|
||||
'IsNotNan',
|
||||
'IsInfinite',
|
||||
'IsNotInfinite',
|
||||
'doc',
|
||||
'DocInfo',
|
||||
'__version__',
|
||||
)
|
||||
|
||||
__version__ = '0.7.0'
|
||||
|
||||
|
||||
T = TypeVar('T')
|
||||
|
||||
|
||||
# arguments that start with __ are considered
|
||||
# positional only
|
||||
# see https://peps.python.org/pep-0484/#positional-only-arguments
|
||||
|
||||
|
||||
class SupportsGt(Protocol):
|
||||
def __gt__(self: T, __other: T) -> bool:
|
||||
...
|
||||
|
||||
|
||||
class SupportsGe(Protocol):
|
||||
def __ge__(self: T, __other: T) -> bool:
|
||||
...
|
||||
|
||||
|
||||
class SupportsLt(Protocol):
|
||||
def __lt__(self: T, __other: T) -> bool:
|
||||
...
|
||||
|
||||
|
||||
class SupportsLe(Protocol):
|
||||
def __le__(self: T, __other: T) -> bool:
|
||||
...
|
||||
|
||||
|
||||
class SupportsMod(Protocol):
|
||||
def __mod__(self: T, __other: T) -> T:
|
||||
...
|
||||
|
||||
|
||||
class SupportsDiv(Protocol):
|
||||
def __div__(self: T, __other: T) -> T:
|
||||
...
|
||||
|
||||
|
||||
class BaseMetadata:
|
||||
"""Base class for all metadata.
|
||||
|
||||
This exists mainly so that implementers
|
||||
can do `isinstance(..., BaseMetadata)` while traversing field annotations.
|
||||
"""
|
||||
|
||||
__slots__ = ()
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Gt(BaseMetadata):
|
||||
"""Gt(gt=x) implies that the value must be greater than x.
|
||||
|
||||
It can be used with any type that supports the ``>`` operator,
|
||||
including numbers, dates and times, strings, sets, and so on.
|
||||
"""
|
||||
|
||||
gt: SupportsGt
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Ge(BaseMetadata):
|
||||
"""Ge(ge=x) implies that the value must be greater than or equal to x.
|
||||
|
||||
It can be used with any type that supports the ``>=`` operator,
|
||||
including numbers, dates and times, strings, sets, and so on.
|
||||
"""
|
||||
|
||||
ge: SupportsGe
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Lt(BaseMetadata):
|
||||
"""Lt(lt=x) implies that the value must be less than x.
|
||||
|
||||
It can be used with any type that supports the ``<`` operator,
|
||||
including numbers, dates and times, strings, sets, and so on.
|
||||
"""
|
||||
|
||||
lt: SupportsLt
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Le(BaseMetadata):
|
||||
"""Le(le=x) implies that the value must be less than or equal to x.
|
||||
|
||||
It can be used with any type that supports the ``<=`` operator,
|
||||
including numbers, dates and times, strings, sets, and so on.
|
||||
"""
|
||||
|
||||
le: SupportsLe
|
||||
|
||||
|
||||
@runtime_checkable
|
||||
class GroupedMetadata(Protocol):
|
||||
"""A grouping of multiple objects, like typing.Unpack.
|
||||
|
||||
`GroupedMetadata` on its own is not metadata and has no meaning.
|
||||
All of the constraints and metadata should be fully expressable
|
||||
in terms of the `BaseMetadata`'s returned by `GroupedMetadata.__iter__()`.
|
||||
|
||||
Concrete implementations should override `GroupedMetadata.__iter__()`
|
||||
to add their own metadata.
|
||||
For example:
|
||||
|
||||
>>> @dataclass
|
||||
>>> class Field(GroupedMetadata):
|
||||
>>> gt: float | None = None
|
||||
>>> description: str | None = None
|
||||
...
|
||||
>>> def __iter__(self) -> Iterable[object]:
|
||||
>>> if self.gt is not None:
|
||||
>>> yield Gt(self.gt)
|
||||
>>> if self.description is not None:
|
||||
>>> yield Description(self.gt)
|
||||
|
||||
Also see the implementation of `Interval` below for an example.
|
||||
|
||||
Parsers should recognize this and unpack it so that it can be used
|
||||
both with and without unpacking:
|
||||
|
||||
- `Annotated[int, Field(...)]` (parser must unpack Field)
|
||||
- `Annotated[int, *Field(...)]` (PEP-646)
|
||||
""" # noqa: trailing-whitespace
|
||||
|
||||
@property
|
||||
def __is_annotated_types_grouped_metadata__(self) -> Literal[True]:
|
||||
return True
|
||||
|
||||
def __iter__(self) -> Iterator[object]:
|
||||
...
|
||||
|
||||
if not TYPE_CHECKING:
|
||||
__slots__ = () # allow subclasses to use slots
|
||||
|
||||
def __init_subclass__(cls, *args: Any, **kwargs: Any) -> None:
|
||||
# Basic ABC like functionality without the complexity of an ABC
|
||||
super().__init_subclass__(*args, **kwargs)
|
||||
if cls.__iter__ is GroupedMetadata.__iter__:
|
||||
raise TypeError("Can't subclass GroupedMetadata without implementing __iter__")
|
||||
|
||||
def __iter__(self) -> Iterator[object]: # noqa: F811
|
||||
raise NotImplementedError # more helpful than "None has no attribute..." type errors
|
||||
|
||||
|
||||
@dataclass(frozen=True, **KW_ONLY, **SLOTS)
|
||||
class Interval(GroupedMetadata):
|
||||
"""Interval can express inclusive or exclusive bounds with a single object.
|
||||
|
||||
It accepts keyword arguments ``gt``, ``ge``, ``lt``, and/or ``le``, which
|
||||
are interpreted the same way as the single-bound constraints.
|
||||
"""
|
||||
|
||||
gt: Union[SupportsGt, None] = None
|
||||
ge: Union[SupportsGe, None] = None
|
||||
lt: Union[SupportsLt, None] = None
|
||||
le: Union[SupportsLe, None] = None
|
||||
|
||||
def __iter__(self) -> Iterator[BaseMetadata]:
|
||||
"""Unpack an Interval into zero or more single-bounds."""
|
||||
if self.gt is not None:
|
||||
yield Gt(self.gt)
|
||||
if self.ge is not None:
|
||||
yield Ge(self.ge)
|
||||
if self.lt is not None:
|
||||
yield Lt(self.lt)
|
||||
if self.le is not None:
|
||||
yield Le(self.le)
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class MultipleOf(BaseMetadata):
|
||||
"""MultipleOf(multiple_of=x) might be interpreted in two ways:
|
||||
|
||||
1. Python semantics, implying ``value % multiple_of == 0``, or
|
||||
2. JSONschema semantics, where ``int(value / multiple_of) == value / multiple_of``
|
||||
|
||||
We encourage users to be aware of these two common interpretations,
|
||||
and libraries to carefully document which they implement.
|
||||
"""
|
||||
|
||||
multiple_of: Union[SupportsDiv, SupportsMod]
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class MinLen(BaseMetadata):
|
||||
"""
|
||||
MinLen() implies minimum inclusive length,
|
||||
e.g. ``len(value) >= min_length``.
|
||||
"""
|
||||
|
||||
min_length: Annotated[int, Ge(0)]
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class MaxLen(BaseMetadata):
|
||||
"""
|
||||
MaxLen() implies maximum inclusive length,
|
||||
e.g. ``len(value) <= max_length``.
|
||||
"""
|
||||
|
||||
max_length: Annotated[int, Ge(0)]
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Len(GroupedMetadata):
|
||||
"""
|
||||
Len() implies that ``min_length <= len(value) <= max_length``.
|
||||
|
||||
Upper bound may be omitted or ``None`` to indicate no upper length bound.
|
||||
"""
|
||||
|
||||
min_length: Annotated[int, Ge(0)] = 0
|
||||
max_length: Optional[Annotated[int, Ge(0)]] = None
|
||||
|
||||
def __iter__(self) -> Iterator[BaseMetadata]:
|
||||
"""Unpack a Len into zone or more single-bounds."""
|
||||
if self.min_length > 0:
|
||||
yield MinLen(self.min_length)
|
||||
if self.max_length is not None:
|
||||
yield MaxLen(self.max_length)
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Timezone(BaseMetadata):
|
||||
"""Timezone(tz=...) requires a datetime to be aware (or ``tz=None``, naive).
|
||||
|
||||
``Annotated[datetime, Timezone(None)]`` must be a naive datetime.
|
||||
``Timezone[...]`` (the ellipsis literal) expresses that the datetime must be
|
||||
tz-aware but any timezone is allowed.
|
||||
|
||||
You may also pass a specific timezone string or tzinfo object such as
|
||||
``Timezone(timezone.utc)`` or ``Timezone("Africa/Abidjan")`` to express that
|
||||
you only allow a specific timezone, though we note that this is often
|
||||
a symptom of poor design.
|
||||
"""
|
||||
|
||||
tz: Union[str, tzinfo, EllipsisType, None]
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Unit(BaseMetadata):
|
||||
"""Indicates that the value is a physical quantity with the specified unit.
|
||||
|
||||
It is intended for usage with numeric types, where the value represents the
|
||||
magnitude of the quantity. For example, ``distance: Annotated[float, Unit('m')]``
|
||||
or ``speed: Annotated[float, Unit('m/s')]``.
|
||||
|
||||
Interpretation of the unit string is left to the discretion of the consumer.
|
||||
It is suggested to follow conventions established by python libraries that work
|
||||
with physical quantities, such as
|
||||
|
||||
- ``pint`` : <https://pint.readthedocs.io/en/stable/>
|
||||
- ``astropy.units``: <https://docs.astropy.org/en/stable/units/>
|
||||
|
||||
For indicating a quantity with a certain dimensionality but without a specific unit
|
||||
it is recommended to use square brackets, e.g. `Annotated[float, Unit('[time]')]`.
|
||||
Note, however, ``annotated_types`` itself makes no use of the unit string.
|
||||
"""
|
||||
|
||||
unit: str
|
||||
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class Predicate(BaseMetadata):
|
||||
"""``Predicate(func: Callable)`` implies `func(value)` is truthy for valid values.
|
||||
|
||||
Users should prefer statically inspectable metadata, but if you need the full
|
||||
power and flexibility of arbitrary runtime predicates... here it is.
|
||||
|
||||
We provide a few predefined predicates for common string constraints:
|
||||
``IsLower = Predicate(str.islower)``, ``IsUpper = Predicate(str.isupper)``, and
|
||||
``IsDigits = Predicate(str.isdigit)``. Users are encouraged to use methods which
|
||||
can be given special handling, and avoid indirection like ``lambda s: s.lower()``.
|
||||
|
||||
Some libraries might have special logic to handle certain predicates, e.g. by
|
||||
checking for `str.isdigit` and using its presence to both call custom logic to
|
||||
enforce digit-only strings, and customise some generated external schema.
|
||||
|
||||
We do not specify what behaviour should be expected for predicates that raise
|
||||
an exception. For example `Annotated[int, Predicate(str.isdigit)]` might silently
|
||||
skip invalid constraints, or statically raise an error; or it might try calling it
|
||||
and then propagate or discard the resulting exception.
|
||||
"""
|
||||
|
||||
func: Callable[[Any], bool]
|
||||
|
||||
def __repr__(self) -> str:
|
||||
if getattr(self.func, "__name__", "<lambda>") == "<lambda>":
|
||||
return f"{self.__class__.__name__}({self.func!r})"
|
||||
if isinstance(self.func, (types.MethodType, types.BuiltinMethodType)) and (
|
||||
namespace := getattr(self.func.__self__, "__name__", None)
|
||||
):
|
||||
return f"{self.__class__.__name__}({namespace}.{self.func.__name__})"
|
||||
if isinstance(self.func, type(str.isascii)): # method descriptor
|
||||
return f"{self.__class__.__name__}({self.func.__qualname__})"
|
||||
return f"{self.__class__.__name__}({self.func.__name__})"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Not:
|
||||
func: Callable[[Any], bool]
|
||||
|
||||
def __call__(self, __v: Any) -> bool:
|
||||
return not self.func(__v)
|
||||
|
||||
|
||||
_StrType = TypeVar("_StrType", bound=str)
|
||||
|
||||
LowerCase = Annotated[_StrType, Predicate(str.islower)]
|
||||
"""
|
||||
Return True if the string is a lowercase string, False otherwise.
|
||||
|
||||
A string is lowercase if all cased characters in the string are lowercase and there is at least one cased character in the string.
|
||||
""" # noqa: E501
|
||||
UpperCase = Annotated[_StrType, Predicate(str.isupper)]
|
||||
"""
|
||||
Return True if the string is an uppercase string, False otherwise.
|
||||
|
||||
A string is uppercase if all cased characters in the string are uppercase and there is at least one cased character in the string.
|
||||
""" # noqa: E501
|
||||
IsDigit = Annotated[_StrType, Predicate(str.isdigit)]
|
||||
IsDigits = IsDigit # type: ignore # plural for backwards compatibility, see #63
|
||||
"""
|
||||
Return True if the string is a digit string, False otherwise.
|
||||
|
||||
A string is a digit string if all characters in the string are digits and there is at least one character in the string.
|
||||
""" # noqa: E501
|
||||
IsAscii = Annotated[_StrType, Predicate(str.isascii)]
|
||||
"""
|
||||
Return True if all characters in the string are ASCII, False otherwise.
|
||||
|
||||
ASCII characters have code points in the range U+0000-U+007F. Empty string is ASCII too.
|
||||
"""
|
||||
|
||||
_NumericType = TypeVar('_NumericType', bound=Union[SupportsFloat, SupportsIndex])
|
||||
IsFinite = Annotated[_NumericType, Predicate(math.isfinite)]
|
||||
"""Return True if x is neither an infinity nor a NaN, and False otherwise."""
|
||||
IsNotFinite = Annotated[_NumericType, Predicate(Not(math.isfinite))]
|
||||
"""Return True if x is one of infinity or NaN, and False otherwise"""
|
||||
IsNan = Annotated[_NumericType, Predicate(math.isnan)]
|
||||
"""Return True if x is a NaN (not a number), and False otherwise."""
|
||||
IsNotNan = Annotated[_NumericType, Predicate(Not(math.isnan))]
|
||||
"""Return True if x is anything but NaN (not a number), and False otherwise."""
|
||||
IsInfinite = Annotated[_NumericType, Predicate(math.isinf)]
|
||||
"""Return True if x is a positive or negative infinity, and False otherwise."""
|
||||
IsNotInfinite = Annotated[_NumericType, Predicate(Not(math.isinf))]
|
||||
"""Return True if x is neither a positive or negative infinity, and False otherwise."""
|
||||
|
||||
try:
|
||||
from typing_extensions import DocInfo, doc # type: ignore [attr-defined]
|
||||
except ImportError:
|
||||
|
||||
@dataclass(frozen=True, **SLOTS)
|
||||
class DocInfo: # type: ignore [no-redef]
|
||||
""" "
|
||||
The return value of doc(), mainly to be used by tools that want to extract the
|
||||
Annotated documentation at runtime.
|
||||
"""
|
||||
|
||||
documentation: str
|
||||
"""The documentation string passed to doc()."""
|
||||
|
||||
def doc(
|
||||
documentation: str,
|
||||
) -> DocInfo:
|
||||
"""
|
||||
Add documentation to a type annotation inside of Annotated.
|
||||
|
||||
For example:
|
||||
|
||||
>>> def hi(name: Annotated[int, doc("The name of the user")]) -> None: ...
|
||||
"""
|
||||
return DocInfo(documentation)
|
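The predicate aliases above only attach metadata; enforcement is left to the
consuming tool. A minimal sketch of such a consumer, assuming nothing beyond
the Predicate and Not classes defined above (check() is a hypothetical helper,
not part of the package):

import math

import annotated_types as at


def check(value: object, *metadata: object) -> bool:
    # Only Predicate metadata is enforced here; anything else is ignored.
    return all(m.func(value) for m in metadata if isinstance(m, at.Predicate))


print(check(1.5, at.Predicate(math.isfinite)))            # True  (cf. IsFinite)
print(check(math.nan, at.Predicate(at.Not(math.isnan))))  # False (cf. IsNotNan)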
@@ -1,151 +0,0 @@
import math
import sys
from datetime import date, datetime, timedelta, timezone
from decimal import Decimal
from typing import Any, Dict, Iterable, Iterator, List, NamedTuple, Set, Tuple

if sys.version_info < (3, 9):
    from typing_extensions import Annotated
else:
    from typing import Annotated

import annotated_types as at


class Case(NamedTuple):
    """
    A test case for `annotated_types`.
    """

    annotation: Any
    valid_cases: Iterable[Any]
    invalid_cases: Iterable[Any]


def cases() -> Iterable[Case]:
    # Gt, Ge, Lt, Le
    yield Case(Annotated[int, at.Gt(4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[float, at.Gt(0.5)], (0.6, 0.7, 0.8, 0.9), (0.5, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Gt(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
    )
    yield Case(
        Annotated[datetime, at.Gt(date(2000, 1, 1))],
        [date(2000, 1, 2), date(2000, 1, 3)],
        [date(2000, 1, 1), date(1999, 12, 31)],
    )
    yield Case(
        Annotated[datetime, at.Gt(Decimal('1.123'))],
        [Decimal('1.1231'), Decimal('123')],
        [Decimal('1.123'), Decimal('0')],
    )

    yield Case(Annotated[int, at.Ge(4)], (4, 5, 6, 1000, 4), (0, -1))
    yield Case(Annotated[float, at.Ge(0.5)], (0.5, 0.6, 0.7, 0.8, 0.9), (0.4, 0.0, -0.1))
    yield Case(
        Annotated[datetime, at.Ge(datetime(2000, 1, 1))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(1998, 1, 1), datetime(1999, 12, 31)],
    )

    yield Case(Annotated[int, at.Lt(4)], (0, -1), (4, 5, 6, 1000, 4))
    yield Case(Annotated[float, at.Lt(0.5)], (0.4, 0.0, -0.1), (0.5, 0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Lt(datetime(2000, 1, 1))],
        [datetime(1999, 12, 31), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    yield Case(Annotated[int, at.Le(4)], (4, 0, -1), (5, 6, 1000))
    yield Case(Annotated[float, at.Le(0.5)], (0.5, 0.0, -0.1), (0.6, 0.7, 0.8, 0.9))
    yield Case(
        Annotated[datetime, at.Le(datetime(2000, 1, 1))],
        [datetime(2000, 1, 1), datetime(1999, 12, 31)],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
    )

    # Interval
    yield Case(Annotated[int, at.Interval(gt=4)], (5, 6, 1000), (4, 0, -1))
    yield Case(Annotated[int, at.Interval(gt=4, lt=10)], (5, 6), (4, 10, 1000, 0, -1))
    yield Case(Annotated[float, at.Interval(ge=0.5, le=1)], (0.5, 0.9, 1), (0.49, 1.1))
    yield Case(
        Annotated[datetime, at.Interval(gt=datetime(2000, 1, 1), le=datetime(2000, 1, 3))],
        [datetime(2000, 1, 2), datetime(2000, 1, 3)],
        [datetime(2000, 1, 1), datetime(2000, 1, 4)],
    )

    yield Case(Annotated[int, at.MultipleOf(multiple_of=3)], (0, 3, 9), (1, 2, 4))
    yield Case(Annotated[float, at.MultipleOf(multiple_of=0.5)], (0, 0.5, 1, 1.5), (0.4, 1.1))

    # lengths

    yield Case(Annotated[str, at.MinLen(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[str, at.Len(3)], ('123', '1234', 'x' * 10), ('', '1', '12'))
    yield Case(Annotated[List[int], at.MinLen(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))
    yield Case(Annotated[List[int], at.Len(3)], ([1, 2, 3], [1, 2, 3, 4], [1] * 10), ([], [1], [1, 2]))

    yield Case(Annotated[str, at.MaxLen(4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[str, at.Len(0, 4)], ('', '1234'), ('12345', 'x' * 10))
    yield Case(Annotated[List[str], at.MaxLen(4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))
    yield Case(Annotated[List[str], at.Len(0, 4)], ([], ['a', 'bcdef'], ['a', 'b', 'c']), (['a'] * 5, ['b'] * 10))

    yield Case(Annotated[str, at.Len(3, 5)], ('123', '12345'), ('', '1', '12', '123456', 'x' * 10))
    yield Case(Annotated[str, at.Len(3, 3)], ('123',), ('12', '1234'))

    yield Case(Annotated[Dict[int, int], at.Len(2, 3)], [{1: 1, 2: 2}], [{}, {1: 1}, {1: 1, 2: 2, 3: 3, 4: 4}])
    yield Case(Annotated[Set[int], at.Len(2, 3)], ({1, 2}, {1, 2, 3}), (set(), {1}, {1, 2, 3, 4}))
    yield Case(Annotated[Tuple[int, ...], at.Len(2, 3)], ((1, 2), (1, 2, 3)), ((), (1,), (1, 2, 3, 4)))

    # Timezone

    yield Case(
        Annotated[datetime, at.Timezone(None)], [datetime(2000, 1, 1)], [datetime(2000, 1, 1, tzinfo=timezone.utc)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(...)], [datetime(2000, 1, 1, tzinfo=timezone.utc)], [datetime(2000, 1, 1)]
    )
    yield Case(
        Annotated[datetime, at.Timezone(timezone.utc)],
        [datetime(2000, 1, 1, tzinfo=timezone.utc)],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )
    yield Case(
        Annotated[datetime, at.Timezone('Europe/London')],
        [datetime(2000, 1, 1, tzinfo=timezone(timedelta(0), name='Europe/London'))],
        [datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=timezone(timedelta(hours=6)))],
    )

    # Quantity

    yield Case(Annotated[float, at.Unit(unit='m')], (5, 4.2), ('5m', '4.2m'))

    # predicate types

    yield Case(at.LowerCase[str], ['abc', 'foobar'], ['', 'A', 'Boom'])
    yield Case(at.UpperCase[str], ['ABC', 'DEFO'], ['', 'a', 'abc', 'AbC'])
    yield Case(at.IsDigit[str], ['123'], ['', 'ab', 'a1b2'])
    yield Case(at.IsAscii[str], ['123', 'foo bar'], ['£100', '😊', 'whatever 👀'])

    yield Case(Annotated[int, at.Predicate(lambda x: x % 2 == 0)], [0, 2, 4], [1, 3, 5])

    yield Case(at.IsFinite[float], [1.23], [math.nan, math.inf, -math.inf])
    yield Case(at.IsNotFinite[float], [math.nan, math.inf], [1.23])
    yield Case(at.IsNan[float], [math.nan], [1.23, math.inf])
    yield Case(at.IsNotNan[float], [1.23, math.inf], [math.nan])
    yield Case(at.IsInfinite[float], [math.inf], [math.nan, 1.23])
    yield Case(at.IsNotInfinite[float], [math.nan, 1.23], [math.inf])

    # check stacked predicates
    yield Case(at.IsInfinite[Annotated[float, at.Predicate(lambda x: x > 0)]], [math.inf], [-math.inf, 1.23, math.nan])

    # doc
    yield Case(Annotated[int, at.doc("A number")], [1, 2], [])

    # custom GroupedMetadata
    class MyCustomGroupedMetadata(at.GroupedMetadata):
        def __iter__(self) -> Iterator[at.Predicate]:
            yield at.Predicate(lambda x: float(x).is_integer())

    yield Case(Annotated[float, MyCustomGroupedMetadata()], [0, 2.0], [0.01, 1.5])
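The Case tuples above are deliberately validator-agnostic. A sketch of how a
consumer's test suite might drive them (validate() is a hypothetical stand-in
for whatever enforcement the consuming library provides):

import pytest


@pytest.mark.parametrize("case", list(cases()), ids=repr)
def test_cases(case: Case) -> None:
    for value in case.valid_cases:
        assert validate(case.annotation, value)       # hypothetical validator
    for value in case.invalid_cases:
        assert not validate(case.annotation, value)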
@@ -1 +0,0 @@
pip
@@ -1,20 +0,0 @@
The MIT License (MIT)

Copyright (c) 2018 Alex Grönholm

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,105 +0,0 @@
Metadata-Version: 2.1
Name: anyio
Version: 4.7.0
Summary: High level compatibility layer for multiple asynchronous event loop implementations
Author-email: Alex Grönholm <alex.gronholm@nextday.fi>
License: MIT
Project-URL: Documentation, https://anyio.readthedocs.io/en/latest/
Project-URL: Changelog, https://anyio.readthedocs.io/en/stable/versionhistory.html
Project-URL: Source code, https://github.com/agronholm/anyio
Project-URL: Issue tracker, https://github.com/agronholm/anyio/issues
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Framework :: AnyIO
Classifier: Typing :: Typed
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Classifier: Programming Language :: Python :: 3.13
Requires-Python: >=3.9
Description-Content-Type: text/x-rst
License-File: LICENSE
Requires-Dist: exceptiongroup>=1.0.2; python_version < "3.11"
Requires-Dist: idna>=2.8
Requires-Dist: sniffio>=1.1
Requires-Dist: typing_extensions>=4.5; python_version < "3.13"
Provides-Extra: trio
Requires-Dist: trio>=0.26.1; extra == "trio"
Provides-Extra: test
Requires-Dist: anyio[trio]; extra == "test"
Requires-Dist: coverage[toml]>=7; extra == "test"
Requires-Dist: exceptiongroup>=1.2.0; extra == "test"
Requires-Dist: hypothesis>=4.0; extra == "test"
Requires-Dist: psutil>=5.9; extra == "test"
Requires-Dist: pytest>=7.0; extra == "test"
Requires-Dist: pytest-mock>=3.6.1; extra == "test"
Requires-Dist: trustme; extra == "test"
Requires-Dist: truststore>=0.9.1; python_version >= "3.10" and extra == "test"
Requires-Dist: uvloop>=0.21; (platform_python_implementation == "CPython" and platform_system != "Windows") and extra == "test"
Provides-Extra: doc
Requires-Dist: packaging; extra == "doc"
Requires-Dist: Sphinx~=7.4; extra == "doc"
Requires-Dist: sphinx_rtd_theme; extra == "doc"
Requires-Dist: sphinx-autodoc-typehints>=1.2.0; extra == "doc"

.. image:: https://github.com/agronholm/anyio/actions/workflows/test.yml/badge.svg
   :target: https://github.com/agronholm/anyio/actions/workflows/test.yml
   :alt: Build Status
.. image:: https://coveralls.io/repos/github/agronholm/anyio/badge.svg?branch=master
   :target: https://coveralls.io/github/agronholm/anyio?branch=master
   :alt: Code Coverage
.. image:: https://readthedocs.org/projects/anyio/badge/?version=latest
   :target: https://anyio.readthedocs.io/en/latest/?badge=latest
   :alt: Documentation
.. image:: https://badges.gitter.im/gitterHQ/gitter.svg
   :target: https://gitter.im/python-trio/AnyIO
   :alt: Gitter chat

AnyIO is an asynchronous networking and concurrency library that works on top of either asyncio_ or
trio_. It implements trio-like `structured concurrency`_ (SC) on top of asyncio and works in harmony
with the native SC of trio itself.

Applications and libraries written against AnyIO's API will run unmodified on either asyncio_ or
trio_. AnyIO can also be adopted into a library or application incrementally – bit by bit, no full
refactoring necessary. It will blend in with the native libraries of your chosen backend.

Documentation
-------------

View full documentation at: https://anyio.readthedocs.io/

Features
--------

AnyIO offers the following functionality:

* Task groups (nurseries_ in trio terminology)
* High-level networking (TCP, UDP and UNIX sockets)

  * `Happy eyeballs`_ algorithm for TCP connections (more robust than that of asyncio on Python
    3.8)
  * async/await style UDP sockets (unlike asyncio where you still have to use Transports and
    Protocols)

* A versatile API for byte streams and object streams
* Inter-task synchronization and communication (locks, conditions, events, semaphores, object
  streams)
* Worker threads
* Subprocesses
* Asynchronous file I/O (using worker threads)
* Signal handling

AnyIO also comes with its own pytest_ plugin which also supports asynchronous fixtures.
It even works with the popular Hypothesis_ library.

.. _asyncio: https://docs.python.org/3/library/asyncio.html
.. _trio: https://github.com/python-trio/trio
.. _structured concurrency: https://en.wikipedia.org/wiki/Structured_concurrency
.. _nurseries: https://trio.readthedocs.io/en/stable/reference-core.html#nurseries-and-spawning
.. _Happy eyeballs: https://en.wikipedia.org/wiki/Happy_Eyeballs
.. _pytest: https://docs.pytest.org/en/latest/
.. _Hypothesis: https://hypothesis.works/
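A minimal sketch of the task group API mentioned above (illustrative only, not
part of the packaged metadata)::

    import anyio

    async def sleep_and_print(delay: float, text: str) -> None:
        await anyio.sleep(delay)
        print(text)

    async def main() -> None:
        # Both tasks run concurrently; the task group waits for both to finish.
        async with anyio.create_task_group() as tg:
            tg.start_soon(sleep_and_print, 1.0, "first")
            tg.start_soon(sleep_and_print, 2.0, "second")

    anyio.run(main)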
@@ -1,84 +0,0 @@
anyio-4.7.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
anyio-4.7.0.dist-info/LICENSE,sha256=U2GsncWPLvX9LpsJxoKXwX8ElQkJu8gCO9uC6s8iwrA,1081
anyio-4.7.0.dist-info/METADATA,sha256=A-A-n0m-esMw8lYv8a9kqsr84J2aFh8MvqcTq2Xx_so,4653
anyio-4.7.0.dist-info/RECORD,,
anyio-4.7.0.dist-info/WHEEL,sha256=PZUExdf71Ui_so67QXpySuHtCi3-J3wvF4ORK6k_S8U,91
anyio-4.7.0.dist-info/entry_points.txt,sha256=_d6Yu6uiaZmNe0CydowirE9Cmg7zUL2g08tQpoS3Qvc,39
anyio-4.7.0.dist-info/top_level.txt,sha256=QglSMiWX8_5dpoVAEIHdEYzvqFMdSYWmCj6tYw2ITkQ,6
anyio/__init__.py,sha256=5NCKQNJueCeIJqVbOpAQdho2HIQrQvcnfQjuEhAiZcc,4433
anyio/__pycache__/__init__.cpython-311.pyc,,
anyio/__pycache__/from_thread.cpython-311.pyc,,
anyio/__pycache__/lowlevel.cpython-311.pyc,,
anyio/__pycache__/pytest_plugin.cpython-311.pyc,,
anyio/__pycache__/to_process.cpython-311.pyc,,
anyio/__pycache__/to_thread.cpython-311.pyc,,
anyio/_backends/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
anyio/_backends/__pycache__/__init__.cpython-311.pyc,,
anyio/_backends/__pycache__/_asyncio.cpython-311.pyc,,
anyio/_backends/__pycache__/_trio.cpython-311.pyc,,
anyio/_backends/_asyncio.py,sha256=i5Qe4IBdiWRlww0qIUAVsF-K0z30bgZakuMePpNbdro,94051
anyio/_backends/_trio.py,sha256=7oGxbqeveiesGm2pAnCRBydqy-Gbistn_xfsmKhSLLg,40371
anyio/_core/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
anyio/_core/__pycache__/__init__.cpython-311.pyc,,
anyio/_core/__pycache__/_asyncio_selector_thread.cpython-311.pyc,,
anyio/_core/__pycache__/_eventloop.cpython-311.pyc,,
anyio/_core/__pycache__/_exceptions.cpython-311.pyc,,
anyio/_core/__pycache__/_fileio.cpython-311.pyc,,
anyio/_core/__pycache__/_resources.cpython-311.pyc,,
anyio/_core/__pycache__/_signals.cpython-311.pyc,,
anyio/_core/__pycache__/_sockets.cpython-311.pyc,,
anyio/_core/__pycache__/_streams.cpython-311.pyc,,
anyio/_core/__pycache__/_subprocesses.cpython-311.pyc,,
anyio/_core/__pycache__/_synchronization.cpython-311.pyc,,
anyio/_core/__pycache__/_tasks.cpython-311.pyc,,
anyio/_core/__pycache__/_testing.cpython-311.pyc,,
anyio/_core/__pycache__/_typedattr.cpython-311.pyc,,
anyio/_core/_asyncio_selector_thread.py,sha256=vTdZBWaxRgVcgUaRb5uBwQ_VGgY3qPKF7l91IJ5Mqzo,4773
anyio/_core/_eventloop.py,sha256=t_tAwBFPjF8jrZGjlJ6bbYy6KA3bjsbZxV9mvh9t1i0,4695
anyio/_core/_exceptions.py,sha256=bKPr2QbkYG7nIb425L5JePUie9bGc9XfkY0y4JKWvFM,2488
anyio/_core/_fileio.py,sha256=DqnG_zvQFMqiIFaUeDRC1Ts3LT0FWHkWtGgm-684hvQ,20957
anyio/_core/_resources.py,sha256=NbmU5O5UX3xEyACnkmYX28Fmwdl-f-ny0tHym26e0w0,435
anyio/_core/_signals.py,sha256=vulT1M1xdLYtAR-eY5TamIgaf1WTlOwOrMGwswlTTr8,905
anyio/_core/_sockets.py,sha256=vQ5GnSDLHjEhHhV2yvsdiPs5wmPxxb1kRsv3RM5lbQk,26951
anyio/_core/_streams.py,sha256=OnaKgoDD-FcMSwLvkoAUGP51sG2ZdRvMpxt9q2w1gYA,1804
anyio/_core/_subprocesses.py,sha256=WquR6sHrnaZofaeqnL8U4Yv___msVW_WqivleLHK4zI,7760
anyio/_core/_synchronization.py,sha256=tct5FJFdgYjiEMtUeg5NGG15tf-2Qd7VaWuSgzS5dIU,20347
anyio/_core/_tasks.py,sha256=pvVEX2Fw159sf0ypAPerukKsZgRRwvFFedVW52nR2Vk,4764
anyio/_core/_testing.py,sha256=YUGwA5cgFFbUTv4WFd7cv_BSVr4ryTtPp8owQA3JdWE,2118
anyio/_core/_typedattr.py,sha256=P4ozZikn3-DbpoYcvyghS_FOYAgbmUxeoU8-L_07pZM,2508
anyio/abc/__init__.py,sha256=c2OQbTCS_fQowviMXanLPh8m29ccwkXmpDr7uyNZYOo,2652
anyio/abc/__pycache__/__init__.cpython-311.pyc,,
anyio/abc/__pycache__/_eventloop.cpython-311.pyc,,
anyio/abc/__pycache__/_resources.cpython-311.pyc,,
anyio/abc/__pycache__/_sockets.cpython-311.pyc,,
anyio/abc/__pycache__/_streams.cpython-311.pyc,,
anyio/abc/__pycache__/_subprocesses.cpython-311.pyc,,
anyio/abc/__pycache__/_tasks.cpython-311.pyc,,
anyio/abc/__pycache__/_testing.cpython-311.pyc,,
anyio/abc/_eventloop.py,sha256=Wd_3C3hLm0ex5z_eHHWGqvLle2OKCSexJSZVnwQNGV4,9658
anyio/abc/_resources.py,sha256=DrYvkNN1hH6Uvv5_5uKySvDsnknGVDe8FCKfko0VtN8,783
anyio/abc/_sockets.py,sha256=KhWtJxan8jpBXKwPaFeQzI4iRXdFaOIn0HXtDZnaO7U,6262
anyio/abc/_streams.py,sha256=GzST5Q2zQmxVzdrAqtbSyHNxkPlIC9AzeZJg_YyPAXw,6598
anyio/abc/_subprocesses.py,sha256=cumAPJTktOQtw63IqG0lDpyZqu_l1EElvQHMiwJgL08,2067
anyio/abc/_tasks.py,sha256=yJWbMwowvqjlAX4oJ3l9Is1w-zwynr2lX1Z02AWJqsY,3080
anyio/abc/_testing.py,sha256=tBJUzkSfOXJw23fe8qSJ03kJlShOYjjaEyFB6k6MYT8,1821
anyio/from_thread.py,sha256=dbi5TUH45_Sg_jZ8Vv1NJWVohe0WeQ_OaCvXIKveAGg,17478
anyio/lowlevel.py,sha256=nkgmW--SdxGVp0cmLUYazjkigveRm5HY7-gW8Bpp9oY,4169
anyio/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
anyio/pytest_plugin.py,sha256=vjGhGRHD31OyMgJRFQrMvExhx3Ea8KbyDqYKmiSDdXA,6712
anyio/streams/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
anyio/streams/__pycache__/__init__.cpython-311.pyc,,
anyio/streams/__pycache__/buffered.cpython-311.pyc,,
anyio/streams/__pycache__/file.cpython-311.pyc,,
anyio/streams/__pycache__/memory.cpython-311.pyc,,
anyio/streams/__pycache__/stapled.cpython-311.pyc,,
anyio/streams/__pycache__/text.cpython-311.pyc,,
anyio/streams/__pycache__/tls.cpython-311.pyc,,
anyio/streams/buffered.py,sha256=UCldKC168YuLvT7n3HtNPnQ2iWAMSTYQWbZvzLwMwkM,4500
anyio/streams/file.py,sha256=6uoTNb5KbMoj-6gS3_xrrL8uZN8Q4iIvOS1WtGyFfKw,4383
anyio/streams/memory.py,sha256=j8AyOExK4-UPaon_Xbhwax25Vqs0DwFg3ZXc-EIiHjY,10550
anyio/streams/stapled.py,sha256=U09pCrmOw9kkNhe6tKopsm1QIMT1lFTFvtb-A7SIe4k,4302
anyio/streams/text.py,sha256=6x8w8xlfCZKTUWQoJiMPoMhSSJFUBRKgoBNSBtbd9yg,5094
anyio/streams/tls.py,sha256=m3AE2LVSpoRHSIwSoSCupiOVL54EvOFoY3CcwTxcZfg,12742
anyio/to_process.py,sha256=cR4n7TssbbJowE_9cWme49zaeuoBuMzqgZ6cBIs0YIs,9571
anyio/to_thread.py,sha256=WM2JQ2MbVsd5D5CM08bQiTwzZIvpsGjfH1Fy247KoDQ,2396
@@ -1,5 +0,0 @@
Wheel-Version: 1.0
Generator: setuptools (75.6.0)
Root-Is-Purelib: true
Tag: py3-none-any
@@ -1,2 +0,0 @@
[pytest11]
anyio = anyio.pytest_plugin
@@ -1 +0,0 @@
anyio
@@ -1,76 +0,0 @@
from __future__ import annotations

from ._core._eventloop import current_time as current_time
from ._core._eventloop import get_all_backends as get_all_backends
from ._core._eventloop import get_cancelled_exc_class as get_cancelled_exc_class
from ._core._eventloop import run as run
from ._core._eventloop import sleep as sleep
from ._core._eventloop import sleep_forever as sleep_forever
from ._core._eventloop import sleep_until as sleep_until
from ._core._exceptions import BrokenResourceError as BrokenResourceError
from ._core._exceptions import BrokenWorkerProcess as BrokenWorkerProcess
from ._core._exceptions import BusyResourceError as BusyResourceError
from ._core._exceptions import ClosedResourceError as ClosedResourceError
from ._core._exceptions import DelimiterNotFound as DelimiterNotFound
from ._core._exceptions import EndOfStream as EndOfStream
from ._core._exceptions import IncompleteRead as IncompleteRead
from ._core._exceptions import TypedAttributeLookupError as TypedAttributeLookupError
from ._core._exceptions import WouldBlock as WouldBlock
from ._core._fileio import AsyncFile as AsyncFile
from ._core._fileio import Path as Path
from ._core._fileio import open_file as open_file
from ._core._fileio import wrap_file as wrap_file
from ._core._resources import aclose_forcefully as aclose_forcefully
from ._core._signals import open_signal_receiver as open_signal_receiver
from ._core._sockets import connect_tcp as connect_tcp
from ._core._sockets import connect_unix as connect_unix
from ._core._sockets import create_connected_udp_socket as create_connected_udp_socket
from ._core._sockets import (
    create_connected_unix_datagram_socket as create_connected_unix_datagram_socket,
)
from ._core._sockets import create_tcp_listener as create_tcp_listener
from ._core._sockets import create_udp_socket as create_udp_socket
from ._core._sockets import create_unix_datagram_socket as create_unix_datagram_socket
from ._core._sockets import create_unix_listener as create_unix_listener
from ._core._sockets import getaddrinfo as getaddrinfo
from ._core._sockets import getnameinfo as getnameinfo
from ._core._sockets import wait_readable as wait_readable
from ._core._sockets import wait_socket_readable as wait_socket_readable
from ._core._sockets import wait_socket_writable as wait_socket_writable
from ._core._sockets import wait_writable as wait_writable
from ._core._streams import create_memory_object_stream as create_memory_object_stream
from ._core._subprocesses import open_process as open_process
from ._core._subprocesses import run_process as run_process
from ._core._synchronization import CapacityLimiter as CapacityLimiter
from ._core._synchronization import (
    CapacityLimiterStatistics as CapacityLimiterStatistics,
)
from ._core._synchronization import Condition as Condition
from ._core._synchronization import ConditionStatistics as ConditionStatistics
from ._core._synchronization import Event as Event
from ._core._synchronization import EventStatistics as EventStatistics
from ._core._synchronization import Lock as Lock
from ._core._synchronization import LockStatistics as LockStatistics
from ._core._synchronization import ResourceGuard as ResourceGuard
from ._core._synchronization import Semaphore as Semaphore
from ._core._synchronization import SemaphoreStatistics as SemaphoreStatistics
from ._core._tasks import TASK_STATUS_IGNORED as TASK_STATUS_IGNORED
from ._core._tasks import CancelScope as CancelScope
from ._core._tasks import create_task_group as create_task_group
from ._core._tasks import current_effective_deadline as current_effective_deadline
from ._core._tasks import fail_after as fail_after
from ._core._tasks import move_on_after as move_on_after
from ._core._testing import TaskInfo as TaskInfo
from ._core._testing import get_current_task as get_current_task
from ._core._testing import get_running_tasks as get_running_tasks
from ._core._testing import wait_all_tasks_blocked as wait_all_tasks_blocked
from ._core._typedattr import TypedAttributeProvider as TypedAttributeProvider
from ._core._typedattr import TypedAttributeSet as TypedAttributeSet
from ._core._typedattr import typed_attribute as typed_attribute

# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
    if getattr(__value, "__module__", "").startswith("anyio."):
        __value.__module__ = __name__

del __value
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -1,150 +0,0 @@
from __future__ import annotations

import asyncio
import socket
import threading
from collections.abc import Callable
from selectors import EVENT_READ, EVENT_WRITE, DefaultSelector
from typing import TYPE_CHECKING, Any

if TYPE_CHECKING:
    from _typeshed import FileDescriptorLike

_selector_lock = threading.Lock()
_selector: Selector | None = None


class Selector:
    def __init__(self) -> None:
        self._thread = threading.Thread(target=self.run, name="AnyIO socket selector")
        self._selector = DefaultSelector()
        self._send, self._receive = socket.socketpair()
        self._send.setblocking(False)
        self._receive.setblocking(False)
        self._selector.register(self._receive, EVENT_READ)
        self._closed = False

    def start(self) -> None:
        self._thread.start()
        threading._register_atexit(self._stop)  # type: ignore[attr-defined]

    def _stop(self) -> None:
        global _selector
        self._closed = True
        self._notify_self()
        self._send.close()
        self._thread.join()
        self._selector.unregister(self._receive)
        self._receive.close()
        self._selector.close()
        _selector = None
        assert (
            not self._selector.get_map()
        ), "selector still has registered file descriptors after shutdown"

    def _notify_self(self) -> None:
        try:
            self._send.send(b"\x00")
        except BlockingIOError:
            pass

    def add_reader(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
        loop = asyncio.get_running_loop()
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, EVENT_READ, {EVENT_READ: (loop, callback)})
        else:
            if EVENT_READ in key.data:
                raise ValueError(
                    "this file descriptor is already registered for reading"
                )

            key.data[EVENT_READ] = loop, callback
            self._selector.modify(fd, key.events | EVENT_READ, key.data)

        self._notify_self()

    def add_writer(self, fd: FileDescriptorLike, callback: Callable[[], Any]) -> None:
        loop = asyncio.get_running_loop()
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            self._selector.register(fd, EVENT_WRITE, {EVENT_WRITE: (loop, callback)})
        else:
            if EVENT_WRITE in key.data:
                raise ValueError(
                    "this file descriptor is already registered for writing"
                )

            key.data[EVENT_WRITE] = loop, callback
            self._selector.modify(fd, key.events | EVENT_WRITE, key.data)

        self._notify_self()

    def remove_reader(self, fd: FileDescriptorLike) -> bool:
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False

        if new_events := key.events ^ EVENT_READ:
            del key.data[EVENT_READ]
            self._selector.modify(fd, new_events, key.data)
        else:
            self._selector.unregister(fd)

        return True

    def remove_writer(self, fd: FileDescriptorLike) -> bool:
        try:
            key = self._selector.get_key(fd)
        except KeyError:
            return False

        if new_events := key.events ^ EVENT_WRITE:
            del key.data[EVENT_WRITE]
            self._selector.modify(fd, new_events, key.data)
        else:
            self._selector.unregister(fd)

        return True

    def run(self) -> None:
        while not self._closed:
            for key, events in self._selector.select():
                if key.fileobj is self._receive:
                    try:
                        while self._receive.recv(4096):
                            pass
                    except BlockingIOError:
                        pass

                    continue

                if events & EVENT_READ:
                    loop, callback = key.data[EVENT_READ]
                    self.remove_reader(key.fd)
                    try:
                        loop.call_soon_threadsafe(callback)
                    except RuntimeError:
                        pass  # the loop was already closed

                if events & EVENT_WRITE:
                    loop, callback = key.data[EVENT_WRITE]
                    self.remove_writer(key.fd)
                    try:
                        loop.call_soon_threadsafe(callback)
                    except RuntimeError:
                        pass  # the loop was already closed


def get_selector() -> Selector:
    global _selector

    with _selector_lock:
        if _selector is None:
            _selector = Selector()
            _selector.start()

    return _selector
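A sketch of how this internal helper is driven from an asyncio task (the real
consumers are anyio's wait_readable()/wait_writable(); the private module path
is as shown above and should not be relied on):

import asyncio
import socket

from anyio._core._asyncio_selector_thread import get_selector


async def wait_for_data(sock: socket.socket) -> None:
    loop = asyncio.get_running_loop()
    fut: asyncio.Future[None] = loop.create_future()
    # The callback fires once (the selector removes the reader before invoking
    # it) and is delivered back to this loop via call_soon_threadsafe().
    get_selector().add_reader(sock, lambda: fut.set_result(None))
    await fut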
@@ -1,166 +0,0 @@
from __future__ import annotations

import math
import sys
import threading
from collections.abc import Awaitable, Callable, Generator
from contextlib import contextmanager
from importlib import import_module
from typing import TYPE_CHECKING, Any, TypeVar

import sniffio

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if TYPE_CHECKING:
    from ..abc import AsyncBackend

# This must be updated when new backends are introduced
BACKENDS = "asyncio", "trio"

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

threadlocals = threading.local()
loaded_backends: dict[str, type[AsyncBackend]] = {}


def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
    *args: Unpack[PosArgsT],
    backend: str = "asyncio",
    backend_options: dict[str, Any] | None = None,
) -> T_Retval:
    """
    Run the given coroutine function in an asynchronous event loop.

    The current thread must not be already running an event loop.

    :param func: a coroutine function
    :param args: positional arguments to ``func``
    :param backend: name of the asynchronous event loop implementation – currently
        either ``asyncio`` or ``trio``
    :param backend_options: keyword arguments to call the backend ``run()``
        implementation with (documented :ref:`here <backend options>`)
    :return: the return value of the coroutine function
    :raises RuntimeError: if an asynchronous event loop is already running in this
        thread
    :raises LookupError: if the named backend is not found

    """
    try:
        asynclib_name = sniffio.current_async_library()
    except sniffio.AsyncLibraryNotFoundError:
        pass
    else:
        raise RuntimeError(f"Already running {asynclib_name} in this thread")

    try:
        async_backend = get_async_backend(backend)
    except ImportError as exc:
        raise LookupError(f"No such backend: {backend}") from exc

    token = None
    if sniffio.current_async_library_cvar.get(None) is None:
        # Since we're in control of the event loop, we can cache the name of the async
        # library
        token = sniffio.current_async_library_cvar.set(backend)

    try:
        backend_options = backend_options or {}
        return async_backend.run(func, args, {}, backend_options)
    finally:
        if token:
            sniffio.current_async_library_cvar.reset(token)


async def sleep(delay: float) -> None:
    """
    Pause the current task for the specified duration.

    :param delay: the duration, in seconds

    """
    return await get_async_backend().sleep(delay)


async def sleep_forever() -> None:
    """
    Pause the current task until it's cancelled.

    This is a shortcut for ``sleep(math.inf)``.

    .. versionadded:: 3.1

    """
    await sleep(math.inf)


async def sleep_until(deadline: float) -> None:
    """
    Pause the current task until the given time.

    :param deadline: the absolute time to wake up at (according to the internal
        monotonic clock of the event loop)

    .. versionadded:: 3.1

    """
    now = current_time()
    await sleep(max(deadline - now, 0))


def current_time() -> float:
    """
    Return the current value of the event loop's internal clock.

    :return: the clock value (seconds)

    """
    return get_async_backend().current_time()


def get_all_backends() -> tuple[str, ...]:
    """Return a tuple of the names of all built-in backends."""
    return BACKENDS


def get_cancelled_exc_class() -> type[BaseException]:
    """Return the current async library's cancellation exception class."""
    return get_async_backend().cancelled_exception_class()


#
# Private API
#


@contextmanager
def claim_worker_thread(
    backend_class: type[AsyncBackend], token: object
) -> Generator[Any, None, None]:
    threadlocals.current_async_backend = backend_class
    threadlocals.current_token = token
    try:
        yield
    finally:
        del threadlocals.current_async_backend
        del threadlocals.current_token


def get_async_backend(asynclib_name: str | None = None) -> type[AsyncBackend]:
    if asynclib_name is None:
        asynclib_name = sniffio.current_async_library()

    # We use our own dict instead of sys.modules to get the already imported back-end
    # class because the appropriate modules in sys.modules could potentially be only
    # partially initialized
    try:
        return loaded_backends[asynclib_name]
    except KeyError:
        module = import_module(f"anyio._backends._{asynclib_name}")
        loaded_backends[asynclib_name] = module.backend_class
        return module.backend_class
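Taken together, the public helpers above compose like this (a minimal sketch;
either backend named in BACKENDS behaves the same way):

import anyio


async def main() -> None:
    start = anyio.current_time()           # the event loop's monotonic clock
    await anyio.sleep_until(start + 0.5)   # sleep(max(deadline - now, 0)) under the hood
    print(f"woke after {anyio.current_time() - start:.2f}s")


# run() refuses to start if this thread already hosts an event loop,
# and raises LookupError for an unknown backend name.
anyio.run(main, backend="asyncio")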
@@ -1,89 +0,0 @@
from __future__ import annotations

import sys
from collections.abc import Generator

if sys.version_info < (3, 11):
    from exceptiongroup import BaseExceptionGroup


class BrokenResourceError(Exception):
    """
    Raised when trying to use a resource that has been rendered unusable due to external
    causes (e.g. a send stream whose peer has disconnected).
    """


class BrokenWorkerProcess(Exception):
    """
    Raised by :meth:`~anyio.to_process.run_sync` if the worker process terminates abruptly or
    otherwise misbehaves.
    """


class BusyResourceError(Exception):
    """
    Raised when two tasks are trying to read from or write to the same resource
    concurrently.
    """

    def __init__(self, action: str):
        super().__init__(f"Another task is already {action} this resource")


class ClosedResourceError(Exception):
    """Raised when trying to use a resource that has been closed."""


class DelimiterNotFound(Exception):
    """
    Raised during
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    maximum number of bytes has been read without the delimiter being found.
    """

    def __init__(self, max_bytes: int) -> None:
        super().__init__(
            f"The delimiter was not found among the first {max_bytes} bytes"
        )


class EndOfStream(Exception):
    """
    Raised when trying to read from a stream that has been closed from the other end.
    """


class IncompleteRead(Exception):
    """
    Raised during
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_exactly` or
    :meth:`~anyio.streams.buffered.BufferedByteReceiveStream.receive_until` if the
    connection is closed before the requested amount of bytes has been read.
    """

    def __init__(self) -> None:
        super().__init__(
            "The stream was closed before the read operation could be completed"
        )


class TypedAttributeLookupError(LookupError):
    """
    Raised by :meth:`~anyio.TypedAttributeProvider.extra` when the given typed attribute
    is not found and no default value has been given.
    """


class WouldBlock(Exception):
    """Raised by ``X_nowait`` functions if ``X()`` would block."""


def iterate_exceptions(
    exception: BaseException,
) -> Generator[BaseException, None, None]:
    if isinstance(exception, BaseExceptionGroup):
        for exc in exception.exceptions:
            yield from iterate_exceptions(exc)
    else:
        yield exception
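iterate_exceptions() flattens arbitrarily nested exception groups into their
leaf exceptions. A sketch (assumes Python 3.11+, where BaseExceptionGroup is a
builtin; on older versions the exceptiongroup backport imported above provides
it — note the function itself is a private helper):

from anyio._core._exceptions import iterate_exceptions

group = BaseExceptionGroup(
    "outer", [ValueError("a"), BaseExceptionGroup("inner", [KeyError("b")])]
)
# Yields the leaves in order: ValueError('a'), then KeyError('b')
for exc in iterate_exceptions(group):
    print(type(exc).__name__, exc)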
@@ -1,674 +0,0 @@
from __future__ import annotations

import os
import pathlib
import sys
from collections.abc import AsyncIterator, Callable, Iterable, Iterator, Sequence
from dataclasses import dataclass
from functools import partial
from os import PathLike
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    AnyStr,
    Final,
    Generic,
    overload,
)

from .. import to_thread
from ..abc import AsyncResource

if TYPE_CHECKING:
    from _typeshed import OpenBinaryMode, OpenTextMode, ReadableBuffer, WriteableBuffer
else:
    ReadableBuffer = OpenBinaryMode = OpenTextMode = WriteableBuffer = object


class AsyncFile(AsyncResource, Generic[AnyStr]):
    """
    An asynchronous file object.

    This class wraps a standard file object and provides async friendly versions of the
    following blocking methods (where available on the original file object):

    * read
    * read1
    * readline
    * readlines
    * readinto
    * readinto1
    * write
    * writelines
    * truncate
    * seek
    * tell
    * flush

    All other methods are directly passed through.

    This class supports the asynchronous context manager protocol which closes the
    underlying file at the end of the context block.

    This class also supports asynchronous iteration::

        async with await open_file(...) as f:
            async for line in f:
                print(line)
    """

    def __init__(self, fp: IO[AnyStr]) -> None:
        self._fp: Any = fp

    def __getattr__(self, name: str) -> object:
        return getattr(self._fp, name)

    @property
    def wrapped(self) -> IO[AnyStr]:
        """The wrapped file object."""
        return self._fp

    async def __aiter__(self) -> AsyncIterator[AnyStr]:
        while True:
            line = await self.readline()
            if line:
                yield line
            else:
                break

    async def aclose(self) -> None:
        return await to_thread.run_sync(self._fp.close)

    async def read(self, size: int = -1) -> AnyStr:
        return await to_thread.run_sync(self._fp.read, size)

    async def read1(self: AsyncFile[bytes], size: int = -1) -> bytes:
        return await to_thread.run_sync(self._fp.read1, size)

    async def readline(self) -> AnyStr:
        return await to_thread.run_sync(self._fp.readline)

    async def readlines(self) -> list[AnyStr]:
        return await to_thread.run_sync(self._fp.readlines)

    async def readinto(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto, b)

    async def readinto1(self: AsyncFile[bytes], b: WriteableBuffer) -> int:
        return await to_thread.run_sync(self._fp.readinto1, b)

    @overload
    async def write(self: AsyncFile[bytes], b: ReadableBuffer) -> int: ...

    @overload
    async def write(self: AsyncFile[str], b: str) -> int: ...

    async def write(self, b: ReadableBuffer | str) -> int:
        return await to_thread.run_sync(self._fp.write, b)

    @overload
    async def writelines(
        self: AsyncFile[bytes], lines: Iterable[ReadableBuffer]
    ) -> None: ...

    @overload
    async def writelines(self: AsyncFile[str], lines: Iterable[str]) -> None: ...

    async def writelines(self, lines: Iterable[ReadableBuffer] | Iterable[str]) -> None:
        return await to_thread.run_sync(self._fp.writelines, lines)

    async def truncate(self, size: int | None = None) -> int:
        return await to_thread.run_sync(self._fp.truncate, size)

    async def seek(self, offset: int, whence: int | None = os.SEEK_SET) -> int:
        return await to_thread.run_sync(self._fp.seek, offset, whence)

    async def tell(self) -> int:
        return await to_thread.run_sync(self._fp.tell)

    async def flush(self) -> None:
        return await to_thread.run_sync(self._fp.flush)


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenBinaryMode,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[bytes]: ...


@overload
async def open_file(
    file: str | PathLike[str] | int,
    mode: OpenTextMode = ...,
    buffering: int = ...,
    encoding: str | None = ...,
    errors: str | None = ...,
    newline: str | None = ...,
    closefd: bool = ...,
    opener: Callable[[str, int], int] | None = ...,
) -> AsyncFile[str]: ...


async def open_file(
    file: str | PathLike[str] | int,
    mode: str = "r",
    buffering: int = -1,
    encoding: str | None = None,
    errors: str | None = None,
    newline: str | None = None,
    closefd: bool = True,
    opener: Callable[[str, int], int] | None = None,
) -> AsyncFile[Any]:
    """
    Open a file asynchronously.

    The arguments are exactly the same as for the builtin :func:`open`.

    :return: an asynchronous file object

    """
    fp = await to_thread.run_sync(
        open, file, mode, buffering, encoding, errors, newline, closefd, opener
    )
    return AsyncFile(fp)


def wrap_file(file: IO[AnyStr]) -> AsyncFile[AnyStr]:
    """
    Wrap an existing file as an asynchronous file.

    :param file: an existing file-like object
    :return: an asynchronous file object

    """
    return AsyncFile(file)
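Both entry points above return the same AsyncFile wrapper; a short usage
sketch (the file name is arbitrary):

import anyio


async def main() -> None:
    # open_file() performs the blocking open() in a worker thread
    async with await anyio.open_file("example.txt", "w") as f:
        await f.write("hello\n")

    # wrap_file() adapts a file object that is already open
    async with anyio.wrap_file(open("example.txt")) as f:
        async for line in f:
            print(line, end="")


anyio.run(main)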
|
||||
|
||||
@dataclass(eq=False)
|
||||
class _PathIterator(AsyncIterator["Path"]):
|
||||
iterator: Iterator[PathLike[str]]
|
||||
|
||||
async def __anext__(self) -> Path:
|
||||
nextval = await to_thread.run_sync(
|
||||
next, self.iterator, None, abandon_on_cancel=True
|
||||
)
|
||||
if nextval is None:
|
||||
raise StopAsyncIteration from None
|
||||
|
||||
return Path(nextval)
|
||||
|
||||
|
||||
class Path:
|
||||
"""
|
||||
An asynchronous version of :class:`pathlib.Path`.
|
||||
|
||||
This class cannot be substituted for :class:`pathlib.Path` or
|
||||
:class:`pathlib.PurePath`, but it is compatible with the :class:`os.PathLike`
|
||||
interface.
|
||||
|
||||
It implements the Python 3.10 version of :class:`pathlib.Path` interface, except for
|
||||
the deprecated :meth:`~pathlib.Path.link_to` method.
|
||||
|
||||
Some methods may be unavailable or have limited functionality, based on the Python
|
||||
version:
|
||||
|
||||
* :meth:`~pathlib.Path.from_uri` (available on Python 3.13 or later)
|
||||
* :meth:`~pathlib.Path.full_match` (available on Python 3.13 or later)
|
||||
* :meth:`~pathlib.Path.is_junction` (available on Python 3.12 or later)
|
||||
* :meth:`~pathlib.Path.match` (the ``case_sensitive`` paramater is only available on
|
||||
Python 3.13 or later)
|
||||
* :meth:`~pathlib.Path.relative_to` (the ``walk_up`` parameter is only available on
|
||||
Python 3.12 or later)
|
||||
* :meth:`~pathlib.Path.walk` (available on Python 3.12 or later)
|
||||
|
||||
Any methods that do disk I/O need to be awaited on. These methods are:
|
||||
|
||||
* :meth:`~pathlib.Path.absolute`
|
||||
* :meth:`~pathlib.Path.chmod`
|
||||
* :meth:`~pathlib.Path.cwd`
|
||||
* :meth:`~pathlib.Path.exists`
|
||||
* :meth:`~pathlib.Path.expanduser`
|
||||
* :meth:`~pathlib.Path.group`
|
||||
* :meth:`~pathlib.Path.hardlink_to`
|
||||
* :meth:`~pathlib.Path.home`
|
||||
* :meth:`~pathlib.Path.is_block_device`
|
||||
* :meth:`~pathlib.Path.is_char_device`
|
||||
* :meth:`~pathlib.Path.is_dir`
|
||||
* :meth:`~pathlib.Path.is_fifo`
|
||||
* :meth:`~pathlib.Path.is_file`
|
||||
* :meth:`~pathlib.Path.is_junction`
|
||||
* :meth:`~pathlib.Path.is_mount`
|
||||
* :meth:`~pathlib.Path.is_socket`
|
||||
* :meth:`~pathlib.Path.is_symlink`
|
||||
* :meth:`~pathlib.Path.lchmod`
|
||||
* :meth:`~pathlib.Path.lstat`
|
||||
* :meth:`~pathlib.Path.mkdir`
|
||||
* :meth:`~pathlib.Path.open`
|
||||
* :meth:`~pathlib.Path.owner`
|
||||
* :meth:`~pathlib.Path.read_bytes`
|
||||
* :meth:`~pathlib.Path.read_text`
|
||||
* :meth:`~pathlib.Path.readlink`
|
||||
* :meth:`~pathlib.Path.rename`
|
||||
* :meth:`~pathlib.Path.replace`
|
||||
* :meth:`~pathlib.Path.resolve`
|
||||
* :meth:`~pathlib.Path.rmdir`
|
||||
* :meth:`~pathlib.Path.samefile`
|
||||
* :meth:`~pathlib.Path.stat`
|
||||
* :meth:`~pathlib.Path.symlink_to`
|
||||
* :meth:`~pathlib.Path.touch`
|
||||
* :meth:`~pathlib.Path.unlink`
|
||||
* :meth:`~pathlib.Path.walk`
|
||||
* :meth:`~pathlib.Path.write_bytes`
|
||||
* :meth:`~pathlib.Path.write_text`
|
||||
|
||||
Additionally, the following methods return an async iterator yielding
|
||||
:class:`~.Path` objects:
|
||||
|
||||
* :meth:`~pathlib.Path.glob`
|
||||
* :meth:`~pathlib.Path.iterdir`
|
||||
* :meth:`~pathlib.Path.rglob`
|
||||
"""
|
||||
|
||||
__slots__ = "_path", "__weakref__"
|
||||
|
||||
__weakref__: Any
|
||||
|
||||
def __init__(self, *args: str | PathLike[str]) -> None:
|
||||
self._path: Final[pathlib.Path] = pathlib.Path(*args)
|
||||
|
||||
def __fspath__(self) -> str:
|
||||
return self._path.__fspath__()
|
||||
|
||||
def __str__(self) -> str:
|
||||
return self._path.__str__()
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return f"{self.__class__.__name__}({self.as_posix()!r})"
|
||||
|
||||
def __bytes__(self) -> bytes:
|
||||
return self._path.__bytes__()
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return self._path.__hash__()
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
target = other._path if isinstance(other, Path) else other
|
||||
return self._path.__eq__(target)
|
||||
|
||||
def __lt__(self, other: pathlib.PurePath | Path) -> bool:
|
||||
target = other._path if isinstance(other, Path) else other
|
||||
return self._path.__lt__(target)
|
||||
|
||||
def __le__(self, other: pathlib.PurePath | Path) -> bool:
|
||||
target = other._path if isinstance(other, Path) else other
|
||||
return self._path.__le__(target)
|
||||
|
||||
def __gt__(self, other: pathlib.PurePath | Path) -> bool:
|
||||
target = other._path if isinstance(other, Path) else other
|
||||
return self._path.__gt__(target)
|
||||
|
||||
def __ge__(self, other: pathlib.PurePath | Path) -> bool:
|
||||
target = other._path if isinstance(other, Path) else other
|
||||
return self._path.__ge__(target)
|
||||
|
||||
def __truediv__(self, other: str | PathLike[str]) -> Path:
|
||||
return Path(self._path / other)
|
||||
|
||||
def __rtruediv__(self, other: str | PathLike[str]) -> Path:
|
||||
return Path(other) / self
|
||||
|
||||
@property
|
||||
def parts(self) -> tuple[str, ...]:
|
||||
return self._path.parts
|
||||
|
||||
@property
|
||||
def drive(self) -> str:
|
||||
return self._path.drive
|
||||
|
||||
@property
|
||||
def root(self) -> str:
|
||||
return self._path.root
|
||||
|
||||
@property
|
||||
def anchor(self) -> str:
|
||||
return self._path.anchor
|
||||
|
||||
@property
|
||||
def parents(self) -> Sequence[Path]:
|
||||
return tuple(Path(p) for p in self._path.parents)
|
||||
|
||||
@property
|
||||
def parent(self) -> Path:
|
||||
return Path(self._path.parent)
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
return self._path.name
|
||||
|
||||
@property
|
||||
def suffix(self) -> str:
|
||||
return self._path.suffix
|
||||
|
||||
@property
|
||||
def suffixes(self) -> list[str]:
|
||||
return self._path.suffixes
|
||||
|
||||
@property
|
||||
def stem(self) -> str:
|
||||
return self._path.stem
|
||||
|
||||
async def absolute(self) -> Path:
|
||||
path = await to_thread.run_sync(self._path.absolute)
|
||||
return Path(path)
|
||||
|
||||
def as_posix(self) -> str:
|
||||
return self._path.as_posix()
|
||||
|
||||
def as_uri(self) -> str:
|
||||
return self._path.as_uri()
|
||||
|
||||
if sys.version_info >= (3, 13):
|
||||
parser = pathlib.Path.parser
|
||||
|
||||
@classmethod
|
||||
def from_uri(cls, uri: str) -> Path:
|
||||
return Path(pathlib.Path.from_uri(uri))
|
||||
|
||||
def full_match(
|
||||
self, path_pattern: str, *, case_sensitive: bool | None = None
|
||||
) -> bool:
|
||||
return self._path.full_match(path_pattern, case_sensitive=case_sensitive)
|
||||
|
||||
def match(
|
||||
self, path_pattern: str, *, case_sensitive: bool | None = None
|
||||
) -> bool:
|
||||
return self._path.match(path_pattern, case_sensitive=case_sensitive)
|
||||
else:
|
||||
|
||||
def match(self, path_pattern: str) -> bool:
|
||||
return self._path.match(path_pattern)
|
||||
|
||||
def is_relative_to(self, other: str | PathLike[str]) -> bool:
|
||||
try:
|
||||
self.relative_to(other)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
async def chmod(self, mode: int, *, follow_symlinks: bool = True) -> None:
|
||||
func = partial(os.chmod, follow_symlinks=follow_symlinks)
|
||||
return await to_thread.run_sync(func, self._path, mode)
|
||||
|
||||
@classmethod
|
||||
async def cwd(cls) -> Path:
|
||||
path = await to_thread.run_sync(pathlib.Path.cwd)
|
||||
return cls(path)
|
||||
|
||||
async def exists(self) -> bool:
|
||||
return await to_thread.run_sync(self._path.exists, abandon_on_cancel=True)
|
||||
|
||||
async def expanduser(self) -> Path:
|
||||
return Path(
|
||||
await to_thread.run_sync(self._path.expanduser, abandon_on_cancel=True)
|
||||
)
|
||||
|
||||
def glob(self, pattern: str) -> AsyncIterator[Path]:
|
||||
gen = self._path.glob(pattern)
|
||||
return _PathIterator(gen)
|
||||
|
||||
async def group(self) -> str:
|
||||
return await to_thread.run_sync(self._path.group, abandon_on_cancel=True)
|
||||
|
||||
async def hardlink_to(
|
||||
self, target: str | bytes | PathLike[str] | PathLike[bytes]
|
||||
) -> None:
|
||||
if isinstance(target, Path):
|
||||
target = target._path
|
||||
|
||||
await to_thread.run_sync(os.link, target, self)
|
||||
|
||||
@classmethod
|
||||
async def home(cls) -> Path:
|
||||
home_path = await to_thread.run_sync(pathlib.Path.home)
|
||||
return cls(home_path)
|
||||
|
||||
def is_absolute(self) -> bool:
|
||||
return self._path.is_absolute()
|
||||
|
||||
async def is_block_device(self) -> bool:
|
||||
return await to_thread.run_sync(
|
||||
self._path.is_block_device, abandon_on_cancel=True
|
||||
)
|
||||
|
||||
async def is_char_device(self) -> bool:
|
||||
return await to_thread.run_sync(
|
||||
self._path.is_char_device, abandon_on_cancel=True
|
||||
)
|
||||
|
||||
async def is_dir(self) -> bool:
|
||||
return await to_thread.run_sync(self._path.is_dir, abandon_on_cancel=True)
|
||||
|
||||
async def is_fifo(self) -> bool:
|
||||
return await to_thread.run_sync(self._path.is_fifo, abandon_on_cancel=True)
|
||||
|
||||
async def is_file(self) -> bool:
|
||||
return await to_thread.run_sync(self._path.is_file, abandon_on_cancel=True)
|
||||
|
||||
if sys.version_info >= (3, 12):
|
||||
|
||||
async def is_junction(self) -> bool:
|
||||
return await to_thread.run_sync(self._path.is_junction)
|
||||
|
||||
async def is_mount(self) -> bool:
|
||||
return await to_thread.run_sync(
|
||||
os.path.ismount, self._path, abandon_on_cancel=True
|
||||
)
|
||||
|
    def is_reserved(self) -> bool:
        return self._path.is_reserved()

    async def is_socket(self) -> bool:
        return await to_thread.run_sync(self._path.is_socket, abandon_on_cancel=True)

    async def is_symlink(self) -> bool:
        return await to_thread.run_sync(self._path.is_symlink, abandon_on_cancel=True)

    def iterdir(self) -> AsyncIterator[Path]:
        gen = self._path.iterdir()
        return _PathIterator(gen)

    def joinpath(self, *args: str | PathLike[str]) -> Path:
        return Path(self._path.joinpath(*args))

    async def lchmod(self, mode: int) -> None:
        await to_thread.run_sync(self._path.lchmod, mode)

    async def lstat(self) -> os.stat_result:
        return await to_thread.run_sync(self._path.lstat, abandon_on_cancel=True)

    async def mkdir(
        self, mode: int = 0o777, parents: bool = False, exist_ok: bool = False
    ) -> None:
        await to_thread.run_sync(self._path.mkdir, mode, parents, exist_ok)

    @overload
    async def open(
        self,
        mode: OpenBinaryMode,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[bytes]: ...

    @overload
    async def open(
        self,
        mode: OpenTextMode = ...,
        buffering: int = ...,
        encoding: str | None = ...,
        errors: str | None = ...,
        newline: str | None = ...,
    ) -> AsyncFile[str]: ...

    async def open(
        self,
        mode: str = "r",
        buffering: int = -1,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> AsyncFile[Any]:
        fp = await to_thread.run_sync(
            self._path.open, mode, buffering, encoding, errors, newline
        )
        return AsyncFile(fp)

    async def owner(self) -> str:
        return await to_thread.run_sync(self._path.owner, abandon_on_cancel=True)

    async def read_bytes(self) -> bytes:
        return await to_thread.run_sync(self._path.read_bytes)

    async def read_text(
        self, encoding: str | None = None, errors: str | None = None
    ) -> str:
        return await to_thread.run_sync(self._path.read_text, encoding, errors)

    if sys.version_info >= (3, 12):

        def relative_to(
            self, *other: str | PathLike[str], walk_up: bool = False
        ) -> Path:
            return Path(self._path.relative_to(*other, walk_up=walk_up))

    else:

        def relative_to(self, *other: str | PathLike[str]) -> Path:
            return Path(self._path.relative_to(*other))

    async def readlink(self) -> Path:
        target = await to_thread.run_sync(os.readlink, self._path)
        return Path(target)

    async def rename(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.rename, target)
        return Path(target)

    async def replace(self, target: str | pathlib.PurePath | Path) -> Path:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.replace, target)
        return Path(target)

    async def resolve(self, strict: bool = False) -> Path:
        func = partial(self._path.resolve, strict=strict)
        return Path(await to_thread.run_sync(func, abandon_on_cancel=True))

    def rglob(self, pattern: str) -> AsyncIterator[Path]:
        gen = self._path.rglob(pattern)
        return _PathIterator(gen)

    async def rmdir(self) -> None:
        await to_thread.run_sync(self._path.rmdir)

    async def samefile(self, other_path: str | PathLike[str]) -> bool:
        if isinstance(other_path, Path):
            other_path = other_path._path

        return await to_thread.run_sync(
            self._path.samefile, other_path, abandon_on_cancel=True
        )

    async def stat(self, *, follow_symlinks: bool = True) -> os.stat_result:
        func = partial(os.stat, follow_symlinks=follow_symlinks)
        return await to_thread.run_sync(func, self._path, abandon_on_cancel=True)

    async def symlink_to(
        self,
        target: str | bytes | PathLike[str] | PathLike[bytes],
        target_is_directory: bool = False,
    ) -> None:
        if isinstance(target, Path):
            target = target._path

        await to_thread.run_sync(self._path.symlink_to, target, target_is_directory)

    async def touch(self, mode: int = 0o666, exist_ok: bool = True) -> None:
        await to_thread.run_sync(self._path.touch, mode, exist_ok)

    async def unlink(self, missing_ok: bool = False) -> None:
        try:
            await to_thread.run_sync(self._path.unlink)
        except FileNotFoundError:
            if not missing_ok:
                raise

    if sys.version_info >= (3, 12):

        async def walk(
            self,
            top_down: bool = True,
            on_error: Callable[[OSError], object] | None = None,
            follow_symlinks: bool = False,
        ) -> AsyncIterator[tuple[Path, list[str], list[str]]]:
            def get_next_value() -> tuple[pathlib.Path, list[str], list[str]] | None:
                try:
                    return next(gen)
                except StopIteration:
                    return None

            gen = self._path.walk(top_down, on_error, follow_symlinks)
            while True:
                value = await to_thread.run_sync(get_next_value)
                if value is None:
                    return

                root, dirs, paths = value
                yield Path(root), dirs, paths

    def with_name(self, name: str) -> Path:
        return Path(self._path.with_name(name))

    def with_stem(self, stem: str) -> Path:
        return Path(self._path.with_name(stem + self._path.suffix))

    def with_suffix(self, suffix: str) -> Path:
        return Path(self._path.with_suffix(suffix))

    def with_segments(self, *pathsegments: str | PathLike[str]) -> Path:
        return Path(*pathsegments)

    async def write_bytes(self, data: bytes) -> int:
        return await to_thread.run_sync(self._path.write_bytes, data)

    async def write_text(
        self,
        data: str,
        encoding: str | None = None,
        errors: str | None = None,
        newline: str | None = None,
    ) -> int:
        # Path.write_text() does not support the "newline" parameter before Python 3.10
        def sync_write_text() -> int:
            with self._path.open(
                "w", encoding=encoding, errors=errors, newline=newline
            ) as fp:
                return fp.write(data)

        return await to_thread.run_sync(sync_write_text)


PathLike.register(Path)
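For illustration, a minimal sketch of using the async Path wrapper above; this assumes the anyio package is installed, and the file name is hypothetical:

import anyio
from anyio import Path

async def main() -> None:
    # Each call dispatches the blocking pathlib operation to a worker thread
    path = Path("example.txt")  # hypothetical file name
    await path.write_text("hello")
    print(await path.read_text())
    async for entry in Path(".").iterdir():
        print(entry)

anyio.run(main)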
@@ -1,18 +0,0 @@
from __future__ import annotations

from ..abc import AsyncResource
from ._tasks import CancelScope


async def aclose_forcefully(resource: AsyncResource) -> None:
    """
    Close an asynchronous resource in a cancelled scope.

    Doing this closes the resource without waiting on anything.

    :param resource: the resource to close

    """
    with CancelScope() as scope:
        scope.cancel()
        await resource.aclose()
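A hedged sketch of how aclose_forcefully() is typically used; the assumption here is that the stream is any AsyncResource, such as a connected TCP stream, and the target host is hypothetical:

import anyio
from anyio import aclose_forcefully, connect_tcp

async def main() -> None:
    stream = await connect_tcp("example.com", 80)  # hypothetical target
    # Abort immediately, skipping any graceful shutdown handshake
    await aclose_forcefully(stream)

anyio.run(main)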
@@ -1,27 +0,0 @@
from __future__ import annotations

from collections.abc import AsyncIterator
from contextlib import AbstractContextManager
from signal import Signals

from ._eventloop import get_async_backend


def open_signal_receiver(
    *signals: Signals,
) -> AbstractContextManager[AsyncIterator[Signals]]:
    """
    Start receiving operating system signals.

    :param signals: signals to receive (e.g. ``signal.SIGINT``)
    :return: an asynchronous context manager for an asynchronous iterator which yields
        signal numbers

    .. warning:: Windows does not support signals natively so it is best to avoid
        relying on this in cross-platform applications.

    .. warning:: On asyncio, this permanently replaces any previous signal handler for
        the given signals, as set via :meth:`~asyncio.loop.add_signal_handler`.

    """
    return get_async_backend().open_signal_receiver(*signals)
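A minimal sketch (POSIX only, per the warning above) of consuming signals with this API:

import signal
import anyio
from anyio import open_signal_receiver

async def main() -> None:
    with open_signal_receiver(signal.SIGTERM, signal.SIGINT) as signals:
        async for signum in signals:
            print(f"received signal {signum!r}")
            break  # stop after the first signal

anyio.run(main)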
@@ -1,787 +0,0 @@
from __future__ import annotations

import errno
import os
import socket
import ssl
import stat
import sys
from collections.abc import Awaitable
from ipaddress import IPv6Address, ip_address
from os import PathLike, chmod
from socket import AddressFamily, SocketKind
from typing import TYPE_CHECKING, Any, Literal, cast, overload

from .. import to_thread
from ..abc import (
    ConnectedUDPSocket,
    ConnectedUNIXDatagramSocket,
    IPAddressType,
    IPSockAddrType,
    SocketListener,
    SocketStream,
    UDPSocket,
    UNIXDatagramSocket,
    UNIXSocketStream,
)
from ..streams.stapled import MultiListener
from ..streams.tls import TLSStream
from ._eventloop import get_async_backend
from ._resources import aclose_forcefully
from ._synchronization import Event
from ._tasks import create_task_group, move_on_after

if TYPE_CHECKING:
    from _typeshed import FileDescriptorLike
else:
    FileDescriptorLike = object

if sys.version_info < (3, 11):
    from exceptiongroup import ExceptionGroup

if sys.version_info < (3, 13):
    from typing_extensions import deprecated
else:
    from warnings import deprecated

IPPROTO_IPV6 = getattr(socket, "IPPROTO_IPV6", 41)  # https://bugs.python.org/issue29515

AnyIPAddressFamily = Literal[
    AddressFamily.AF_UNSPEC, AddressFamily.AF_INET, AddressFamily.AF_INET6
]
IPAddressFamily = Literal[AddressFamily.AF_INET, AddressFamily.AF_INET6]


# tls_hostname given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# ssl_context given
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    ssl_context: ssl.SSLContext,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=True
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[True],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> TLSStream: ...


# tls=False
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    tls: Literal[False],
    ssl_context: ssl.SSLContext | None = ...,
    tls_standard_compatible: bool = ...,
    tls_hostname: str | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


# No TLS arguments
@overload
async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = ...,
    happy_eyeballs_delay: float = ...,
) -> SocketStream: ...


async def connect_tcp(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    local_host: IPAddressType | None = None,
    tls: bool = False,
    ssl_context: ssl.SSLContext | None = None,
    tls_standard_compatible: bool = True,
    tls_hostname: str | None = None,
    happy_eyeballs_delay: float = 0.25,
) -> SocketStream | TLSStream:
    """
    Connect to a host using the TCP protocol.

    This function implements the stateless version of the Happy Eyeballs algorithm (RFC
    6555). If ``remote_host`` is a host name that resolves to multiple IP addresses,
    each one is tried until one connection attempt succeeds. If the first attempt does
    not connect within 250 milliseconds, a second attempt is started using the next
    address in the list, and so on. On IPv6 enabled systems, an IPv6 address (if
    available) is tried first.

    When the connection has been established, a TLS handshake will be done if either
    ``ssl_context`` or ``tls_hostname`` is not ``None``, or if ``tls`` is ``True``.

    :param remote_host: the IP address or host name to connect to
    :param remote_port: port on the target host to connect to
    :param local_host: the interface address or name to bind the socket to before
        connecting
    :param tls: ``True`` to do a TLS handshake with the connected stream and return a
        :class:`~anyio.streams.tls.TLSStream` instead
    :param ssl_context: the SSL context object to use (if omitted, a default context is
        created)
    :param tls_standard_compatible: If ``True``, performs the TLS shutdown handshake
        before closing the stream and requires that the server does this as well.
        Otherwise, :exc:`~ssl.SSLEOFError` may be raised during reads from the stream.
        Some protocols, such as HTTP, require this option to be ``False``.
        See :meth:`~ssl.SSLContext.wrap_socket` for details.
    :param tls_hostname: host name to check the server certificate against (defaults to
        the value of ``remote_host``)
    :param happy_eyeballs_delay: delay (in seconds) before starting the next connection
        attempt
    :return: a socket stream object if no TLS handshake was done, otherwise a TLS stream
    :raises OSError: if the connection attempt fails

    """
    # Placed here due to https://github.com/python/mypy/issues/7057
    connected_stream: SocketStream | None = None

    async def try_connect(remote_host: str, event: Event) -> None:
        nonlocal connected_stream
        try:
            stream = await asynclib.connect_tcp(remote_host, remote_port, local_address)
        except OSError as exc:
            oserrors.append(exc)
            return
        else:
            if connected_stream is None:
                connected_stream = stream
                tg.cancel_scope.cancel()
            else:
                await stream.aclose()
        finally:
            event.set()

    asynclib = get_async_backend()
    local_address: IPSockAddrType | None = None
    family = socket.AF_UNSPEC
    if local_host:
        gai_res = await getaddrinfo(str(local_host), None)
        family, *_, local_address = gai_res[0]

    target_host = str(remote_host)
    try:
        addr_obj = ip_address(remote_host)
    except ValueError:
        addr_obj = None

    if addr_obj is not None:
        if isinstance(addr_obj, IPv6Address):
            target_addrs = [(socket.AF_INET6, addr_obj.compressed)]
        else:
            target_addrs = [(socket.AF_INET, addr_obj.compressed)]
    else:
        # getaddrinfo() will raise an exception if name resolution fails
        gai_res = await getaddrinfo(
            target_host, remote_port, family=family, type=socket.SOCK_STREAM
        )

        # Organize the list so that the first address is an IPv6 address (if available)
        # and the second one is an IPv4 address. The rest can be in whatever order.
        v6_found = v4_found = False
        target_addrs = []
        for af, *rest, sa in gai_res:
            if af == socket.AF_INET6 and not v6_found:
                v6_found = True
                target_addrs.insert(0, (af, sa[0]))
            elif af == socket.AF_INET and not v4_found and v6_found:
                v4_found = True
                target_addrs.insert(1, (af, sa[0]))
            else:
                target_addrs.append((af, sa[0]))

    oserrors: list[OSError] = []
    async with create_task_group() as tg:
        for i, (af, addr) in enumerate(target_addrs):
            event = Event()
            tg.start_soon(try_connect, addr, event)
            with move_on_after(happy_eyeballs_delay):
                await event.wait()

        if connected_stream is None:
            cause = (
                oserrors[0]
                if len(oserrors) == 1
                else ExceptionGroup("multiple connection attempts failed", oserrors)
            )
            raise OSError("All connection attempts failed") from cause

    if tls or tls_hostname or ssl_context:
        try:
            return await TLSStream.wrap(
                connected_stream,
                server_side=False,
                hostname=tls_hostname or str(remote_host),
                ssl_context=ssl_context,
                standard_compatible=tls_standard_compatible,
            )
        except BaseException:
            await aclose_forcefully(connected_stream)
            raise

    return connected_stream
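A brief usage sketch of connect_tcp(); the host, port, and request bytes are hypothetical:

import anyio
from anyio import connect_tcp

async def main() -> None:
    async with await connect_tcp("example.com", 80) as stream:
        await stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        print(await stream.receive())

anyio.run(main)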
async def connect_unix(path: str | bytes | PathLike[Any]) -> UNIXSocketStream:
    """
    Connect to the given UNIX socket.

    Not available on Windows.

    :param path: path to the socket
    :return: a socket stream object

    """
    path = os.fspath(path)
    return await get_async_backend().connect_unix(path)


async def create_tcp_listener(
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    family: AnyIPAddressFamily = socket.AddressFamily.AF_UNSPEC,
    backlog: int = 65536,
    reuse_port: bool = False,
) -> MultiListener[SocketStream]:
    """
    Create a TCP socket listener.

    :param local_port: port number to listen on
    :param local_host: IP address of the interface to listen on. If omitted, listen on
        all IPv4 and IPv6 interfaces. To listen on all interfaces on a specific address
        family, use ``0.0.0.0`` for IPv4 or ``::`` for IPv6.
    :param family: address family (used if ``local_host`` was omitted)
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a list of listener objects

    """
    asynclib = get_async_backend()
    backlog = min(backlog, 65536)
    local_host = str(local_host) if local_host is not None else None
    gai_res = await getaddrinfo(
        local_host,
        local_port,
        family=family,
        type=socket.SocketKind.SOCK_STREAM if sys.platform == "win32" else 0,
        flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
    )
    listeners: list[SocketListener] = []
    try:
        # The set() is here to work around a glibc bug:
        # https://sourceware.org/bugzilla/show_bug.cgi?id=14969
        sockaddr: tuple[str, int] | tuple[str, int, int, int]
        for fam, kind, *_, sockaddr in sorted(set(gai_res)):
            # Workaround for an uvloop bug where we don't get the correct scope ID for
            # IPv6 link-local addresses when passing type=socket.SOCK_STREAM to
            # getaddrinfo(): https://github.com/MagicStack/uvloop/issues/539
            if sys.platform != "win32" and kind is not SocketKind.SOCK_STREAM:
                continue

            raw_socket = socket.socket(fam)
            raw_socket.setblocking(False)

            # For Windows, enable exclusive address use. For others, enable address
            # reuse.
            if sys.platform == "win32":
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
            else:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

            if reuse_port:
                raw_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)

            # If only IPv6 was requested, disable dual stack operation
            if fam == socket.AF_INET6:
                raw_socket.setsockopt(IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)

            # Workaround for #554
            if "%" in sockaddr[0]:
                addr, scope_id = sockaddr[0].split("%", 1)
                sockaddr = (addr, sockaddr[1], 0, int(scope_id))

            raw_socket.bind(sockaddr)
            raw_socket.listen(backlog)
            listener = asynclib.create_tcp_listener(raw_socket)
            listeners.append(listener)
    except BaseException:
        for listener in listeners:
            await listener.aclose()

        raise

    return MultiListener(listeners)
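A minimal echo-server sketch using create_tcp_listener(); the port number is hypothetical, and serve() runs until cancelled:

import anyio
from anyio import create_tcp_listener
from anyio.abc import SocketStream

async def handle(stream: SocketStream) -> None:
    # Echo every received chunk back to the client until EOF
    async with stream:
        async for chunk in stream:
            await stream.send(chunk)

async def main() -> None:
    listener = await create_tcp_listener(local_port=12345)  # hypothetical port
    await listener.serve(handle)

anyio.run(main)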
async def create_unix_listener(
    path: str | bytes | PathLike[Any],
    *,
    mode: int | None = None,
    backlog: int = 65536,
) -> SocketListener:
    """
    Create a UNIX socket listener.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param backlog: maximum number of queued incoming connections (up to a maximum of
        2**16, or 65536)
    :return: a listener object

    .. versionchanged:: 3.0
        If a socket already exists on the file system in the given path, it will be
        removed first.

    """
    backlog = min(backlog, 65536)
    raw_socket = await setup_unix_local_socket(path, mode, socket.SOCK_STREAM)
    try:
        raw_socket.listen(backlog)
        return get_async_backend().create_unix_listener(raw_socket)
    except BaseException:
        raw_socket.close()
        raise


async def create_udp_socket(
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    *,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> UDPSocket:
    """
    Create a UDP socket.

    If ``local_port`` has been given, the socket will be bound to this port on the
    local machine, making this socket suitable for providing UDP based services.

    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a UDP socket

    """
    if family is AddressFamily.AF_UNSPEC and not local_host:
        raise ValueError('Either "family" or "local_host" must be given')

    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]
    elif family is AddressFamily.AF_INET6:
        local_address = ("::", 0)
    else:
        local_address = ("0.0.0.0", 0)

    sock = await get_async_backend().create_udp_socket(
        family, local_address, None, reuse_port
    )
    return cast(UDPSocket, sock)
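An illustrative UDP round trip with create_udp_socket(); the socket sends a datagram to its own bound address so the sketch runs to completion (the port is hypothetical):

import anyio
from anyio import create_udp_socket

async def main() -> None:
    async with await create_udp_socket(local_host="127.0.0.1", local_port=9999) as udp:
        await udp.sendto(b"ping", "127.0.0.1", 9999)  # send to ourselves
        data, (host, port) = await udp.receive()
        print(data, host, port)

anyio.run(main)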
async def create_connected_udp_socket(
    remote_host: IPAddressType,
    remote_port: int,
    *,
    family: AnyIPAddressFamily = AddressFamily.AF_UNSPEC,
    local_host: IPAddressType | None = None,
    local_port: int = 0,
    reuse_port: bool = False,
) -> ConnectedUDPSocket:
    """
    Create a connected UDP socket.

    Connected UDP sockets can only communicate with the specified remote host/port, and
    any packets sent from other sources are dropped.

    :param remote_host: remote host to set as the default target
    :param remote_port: port on the remote host to set as the default target
    :param family: address family (``AF_INET`` or ``AF_INET6``) – automatically
        determined from ``local_host`` or ``remote_host`` if omitted
    :param local_host: IP address or host name of the local interface to bind to
    :param local_port: local port to bind to
    :param reuse_port: ``True`` to allow multiple sockets to bind to the same
        address/port (not supported on Windows)
    :return: a connected UDP socket

    """
    local_address = None
    if local_host:
        gai_res = await getaddrinfo(
            str(local_host),
            local_port,
            family=family,
            type=socket.SOCK_DGRAM,
            flags=socket.AI_PASSIVE | socket.AI_ADDRCONFIG,
        )
        family = cast(AnyIPAddressFamily, gai_res[0][0])
        local_address = gai_res[0][-1]

    gai_res = await getaddrinfo(
        str(remote_host), remote_port, family=family, type=socket.SOCK_DGRAM
    )
    family = cast(AnyIPAddressFamily, gai_res[0][0])
    remote_address = gai_res[0][-1]

    sock = await get_async_backend().create_udp_socket(
        family, local_address, remote_address, reuse_port
    )
    return cast(ConnectedUDPSocket, sock)


async def create_unix_datagram_socket(
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> UNIXDatagramSocket:
    """
    Create a UNIX datagram socket.

    Not available on Windows.

    If ``local_path`` has been given, the socket will be bound to this path, making this
    socket suitable for receiving datagrams from other processes. Other processes can
    send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param local_path: the path to bind to
    :param local_mode: permissions to set on the local socket
    :return: a UNIX datagram socket

    """
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    return await get_async_backend().create_unix_datagram_socket(raw_socket, None)


async def create_connected_unix_datagram_socket(
    remote_path: str | bytes | PathLike[Any],
    *,
    local_path: None | str | bytes | PathLike[Any] = None,
    local_mode: int | None = None,
) -> ConnectedUNIXDatagramSocket:
    """
    Create a connected UNIX datagram socket.

    Connected datagram sockets can only communicate with the specified remote path.

    If ``local_path`` has been given, the socket will be bound to this path, making
    this socket suitable for receiving datagrams from other processes. Other processes
    can send datagrams to this socket only if ``local_path`` is set.

    If a socket already exists on the file system in the ``local_path``, it will be
    removed first.

    :param remote_path: the path to set as the default target
    :param local_path: the path to bind to
    :param local_mode: permissions to set on the local socket
    :return: a connected UNIX datagram socket

    """
    remote_path = os.fspath(remote_path)
    raw_socket = await setup_unix_local_socket(
        local_path, local_mode, socket.SOCK_DGRAM
    )
    return await get_async_backend().create_unix_datagram_socket(
        raw_socket, remote_path
    )


async def getaddrinfo(
    host: bytes | str | None,
    port: str | int | None,
    *,
    family: int | AddressFamily = 0,
    type: int | SocketKind = 0,
    proto: int = 0,
    flags: int = 0,
) -> list[tuple[AddressFamily, SocketKind, int, str, tuple[str, int]]]:
    """
    Look up a numeric IP address given a host name.

    Internationalized domain names are translated according to the (non-transitional)
    IDNA 2008 standard.

    .. note:: 4-tuple IPv6 socket addresses are automatically converted to 2-tuples of
        (host, port), unlike what :func:`socket.getaddrinfo` does.

    :param host: host name
    :param port: port number
    :param family: socket family (``AF_INET``, ...)
    :param type: socket type (``SOCK_STREAM``, ...)
    :param proto: protocol number
    :param flags: flags to pass to upstream ``getaddrinfo()``
    :return: list of tuples containing (family, type, proto, canonname, sockaddr)

    .. seealso:: :func:`socket.getaddrinfo`

    """
    # Handle unicode hostnames
    if isinstance(host, str):
        try:
            encoded_host: bytes | None = host.encode("ascii")
        except UnicodeEncodeError:
            import idna

            encoded_host = idna.encode(host, uts46=True)
    else:
        encoded_host = host

    gai_res = await get_async_backend().getaddrinfo(
        encoded_host, port, family=family, type=type, proto=proto, flags=flags
    )
    return [
        (family, type, proto, canonname, convert_ipv6_sockaddr(sockaddr))
        for family, type, proto, canonname, sockaddr in gai_res
    ]


def getnameinfo(sockaddr: IPSockAddrType, flags: int = 0) -> Awaitable[tuple[str, str]]:
    """
    Look up the host name of an IP address.

    :param sockaddr: socket address (e.g. (ipaddress, port) for IPv4)
    :param flags: flags to pass to upstream ``getnameinfo()``
    :return: a tuple of (host name, service name)

    .. seealso:: :func:`socket.getnameinfo`

    """
    return get_async_backend().getnameinfo(sockaddr, flags)


@deprecated("This function is deprecated; use `wait_readable` instead")
def wait_socket_readable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
        Use :func:`wait_readable` instead.

    Wait until the given socket has data to be read.

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become readable

    """
    return get_async_backend().wait_readable(sock.fileno())


@deprecated("This function is deprecated; use `wait_writable` instead")
def wait_socket_writable(sock: socket.socket) -> Awaitable[None]:
    """
    .. deprecated:: 4.7.0
        Use :func:`wait_writable` instead.

    Wait until the given socket can be written to.

    This does **NOT** work on Windows when using the asyncio backend with a proactor
    event loop (default on py3.8+).

    .. warning:: Only use this on raw sockets that have not been wrapped by any higher
        level constructs like socket streams!

    :param sock: a socket object
    :raises ~anyio.ClosedResourceError: if the socket was closed while waiting for the
        socket to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the socket
        to become writable

    """
    return get_async_backend().wait_writable(sock.fileno())


def wait_readable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object has data to be read.

    On Unix systems, ``obj`` must either be an integer file descriptor, or else an
    object with a ``.fileno()`` method which returns an integer file descriptor. Any
    kind of file descriptor can be passed, though the exact semantics will depend on
    your kernel. For example, this probably won't do anything useful for on-disk files.

    On Windows systems, ``obj`` must either be an integer ``SOCKET`` handle, or else an
    object with a ``.fileno()`` method which returns an integer ``SOCKET`` handle. File
    descriptors aren't supported, and neither are handles that refer to anything besides
    a ``SOCKET``.

    On backends where this functionality is not natively provided (asyncio
    ``ProactorEventLoop`` on Windows), it is provided using a separate selector thread
    which is set to shut down when the interpreter shuts down.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become readable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become readable

    """
    return get_async_backend().wait_readable(obj)


def wait_writable(obj: FileDescriptorLike) -> Awaitable[None]:
    """
    Wait until the given object can be written to.

    :param obj: an object with a ``.fileno()`` method or an integer handle
    :raises ~anyio.ClosedResourceError: if the object was closed while waiting for the
        object to become writable
    :raises ~anyio.BusyResourceError: if another task is already waiting for the object
        to become writable

    .. seealso:: See the documentation of :func:`wait_readable` for the definition of
        ``obj`` and notes on backend compatibility.

    .. warning:: Don't use this on raw sockets that have been wrapped by any higher
        level constructs like socket streams!

    """
    return get_async_backend().wait_writable(obj)


#
# Private API
#


def convert_ipv6_sockaddr(
    sockaddr: tuple[str, int, int, int] | tuple[str, int],
) -> tuple[str, int]:
    """
    Convert a 4-tuple IPv6 socket address to a 2-tuple (address, port) format.

    If the scope ID is nonzero, it is added to the address, separated with ``%``.
    Otherwise the flow id and scope id are simply cut off from the tuple.
    Any other kinds of socket addresses are returned as-is.

    :param sockaddr: the result of :meth:`~socket.socket.getsockname`
    :return: the converted socket address

    """
    # This is more complicated than it should be because of MyPy
    if isinstance(sockaddr, tuple) and len(sockaddr) == 4:
        host, port, flowinfo, scope_id = sockaddr
        if scope_id:
            # PyPy (as of v7.3.11) leaves the interface name in the result, so
            # we discard it and only get the scope ID from the end
            # (https://foss.heptapod.net/pypy/pypy/-/issues/3938)
            host = host.split("%")[0]

            # Add scope_id to the address
            return f"{host}%{scope_id}", port
        else:
            return host, port
    else:
        return sockaddr
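Two worked inputs and outputs for convert_ipv6_sockaddr(), following directly from the logic above (the addresses are hypothetical):

# 4-tuple with a nonzero scope ID: the scope is folded into the host string
assert convert_ipv6_sockaddr(("fe80::1", 8080, 0, 3)) == ("fe80::1%3", 8080)
# Plain 2-tuple (IPv4 or already converted): returned unchanged
assert convert_ipv6_sockaddr(("127.0.0.1", 80)) == ("127.0.0.1", 80)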
async def setup_unix_local_socket(
    path: None | str | bytes | PathLike[Any],
    mode: int | None,
    socktype: int,
) -> socket.socket:
    """
    Create a UNIX local socket object, deleting the socket at the given path if it
    exists.

    Not available on Windows.

    :param path: path of the socket
    :param mode: permissions to set on the socket
    :param socktype: socket.SOCK_STREAM or socket.SOCK_DGRAM

    """
    path_str: str | None
    if path is not None:
        path_str = os.fsdecode(path)

        # Linux abstract namespace sockets aren't backed by a concrete file so skip stat call
        if not path_str.startswith("\0"):
            # Copied from pathlib...
            try:
                stat_result = os.stat(path)
            except OSError as e:
                if e.errno not in (
                    errno.ENOENT,
                    errno.ENOTDIR,
                    errno.EBADF,
                    errno.ELOOP,
                ):
                    raise
            else:
                if stat.S_ISSOCK(stat_result.st_mode):
                    os.unlink(path)
    else:
        path_str = None

    raw_socket = socket.socket(socket.AF_UNIX, socktype)
    raw_socket.setblocking(False)

    if path_str is not None:
        try:
            await to_thread.run_sync(raw_socket.bind, path_str, abandon_on_cancel=True)
            if mode is not None:
                await to_thread.run_sync(chmod, path_str, mode, abandon_on_cancel=True)
        except BaseException:
            raw_socket.close()
            raise

    return raw_socket
@@ -1,52 +0,0 @@
from __future__ import annotations

import math
from typing import TypeVar
from warnings import warn

from ..streams.memory import (
    MemoryObjectReceiveStream,
    MemoryObjectSendStream,
    MemoryObjectStreamState,
)

T_Item = TypeVar("T_Item")


class create_memory_object_stream(
    tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]],
):
    """
    Create a memory object stream.

    The stream's item type can be annotated like
    :func:`create_memory_object_stream[T_Item]`.

    :param max_buffer_size: number of items held in the buffer until ``send()`` starts
        blocking
    :param item_type: old way of marking the streams with the right generic type for
        static typing (does nothing on AnyIO 4)

        .. deprecated:: 4.0
           Use ``create_memory_object_stream[YourItemType](...)`` instead.
    :return: a tuple of (send stream, receive stream)

    """

    def __new__(  # type: ignore[misc]
        cls, max_buffer_size: float = 0, item_type: object = None
    ) -> tuple[MemoryObjectSendStream[T_Item], MemoryObjectReceiveStream[T_Item]]:
        if max_buffer_size != math.inf and not isinstance(max_buffer_size, int):
            raise ValueError("max_buffer_size must be either an integer or math.inf")
        if max_buffer_size < 0:
            raise ValueError("max_buffer_size cannot be negative")
        if item_type is not None:
            warn(
                "The item_type argument has been deprecated in AnyIO 4.0. "
                "Use create_memory_object_stream[YourItemType](...) instead.",
                DeprecationWarning,
                stacklevel=2,
            )

        state = MemoryObjectStreamState[T_Item](max_buffer_size)
        return (MemoryObjectSendStream(state), MemoryObjectReceiveStream(state))
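A short producer/consumer sketch with create_memory_object_stream(), using the AnyIO 4 subscript style described in the docstring above:

import anyio
from anyio import create_memory_object_stream, create_task_group
from anyio.streams.memory import MemoryObjectSendStream

async def producer(send_stream: MemoryObjectSendStream[int]) -> None:
    async with send_stream:
        for i in range(3):
            await send_stream.send(i)

async def main() -> None:
    send_stream, receive_stream = create_memory_object_stream[int]()
    async with create_task_group() as tg:
        tg.start_soon(producer, send_stream)
        async with receive_stream:
            async for item in receive_stream:
                print(item)

anyio.run(main)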
@@ -1,196 +0,0 @@
from __future__ import annotations

import sys
from collections.abc import AsyncIterable, Iterable, Mapping, Sequence
from io import BytesIO
from os import PathLike
from subprocess import DEVNULL, PIPE, CalledProcessError, CompletedProcess
from typing import IO, Any, Union, cast

from ..abc import Process
from ._eventloop import get_async_backend
from ._tasks import create_task_group

if sys.version_info >= (3, 10):
    from typing import TypeAlias
else:
    from typing_extensions import TypeAlias

StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]


async def run_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    input: bytes | None = None,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    check: bool = True,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> CompletedProcess[bytes]:
    """
    Run an external command in a subprocess and wait until it completes.

    .. seealso:: :func:`subprocess.run`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param input: bytes passed to the standard input of the subprocess
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param check: if ``True``, raise :exc:`~subprocess.CalledProcessError` if the
        process terminates with a return code other than 0
    :param cwd: If not ``None``, change the working directory to this before running the
        command
    :param env: if not ``None``, this mapping replaces the inherited environment
        variables from the parent process
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param start_new_session: if ``True`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (Python >= 3.9, POSIX only)
    :param group: effective group to run the process as (Python >= 3.9, POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (Python >= 3.9,
        POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (Python >= 3.9, POSIX only)
    :return: an object representing the completed process
    :raises ~subprocess.CalledProcessError: if ``check`` is ``True`` and the process
        exits with a nonzero return code

    """

    async def drain_stream(stream: AsyncIterable[bytes], index: int) -> None:
        buffer = BytesIO()
        async for chunk in stream:
            buffer.write(chunk)

        stream_contents[index] = buffer.getvalue()

    async with await open_process(
        command,
        stdin=PIPE if input else DEVNULL,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        user=user,
        group=group,
        extra_groups=extra_groups,
        umask=umask,
    ) as process:
        stream_contents: list[bytes | None] = [None, None]
        async with create_task_group() as tg:
            if process.stdout:
                tg.start_soon(drain_stream, process.stdout, 0)

            if process.stderr:
                tg.start_soon(drain_stream, process.stderr, 1)

            if process.stdin and input:
                await process.stdin.send(input)
                await process.stdin.aclose()

            await process.wait()

    output, errors = stream_contents
    if check and process.returncode != 0:
        raise CalledProcessError(cast(int, process.returncode), command, output, errors)

    return CompletedProcess(command, cast(int, process.returncode), output, errors)
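A quick usage sketch of run_process(); the command is hypothetical:

import anyio
from anyio import run_process

async def main() -> None:
    result = await run_process(["echo", "hello"])  # hypothetical command
    print(result.returncode, result.stdout)

anyio.run(main)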
async def open_process(
    command: StrOrBytesPath | Sequence[StrOrBytesPath],
    *,
    stdin: int | IO[Any] | None = PIPE,
    stdout: int | IO[Any] | None = PIPE,
    stderr: int | IO[Any] | None = PIPE,
    cwd: StrOrBytesPath | None = None,
    env: Mapping[str, str] | None = None,
    startupinfo: Any = None,
    creationflags: int = 0,
    start_new_session: bool = False,
    pass_fds: Sequence[int] = (),
    user: str | int | None = None,
    group: str | int | None = None,
    extra_groups: Iterable[str | int] | None = None,
    umask: int = -1,
) -> Process:
    """
    Start an external command in a subprocess.

    .. seealso:: :class:`subprocess.Popen`

    :param command: either a string to pass to the shell, or an iterable of strings
        containing the executable name or path and its arguments
    :param stdin: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`, a
        file-like object, or ``None``
    :param stdout: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        a file-like object, or ``None``
    :param stderr: one of :data:`subprocess.PIPE`, :data:`subprocess.DEVNULL`,
        :data:`subprocess.STDOUT`, a file-like object, or ``None``
    :param cwd: If not ``None``, the working directory is changed before executing
    :param env: If env is not ``None``, it must be a mapping that defines the
        environment variables for the new process
    :param creationflags: flags that can be used to control the creation of the
        subprocess (see :class:`subprocess.Popen` for the specifics)
    :param startupinfo: an instance of :class:`subprocess.STARTUPINFO` that can be used
        to specify process startup parameters (Windows only)
    :param start_new_session: if ``True`` the setsid() system call will be made in the
        child process prior to the execution of the subprocess. (POSIX only)
    :param pass_fds: sequence of file descriptors to keep open between the parent and
        child processes. (POSIX only)
    :param user: effective user to run the process as (POSIX only)
    :param group: effective group to run the process as (POSIX only)
    :param extra_groups: supplementary groups to set in the subprocess (POSIX only)
    :param umask: if not negative, this umask is applied in the child process before
        running the given command (POSIX only)
    :return: an asynchronous process object

    """
    kwargs: dict[str, Any] = {}
    if user is not None:
        kwargs["user"] = user

    if group is not None:
        kwargs["group"] = group

    if extra_groups is not None:
        kwargs["extra_groups"] = extra_groups

    if umask >= 0:
        kwargs["umask"] = umask

    return await get_async_backend().open_process(
        command,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        cwd=cwd,
        env=env,
        startupinfo=startupinfo,
        creationflags=creationflags,
        start_new_session=start_new_session,
        pass_fds=pass_fds,
        **kwargs,
    )
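And a streaming sketch with open_process(), reading stdout incrementally instead of buffering it; the command is hypothetical and POSIX only:

import anyio
from anyio import open_process

async def main() -> None:
    async with await open_process(["cat", "/etc/hostname"]) as process:
        if process.stdout is not None:
            async for chunk in process.stdout:
                print(chunk.decode(), end="")
        await process.wait()

anyio.run(main)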
@@ -1,733 +0,0 @@
from __future__ import annotations

import math
from collections import deque
from dataclasses import dataclass
from types import TracebackType

from sniffio import AsyncLibraryNotFoundError

from ..lowlevel import checkpoint
from ._eventloop import get_async_backend
from ._exceptions import BusyResourceError
from ._tasks import CancelScope
from ._testing import TaskInfo, get_current_task


@dataclass(frozen=True)
class EventStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Event.wait`
    """

    tasks_waiting: int


@dataclass(frozen=True)
class CapacityLimiterStatistics:
    """
    :ivar int borrowed_tokens: number of tokens currently borrowed by tasks
    :ivar float total_tokens: total number of available tokens
    :ivar tuple borrowers: tasks or other objects currently holding tokens borrowed from
        this limiter
    :ivar int tasks_waiting: number of tasks waiting on
        :meth:`~.CapacityLimiter.acquire` or
        :meth:`~.CapacityLimiter.acquire_on_behalf_of`
    """

    borrowed_tokens: int
    total_tokens: float
    borrowers: tuple[object, ...]
    tasks_waiting: int


@dataclass(frozen=True)
class LockStatistics:
    """
    :ivar bool locked: flag indicating if this lock is locked or not
    :ivar ~anyio.TaskInfo owner: task currently holding the lock (or ``None`` if the
        lock is not held by any task)
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Lock.acquire`
    """

    locked: bool
    owner: TaskInfo | None
    tasks_waiting: int


@dataclass(frozen=True)
class ConditionStatistics:
    """
    :ivar int tasks_waiting: number of tasks blocked on :meth:`~.Condition.wait`
    :ivar ~anyio.LockStatistics lock_statistics: statistics of the underlying
        :class:`~.Lock`
    """

    tasks_waiting: int
    lock_statistics: LockStatistics


@dataclass(frozen=True)
class SemaphoreStatistics:
    """
    :ivar int tasks_waiting: number of tasks waiting on :meth:`~.Semaphore.acquire`

    """

    tasks_waiting: int


class Event:
    def __new__(cls) -> Event:
        try:
            return get_async_backend().create_event()
        except AsyncLibraryNotFoundError:
            return EventAdapter()

    def set(self) -> None:
        """Set the flag, notifying all listeners."""
        raise NotImplementedError

    def is_set(self) -> bool:
        """Return ``True`` if the flag is set, ``False`` if not."""
        raise NotImplementedError

    async def wait(self) -> None:
        """
        Wait until the flag has been set.

        If the flag has already been set when this method is called, it returns
        immediately.

        """
        raise NotImplementedError

    def statistics(self) -> EventStatistics:
        """Return statistics about the current state of this event."""
        raise NotImplementedError


class EventAdapter(Event):
    _internal_event: Event | None = None
    _is_set: bool = False

    def __new__(cls) -> EventAdapter:
        return object.__new__(cls)

    @property
    def _event(self) -> Event:
        if self._internal_event is None:
            self._internal_event = get_async_backend().create_event()
            if self._is_set:
                self._internal_event.set()

        return self._internal_event

    def set(self) -> None:
        if self._internal_event is None:
            self._is_set = True
        else:
            self._event.set()

    def is_set(self) -> bool:
        if self._internal_event is None:
            return self._is_set

        return self._internal_event.is_set()

    async def wait(self) -> None:
        await self._event.wait()

    def statistics(self) -> EventStatistics:
        if self._internal_event is None:
            return EventStatistics(tasks_waiting=0)

        return self._internal_event.statistics()
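A brief Event usage sketch tying the above together:

import anyio
from anyio import Event, create_task_group

async def waiter(event: Event) -> None:
    await event.wait()  # blocks until set() is called
    print("event was set")

async def main() -> None:
    event = Event()
    async with create_task_group() as tg:
        tg.start_soon(waiter, event)
        event.set()

anyio.run(main)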
class Lock:
    def __new__(cls, *, fast_acquire: bool = False) -> Lock:
        try:
            return get_async_backend().create_lock(fast_acquire=fast_acquire)
        except AsyncLibraryNotFoundError:
            return LockAdapter(fast_acquire=fast_acquire)

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        raise NotImplementedError

    def release(self) -> None:
        """Release the lock."""
        raise NotImplementedError

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        raise NotImplementedError

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0
        """
        raise NotImplementedError


class LockAdapter(Lock):
    _internal_lock: Lock | None = None

    def __new__(cls, *, fast_acquire: bool = False) -> LockAdapter:
        return object.__new__(cls)

    def __init__(self, *, fast_acquire: bool = False):
        self._fast_acquire = fast_acquire

    @property
    def _lock(self) -> Lock:
        if self._internal_lock is None:
            self._internal_lock = get_async_backend().create_lock(
                fast_acquire=self._fast_acquire
            )

        return self._internal_lock

    async def __aenter__(self) -> None:
        await self._lock.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        if self._internal_lock is not None:
            self._internal_lock.release()

    async def acquire(self) -> None:
        """Acquire the lock."""
        await self._lock.acquire()

    def acquire_nowait(self) -> None:
        """
        Acquire the lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()

    def release(self) -> None:
        """Release the lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is currently held."""
        return self._lock.locked()

    def statistics(self) -> LockStatistics:
        """
        Return statistics about the current state of this lock.

        .. versionadded:: 3.0

        """
        if self._internal_lock is None:
            return LockStatistics(False, None, 0)

        return self._internal_lock.statistics()


class Condition:
    _owner_task: TaskInfo | None = None

    def __init__(self, lock: Lock | None = None):
        self._lock = lock or Lock()
        self._waiters: deque[Event] = deque()

    async def __aenter__(self) -> None:
        await self.acquire()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.release()

    def _check_acquired(self) -> None:
        if self._owner_task != get_current_task():
            raise RuntimeError("The current task is not holding the underlying lock")

    async def acquire(self) -> None:
        """Acquire the underlying lock."""
        await self._lock.acquire()
        self._owner_task = get_current_task()

    def acquire_nowait(self) -> None:
        """
        Acquire the underlying lock, without blocking.

        :raises ~anyio.WouldBlock: if the operation would block

        """
        self._lock.acquire_nowait()
        self._owner_task = get_current_task()

    def release(self) -> None:
        """Release the underlying lock."""
        self._lock.release()

    def locked(self) -> bool:
        """Return True if the lock is set."""
        return self._lock.locked()

    def notify(self, n: int = 1) -> None:
        """Notify exactly n listeners."""
        self._check_acquired()
        for _ in range(n):
            try:
                event = self._waiters.popleft()
            except IndexError:
                break

            event.set()

    def notify_all(self) -> None:
        """Notify all the listeners."""
        self._check_acquired()
        for event in self._waiters:
            event.set()

        self._waiters.clear()

    async def wait(self) -> None:
        """Wait for a notification."""
        await checkpoint()
        event = Event()
        self._waiters.append(event)
        self.release()
        try:
            await event.wait()
        except BaseException:
            if not event.is_set():
                self._waiters.remove(event)

            raise
        finally:
            with CancelScope(shield=True):
                await self.acquire()

    def statistics(self) -> ConditionStatistics:
        """
        Return statistics about the current state of this condition.

        .. versionadded:: 3.0
        """
        return ConditionStatistics(len(self._waiters), self._lock.statistics())
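An illustrative Condition sketch; the sleep is a simplification to let the consumer reach wait() before notify() fires, so the ordering here is illustrative rather than guaranteed:

import anyio
from anyio import Condition, create_task_group

async def consumer(condition: Condition) -> None:
    async with condition:
        await condition.wait()
        print("notified")

async def main() -> None:
    condition = Condition()
    async with create_task_group() as tg:
        tg.start_soon(consumer, condition)
        await anyio.sleep(0.1)  # crude ordering aid; real code should use an Event
        async with condition:
            condition.notify()

anyio.run(main)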
class Semaphore:
|
||||
def __new__(
|
||||
cls,
|
||||
initial_value: int,
|
||||
*,
|
||||
max_value: int | None = None,
|
||||
fast_acquire: bool = False,
|
||||
) -> Semaphore:
|
||||
try:
|
||||
return get_async_backend().create_semaphore(
|
||||
initial_value, max_value=max_value, fast_acquire=fast_acquire
|
||||
)
|
||||
except AsyncLibraryNotFoundError:
|
||||
return SemaphoreAdapter(initial_value, max_value=max_value)
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
initial_value: int,
|
||||
*,
|
||||
max_value: int | None = None,
|
||||
fast_acquire: bool = False,
|
||||
):
|
||||
if not isinstance(initial_value, int):
|
||||
raise TypeError("initial_value must be an integer")
|
||||
if initial_value < 0:
|
||||
raise ValueError("initial_value must be >= 0")
|
||||
if max_value is not None:
|
||||
if not isinstance(max_value, int):
|
||||
raise TypeError("max_value must be an integer or None")
|
||||
if max_value < initial_value:
|
||||
raise ValueError(
|
||||
"max_value must be equal to or higher than initial_value"
|
||||
)
|
||||
|
||||
self._fast_acquire = fast_acquire
|
||||
|
||||
async def __aenter__(self) -> Semaphore:
|
||||
await self.acquire()
|
||||
return self
|
||||
|
||||
async def __aexit__(
|
||||
self,
|
||||
exc_type: type[BaseException] | None,
|
||||
exc_val: BaseException | None,
|
||||
exc_tb: TracebackType | None,
|
||||
) -> None:
|
||||
self.release()
|
||||
|
||||
async def acquire(self) -> None:
|
||||
"""Decrement the semaphore value, blocking if necessary."""
|
||||
raise NotImplementedError
|
||||
|
||||
def acquire_nowait(self) -> None:
|
||||
"""
|
||||
Acquire the underlying lock, without blocking.
|
||||
|
||||
:raises ~anyio.WouldBlock: if the operation would block
|
||||
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
def release(self) -> None:
|
||||
"""Increment the semaphore value."""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def value(self) -> int:
|
||||
"""The current value of the semaphore."""
|
||||
raise NotImplementedError
|
||||
|
||||
@property
|
||||
def max_value(self) -> int | None:
|
||||
"""The maximum value of the semaphore."""
|
||||
raise NotImplementedError
|
||||
|
||||
def statistics(self) -> SemaphoreStatistics:
|
||||
"""
|
||||
Return statistics about the current state of this semaphore.
|
||||
|
||||
.. versionadded:: 3.0
|
||||
"""
|
||||
raise NotImplementedError
|
||||
|
||||
|
||||
class SemaphoreAdapter(Semaphore):
    _internal_semaphore: Semaphore | None = None

    def __new__(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> SemaphoreAdapter:
        return object.__new__(cls)

    def __init__(
        self,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> None:
        super().__init__(initial_value, max_value=max_value, fast_acquire=fast_acquire)
        self._initial_value = initial_value
        self._max_value = max_value

    @property
    def _semaphore(self) -> Semaphore:
        if self._internal_semaphore is None:
            self._internal_semaphore = get_async_backend().create_semaphore(
                self._initial_value, max_value=self._max_value
            )

        return self._internal_semaphore

    async def acquire(self) -> None:
        await self._semaphore.acquire()

    def acquire_nowait(self) -> None:
        self._semaphore.acquire_nowait()

    def release(self) -> None:
        self._semaphore.release()

    @property
    def value(self) -> int:
        if self._internal_semaphore is None:
            return self._initial_value

        return self._semaphore.value

    @property
    def max_value(self) -> int | None:
        return self._max_value

    def statistics(self) -> SemaphoreStatistics:
        if self._internal_semaphore is None:
            return SemaphoreStatistics(tasks_waiting=0)

        return self._semaphore.statistics()


class CapacityLimiter:
    def __new__(cls, total_tokens: float) -> CapacityLimiter:
        try:
            return get_async_backend().create_capacity_limiter(total_tokens)
        except AsyncLibraryNotFoundError:
            return CapacityLimiterAdapter(total_tokens)

    async def __aenter__(self) -> None:
        raise NotImplementedError

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError

    @property
    def total_tokens(self) -> float:
        """
        The total number of tokens available for borrowing.

        This is a read-write property. If the total number of tokens is increased, the
        proportionate number of tasks waiting on this limiter will be granted their
        tokens.

        .. versionchanged:: 3.0
            The property is now writable.

        """
        raise NotImplementedError

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        raise NotImplementedError

    @property
    def borrowed_tokens(self) -> int:
        """The number of tokens that have currently been borrowed."""
        raise NotImplementedError

    @property
    def available_tokens(self) -> float:
        """The number of tokens currently available to be borrowed."""
        raise NotImplementedError

    def acquire_nowait(self) -> None:
        """
        Acquire a token for the current task without waiting for one to become
        available.

        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        """
        Acquire a token without waiting for one to become available.

        :param borrower: the entity borrowing a token
        :raises ~anyio.WouldBlock: if there are no tokens available for borrowing

        """
        raise NotImplementedError

    async def acquire(self) -> None:
        """
        Acquire a token for the current task, waiting if necessary for one to become
        available.

        """
        raise NotImplementedError

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        """
        Acquire a token, waiting if necessary for one to become available.

        :param borrower: the entity borrowing a token

        """
        raise NotImplementedError

    def release(self) -> None:
        """
        Release the token held by the current task.

        :raises RuntimeError: if the current task has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def release_on_behalf_of(self, borrower: object) -> None:
        """
        Release the token held by the given borrower.

        :raises RuntimeError: if the borrower has not borrowed a token from this
            limiter.

        """
        raise NotImplementedError

    def statistics(self) -> CapacityLimiterStatistics:
        """
        Return statistics about the current state of this limiter.

        .. versionadded:: 3.0

        """
        raise NotImplementedError
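# Usage sketch (editor's note, not part of the original module): unlike a
# Semaphore, a CapacityLimiter tracks which borrower holds each token and
# allows total_tokens to be adjusted while tasks are waiting.
#
#     import anyio
#
#     async def fetch(limiter: anyio.CapacityLimiter) -> None:
#         async with limiter:  # borrows a token for the current task
#             await anyio.sleep(0.1)
#
#     async def main() -> None:
#         limiter = anyio.CapacityLimiter(4)
#         async with anyio.create_task_group() as tg:
#             for _ in range(16):
#                 tg.start_soon(fetch, limiter)
#             limiter.total_tokens = 8  # grants tokens to four more waiters
#
#     anyio.run(main)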
class CapacityLimiterAdapter(CapacityLimiter):
    _internal_limiter: CapacityLimiter | None = None

    def __new__(cls, total_tokens: float) -> CapacityLimiterAdapter:
        return object.__new__(cls)

    def __init__(self, total_tokens: float) -> None:
        self.total_tokens = total_tokens

    @property
    def _limiter(self) -> CapacityLimiter:
        if self._internal_limiter is None:
            self._internal_limiter = get_async_backend().create_capacity_limiter(
                self._total_tokens
            )

        return self._internal_limiter

    async def __aenter__(self) -> None:
        await self._limiter.__aenter__()

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return await self._limiter.__aexit__(exc_type, exc_val, exc_tb)

    @property
    def total_tokens(self) -> float:
        if self._internal_limiter is None:
            return self._total_tokens

        return self._internal_limiter.total_tokens

    @total_tokens.setter
    def total_tokens(self, value: float) -> None:
        if not isinstance(value, int) and value is not math.inf:
            raise TypeError("total_tokens must be an int or math.inf")
        elif value < 1:
            raise ValueError("total_tokens must be >= 1")

        if self._internal_limiter is None:
            self._total_tokens = value
            return

        self._limiter.total_tokens = value

    @property
    def borrowed_tokens(self) -> int:
        if self._internal_limiter is None:
            return 0

        return self._internal_limiter.borrowed_tokens

    @property
    def available_tokens(self) -> float:
        if self._internal_limiter is None:
            return self._total_tokens

        return self._internal_limiter.available_tokens

    def acquire_nowait(self) -> None:
        self._limiter.acquire_nowait()

    def acquire_on_behalf_of_nowait(self, borrower: object) -> None:
        self._limiter.acquire_on_behalf_of_nowait(borrower)

    async def acquire(self) -> None:
        await self._limiter.acquire()

    async def acquire_on_behalf_of(self, borrower: object) -> None:
        await self._limiter.acquire_on_behalf_of(borrower)

    def release(self) -> None:
        self._limiter.release()

    def release_on_behalf_of(self, borrower: object) -> None:
        self._limiter.release_on_behalf_of(borrower)

    def statistics(self) -> CapacityLimiterStatistics:
        if self._internal_limiter is None:
            return CapacityLimiterStatistics(
                borrowed_tokens=0,
                total_tokens=self.total_tokens,
                borrowers=(),
                tasks_waiting=0,
            )

        return self._internal_limiter.statistics()


class ResourceGuard:
    """
    A context manager for ensuring that a resource is only used by a single task at a
    time.

    Entering this context manager while a previous entry has not yet exited will
    trigger :exc:`BusyResourceError`.

    :param action: the action to guard against (visible in the :exc:`BusyResourceError`
        when triggered, e.g. "Another task is already {action} this resource")

    .. versionadded:: 4.1
    """

    __slots__ = "action", "_guarded"

    def __init__(self, action: str = "using"):
        self.action: str = action
        self._guarded = False

    def __enter__(self) -> None:
        if self._guarded:
            raise BusyResourceError(self.action)

        self._guarded = True

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        self._guarded = False
        return None
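# Usage sketch (editor's note): ResourceGuard is a cheap, non-blocking
# alternative to a lock for resources that must never be used concurrently;
# a second entrant gets BusyResourceError instead of waiting. This assumes
# the class is re-exported at the package root, as in recent AnyIO releases.
#
#     import anyio
#
#     class SingleReaderStream:
#         def __init__(self) -> None:
#             self._guard = anyio.ResourceGuard("reading from")
#
#         async def receive(self) -> bytes:
#             with self._guard:  # raises BusyResourceError on concurrent use
#                 return await self._do_receive()  # hypothetical helper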
@@ -1,158 +0,0 @@
from __future__ import annotations

import math
from collections.abc import Generator
from contextlib import contextmanager
from types import TracebackType

from ..abc._tasks import TaskGroup, TaskStatus
from ._eventloop import get_async_backend


class _IgnoredTaskStatus(TaskStatus[object]):
    def started(self, value: object = None) -> None:
        pass


TASK_STATUS_IGNORED = _IgnoredTaskStatus()


class CancelScope:
    """
    Wraps a unit of work that can be made separately cancellable.

    :param deadline: The time (clock value) when this scope is cancelled automatically
    :param shield: ``True`` to shield the cancel scope from external cancellation
    """

    def __new__(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        return get_async_backend().create_cancel_scope(shield=shield, deadline=deadline)

    def cancel(self) -> None:
        """Cancel this scope immediately."""
        raise NotImplementedError

    @property
    def deadline(self) -> float:
        """
        The time (clock value) when this scope is cancelled automatically.

        Will be ``float('inf')`` if no timeout has been set.

        """
        raise NotImplementedError

    @deadline.setter
    def deadline(self, value: float) -> None:
        raise NotImplementedError

    @property
    def cancel_called(self) -> bool:
        """``True`` if :meth:`cancel` has been called."""
        raise NotImplementedError

    @property
    def cancelled_caught(self) -> bool:
        """
        ``True`` if this scope suppressed a cancellation exception it itself raised.

        This is typically used to check if any work was interrupted, or to see if the
        scope was cancelled due to its deadline being reached. The value will, however,
        only be ``True`` if the cancellation was triggered by the scope itself (and not
        an outer scope).

        """
        raise NotImplementedError

    @property
    def shield(self) -> bool:
        """
        ``True`` if this scope is shielded from external cancellation.

        While a scope is shielded, it will not receive cancellations from outside.

        """
        raise NotImplementedError

    @shield.setter
    def shield(self, value: bool) -> None:
        raise NotImplementedError

    def __enter__(self) -> CancelScope:
        raise NotImplementedError

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        raise NotImplementedError
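# Usage sketch (editor's note): a CancelScope is entered like a regular
# context manager; cancelled_caught reveals afterwards whether the scope
# swallowed its own cancellation.
#
#     import anyio
#
#     async def main() -> None:
#         with anyio.CancelScope(deadline=anyio.current_time() + 5) as scope:
#             await anyio.sleep(10)  # cancelled when the deadline arrives
#         if scope.cancelled_caught:
#             print("the work was cut short")
#
#     anyio.run(main)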
@contextmanager
def fail_after(
    delay: float | None, shield: bool = False
) -> Generator[CancelScope, None, None]:
    """
    Create a context manager which raises a :class:`TimeoutError` if it does not
    finish in time.

    :param delay: maximum allowed time (in seconds) before raising the exception, or
        ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a context manager that yields a cancel scope
    :rtype: :class:`~typing.ContextManager`\\[:class:`~anyio.CancelScope`\\]

    """
    current_time = get_async_backend().current_time
    deadline = (current_time() + delay) if delay is not None else math.inf
    with get_async_backend().create_cancel_scope(
        deadline=deadline, shield=shield
    ) as cancel_scope:
        yield cancel_scope

    if cancel_scope.cancelled_caught and current_time() >= cancel_scope.deadline:
        raise TimeoutError


def move_on_after(delay: float | None, shield: bool = False) -> CancelScope:
    """
    Create a cancel scope with a deadline that expires after the given delay.

    :param delay: maximum allowed time (in seconds) before exiting the context block,
        or ``None`` to disable the timeout
    :param shield: ``True`` to shield the cancel scope from external cancellation
    :return: a cancel scope

    """
    deadline = (
        (get_async_backend().current_time() + delay) if delay is not None else math.inf
    )
    return get_async_backend().create_cancel_scope(deadline=deadline, shield=shield)
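# Usage sketch (editor's note): the two helpers above differ only in what
# happens when the deadline is reached.
#
#     import anyio
#
#     async def main() -> None:
#         with anyio.move_on_after(1) as scope:  # gives up silently
#             await anyio.sleep(10)
#         print("timed out:", scope.cancelled_caught)
#
#         try:
#             with anyio.fail_after(1):  # raises instead
#                 await anyio.sleep(10)
#         except TimeoutError:
#             print("timed out loudly")
#
#     anyio.run(main)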
def current_effective_deadline() -> float:
    """
    Return the nearest deadline among all the cancel scopes effective for the current
    task.

    :return: a clock value from the event loop's internal clock (or ``float('inf')`` if
        there is no deadline in effect, or ``float('-inf')`` if the current scope has
        been cancelled)
    :rtype: float

    """
    return get_async_backend().current_effective_deadline()


def create_task_group() -> TaskGroup:
    """
    Create a task group.

    :return: a task group

    """
    return get_async_backend().create_task_group()
@@ -1,78 +0,0 @@
from __future__ import annotations

from collections.abc import Awaitable, Generator
from typing import Any, cast

from ._eventloop import get_async_backend


class TaskInfo:
    """
    Represents an asynchronous task.

    :ivar int id: the unique identifier of the task
    :ivar parent_id: the identifier of the parent task, if any
    :vartype parent_id: Optional[int]
    :ivar str name: the description of the task (if any)
    :ivar ~collections.abc.Coroutine coro: the coroutine object of the task
    """

    __slots__ = "_name", "id", "parent_id", "name", "coro"

    def __init__(
        self,
        id: int,
        parent_id: int | None,
        name: str | None,
        coro: Generator[Any, Any, Any] | Awaitable[Any],
    ):
        func = get_current_task
        self._name = f"{func.__module__}.{func.__qualname__}"
        self.id: int = id
        self.parent_id: int | None = parent_id
        self.name: str | None = name
        self.coro: Generator[Any, Any, Any] | Awaitable[Any] = coro

    def __eq__(self, other: object) -> bool:
        if isinstance(other, TaskInfo):
            return self.id == other.id

        return NotImplemented

    def __hash__(self) -> int:
        return hash(self.id)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}(id={self.id!r}, name={self.name!r})"

    def has_pending_cancellation(self) -> bool:
        """
        Return ``True`` if the task has a cancellation pending, ``False`` otherwise.

        """
        return False


def get_current_task() -> TaskInfo:
    """
    Return the current task.

    :return: a representation of the current task

    """
    return get_async_backend().get_current_task()


def get_running_tasks() -> list[TaskInfo]:
    """
    Return a list of running tasks in the current event loop.

    :return: a list of task info objects

    """
    return cast("list[TaskInfo]", get_async_backend().get_running_tasks())


async def wait_all_tasks_blocked() -> None:
    """Wait until all other tasks are waiting for something."""
    await get_async_backend().wait_all_tasks_blocked()
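# Usage sketch (editor's note): the introspection helpers above are mainly
# useful for debugging and testing.
#
#     import anyio
#
#     async def main() -> None:
#         me = anyio.get_current_task()
#         print(me.id, me.name, me.parent_id)
#         print(len(anyio.get_running_tasks()), "tasks currently running")
#
#     anyio.run(main)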
@@ -1,81 +0,0 @@
from __future__ import annotations

from collections.abc import Callable, Mapping
from typing import Any, TypeVar, final, overload

from ._exceptions import TypedAttributeLookupError

T_Attr = TypeVar("T_Attr")
T_Default = TypeVar("T_Default")
undefined = object()


def typed_attribute() -> Any:
    """Return a unique object, used to mark typed attributes."""
    return object()


class TypedAttributeSet:
    """
    Superclass for typed attribute collections.

    Checks that every public attribute of every subclass has a type annotation.
    """

    def __init_subclass__(cls) -> None:
        annotations: dict[str, Any] = getattr(cls, "__annotations__", {})
        for attrname in dir(cls):
            if not attrname.startswith("_") and attrname not in annotations:
                raise TypeError(
                    f"Attribute {attrname!r} is missing its type annotation"
                )

        super().__init_subclass__()


class TypedAttributeProvider:
    """Base class for classes that wish to provide typed extra attributes."""

    @property
    def extra_attributes(self) -> Mapping[T_Attr, Callable[[], T_Attr]]:
        """
        A mapping of the extra attributes to callables that return the corresponding
        values.

        If the provider wraps another provider, the attributes from that wrapper should
        also be included in the returned mapping (but the wrapper may override the
        callables from the wrapped instance).

        """
        return {}

    @overload
    def extra(self, attribute: T_Attr) -> T_Attr: ...

    @overload
    def extra(self, attribute: T_Attr, default: T_Default) -> T_Attr | T_Default: ...

    @final
    def extra(self, attribute: Any, default: object = undefined) -> object:
        """
        extra(attribute, default=undefined)

        Return the value of the given typed extra attribute.

        :param attribute: the attribute (member of a :class:`~TypedAttributeSet`) to
            look for
        :param default: the value that should be returned if no value is found for the
            attribute
        :raises ~anyio.TypedAttributeLookupError: if the search failed and no default
            value was given

        """
        try:
            getter = self.extra_attributes[attribute]
        except KeyError:
            if default is undefined:
                raise TypedAttributeLookupError("Attribute not found") from None
            else:
                return default

        return getter()
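# Usage sketch (editor's note, all names below are made up for illustration):
# declaring a typed attribute set and a provider that serves it.
#
#     class HTTPAttribute(TypedAttributeSet):
#         status_code: int = typed_attribute()
#
#     class Response(TypedAttributeProvider):
#         @property
#         def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
#             return {HTTPAttribute.status_code: lambda: 200}
#
#     Response().extra(HTTPAttribute.status_code)      # -> 200
#     Response().extra(HTTPAttribute.status_code, -1)  # -> default on a miss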
@@ -1,55 +0,0 @@
from __future__ import annotations

from ._eventloop import AsyncBackend as AsyncBackend
from ._resources import AsyncResource as AsyncResource
from ._sockets import ConnectedUDPSocket as ConnectedUDPSocket
from ._sockets import ConnectedUNIXDatagramSocket as ConnectedUNIXDatagramSocket
from ._sockets import IPAddressType as IPAddressType
from ._sockets import IPSockAddrType as IPSockAddrType
from ._sockets import SocketAttribute as SocketAttribute
from ._sockets import SocketListener as SocketListener
from ._sockets import SocketStream as SocketStream
from ._sockets import UDPPacketType as UDPPacketType
from ._sockets import UDPSocket as UDPSocket
from ._sockets import UNIXDatagramPacketType as UNIXDatagramPacketType
from ._sockets import UNIXDatagramSocket as UNIXDatagramSocket
from ._sockets import UNIXSocketStream as UNIXSocketStream
from ._streams import AnyByteReceiveStream as AnyByteReceiveStream
from ._streams import AnyByteSendStream as AnyByteSendStream
from ._streams import AnyByteStream as AnyByteStream
from ._streams import AnyUnreliableByteReceiveStream as AnyUnreliableByteReceiveStream
from ._streams import AnyUnreliableByteSendStream as AnyUnreliableByteSendStream
from ._streams import AnyUnreliableByteStream as AnyUnreliableByteStream
from ._streams import ByteReceiveStream as ByteReceiveStream
from ._streams import ByteSendStream as ByteSendStream
from ._streams import ByteStream as ByteStream
from ._streams import Listener as Listener
from ._streams import ObjectReceiveStream as ObjectReceiveStream
from ._streams import ObjectSendStream as ObjectSendStream
from ._streams import ObjectStream as ObjectStream
from ._streams import UnreliableObjectReceiveStream as UnreliableObjectReceiveStream
from ._streams import UnreliableObjectSendStream as UnreliableObjectSendStream
from ._streams import UnreliableObjectStream as UnreliableObjectStream
from ._subprocesses import Process as Process
from ._tasks import TaskGroup as TaskGroup
from ._tasks import TaskStatus as TaskStatus
from ._testing import TestRunner as TestRunner

# Re-exported here, for backwards compatibility
# isort: off
from .._core._synchronization import (
    CapacityLimiter as CapacityLimiter,
    Condition as Condition,
    Event as Event,
    Lock as Lock,
    Semaphore as Semaphore,
)
from .._core._tasks import CancelScope as CancelScope
from ..from_thread import BlockingPortal as BlockingPortal

# Re-export imports so they look like they live directly in this package
for __value in list(locals().values()):
    if getattr(__value, "__module__", "").startswith("anyio.abc."):
        __value.__module__ = __name__

del __value
@@ -1,376 +0,0 @@
from __future__ import annotations

import math
import sys
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncIterator, Awaitable, Callable, Sequence
from contextlib import AbstractContextManager
from os import PathLike
from signal import Signals
from socket import AddressFamily, SocketKind, socket
from typing import (
    IO,
    TYPE_CHECKING,
    Any,
    TypeVar,
    Union,
    overload,
)

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if sys.version_info >= (3, 10):
    from typing import TypeAlias
else:
    from typing_extensions import TypeAlias

if TYPE_CHECKING:
    from _typeshed import HasFileno

    from .._core._synchronization import CapacityLimiter, Event, Lock, Semaphore
    from .._core._tasks import CancelScope
    from .._core._testing import TaskInfo
    from ..from_thread import BlockingPortal
    from ._sockets import (
        ConnectedUDPSocket,
        ConnectedUNIXDatagramSocket,
        IPSockAddrType,
        SocketListener,
        SocketStream,
        UDPSocket,
        UNIXDatagramSocket,
        UNIXSocketStream,
    )
    from ._subprocesses import Process
    from ._tasks import TaskGroup
    from ._testing import TestRunner

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
StrOrBytesPath: TypeAlias = Union[str, bytes, "PathLike[str]", "PathLike[bytes]"]


class AsyncBackend(metaclass=ABCMeta):
    @classmethod
    @abstractmethod
    def run(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        options: dict[str, Any],
    ) -> T_Retval:
        """
        Run the given coroutine function in an asynchronous event loop.

        The current thread must not be already running an event loop.

        :param func: a coroutine function
        :param args: positional arguments to ``func``
        :param kwargs: keyword arguments to ``func``
        :param options: keyword arguments to call the backend ``run()`` implementation
            with
        :return: the return value of the coroutine function
        """

    @classmethod
    @abstractmethod
    def current_token(cls) -> object:
        """
        Return a backend-specific token object identifying the running event loop.

        :return: an opaque token that can later be passed to the ``*_from_thread``
            methods to target this event loop
        """

    @classmethod
    @abstractmethod
    def current_time(cls) -> float:
        """
        Return the current value of the event loop's internal clock.

        :return: the clock value (seconds)
        """

    @classmethod
    @abstractmethod
    def cancelled_exception_class(cls) -> type[BaseException]:
        """Return the exception class that is raised in a task if it's cancelled."""

    @classmethod
    @abstractmethod
    async def checkpoint(cls) -> None:
        """
        Check if the task has been cancelled, and allow rescheduling of other tasks.

        This is effectively the same as running :meth:`checkpoint_if_cancelled` and
        then :meth:`cancel_shielded_checkpoint`.
        """

    @classmethod
    async def checkpoint_if_cancelled(cls) -> None:
        """
        Check if the current task group has been cancelled.

        This will check if the task has been cancelled, but will not allow other tasks
        to be scheduled if not.

        """
        if cls.current_effective_deadline() == -math.inf:
            await cls.checkpoint()

    @classmethod
    async def cancel_shielded_checkpoint(cls) -> None:
        """
        Allow the rescheduling of other tasks.

        This will give other tasks the opportunity to run, but without checking if the
        current task group has been cancelled, unlike with :meth:`checkpoint`.

        """
        with cls.create_cancel_scope(shield=True):
            await cls.sleep(0)

    @classmethod
    @abstractmethod
    async def sleep(cls, delay: float) -> None:
        """
        Pause the current task for the specified duration.

        :param delay: the duration, in seconds
        """

    @classmethod
    @abstractmethod
    def create_cancel_scope(
        cls, *, deadline: float = math.inf, shield: bool = False
    ) -> CancelScope:
        pass

    @classmethod
    @abstractmethod
    def current_effective_deadline(cls) -> float:
        """
        Return the nearest deadline among all the cancel scopes effective for the
        current task.

        :return:
            - a clock value from the event loop's internal clock
            - ``inf`` if there is no deadline in effect
            - ``-inf`` if the current scope has been cancelled
        :rtype: float
        """

    @classmethod
    @abstractmethod
    def create_task_group(cls) -> TaskGroup:
        pass

    @classmethod
    @abstractmethod
    def create_event(cls) -> Event:
        pass

    @classmethod
    @abstractmethod
    def create_lock(cls, *, fast_acquire: bool) -> Lock:
        pass

    @classmethod
    @abstractmethod
    def create_semaphore(
        cls,
        initial_value: int,
        *,
        max_value: int | None = None,
        fast_acquire: bool = False,
    ) -> Semaphore:
        pass

    @classmethod
    @abstractmethod
    def create_capacity_limiter(cls, total_tokens: float) -> CapacityLimiter:
        pass

    @classmethod
    @abstractmethod
    async def run_sync_in_worker_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        abandon_on_cancel: bool = False,
        limiter: CapacityLimiter | None = None,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def check_cancelled(cls) -> None:
        pass

    @classmethod
    @abstractmethod
    def run_async_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def run_sync_from_thread(
        cls,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        args: tuple[Unpack[PosArgsT]],
        token: object,
    ) -> T_Retval:
        pass

    @classmethod
    @abstractmethod
    def create_blocking_portal(cls) -> BlockingPortal:
        pass

    @classmethod
    @abstractmethod
    async def open_process(
        cls,
        command: StrOrBytesPath | Sequence[StrOrBytesPath],
        *,
        stdin: int | IO[Any] | None,
        stdout: int | IO[Any] | None,
        stderr: int | IO[Any] | None,
        **kwargs: Any,
    ) -> Process:
        pass

    @classmethod
    @abstractmethod
    def setup_process_pool_exit_at_shutdown(cls, workers: set[Process]) -> None:
        pass

    @classmethod
    @abstractmethod
    async def connect_tcp(
        cls, host: str, port: int, local_address: IPSockAddrType | None = None
    ) -> SocketStream:
        pass

    @classmethod
    @abstractmethod
    async def connect_unix(cls, path: str | bytes) -> UNIXSocketStream:
        pass

    @classmethod
    @abstractmethod
    def create_tcp_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    def create_unix_listener(cls, sock: socket) -> SocketListener:
        pass

    @classmethod
    @abstractmethod
    async def create_udp_socket(
        cls,
        family: AddressFamily,
        local_address: IPSockAddrType | None,
        remote_address: IPSockAddrType | None,
        reuse_port: bool,
    ) -> UDPSocket | ConnectedUDPSocket:
        pass

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: None
    ) -> UNIXDatagramSocket: ...

    @classmethod
    @overload
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes
    ) -> ConnectedUNIXDatagramSocket: ...

    @classmethod
    @abstractmethod
    async def create_unix_datagram_socket(
        cls, raw_socket: socket, remote_path: str | bytes | None
    ) -> UNIXDatagramSocket | ConnectedUNIXDatagramSocket:
        pass

    @classmethod
    @abstractmethod
    async def getaddrinfo(
        cls,
        host: bytes | str | None,
        port: str | int | None,
        *,
        family: int | AddressFamily = 0,
        type: int | SocketKind = 0,
        proto: int = 0,
        flags: int = 0,
    ) -> list[
        tuple[
            AddressFamily,
            SocketKind,
            int,
            str,
            tuple[str, int] | tuple[str, int, int, int],
        ]
    ]:
        pass

    @classmethod
    @abstractmethod
    async def getnameinfo(
        cls, sockaddr: IPSockAddrType, flags: int = 0
    ) -> tuple[str, str]:
        pass

    @classmethod
    @abstractmethod
    async def wait_readable(cls, obj: HasFileno | int) -> None:
        pass

    @classmethod
    @abstractmethod
    async def wait_writable(cls, obj: HasFileno | int) -> None:
        pass

    @classmethod
    @abstractmethod
    def current_default_thread_limiter(cls) -> CapacityLimiter:
        pass

    @classmethod
    @abstractmethod
    def open_signal_receiver(
        cls, *signals: Signals
    ) -> AbstractContextManager[AsyncIterator[Signals]]:
        pass

    @classmethod
    @abstractmethod
    def get_current_task(cls) -> TaskInfo:
        pass

    @classmethod
    @abstractmethod
    def get_running_tasks(cls) -> Sequence[TaskInfo]:
        pass

    @classmethod
    @abstractmethod
    async def wait_all_tasks_blocked(cls) -> None:
        pass

    @classmethod
    @abstractmethod
    def create_test_runner(cls, options: dict[str, Any]) -> TestRunner:
        pass
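# Usage note (editor's note): application code never calls AsyncBackend
# directly; anyio.run() resolves a backend by name and delegates to these
# classmethods.
#
#     import anyio
#
#     async def main() -> int:
#         await anyio.sleep(0)  # ends up in AsyncBackend.sleep()
#         return 42
#
#     anyio.run(main)                  # asyncio backend by default
#     anyio.run(main, backend="trio")  # requires trio to be installed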
@@ -1,33 +0,0 @@
from __future__ import annotations

from abc import ABCMeta, abstractmethod
from types import TracebackType
from typing import TypeVar

T = TypeVar("T")


class AsyncResource(metaclass=ABCMeta):
    """
    Abstract base class for all closeable asynchronous resources.

    Works as an asynchronous context manager which returns the instance itself on
    enter, and calls :meth:`aclose` on exit.
    """

    __slots__ = ()

    async def __aenter__(self: T) -> T:
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        await self.aclose()

    @abstractmethod
    async def aclose(self) -> None:
        """Close the resource."""
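# Usage sketch (editor's note): any subclass only needs aclose() to get
# async-with support.
#
#     class Connection(AsyncResource):
#         async def aclose(self) -> None:
#             ...  # release the underlying transport here
#
#     async def use_it() -> None:
#         async with Connection() as conn:  # aclose() runs on exit
#             ...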
@@ -1,194 +0,0 @@
from __future__ import annotations

import socket
from abc import abstractmethod
from collections.abc import Callable, Collection, Mapping
from contextlib import AsyncExitStack
from io import IOBase
from ipaddress import IPv4Address, IPv6Address
from socket import AddressFamily
from types import TracebackType
from typing import Any, TypeVar, Union

from .._core._typedattr import (
    TypedAttributeProvider,
    TypedAttributeSet,
    typed_attribute,
)
from ._streams import ByteStream, Listener, UnreliableObjectStream
from ._tasks import TaskGroup

IPAddressType = Union[str, IPv4Address, IPv6Address]
IPSockAddrType = tuple[str, int]
SockAddrType = Union[IPSockAddrType, str]
UDPPacketType = tuple[bytes, IPSockAddrType]
UNIXDatagramPacketType = tuple[bytes, str]
T_Retval = TypeVar("T_Retval")


class _NullAsyncContextManager:
    async def __aenter__(self) -> None:
        pass

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        return None


class SocketAttribute(TypedAttributeSet):
    #: the address family of the underlying socket
    family: AddressFamily = typed_attribute()
    #: the local socket address of the underlying socket
    local_address: SockAddrType = typed_attribute()
    #: for IP addresses, the local port the underlying socket is bound to
    local_port: int = typed_attribute()
    #: the underlying stdlib socket object
    raw_socket: socket.socket = typed_attribute()
    #: the remote address the underlying socket is connected to
    remote_address: SockAddrType = typed_attribute()
    #: for IP addresses, the remote port the underlying socket is connected to
    remote_port: int = typed_attribute()


class _SocketProvider(TypedAttributeProvider):
    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        from .._core._sockets import convert_ipv6_sockaddr as convert

        attributes: dict[Any, Callable[[], Any]] = {
            SocketAttribute.family: lambda: self._raw_socket.family,
            SocketAttribute.local_address: lambda: convert(
                self._raw_socket.getsockname()
            ),
            SocketAttribute.raw_socket: lambda: self._raw_socket,
        }
        try:
            peername: tuple[str, int] | None = convert(self._raw_socket.getpeername())
        except OSError:
            peername = None

        # Provide the remote address for connected sockets
        if peername is not None:
            attributes[SocketAttribute.remote_address] = lambda: peername

        # Provide local and remote ports for IP based sockets
        if self._raw_socket.family in (AddressFamily.AF_INET, AddressFamily.AF_INET6):
            attributes[SocketAttribute.local_port] = (
                lambda: self._raw_socket.getsockname()[1]
            )
            if peername is not None:
                remote_port = peername[1]
                attributes[SocketAttribute.remote_port] = lambda: remote_port

        return attributes

    @property
    @abstractmethod
    def _raw_socket(self) -> socket.socket:
        pass


class SocketStream(ByteStream, _SocketProvider):
    """
    Transports bytes over a socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXSocketStream(SocketStream):
    @abstractmethod
    async def send_fds(self, message: bytes, fds: Collection[int | IOBase]) -> None:
        """
        Send file descriptors along with a message to the peer.

        :param message: a non-empty bytestring
        :param fds: a collection of files (either numeric file descriptors or open
            file or socket objects)
        """

    @abstractmethod
    async def receive_fds(self, msglen: int, maxfds: int) -> tuple[bytes, list[int]]:
        """
        Receive file descriptors along with a message from the peer.

        :param msglen: length of the message to expect from the peer
        :param maxfds: maximum number of file descriptors to expect from the peer
        :return: a tuple of (message, file descriptors)
        """


class SocketListener(Listener[SocketStream], _SocketProvider):
    """
    Listens to incoming socket connections.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    @abstractmethod
    async def accept(self) -> SocketStream:
        """Accept an incoming connection."""

    async def serve(
        self,
        handler: Callable[[SocketStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        from .. import create_task_group

        async with AsyncExitStack() as stack:
            if task_group is None:
                task_group = await stack.enter_async_context(create_task_group())

            while True:
                stream = await self.accept()
                task_group.start_soon(handler, stream)


class UDPSocket(UnreliableObjectStream[UDPPacketType], _SocketProvider):
    """
    Represents an unconnected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, host: str, port: int) -> None:
        """
        Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, (host, port))).

        """
        return await self.send((data, (host, port)))


class ConnectedUDPSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected UDP socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """


class UNIXDatagramSocket(
    UnreliableObjectStream[UNIXDatagramPacketType], _SocketProvider
):
    """
    Represents an unconnected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """

    async def sendto(self, data: bytes, path: str) -> None:
        """Alias for :meth:`~.UnreliableObjectSendStream.send` ((data, path))."""
        return await self.send((data, path))


class ConnectedUNIXDatagramSocket(UnreliableObjectStream[bytes], _SocketProvider):
    """
    Represents a connected Unix datagram socket.

    Supports all relevant extra attributes from :class:`~SocketAttribute`.
    """
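# Usage sketch (editor's note): an echo server built on the serve() loop
# above, using the public anyio.create_tcp_listener() factory.
#
#     import anyio
#     import anyio.abc
#
#     async def handle(stream: anyio.abc.SocketStream) -> None:
#         async with stream:
#             await stream.send(await stream.receive())
#
#     async def main() -> None:
#         listener = await anyio.create_tcp_listener(local_port=8000)
#         await listener.serve(handle)  # runs until cancelled
#
#     anyio.run(main)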
@@ -1,203 +0,0 @@
from __future__ import annotations

from abc import abstractmethod
from collections.abc import Callable
from typing import Any, Generic, TypeVar, Union

from .._core._exceptions import EndOfStream
from .._core._typedattr import TypedAttributeProvider
from ._resources import AsyncResource
from ._tasks import TaskGroup

T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)


class UnreliableObjectReceiveStream(
    Generic[T_co], AsyncResource, TypedAttributeProvider
):
    """
    An interface for receiving objects.

    This interface makes no guarantees that the received messages arrive in the order
    in which they were sent, or that no messages are missed.

    Asynchronously iterating over objects of this type will yield objects matching the
    given type parameter.
    """

    def __aiter__(self) -> UnreliableObjectReceiveStream[T_co]:
        return self

    async def __anext__(self) -> T_co:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self) -> T_co:
        """
        Receive the next item.

        :raises ~anyio.ClosedResourceError: if the receive stream has been explicitly
            closed
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectSendStream(
    Generic[T_contra], AsyncResource, TypedAttributeProvider
):
    """
    An interface for sending objects.

    This interface makes no guarantees that the messages sent will reach the
    recipient(s) in the same order in which they were sent, or at all.
    """

    @abstractmethod
    async def send(self, item: T_contra) -> None:
        """
        Send an item to the peer(s).

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if the send stream has been explicitly
            closed
        :raises ~anyio.BrokenResourceError: if this stream has been rendered unusable
            due to external causes
        """


class UnreliableObjectStream(
    UnreliableObjectReceiveStream[T_Item], UnreliableObjectSendStream[T_Item]
):
    """
    A bidirectional message stream which does not guarantee the order or reliability
    of message delivery.
    """


class ObjectReceiveStream(UnreliableObjectReceiveStream[T_co]):
    """
    A receive message stream which guarantees that messages are received in the same
    order in which they were sent, and that no messages are missed.
    """


class ObjectSendStream(UnreliableObjectSendStream[T_contra]):
    """
    A send message stream which guarantees that messages are delivered in the same
    order in which they were sent, without missing any messages in the middle.
    """


class ObjectStream(
    ObjectReceiveStream[T_Item],
    ObjectSendStream[T_Item],
    UnreliableObjectStream[T_Item],
):
    """
    A bidirectional message stream which guarantees the order and reliability of
    message delivery.
    """

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


class ByteReceiveStream(AsyncResource, TypedAttributeProvider):
    """
    An interface for receiving bytes from a single peer.

    Iterating this byte stream will yield a byte string of arbitrary length, but no
    more than 65536 bytes.
    """

    def __aiter__(self) -> ByteReceiveStream:
        return self

    async def __anext__(self) -> bytes:
        try:
            return await self.receive()
        except EndOfStream:
            raise StopAsyncIteration

    @abstractmethod
    async def receive(self, max_bytes: int = 65536) -> bytes:
        """
        Receive at most ``max_bytes`` bytes from the peer.

        .. note:: Implementors of this interface should not return an empty
            :class:`bytes` object, and users should ignore them.

        :param max_bytes: maximum number of bytes to receive
        :return: the received bytes
        :raises ~anyio.EndOfStream: if this stream has been closed from the other end
        """


class ByteSendStream(AsyncResource, TypedAttributeProvider):
    """An interface for sending bytes to a single peer."""

    @abstractmethod
    async def send(self, item: bytes) -> None:
        """
        Send the given bytes to the peer.

        :param item: the bytes to send
        """


class ByteStream(ByteReceiveStream, ByteSendStream):
    """A bidirectional byte stream."""

    @abstractmethod
    async def send_eof(self) -> None:
        """
        Send an end-of-file indication to the peer.

        You should not try to send any further data to this stream after calling this
        method. This method is idempotent (does nothing on successive calls).
        """


#: Type alias for all unreliable bytes-oriented receive streams.
AnyUnreliableByteReceiveStream = Union[
    UnreliableObjectReceiveStream[bytes], ByteReceiveStream
]
#: Type alias for all unreliable bytes-oriented send streams.
AnyUnreliableByteSendStream = Union[UnreliableObjectSendStream[bytes], ByteSendStream]
#: Type alias for all unreliable bytes-oriented streams.
AnyUnreliableByteStream = Union[UnreliableObjectStream[bytes], ByteStream]
#: Type alias for all bytes-oriented receive streams.
AnyByteReceiveStream = Union[ObjectReceiveStream[bytes], ByteReceiveStream]
#: Type alias for all bytes-oriented send streams.
AnyByteSendStream = Union[ObjectSendStream[bytes], ByteSendStream]
#: Type alias for all bytes-oriented streams.
AnyByteStream = Union[ObjectStream[bytes], ByteStream]


class Listener(Generic[T_co], AsyncResource, TypedAttributeProvider):
    """An interface for objects that let you accept incoming connections."""

    @abstractmethod
    async def serve(
        self, handler: Callable[[T_co], Any], task_group: TaskGroup | None = None
    ) -> None:
        """
        Accept incoming connections as they come in and start tasks to handle them.

        :param handler: a callable that will be used to handle each accepted
            connection
        :param task_group: the task group that will be used to start tasks for
            handling each accepted connection (if omitted, an ad-hoc task group will
            be created)
        """
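# Usage sketch (editor's note): memory object streams are the in-process
# implementation of the ObjectSendStream/ObjectReceiveStream interfaces
# above (subscript syntax as in AnyIO 4).
#
#     import anyio
#
#     async def main() -> None:
#         send, receive = anyio.create_memory_object_stream[int](max_buffer_size=5)
#         async with send, receive:
#             await send.send(1)
#             assert await receive.receive() == 1
#
#     anyio.run(main)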
@@ -1,79 +0,0 @@
from __future__ import annotations

from abc import abstractmethod
from signal import Signals

from ._resources import AsyncResource
from ._streams import ByteReceiveStream, ByteSendStream


class Process(AsyncResource):
    """An asynchronous version of :class:`subprocess.Popen`."""

    @abstractmethod
    async def wait(self) -> int:
        """
        Wait until the process exits.

        :return: the exit code of the process
        """

    @abstractmethod
    def terminate(self) -> None:
        """
        Terminate the process, gracefully if possible.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGTERM`` to the process.

        .. seealso:: :meth:`subprocess.Popen.terminate`
        """

    @abstractmethod
    def kill(self) -> None:
        """
        Kill the process.

        On Windows, this calls ``TerminateProcess()``.
        On POSIX systems, this sends ``SIGKILL`` to the process.

        .. seealso:: :meth:`subprocess.Popen.kill`
        """

    @abstractmethod
    def send_signal(self, signal: Signals) -> None:
        """
        Send a signal to the subprocess.

        .. seealso:: :meth:`subprocess.Popen.send_signal`

        :param signal: the signal number (e.g. :data:`signal.SIGHUP`)
        """

    @property
    @abstractmethod
    def pid(self) -> int:
        """The process ID of the process."""

    @property
    @abstractmethod
    def returncode(self) -> int | None:
        """
        The return code of the process. If the process has not yet terminated, this
        will be ``None``.
        """

    @property
    @abstractmethod
    def stdin(self) -> ByteSendStream | None:
        """The stream for the standard input of the process."""

    @property
    @abstractmethod
    def stdout(self) -> ByteReceiveStream | None:
        """The stream for the standard output of the process."""

    @property
    @abstractmethod
    def stderr(self) -> ByteReceiveStream | None:
        """The stream for the standard error output of the process."""
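# Usage sketch (editor's note): Process instances come from the public
# anyio.open_process() factory; stdout/stderr are the byte streams declared
# above.
#
#     import anyio
#
#     async def main() -> None:
#         async with await anyio.open_process(["echo", "hello"]) as process:
#             output = await process.stdout.receive()
#             await process.wait()
#
#     anyio.run(main)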
@@ -1,101 +0,0 @@
from __future__ import annotations

import sys
from abc import ABCMeta, abstractmethod
from collections.abc import Awaitable, Callable
from types import TracebackType
from typing import TYPE_CHECKING, Any, Protocol, TypeVar, overload

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

if TYPE_CHECKING:
    from .._core._tasks import CancelScope

T_Retval = TypeVar("T_Retval")
T_contra = TypeVar("T_contra", contravariant=True)
PosArgsT = TypeVarTuple("PosArgsT")


class TaskStatus(Protocol[T_contra]):
    @overload
    def started(self: TaskStatus[None]) -> None: ...

    @overload
    def started(self, value: T_contra) -> None: ...

    def started(self, value: T_contra | None = None) -> None:
        """
        Signal that the task has started.

        :param value: object passed back to the starter of the task
        """


class TaskGroup(metaclass=ABCMeta):
    """
    Groups several asynchronous tasks together.

    :ivar cancel_scope: the cancel scope inherited by all child tasks
    :vartype cancel_scope: CancelScope

    .. note:: On asyncio, support for eager task factories is considered to be
        **experimental**. In particular, they don't follow the usual semantics of new
        tasks being scheduled on the next iteration of the event loop, and may thus
        cause unexpected behavior in code that wasn't written with such semantics in
        mind.
    """

    cancel_scope: CancelScope

    @abstractmethod
    def start_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[Any]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> None:
        """
        Start a new task in this task group.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def start(
        self,
        func: Callable[..., Awaitable[Any]],
        *args: object,
        name: object = None,
    ) -> Any:
        """
        Start a new task and wait until it signals for readiness.

        :param func: a coroutine function
        :param args: positional arguments to call the function with
        :param name: name of the task, for the purposes of introspection and debugging
        :return: the value passed to ``task_status.started()``
        :raises RuntimeError: if the task finishes without calling
            ``task_status.started()``

        .. versionadded:: 3.0
        """

    @abstractmethod
    async def __aenter__(self) -> TaskGroup:
        """Enter the task group context and allow starting new tasks."""

    @abstractmethod
    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        """Exit the task group context waiting for all tasks to finish."""
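# Usage sketch (editor's note): start() blocks until the child calls
# task_status.started(), while start_soon() returns immediately.
#
#     import anyio
#     from anyio.abc import TaskStatus
#
#     async def server(*, task_status: TaskStatus[int] = anyio.TASK_STATUS_IGNORED) -> None:
#         port = 8000  # hypothetical setup work happens here
#         task_status.started(port)  # unblocks the caller of start()
#         await anyio.sleep_forever()
#
#     async def main() -> None:
#         async with anyio.create_task_group() as tg:
#             port = await tg.start(server)  # waits for started()
#             tg.start_soon(anyio.sleep, 1)  # fire and forget
#             tg.cancel_scope.cancel()
#
#     anyio.run(main)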
@@ -1,65 +0,0 @@
from __future__ import annotations

import types
from abc import ABCMeta, abstractmethod
from collections.abc import AsyncGenerator, Callable, Coroutine, Iterable
from typing import Any, TypeVar

_T = TypeVar("_T")


class TestRunner(metaclass=ABCMeta):
    """
    Encapsulates a running event loop. Every call made through this object will use
    the same event loop.
    """

    def __enter__(self) -> TestRunner:
        return self

    @abstractmethod
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: types.TracebackType | None,
    ) -> bool | None: ...

    @abstractmethod
    def run_asyncgen_fixture(
        self,
        fixture_func: Callable[..., AsyncGenerator[_T, Any]],
        kwargs: dict[str, Any],
    ) -> Iterable[_T]:
        """
        Run an async generator fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: an iterator yielding the value yielded from the async generator
        """

    @abstractmethod
    def run_fixture(
        self,
        fixture_func: Callable[..., Coroutine[Any, Any, _T]],
        kwargs: dict[str, Any],
    ) -> _T:
        """
        Run an async fixture.

        :param fixture_func: the fixture function
        :param kwargs: keyword arguments to call the fixture function with
        :return: the return value of the fixture function
        """

    @abstractmethod
    def run_test(
        self, test_func: Callable[..., Coroutine[Any, Any, Any]], kwargs: dict[str, Any]
    ) -> None:
        """
        Run an async test function.

        :param test_func: the test function
        :param kwargs: keyword arguments to call the test function with
        """
@@ -1,527 +0,0 @@
from __future__ import annotations

import sys
from collections.abc import Awaitable, Callable, Generator
from concurrent.futures import Future
from contextlib import (
    AbstractAsyncContextManager,
    AbstractContextManager,
    contextmanager,
)
from dataclasses import dataclass, field
from inspect import isawaitable
from threading import Lock, Thread, get_ident
from types import TracebackType
from typing import (
    Any,
    Generic,
    TypeVar,
    cast,
    overload,
)

from ._core import _eventloop
from ._core._eventloop import get_async_backend, get_cancelled_exc_class, threadlocals
from ._core._synchronization import Event
from ._core._tasks import CancelScope, create_task_group
from .abc import AsyncBackend
from .abc._tasks import TaskStatus

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
T_co = TypeVar("T_co", covariant=True)
PosArgsT = TypeVarTuple("PosArgsT")


def run(
    func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a coroutine function from a worker thread.

    :param func: a coroutine function
    :param args: positional arguments for the callable
    :return: the return value of the coroutine function

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_async_from_thread(func, args, token=token)


def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
) -> T_Retval:
    """
    Call a function in the event loop thread from a worker thread.

    :param func: a callable
    :param args: positional arguments for the callable
    :return: the return value of the callable

    """
    try:
        async_backend = threadlocals.current_async_backend
        token = threadlocals.current_token
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    return async_backend.run_sync_from_thread(func, args, token=token)
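
# Usage sketch: a minimal example of run()/run_sync() above, assuming the
# illustrative helper names below; they are not defined in this module. Both
# helpers only work from a worker thread that AnyIO itself spawned, typically
# via anyio.to_thread.run_sync().
#
#     import anyio
#     from anyio import from_thread, to_thread
#
#     async def fetch_value() -> int:
#         await anyio.sleep(0.1)
#         return 42
#
#     def blocking_work() -> int:
#         # Hops from the worker thread back into the event loop
#         return from_thread.run(fetch_value)
#
#     async def main() -> None:
#         assert await to_thread.run_sync(blocking_work) == 42
#
#     anyio.run(main)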


class _BlockingAsyncContextManager(Generic[T_co], AbstractContextManager):
    _enter_future: Future[T_co]
    _exit_future: Future[bool | None]
    _exit_event: Event
    _exit_exc_info: tuple[
        type[BaseException] | None, BaseException | None, TracebackType | None
    ] = (None, None, None)

    def __init__(
        self, async_cm: AbstractAsyncContextManager[T_co], portal: BlockingPortal
    ):
        self._async_cm = async_cm
        self._portal = portal

    async def run_async_cm(self) -> bool | None:
        try:
            self._exit_event = Event()
            value = await self._async_cm.__aenter__()
        except BaseException as exc:
            self._enter_future.set_exception(exc)
            raise
        else:
            self._enter_future.set_result(value)

        try:
            # Wait for the sync context manager to exit.
            # This next statement can raise `get_cancelled_exc_class()` if
            # something went wrong in a task group in this async context
            # manager.
            await self._exit_event.wait()
        finally:
            # In case of cancellation, it could be that we end up here before
            # `_BlockingAsyncContextManager.__exit__` is called, and an
            # `_exit_exc_info` has been set.
            result = await self._async_cm.__aexit__(*self._exit_exc_info)
            return result

    def __enter__(self) -> T_co:
        self._enter_future = Future()
        self._exit_future = self._portal.start_task_soon(self.run_async_cm)
        return self._enter_future.result()

    def __exit__(
        self,
        __exc_type: type[BaseException] | None,
        __exc_value: BaseException | None,
        __traceback: TracebackType | None,
    ) -> bool | None:
        self._exit_exc_info = __exc_type, __exc_value, __traceback
        self._portal.call(self._exit_event.set)
        return self._exit_future.result()


class _BlockingPortalTaskStatus(TaskStatus):
    def __init__(self, future: Future):
        self._future = future

    def started(self, value: object = None) -> None:
        self._future.set_result(value)


class BlockingPortal:
    """An object that lets external threads run code in an asynchronous event loop."""

    def __new__(cls) -> BlockingPortal:
        return get_async_backend().create_blocking_portal()

    def __init__(self) -> None:
        self._event_loop_thread_id: int | None = get_ident()
        self._stop_event = Event()
        self._task_group = create_task_group()
        self._cancelled_exc_class = get_cancelled_exc_class()

    async def __aenter__(self) -> BlockingPortal:
        await self._task_group.__aenter__()
        return self

    async def __aexit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> bool | None:
        await self.stop()
        return await self._task_group.__aexit__(exc_type, exc_val, exc_tb)

    def _check_running(self) -> None:
        if self._event_loop_thread_id is None:
            raise RuntimeError("This portal is not running")
        if self._event_loop_thread_id == get_ident():
            raise RuntimeError(
                "This method cannot be called from the event loop thread"
            )

    async def sleep_until_stopped(self) -> None:
        """Sleep until :meth:`stop` is called."""
        await self._stop_event.wait()

    async def stop(self, cancel_remaining: bool = False) -> None:
        """
        Signal the portal to shut down.

        This marks the portal as no longer accepting new calls and exits from
        :meth:`sleep_until_stopped`.

        :param cancel_remaining: ``True`` to cancel all the remaining tasks, ``False``
            to let them finish before returning

        """
        self._event_loop_thread_id = None
        self._stop_event.set()
        if cancel_remaining:
            self._task_group.cancel_scope.cancel()

    async def _call_func(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        future: Future[T_Retval],
    ) -> None:
        def callback(f: Future[T_Retval]) -> None:
            if f.cancelled() and self._event_loop_thread_id not in (
                None,
                get_ident(),
            ):
                self.call(scope.cancel)

        try:
            retval_or_awaitable = func(*args, **kwargs)
            if isawaitable(retval_or_awaitable):
                with CancelScope() as scope:
                    if future.cancelled():
                        scope.cancel()
                    else:
                        future.add_done_callback(callback)

                    retval = await retval_or_awaitable
            else:
                retval = retval_or_awaitable
        except self._cancelled_exc_class:
            future.cancel()
            future.set_running_or_notify_cancel()
        except BaseException as exc:
            if not future.cancelled():
                future.set_exception(exc)

            # Let base exceptions fall through
            if not isinstance(exc, Exception):
                raise
        else:
            if not future.cancelled():
                future.set_result(retval)
        finally:
            scope = None  # type: ignore[assignment]

    def _spawn_task_from_thread(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        args: tuple[Unpack[PosArgsT]],
        kwargs: dict[str, Any],
        name: object,
        future: Future[T_Retval],
    ) -> None:
        """
        Spawn a new task using the given callable.

        Implementors must ensure that the future is resolved when the task finishes.

        :param func: a callable
        :param args: positional arguments to be passed to the callable
        :param kwargs: keyword arguments to be passed to the callable
        :param name: name of the task (will be coerced to a string if not ``None``)
        :param future: a future that will resolve to the return value of the callable,
            or the exception raised during its execution

        """
        raise NotImplementedError

    @overload
    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
    ) -> T_Retval: ...

    @overload
    def call(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval: ...

    def call(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
    ) -> T_Retval:
        """
        Call the given function in the event loop thread.

        If the callable returns a coroutine object, it is awaited on.

        :param func: any callable
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread

        """
        return cast(T_Retval, self.start_task_soon(func, *args).result())

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval]],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    @overload
    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]: ...

    def start_task_soon(
        self,
        func: Callable[[Unpack[PosArgsT]], Awaitable[T_Retval] | T_Retval],
        *args: Unpack[PosArgsT],
        name: object = None,
    ) -> Future[T_Retval]:
        """
        Start a task in the portal's task group.

        The task will be run inside a cancel scope which can be cancelled by cancelling
        the returned future.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a future that resolves with the return value of the callable if the
            task completes successfully, or with the exception raised in the task
        :raises RuntimeError: if the portal is not running or if this method is called
            from within the event loop thread
        :rtype: concurrent.futures.Future[T_Retval]

        .. versionadded:: 3.0

        """
        self._check_running()
        f: Future[T_Retval] = Future()
        self._spawn_task_from_thread(func, args, {}, name, f)
        return f

    def start_task(
        self,
        func: Callable[..., Awaitable[T_Retval]],
        *args: object,
        name: object = None,
    ) -> tuple[Future[T_Retval], Any]:
        """
        Start a task in the portal's task group and wait until it signals for readiness.

        This method works the same way as :meth:`.abc.TaskGroup.start`.

        :param func: the target function
        :param args: positional arguments passed to ``func``
        :param name: name of the task (will be coerced to a string if not ``None``)
        :return: a tuple of (future, task_status_value) where the ``task_status_value``
            is the value passed to ``task_status.started()`` from within the target
            function
        :rtype: tuple[concurrent.futures.Future[T_Retval], Any]

        .. versionadded:: 3.0

        """

        def task_done(future: Future[T_Retval]) -> None:
            if not task_status_future.done():
                if future.cancelled():
                    task_status_future.cancel()
                elif future.exception():
                    task_status_future.set_exception(future.exception())
                else:
                    exc = RuntimeError(
                        "Task exited without calling task_status.started()"
                    )
                    task_status_future.set_exception(exc)

        self._check_running()
        task_status_future: Future = Future()
        task_status = _BlockingPortalTaskStatus(task_status_future)
        f: Future = Future()
        f.add_done_callback(task_done)
        self._spawn_task_from_thread(func, args, {"task_status": task_status}, name, f)
        return f, task_status_future.result()

    def wrap_async_context_manager(
        self, cm: AbstractAsyncContextManager[T_co]
    ) -> AbstractContextManager[T_co]:
        """
        Wrap an async context manager as a synchronous context manager via this portal.

        Spawns a task that will call both ``__aenter__()`` and ``__aexit__()``, stopping
        in the middle until the synchronous context manager exits.

        :param cm: an asynchronous context manager
        :return: a synchronous context manager

        .. versionadded:: 2.1

        """
        return _BlockingAsyncContextManager(cm, self)
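
# Usage sketch: holding an async context manager open from synchronous code via
# a portal; `open_connection_cm` is a placeholder for any async context manager
# and is not defined in this module.
#
#     from anyio.from_thread import start_blocking_portal
#
#     with start_blocking_portal() as portal:
#         with portal.wrap_async_context_manager(open_connection_cm) as conn:
#             ...  # use `conn` from plain synchronous code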


@dataclass
class BlockingPortalProvider:
    """
    A manager for a blocking portal. Used as a context manager. The first thread to
    enter this context manager causes a blocking portal to be started with the
    specified parameters, and the last thread to exit causes the portal to be shut
    down. Thus, there will be exactly one blocking portal running in this context as
    long as at least one thread has entered this context manager.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options

    .. versionadded:: 4.4
    """

    backend: str = "asyncio"
    backend_options: dict[str, Any] | None = None
    _lock: Lock = field(init=False, default_factory=Lock)
    _leases: int = field(init=False, default=0)
    _portal: BlockingPortal = field(init=False)
    _portal_cm: AbstractContextManager[BlockingPortal] | None = field(
        init=False, default=None
    )

    def __enter__(self) -> BlockingPortal:
        with self._lock:
            if self._portal_cm is None:
                self._portal_cm = start_blocking_portal(
                    self.backend, self.backend_options
                )
                self._portal = self._portal_cm.__enter__()

            self._leases += 1
            return self._portal

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        portal_cm: AbstractContextManager[BlockingPortal] | None = None
        with self._lock:
            assert self._portal_cm
            assert self._leases > 0
            self._leases -= 1
            if not self._leases:
                portal_cm = self._portal_cm
                self._portal_cm = None
                del self._portal

        if portal_cm:
            portal_cm.__exit__(None, None, None)
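
# Usage sketch: sharing one portal among many threads; `some_coro_func` is a
# placeholder coroutine function, not defined here.
#
#     provider = BlockingPortalProvider(backend="asyncio")
#
#     def sync_worker() -> None:
#         # The first thread entering starts the portal; the last one out stops it
#         with provider as portal:
#             portal.call(some_coro_func)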


@contextmanager
def start_blocking_portal(
    backend: str = "asyncio", backend_options: dict[str, Any] | None = None
) -> Generator[BlockingPortal, Any, None]:
    """
    Start a new event loop in a new thread and run a blocking portal in its main task.

    The parameters are the same as for :func:`~anyio.run`.

    :param backend: name of the backend
    :param backend_options: backend options
    :return: a context manager that yields a blocking portal

    .. versionchanged:: 3.0
        Usage as a context manager is now required.

    """

    async def run_portal() -> None:
        async with BlockingPortal() as portal_:
            future.set_result(portal_)
            await portal_.sleep_until_stopped()

    def run_blocking_portal() -> None:
        if future.set_running_or_notify_cancel():
            try:
                _eventloop.run(
                    run_portal, backend=backend, backend_options=backend_options
                )
            except BaseException as exc:
                if not future.done():
                    future.set_exception(exc)

    future: Future[BlockingPortal] = Future()
    thread = Thread(target=run_blocking_portal, daemon=True)
    thread.start()
    try:
        cancel_remaining_tasks = False
        portal = future.result()
        try:
            yield portal
        except BaseException:
            cancel_remaining_tasks = True
            raise
        finally:
            try:
                portal.call(portal.stop, cancel_remaining_tasks)
            except RuntimeError:
                pass
    finally:
        thread.join()
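
# Usage sketch: driving an event loop in a background thread from sync code.
#
#     import anyio
#
#     with start_blocking_portal(backend="asyncio") as portal:
#         future = portal.start_task_soon(anyio.sleep, 1)
#         portal.call(print, "runs in the event loop thread")
#         future.result()  # blocks until the task finishes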


def check_cancelled() -> None:
    """
    Check whether the cancel scope of the host task running the current worker thread
    has been cancelled.

    If the host task's current cancel scope has indeed been cancelled, the
    backend-specific cancellation exception will be raised.

    :raises RuntimeError: if the current thread was not spawned by
        :func:`.to_thread.run_sync`

    """
    try:
        async_backend: AsyncBackend = threadlocals.current_async_backend
    except AttributeError:
        raise RuntimeError(
            "This function can only be run from an AnyIO worker thread"
        ) from None

    async_backend.check_cancelled()
@@ -1,161 +0,0 @@
from __future__ import annotations

import enum
from dataclasses import dataclass
from typing import Any, Generic, Literal, TypeVar, overload
from weakref import WeakKeyDictionary

from ._core._eventloop import get_async_backend

T = TypeVar("T")
D = TypeVar("D")


async def checkpoint() -> None:
    """
    Check for cancellation and allow the scheduler to switch to another task.

    Equivalent to (but more efficient than)::

        await checkpoint_if_cancelled()
        await cancel_shielded_checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint()


async def checkpoint_if_cancelled() -> None:
    """
    Enter a checkpoint if the enclosing cancel scope has been cancelled.

    This does not allow the scheduler to switch to a different task.

    .. versionadded:: 3.0

    """
    await get_async_backend().checkpoint_if_cancelled()


async def cancel_shielded_checkpoint() -> None:
    """
    Allow the scheduler to switch to another task but without checking for cancellation.

    Equivalent to (but potentially more efficient than)::

        with CancelScope(shield=True):
            await checkpoint()


    .. versionadded:: 3.0

    """
    await get_async_backend().cancel_shielded_checkpoint()


def current_token() -> object:
    """
    Return a backend specific token object that can be used to get back to the event
    loop.

    """
    return get_async_backend().current_token()


_run_vars: WeakKeyDictionary[Any, dict[str, Any]] = WeakKeyDictionary()
_token_wrappers: dict[Any, _TokenWrapper] = {}


@dataclass(frozen=True)
class _TokenWrapper:
    __slots__ = "_token", "__weakref__"
    _token: object


class _NoValueSet(enum.Enum):
    NO_VALUE_SET = enum.auto()


class RunvarToken(Generic[T]):
    __slots__ = "_var", "_value", "_redeemed"

    def __init__(self, var: RunVar[T], value: T | Literal[_NoValueSet.NO_VALUE_SET]):
        self._var = var
        self._value: T | Literal[_NoValueSet.NO_VALUE_SET] = value
        self._redeemed = False


class RunVar(Generic[T]):
    """
    Like a :class:`~contextvars.ContextVar`, except scoped to the running event loop.
    """

    __slots__ = "_name", "_default"

    NO_VALUE_SET: Literal[_NoValueSet.NO_VALUE_SET] = _NoValueSet.NO_VALUE_SET

    _token_wrappers: set[_TokenWrapper] = set()

    def __init__(
        self, name: str, default: T | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ):
        self._name = name
        self._default = default

    @property
    def _current_vars(self) -> dict[str, T]:
        token = current_token()
        try:
            return _run_vars[token]
        except KeyError:
            run_vars = _run_vars[token] = {}
            return run_vars

    @overload
    def get(self, default: D) -> T | D: ...

    @overload
    def get(self) -> T: ...

    def get(
        self, default: D | Literal[_NoValueSet.NO_VALUE_SET] = NO_VALUE_SET
    ) -> T | D:
        try:
            return self._current_vars[self._name]
        except KeyError:
            if default is not RunVar.NO_VALUE_SET:
                return default
            elif self._default is not RunVar.NO_VALUE_SET:
                return self._default

        raise LookupError(
            f'Run variable "{self._name}" has no value and no default set'
        )

    def set(self, value: T) -> RunvarToken[T]:
        current_vars = self._current_vars
        token = RunvarToken(self, current_vars.get(self._name, RunVar.NO_VALUE_SET))
        current_vars[self._name] = value
        return token

    def reset(self, token: RunvarToken[T]) -> None:
        if token._var is not self:
            raise ValueError("This token does not belong to this RunVar")

        if token._redeemed:
            raise ValueError("This token has already been used")

        if token._value is _NoValueSet.NO_VALUE_SET:
            try:
                del self._current_vars[self._name]
            except KeyError:
                pass
        else:
            self._current_vars[self._name] = token._value

        token._redeemed = True

    def __repr__(self) -> str:
        return f"<RunVar name={self._name!r}>"
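
# Usage sketch: an event-loop-scoped variable; the name "db_pool" is purely
# illustrative.
#
#     _db_pool: RunVar[str] = RunVar("db_pool")
#
#     async def main() -> None:
#         token = _db_pool.set("pool-instance")
#         assert _db_pool.get() == "pool-instance"
#         _db_pool.reset(token)  # restores the previous (unset) state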
@@ -1,191 +0,0 @@
from __future__ import annotations

import sys
from collections.abc import Generator, Iterator
from contextlib import ExitStack, contextmanager
from inspect import isasyncgenfunction, iscoroutinefunction, ismethod
from typing import Any, cast

import pytest
import sniffio
from _pytest.fixtures import SubRequest
from _pytest.outcomes import Exit

from ._core._eventloop import get_all_backends, get_async_backend
from ._core._exceptions import iterate_exceptions
from .abc import TestRunner

if sys.version_info < (3, 11):
    from exceptiongroup import ExceptionGroup

_current_runner: TestRunner | None = None
_runner_stack: ExitStack | None = None
_runner_leases = 0


def extract_backend_and_options(backend: object) -> tuple[str, dict[str, Any]]:
    if isinstance(backend, str):
        return backend, {}
    elif isinstance(backend, tuple) and len(backend) == 2:
        if isinstance(backend[0], str) and isinstance(backend[1], dict):
            return cast(tuple[str, dict[str, Any]], backend)

    raise TypeError("anyio_backend must be either a string or tuple of (string, dict)")


@contextmanager
def get_runner(
    backend_name: str, backend_options: dict[str, Any]
) -> Iterator[TestRunner]:
    global _current_runner, _runner_leases, _runner_stack
    if _current_runner is None:
        asynclib = get_async_backend(backend_name)
        _runner_stack = ExitStack()
        if sniffio.current_async_library_cvar.get(None) is None:
            # Since we're in control of the event loop, we can cache the name of the
            # async library
            token = sniffio.current_async_library_cvar.set(backend_name)
            _runner_stack.callback(sniffio.current_async_library_cvar.reset, token)

        backend_options = backend_options or {}
        _current_runner = _runner_stack.enter_context(
            asynclib.create_test_runner(backend_options)
        )

    _runner_leases += 1
    try:
        yield _current_runner
    finally:
        _runner_leases -= 1
        if not _runner_leases:
            assert _runner_stack is not None
            _runner_stack.close()
            _runner_stack = _current_runner = None


def pytest_configure(config: Any) -> None:
    config.addinivalue_line(
        "markers",
        "anyio: mark the (coroutine function) test to be run "
        "asynchronously via anyio.",
    )


@pytest.hookimpl(hookwrapper=True)
def pytest_fixture_setup(fixturedef: Any, request: Any) -> Generator[Any]:
    def wrapper(
        *args: Any, anyio_backend: Any, request: SubRequest, **kwargs: Any
    ) -> Any:
        # Rebind any fixture methods to the request instance
        if (
            request.instance
            and ismethod(func)
            and type(func.__self__) is type(request.instance)
        ):
            local_func = func.__func__.__get__(request.instance)
        else:
            local_func = func

        backend_name, backend_options = extract_backend_and_options(anyio_backend)
        if has_backend_arg:
            kwargs["anyio_backend"] = anyio_backend

        if has_request_arg:
            kwargs["request"] = request

        with get_runner(backend_name, backend_options) as runner:
            if isasyncgenfunction(local_func):
                yield from runner.run_asyncgen_fixture(local_func, kwargs)
            else:
                yield runner.run_fixture(local_func, kwargs)

    # Only apply this to coroutine functions and async generator functions in requests
    # that involve the anyio_backend fixture
    func = fixturedef.func
    if isasyncgenfunction(func) or iscoroutinefunction(func):
        if "anyio_backend" in request.fixturenames:
            fixturedef.func = wrapper
            original_argname = fixturedef.argnames

            if not (has_backend_arg := "anyio_backend" in fixturedef.argnames):
                fixturedef.argnames += ("anyio_backend",)

            if not (has_request_arg := "request" in fixturedef.argnames):
                fixturedef.argnames += ("request",)

            try:
                return (yield)
            finally:
                fixturedef.func = func
                fixturedef.argnames = original_argname

    return (yield)


@pytest.hookimpl(tryfirst=True)
def pytest_pycollect_makeitem(collector: Any, name: Any, obj: Any) -> None:
    if collector.istestfunction(obj, name):
        inner_func = obj.hypothesis.inner_test if hasattr(obj, "hypothesis") else obj
        if iscoroutinefunction(inner_func):
            marker = collector.get_closest_marker("anyio")
            own_markers = getattr(obj, "pytestmark", ())
            if marker or any(marker.name == "anyio" for marker in own_markers):
                pytest.mark.usefixtures("anyio_backend")(obj)


@pytest.hookimpl(tryfirst=True)
def pytest_pyfunc_call(pyfuncitem: Any) -> bool | None:
    def run_with_hypothesis(**kwargs: Any) -> None:
        with get_runner(backend_name, backend_options) as runner:
            runner.run_test(original_func, kwargs)

    backend = pyfuncitem.funcargs.get("anyio_backend")
    if backend:
        backend_name, backend_options = extract_backend_and_options(backend)

        if hasattr(pyfuncitem.obj, "hypothesis"):
            # Wrap the inner test function unless it's already wrapped
            original_func = pyfuncitem.obj.hypothesis.inner_test
            if original_func.__qualname__ != run_with_hypothesis.__qualname__:
                if iscoroutinefunction(original_func):
                    pyfuncitem.obj.hypothesis.inner_test = run_with_hypothesis

            return None

        if iscoroutinefunction(pyfuncitem.obj):
            funcargs = pyfuncitem.funcargs
            testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
            with get_runner(backend_name, backend_options) as runner:
                try:
                    runner.run_test(pyfuncitem.obj, testargs)
                except ExceptionGroup as excgrp:
                    for exc in iterate_exceptions(excgrp):
                        if isinstance(exc, (Exit, KeyboardInterrupt, SystemExit)):
                            raise exc from excgrp

                    raise

            return True

    return None


@pytest.fixture(scope="module", params=get_all_backends())
def anyio_backend(request: Any) -> Any:
    return request.param


@pytest.fixture
def anyio_backend_name(anyio_backend: Any) -> str:
    if isinstance(anyio_backend, str):
        return anyio_backend
    else:
        return anyio_backend[0]


@pytest.fixture
def anyio_backend_options(anyio_backend: Any) -> dict[str, Any]:
    if isinstance(anyio_backend, str):
        return {}
    else:
        return anyio_backend[1]
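
# Usage sketch: a minimal test module exercising this plugin; the fixture and
# test names below are illustrative, not part of the plugin.
#
#     import pytest
#
#     @pytest.fixture
#     async def connection(anyio_backend):
#         yield "conn"  # async gen fixtures go through run_asyncgen_fixture()
#
#     @pytest.mark.anyio
#     async def test_connection(connection):
#         assert connection == "conn"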
@@ -1,119 +0,0 @@
from __future__ import annotations

from collections.abc import Callable, Mapping
from dataclasses import dataclass, field
from typing import Any

from .. import ClosedResourceError, DelimiterNotFound, EndOfStream, IncompleteRead
from ..abc import AnyByteReceiveStream, ByteReceiveStream


@dataclass(eq=False)
class BufferedByteReceiveStream(ByteReceiveStream):
    """
    Wraps any bytes-based receive stream and uses a buffer to provide sophisticated
    receiving capabilities in the form of a byte stream.
    """

    receive_stream: AnyByteReceiveStream
    _buffer: bytearray = field(init=False, default_factory=bytearray)
    _closed: bool = field(init=False, default=False)

    async def aclose(self) -> None:
        await self.receive_stream.aclose()
        self._closed = True

    @property
    def buffer(self) -> bytes:
        """The bytes currently in the buffer."""
        return bytes(self._buffer)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.receive_stream.extra_attributes

    async def receive(self, max_bytes: int = 65536) -> bytes:
        if self._closed:
            raise ClosedResourceError

        if self._buffer:
            chunk = bytes(self._buffer[:max_bytes])
            del self._buffer[:max_bytes]
            return chunk
        elif isinstance(self.receive_stream, ByteReceiveStream):
            return await self.receive_stream.receive(max_bytes)
        else:
            # With a bytes-oriented object stream, we need to handle any surplus bytes
            # we get from the receive() call
            chunk = await self.receive_stream.receive()
            if len(chunk) > max_bytes:
                # Save the surplus bytes in the buffer
                self._buffer.extend(chunk[max_bytes:])
                return chunk[:max_bytes]
            else:
                return chunk

    async def receive_exactly(self, nbytes: int) -> bytes:
        """
        Read exactly the given amount of bytes from the stream.

        :param nbytes: the number of bytes to read
        :return: the bytes read
        :raises ~anyio.IncompleteRead: if the stream was closed before the requested
            amount of bytes could be read from the stream

        """
        while True:
            remaining = nbytes - len(self._buffer)
            if remaining <= 0:
                retval = self._buffer[:nbytes]
                del self._buffer[:nbytes]
                return bytes(retval)

            try:
                if isinstance(self.receive_stream, ByteReceiveStream):
                    chunk = await self.receive_stream.receive(remaining)
                else:
                    chunk = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            self._buffer.extend(chunk)

    async def receive_until(self, delimiter: bytes, max_bytes: int) -> bytes:
        """
        Read from the stream until the delimiter is found or max_bytes have been read.

        :param delimiter: the marker to look for in the stream
        :param max_bytes: maximum number of bytes that will be read before raising
            :exc:`~anyio.DelimiterNotFound`
        :return: the bytes read (not including the delimiter)
        :raises ~anyio.IncompleteRead: if the stream was closed before the delimiter
            was found
        :raises ~anyio.DelimiterNotFound: if the delimiter is not found within the
            bytes read up to the maximum allowed

        """
        delimiter_size = len(delimiter)
        offset = 0
        while True:
            # Check if the delimiter can be found in the current buffer
            index = self._buffer.find(delimiter, offset)
            if index >= 0:
                found = self._buffer[:index]
                del self._buffer[: index + len(delimiter) :]
                return bytes(found)

            # Check if the buffer is already at or over the limit
            if len(self._buffer) >= max_bytes:
                raise DelimiterNotFound(max_bytes)

            # Read more data into the buffer from the socket
            try:
                data = await self.receive_stream.receive()
            except EndOfStream as exc:
                raise IncompleteRead from exc

            # Move the offset forward and add the new data to the buffer
            offset = max(len(self._buffer) - delimiter_size + 1, 0)
            self._buffer.extend(data)
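
# Usage sketch: framing a byte stream with the helpers above; `raw_stream` is an
# assumed, already-connected bytes-based receive stream.
#
#     buffered = BufferedByteReceiveStream(raw_stream)
#     header = await buffered.receive_exactly(4)                     # fixed-size header
#     line = await buffered.receive_until(b"\r\n", max_bytes=1024)   # delimited line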
@@ -1,148 +0,0 @@
from __future__ import annotations

from collections.abc import Callable, Mapping
from io import SEEK_SET, UnsupportedOperation
from os import PathLike
from pathlib import Path
from typing import Any, BinaryIO, cast

from .. import (
    BrokenResourceError,
    ClosedResourceError,
    EndOfStream,
    TypedAttributeSet,
    to_thread,
    typed_attribute,
)
from ..abc import ByteReceiveStream, ByteSendStream


class FileStreamAttribute(TypedAttributeSet):
    #: the open file descriptor
    file: BinaryIO = typed_attribute()
    #: the path of the file on the file system, if available (file must be a real file)
    path: Path = typed_attribute()
    #: the file number, if available (file must be a real file or a TTY)
    fileno: int = typed_attribute()


class _BaseFileStream:
    def __init__(self, file: BinaryIO):
        self._file = file

    async def aclose(self) -> None:
        await to_thread.run_sync(self._file.close)

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        attributes: dict[Any, Callable[[], Any]] = {
            FileStreamAttribute.file: lambda: self._file,
        }

        if hasattr(self._file, "name"):
            attributes[FileStreamAttribute.path] = lambda: Path(self._file.name)

        try:
            self._file.fileno()
        except UnsupportedOperation:
            pass
        else:
            attributes[FileStreamAttribute.fileno] = lambda: self._file.fileno()

        return attributes


class FileReadStream(_BaseFileStream, ByteReceiveStream):
    """
    A byte stream that reads from a file in the file system.

    :param file: a file that has been opened for reading in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(cls, path: str | PathLike[str]) -> FileReadStream:
        """
        Create a file read stream by opening the given file.

        :param path: path of the file to read from

        """
        file = await to_thread.run_sync(Path(path).open, "rb")
        return cls(cast(BinaryIO, file))

    async def receive(self, max_bytes: int = 65536) -> bytes:
        try:
            data = await to_thread.run_sync(self._file.read, max_bytes)
        except ValueError:
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc

        if data:
            return data
        else:
            raise EndOfStream

    async def seek(self, position: int, whence: int = SEEK_SET) -> int:
        """
        Seek the file to the given position.

        .. seealso:: :meth:`io.IOBase.seek`

        .. note:: Not all file descriptors are seekable.

        :param position: position to seek the file to
        :param whence: controls how ``position`` is interpreted
        :return: the new absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.seek, position, whence)

    async def tell(self) -> int:
        """
        Return the current stream position.

        .. note:: Not all file descriptors are seekable.

        :return: the current absolute position
        :raises OSError: if the file is not seekable

        """
        return await to_thread.run_sync(self._file.tell)


class FileWriteStream(_BaseFileStream, ByteSendStream):
    """
    A byte stream that writes to a file in the file system.

    :param file: a file that has been opened for writing in binary mode

    .. versionadded:: 3.0
    """

    @classmethod
    async def from_path(
        cls, path: str | PathLike[str], append: bool = False
    ) -> FileWriteStream:
        """
        Create a file write stream by opening the given file for writing.

        :param path: path of the file to write to
        :param append: if ``True``, open the file for appending; if ``False``, any
            existing file at the given path will be truncated

        """
        mode = "ab" if append else "wb"
        file = await to_thread.run_sync(Path(path).open, mode)
        return cls(cast(BinaryIO, file))

    async def send(self, item: bytes) -> None:
        try:
            await to_thread.run_sync(self._file.write, item)
        except ValueError:
            raise ClosedResourceError from None
        except OSError as exc:
            raise BrokenResourceError from exc
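
# Usage sketch: copying a file through the two stream classes above; the file
# paths are placeholders.
#
#     from anyio import EndOfStream
#
#     async def copy_file() -> None:
#         async with await FileReadStream.from_path("input.bin") as source:
#             async with await FileWriteStream.from_path("output.bin") as dest:
#                 try:
#                     while True:
#                         await dest.send(await source.receive())
#                 except EndOfStream:
#                     pass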
@@ -1,317 +0,0 @@
from __future__ import annotations

import warnings
from collections import OrderedDict, deque
from dataclasses import dataclass, field
from types import TracebackType
from typing import Generic, NamedTuple, TypeVar

from .. import (
    BrokenResourceError,
    ClosedResourceError,
    EndOfStream,
    WouldBlock,
)
from .._core._testing import TaskInfo, get_current_task
from ..abc import Event, ObjectReceiveStream, ObjectSendStream
from ..lowlevel import checkpoint

T_Item = TypeVar("T_Item")
T_co = TypeVar("T_co", covariant=True)
T_contra = TypeVar("T_contra", contravariant=True)


class MemoryObjectStreamStatistics(NamedTuple):
    current_buffer_used: int  #: number of items stored in the buffer
    #: maximum number of items that can be stored on this stream (or :data:`math.inf`)
    max_buffer_size: float
    open_send_streams: int  #: number of unclosed clones of the send stream
    open_receive_streams: int  #: number of unclosed clones of the receive stream
    #: number of tasks blocked on :meth:`MemoryObjectSendStream.send`
    tasks_waiting_send: int
    #: number of tasks blocked on :meth:`MemoryObjectReceiveStream.receive`
    tasks_waiting_receive: int


@dataclass(eq=False)
class MemoryObjectItemReceiver(Generic[T_Item]):
    task_info: TaskInfo = field(init=False, default_factory=get_current_task)
    item: T_Item = field(init=False)

    def __repr__(self) -> str:
        # When item is not defined, we get the following error with the default
        # __repr__:
        # AttributeError: 'MemoryObjectItemReceiver' object has no attribute 'item'
        item = getattr(self, "item", None)
        return f"{self.__class__.__name__}(task_info={self.task_info}, item={item!r})"


@dataclass(eq=False)
class MemoryObjectStreamState(Generic[T_Item]):
    max_buffer_size: float = field()
    buffer: deque[T_Item] = field(init=False, default_factory=deque)
    open_send_channels: int = field(init=False, default=0)
    open_receive_channels: int = field(init=False, default=0)
    waiting_receivers: OrderedDict[Event, MemoryObjectItemReceiver[T_Item]] = field(
        init=False, default_factory=OrderedDict
    )
    waiting_senders: OrderedDict[Event, T_Item] = field(
        init=False, default_factory=OrderedDict
    )

    def statistics(self) -> MemoryObjectStreamStatistics:
        return MemoryObjectStreamStatistics(
            len(self.buffer),
            self.max_buffer_size,
            self.open_send_channels,
            self.open_receive_channels,
            len(self.waiting_senders),
            len(self.waiting_receivers),
        )


@dataclass(eq=False)
class MemoryObjectReceiveStream(Generic[T_co], ObjectReceiveStream[T_co]):
    _state: MemoryObjectStreamState[T_co]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_receive_channels += 1

    def receive_nowait(self) -> T_co:
        """
        Receive the next item if it can be done without waiting.

        :return: the received item
        :raises ~anyio.ClosedResourceError: if this receive stream has been closed
        :raises ~anyio.EndOfStream: if the buffer is empty and this stream has been
            closed from the sending end
        :raises ~anyio.WouldBlock: if there are no items in the buffer and no tasks
            waiting to send

        """
        if self._closed:
            raise ClosedResourceError

        if self._state.waiting_senders:
            # Get the item from the next sender
            send_event, item = self._state.waiting_senders.popitem(last=False)
            self._state.buffer.append(item)
            send_event.set()

        if self._state.buffer:
            return self._state.buffer.popleft()
        elif not self._state.open_send_channels:
            raise EndOfStream

        raise WouldBlock

    async def receive(self) -> T_co:
        await checkpoint()
        try:
            return self.receive_nowait()
        except WouldBlock:
            # Add ourselves in the queue
            receive_event = Event()
            receiver = MemoryObjectItemReceiver[T_co]()
            self._state.waiting_receivers[receive_event] = receiver

            try:
                await receive_event.wait()
            finally:
                self._state.waiting_receivers.pop(receive_event, None)

            try:
                return receiver.item
            except AttributeError:
                raise EndOfStream

    def clone(self) -> MemoryObjectReceiveStream[T_co]:
        """
        Create a clone of this receive stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the receiving end of the memory stream be considered closed by the sending ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectReceiveStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_receive_channels -= 1
            if self._state.open_receive_channels == 0:
                send_events = list(self._state.waiting_senders.keys())
                for event in send_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectReceiveStream[T_co]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )


@dataclass(eq=False)
class MemoryObjectSendStream(Generic[T_contra], ObjectSendStream[T_contra]):
    _state: MemoryObjectStreamState[T_contra]
    _closed: bool = field(init=False, default=False)

    def __post_init__(self) -> None:
        self._state.open_send_channels += 1

    def send_nowait(self, item: T_contra) -> None:
        """
        Send an item immediately if it can be done without waiting.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end
        :raises ~anyio.WouldBlock: if the buffer is full and there are no tasks waiting
            to receive

        """
        if self._closed:
            raise ClosedResourceError
        if not self._state.open_receive_channels:
            raise BrokenResourceError

        while self._state.waiting_receivers:
            receive_event, receiver = self._state.waiting_receivers.popitem(last=False)
            if not receiver.task_info.has_pending_cancellation():
                receiver.item = item
                receive_event.set()
                return

        if len(self._state.buffer) < self._state.max_buffer_size:
            self._state.buffer.append(item)
        else:
            raise WouldBlock

    async def send(self, item: T_contra) -> None:
        """
        Send an item to the stream.

        If the buffer is full, this method blocks until there is again room in the
        buffer or the item can be sent directly to a receiver.

        :param item: the item to send
        :raises ~anyio.ClosedResourceError: if this send stream has been closed
        :raises ~anyio.BrokenResourceError: if the stream has been closed from the
            receiving end

        """
        await checkpoint()
        try:
            self.send_nowait(item)
        except WouldBlock:
            # Wait until there's someone on the receiving end
            send_event = Event()
            self._state.waiting_senders[send_event] = item
            try:
                await send_event.wait()
            except BaseException:
                self._state.waiting_senders.pop(send_event, None)
                raise

            if send_event in self._state.waiting_senders:
                del self._state.waiting_senders[send_event]
                raise BrokenResourceError from None

    def clone(self) -> MemoryObjectSendStream[T_contra]:
        """
        Create a clone of this send stream.

        Each clone can be closed separately. Only when all clones have been closed will
        the sending end of the memory stream be considered closed by the receiving ends.

        :return: the cloned stream

        """
        if self._closed:
            raise ClosedResourceError

        return MemoryObjectSendStream(_state=self._state)

    def close(self) -> None:
        """
        Close the stream.

        This works the exact same way as :meth:`aclose`, but is provided as a special
        case for the benefit of synchronous callbacks.

        """
        if not self._closed:
            self._closed = True
            self._state.open_send_channels -= 1
            if self._state.open_send_channels == 0:
                receive_events = list(self._state.waiting_receivers.keys())
                self._state.waiting_receivers.clear()
                for event in receive_events:
                    event.set()

    async def aclose(self) -> None:
        self.close()

    def statistics(self) -> MemoryObjectStreamStatistics:
        """
        Return statistics about the current state of this stream.

        .. versionadded:: 3.0
        """
        return self._state.statistics()

    def __enter__(self) -> MemoryObjectSendStream[T_contra]:
        return self

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        self.close()

    def __del__(self) -> None:
        if not self._closed:
            warnings.warn(
                f"Unclosed <{self.__class__.__name__} at {id(self):x}>",
                ResourceWarning,
                source=self,
            )
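
# Usage sketch: a producer/consumer pair over a memory object stream, assuming
# the public factory anyio.create_memory_object_stream(); names are illustrative.
#
#     import anyio
#
#     async def main() -> None:
#         send, receive = anyio.create_memory_object_stream(max_buffer_size=5)
#         async with anyio.create_task_group() as tg:
#             async def producer() -> None:
#                 async with send:
#                     for i in range(10):
#                         await send.send(i)
#
#             tg.start_soon(producer)
#             async with receive:
#                 async for item in receive:
#                     print(item)
#
#     anyio.run(main)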
@@ -1,141 +0,0 @@
from __future__ import annotations

from collections.abc import Callable, Mapping, Sequence
from dataclasses import dataclass
from typing import Any, Generic, TypeVar

from ..abc import (
    ByteReceiveStream,
    ByteSendStream,
    ByteStream,
    Listener,
    ObjectReceiveStream,
    ObjectSendStream,
    ObjectStream,
    TaskGroup,
)

T_Item = TypeVar("T_Item")
T_Stream = TypeVar("T_Stream")


@dataclass(eq=False)
class StapledByteStream(ByteStream):
    """
    Combines two byte streams into a single, bidirectional byte stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ByteSendStream send_stream: the sending byte stream
    :param ByteReceiveStream receive_stream: the receiving byte stream
    """

    send_stream: ByteSendStream
    receive_stream: ByteReceiveStream

    async def receive(self, max_bytes: int = 65536) -> bytes:
        return await self.receive_stream.receive(max_bytes)

    async def send(self, item: bytes) -> None:
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.send_stream.extra_attributes,
            **self.receive_stream.extra_attributes,
        }


@dataclass(eq=False)
class StapledObjectStream(Generic[T_Item], ObjectStream[T_Item]):
    """
    Combines two object streams into a single, bidirectional object stream.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param ObjectSendStream send_stream: the sending object stream
    :param ObjectReceiveStream receive_stream: the receiving object stream
    """

    send_stream: ObjectSendStream[T_Item]
    receive_stream: ObjectReceiveStream[T_Item]

    async def receive(self) -> T_Item:
        return await self.receive_stream.receive()

    async def send(self, item: T_Item) -> None:
        await self.send_stream.send(item)

    async def send_eof(self) -> None:
        await self.send_stream.aclose()

    async def aclose(self) -> None:
        await self.send_stream.aclose()
        await self.receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.send_stream.extra_attributes,
            **self.receive_stream.extra_attributes,
        }


@dataclass(eq=False)
class MultiListener(Generic[T_Stream], Listener[T_Stream]):
    """
    Combines multiple listeners into one, serving connections from all of them at once.

    Any MultiListeners in the given collection of listeners will have their listeners
    moved into this one.

    Extra attributes are provided from each listener, with each successive listener
    overriding any conflicting attributes from the previous one.

    :param listeners: listeners to serve
    :type listeners: Sequence[Listener[T_Stream]]
    """

    listeners: Sequence[Listener[T_Stream]]

    def __post_init__(self) -> None:
        listeners: list[Listener[T_Stream]] = []
        for listener in self.listeners:
            if isinstance(listener, MultiListener):
                listeners.extend(listener.listeners)
                del listener.listeners[:]  # type: ignore[attr-defined]
            else:
                listeners.append(listener)

        self.listeners = listeners

    async def serve(
        self, handler: Callable[[T_Stream], Any], task_group: TaskGroup | None = None
    ) -> None:
        from .. import create_task_group

        async with create_task_group() as tg:
            for listener in self.listeners:
                tg.start_soon(listener.serve, handler, task_group)

    async def aclose(self) -> None:
        for listener in self.listeners:
            await listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        attributes: dict = {}
        for listener in self.listeners:
            attributes.update(listener.extra_attributes)

        return attributes
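
# Usage sketch: building an in-memory duplex by stapling two memory object
# streams together; assumes anyio.create_memory_object_stream().
#
#     import anyio
#
#     send1, receive1 = anyio.create_memory_object_stream()
#     send2, receive2 = anyio.create_memory_object_stream()
#     side_a = StapledObjectStream(send1, receive2)
#     side_b = StapledObjectStream(send2, receive1)
#     await side_a.send("ping")
#     assert await side_b.receive() == "ping"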
@@ -1,147 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import codecs
|
||||
from collections.abc import Callable, Mapping
|
||||
from dataclasses import InitVar, dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from ..abc import (
|
||||
AnyByteReceiveStream,
|
||||
AnyByteSendStream,
|
||||
AnyByteStream,
|
||||
ObjectReceiveStream,
|
||||
ObjectSendStream,
|
||||
ObjectStream,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(eq=False)
|
||||
class TextReceiveStream(ObjectReceiveStream[str]):
|
||||
"""
|
||||
Stream wrapper that decodes bytes to strings using the given encoding.
|
||||
|
||||
Decoding is done using :class:`~codecs.IncrementalDecoder` which returns any
|
||||
completely received unicode characters as soon as they come in.
|
||||
|
||||
:param transport_stream: any bytes-based receive stream
|
||||
:param encoding: character encoding to use for decoding bytes to strings (defaults
|
||||
to ``utf-8``)
|
||||
:param errors: handling scheme for decoding errors (defaults to ``strict``; see the
|
||||
`codecs module documentation`_ for a comprehensive list of options)
|
||||
|
||||
.. _codecs module documentation:
|
||||
https://docs.python.org/3/library/codecs.html#codec-objects
|
||||
"""
|
||||
|
||||
transport_stream: AnyByteReceiveStream
|
||||
encoding: InitVar[str] = "utf-8"
|
||||
errors: InitVar[str] = "strict"
|
||||
_decoder: codecs.IncrementalDecoder = field(init=False)
|
||||
|
||||
def __post_init__(self, encoding: str, errors: str) -> None:
|
||||
decoder_class = codecs.getincrementaldecoder(encoding)
|
||||
self._decoder = decoder_class(errors=errors)
|
||||
|
||||
async def receive(self) -> str:
|
||||
while True:
|
||||
chunk = await self.transport_stream.receive()
|
||||
decoded = self._decoder.decode(chunk)
|
||||
if decoded:
|
||||
return decoded
|
    async def aclose(self) -> None:
        await self.transport_stream.aclose()
        self._decoder.reset()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes


@dataclass(eq=False)
class TextSendStream(ObjectSendStream[str]):
    """
    Sends strings to the wrapped stream as bytes using the given encoding.

    :param AnyByteSendStream transport_stream: any bytes-based send stream
    :param str encoding: character encoding to use for encoding strings to bytes
        (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteSendStream
    encoding: InitVar[str] = "utf-8"
    errors: str = "strict"
    _encoder: Callable[..., tuple[bytes, int]] = field(init=False)

    def __post_init__(self, encoding: str) -> None:
        self._encoder = codecs.getencoder(encoding)

    async def send(self, item: str) -> None:
        encoded = self._encoder(item, self.errors)[0]
        await self.transport_stream.send(encoded)

    async def aclose(self) -> None:
        await self.transport_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return self.transport_stream.extra_attributes


@dataclass(eq=False)
class TextStream(ObjectStream[str]):
    """
    A bidirectional stream that decodes bytes to strings on receive and encodes strings
    to bytes on send.

    Extra attributes will be provided from both streams, with the receive stream
    providing the values in case of a conflict.

    :param AnyByteStream transport_stream: any bytes-based stream
    :param str encoding: character encoding to use for encoding/decoding strings to/from
        bytes (defaults to ``utf-8``)
    :param str errors: handling scheme for encoding errors (defaults to ``strict``; see
        the `codecs module documentation`_ for a comprehensive list of options)

    .. _codecs module documentation:
        https://docs.python.org/3/library/codecs.html#codec-objects
    """

    transport_stream: AnyByteStream
    encoding: InitVar[str] = "utf-8"
    errors: InitVar[str] = "strict"
    _receive_stream: TextReceiveStream = field(init=False)
    _send_stream: TextSendStream = field(init=False)

    def __post_init__(self, encoding: str, errors: str) -> None:
        self._receive_stream = TextReceiveStream(
            self.transport_stream, encoding=encoding, errors=errors
        )
        self._send_stream = TextSendStream(
            self.transport_stream, encoding=encoding, errors=errors
        )

    async def receive(self) -> str:
        return await self._receive_stream.receive()

    async def send(self, item: str) -> None:
        await self._send_stream.send(item)

    async def send_eof(self) -> None:
        await self.transport_stream.send_eof()

    async def aclose(self) -> None:
        await self._send_stream.aclose()
        await self._receive_stream.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self._send_stream.extra_attributes,
            **self._receive_stream.extra_attributes,
        }
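

# --- Hedged usage sketch (not part of the original module) ---
# Round-trips a string through TextSendStream/TextReceiveStream using an
# in-memory object stream of bytes. The helper name `_example_text_round_trip`
# and the sample payload are assumptions for illustration only.
async def _example_text_round_trip() -> None:
    import anyio

    send, receive = anyio.create_memory_object_stream(1)
    text_send = TextSendStream(send, encoding="utf-8")
    text_receive = TextReceiveStream(receive, encoding="utf-8")
    await text_send.send("héllo")  # encoded to b"h\xc3\xa9llo" on the wire
    assert await text_receive.receive() == "héllo"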
@@ -1,337 +0,0 @@
from __future__ import annotations

import logging
import re
import ssl
import sys
from collections.abc import Callable, Mapping
from dataclasses import dataclass
from functools import wraps
from typing import Any, TypeVar

from .. import (
    BrokenResourceError,
    EndOfStream,
    aclose_forcefully,
    get_cancelled_exc_class,
)
from .._core._typedattr import TypedAttributeSet, typed_attribute
from ..abc import AnyByteStream, ByteStream, Listener, TaskGroup

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")
_PCTRTT = tuple[tuple[str, str], ...]
_PCTRTTT = tuple[_PCTRTT, ...]


class TLSAttribute(TypedAttributeSet):
    """Contains Transport Layer Security related attributes."""

    #: the selected ALPN protocol
    alpn_protocol: str | None = typed_attribute()
    #: the channel binding for type ``tls-unique``
    channel_binding_tls_unique: bytes = typed_attribute()
    #: the selected cipher
    cipher: tuple[str, str, int] = typed_attribute()
    #: the peer certificate in dictionary form (see :meth:`ssl.SSLSocket.getpeercert`
    #: for more information)
    peer_certificate: None | (dict[str, str | _PCTRTTT | _PCTRTT]) = typed_attribute()
    #: the peer certificate in binary form
    peer_certificate_binary: bytes | None = typed_attribute()
    #: ``True`` if this is the server side of the connection
    server_side: bool = typed_attribute()
    #: ciphers shared by the client during the TLS handshake (``None`` if this is the
    #: client side)
    shared_ciphers: list[tuple[str, str, int]] | None = typed_attribute()
    #: the :class:`~ssl.SSLObject` used for encryption
    ssl_object: ssl.SSLObject = typed_attribute()
    #: ``True`` if this stream does (and expects) a closing TLS handshake when the
    #: stream is being closed
    standard_compatible: bool = typed_attribute()
    #: the TLS protocol version (e.g. ``TLSv1.2``)
    tls_version: str = typed_attribute()


@dataclass(eq=False)
class TLSStream(ByteStream):
    """
    A stream wrapper that encrypts all sent data and decrypts received data.

    This class has no public initializer; use :meth:`wrap` instead.
    All extra attributes from :class:`~TLSAttribute` are supported.

    :var AnyByteStream transport_stream: the wrapped stream

    """

    transport_stream: AnyByteStream
    standard_compatible: bool
    _ssl_object: ssl.SSLObject
    _read_bio: ssl.MemoryBIO
    _write_bio: ssl.MemoryBIO

    @classmethod
    async def wrap(
        cls,
        transport_stream: AnyByteStream,
        *,
        server_side: bool | None = None,
        hostname: str | None = None,
        ssl_context: ssl.SSLContext | None = None,
        standard_compatible: bool = True,
    ) -> TLSStream:
        """
        Wrap an existing stream with Transport Layer Security.

        This performs a TLS handshake with the peer.

        :param transport_stream: a bytes-transporting stream to wrap
        :param server_side: ``True`` if this is the server side of the connection,
            ``False`` if this is the client side (if omitted, will be set to ``False``
            if ``hostname`` has been provided, ``True`` otherwise). Used only to create
            a default context when an explicit context has not been provided.
        :param hostname: host name of the peer (if host name checking is desired)
        :param ssl_context: the SSLContext object to use (if not provided, a secure
            default will be created)
        :param standard_compatible: if ``False``, skip the closing handshake when
            closing the connection, and don't raise an exception if the peer does the
            same
        :raises ~ssl.SSLError: if the TLS handshake fails

        """
        if server_side is None:
            server_side = not hostname

        if not ssl_context:
            purpose = (
                ssl.Purpose.CLIENT_AUTH if server_side else ssl.Purpose.SERVER_AUTH
            )
            ssl_context = ssl.create_default_context(purpose)

            # Re-enable detection of unexpected EOFs if it was disabled by Python
            if hasattr(ssl, "OP_IGNORE_UNEXPECTED_EOF"):
                ssl_context.options &= ~ssl.OP_IGNORE_UNEXPECTED_EOF

        bio_in = ssl.MemoryBIO()
        bio_out = ssl.MemoryBIO()
        ssl_object = ssl_context.wrap_bio(
            bio_in, bio_out, server_side=server_side, server_hostname=hostname
        )
        wrapper = cls(
            transport_stream=transport_stream,
            standard_compatible=standard_compatible,
            _ssl_object=ssl_object,
            _read_bio=bio_in,
            _write_bio=bio_out,
        )
        await wrapper._call_sslobject_method(ssl_object.do_handshake)
        return wrapper

    async def _call_sslobject_method(
        self, func: Callable[[Unpack[PosArgsT]], T_Retval], *args: Unpack[PosArgsT]
    ) -> T_Retval:
        while True:
            try:
                result = func(*args)
            except ssl.SSLWantReadError:
                try:
                    # Flush any pending writes first
                    if self._write_bio.pending:
                        await self.transport_stream.send(self._write_bio.read())

                    data = await self.transport_stream.receive()
                except EndOfStream:
                    self._read_bio.write_eof()
                except OSError as exc:
                    self._read_bio.write_eof()
                    self._write_bio.write_eof()
                    raise BrokenResourceError from exc
                else:
                    self._read_bio.write(data)
            except ssl.SSLWantWriteError:
                await self.transport_stream.send(self._write_bio.read())
            except ssl.SSLSyscallError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                raise BrokenResourceError from exc
            except ssl.SSLError as exc:
                self._read_bio.write_eof()
                self._write_bio.write_eof()
                if isinstance(exc, ssl.SSLEOFError) or (
                    exc.strerror and "UNEXPECTED_EOF_WHILE_READING" in exc.strerror
                ):
                    if self.standard_compatible:
                        raise BrokenResourceError from exc
                    else:
                        raise EndOfStream from None

                raise
            else:
                # Flush any pending writes first
                if self._write_bio.pending:
                    await self.transport_stream.send(self._write_bio.read())

                return result

    async def unwrap(self) -> tuple[AnyByteStream, bytes]:
        """
        Does the TLS closing handshake.

        :return: a tuple of (wrapped byte stream, bytes left in the read buffer)

        """
        await self._call_sslobject_method(self._ssl_object.unwrap)
        self._read_bio.write_eof()
        self._write_bio.write_eof()
        return self.transport_stream, self._read_bio.read()

    async def aclose(self) -> None:
        if self.standard_compatible:
            try:
                await self.unwrap()
            except BaseException:
                await aclose_forcefully(self.transport_stream)
                raise

        await self.transport_stream.aclose()

    async def receive(self, max_bytes: int = 65536) -> bytes:
        data = await self._call_sslobject_method(self._ssl_object.read, max_bytes)
        if not data:
            raise EndOfStream

        return data

    async def send(self, item: bytes) -> None:
        await self._call_sslobject_method(self._ssl_object.write, item)

    async def send_eof(self) -> None:
        tls_version = self.extra(TLSAttribute.tls_version)
        match = re.match(r"TLSv(\d+)(?:\.(\d+))?", tls_version)
        if match:
            major, minor = int(match.group(1)), int(match.group(2) or 0)
            if (major, minor) < (1, 3):
                raise NotImplementedError(
                    f"send_eof() requires at least TLSv1.3; current "
                    f"session uses {tls_version}"
                )

        raise NotImplementedError(
            "send_eof() has not yet been implemented for TLS streams"
        )

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            **self.transport_stream.extra_attributes,
            TLSAttribute.alpn_protocol: self._ssl_object.selected_alpn_protocol,
            TLSAttribute.channel_binding_tls_unique: (
                self._ssl_object.get_channel_binding
            ),
            TLSAttribute.cipher: self._ssl_object.cipher,
            TLSAttribute.peer_certificate: lambda: self._ssl_object.getpeercert(False),
            TLSAttribute.peer_certificate_binary: lambda: self._ssl_object.getpeercert(
                True
            ),
            TLSAttribute.server_side: lambda: self._ssl_object.server_side,
            TLSAttribute.shared_ciphers: lambda: self._ssl_object.shared_ciphers()
            if self._ssl_object.server_side
            else None,
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
            TLSAttribute.ssl_object: lambda: self._ssl_object,
            TLSAttribute.tls_version: self._ssl_object.version,
        }
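

# --- Hedged usage sketch (not part of the original module) ---
# A minimal client-side use of TLSStream.wrap() over a TCP stream. The host
# name and the helper name `_example_tls_client` are assumptions for
# illustration, not part of anyio's API.
async def _example_tls_client() -> None:
    import anyio

    tcp_stream = await anyio.connect_tcp("example.com", 443)
    tls_stream = await TLSStream.wrap(tcp_stream, hostname="example.com")
    await tls_stream.send(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    print(await tls_stream.receive())  # first chunk of the HTTP response
    await tls_stream.aclose()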


@dataclass(eq=False)
class TLSListener(Listener[TLSStream]):
    """
    A convenience listener that wraps another listener and auto-negotiates a TLS session
    on every accepted connection.

    If the TLS handshake times out or raises an exception,
    :meth:`handle_handshake_error` is called to do whatever post-mortem processing is
    deemed necessary.

    Supports only the :attr:`~TLSAttribute.standard_compatible` extra attribute.

    :param Listener listener: the listener to wrap
    :param ssl_context: the SSL context object
    :param standard_compatible: a flag passed through to :meth:`TLSStream.wrap`
    :param handshake_timeout: time limit for the TLS handshake
        (passed to :func:`~anyio.fail_after`)
    """

    listener: Listener[Any]
    ssl_context: ssl.SSLContext
    standard_compatible: bool = True
    handshake_timeout: float = 30

    @staticmethod
    async def handle_handshake_error(exc: BaseException, stream: AnyByteStream) -> None:
        """
        Handle an exception raised during the TLS handshake.

        This method does 3 things:

        #. Forcefully closes the original stream
        #. Logs the exception (unless it was a cancellation exception) using the
           ``anyio.streams.tls`` logger
        #. Reraises the exception if it was a base exception or a cancellation exception

        :param exc: the exception
        :param stream: the original stream

        """
        await aclose_forcefully(stream)

        # Log all except cancellation exceptions
        if not isinstance(exc, get_cancelled_exc_class()):
            # CPython (as of 3.11.5) returns incorrect `sys.exc_info()` here when using
            # any asyncio implementation, so we explicitly pass the exception to log
            # (https://github.com/python/cpython/issues/108668). Trio does not have this
            # issue because it works around the CPython bug.
            logging.getLogger(__name__).exception(
                "Error during TLS handshake", exc_info=exc
            )

        # Only reraise base exceptions and cancellation exceptions
        if not isinstance(exc, Exception) or isinstance(exc, get_cancelled_exc_class()):
            raise

    async def serve(
        self,
        handler: Callable[[TLSStream], Any],
        task_group: TaskGroup | None = None,
    ) -> None:
        @wraps(handler)
        async def handler_wrapper(stream: AnyByteStream) -> None:
            from .. import fail_after

            try:
                with fail_after(self.handshake_timeout):
                    wrapped_stream = await TLSStream.wrap(
                        stream,
                        ssl_context=self.ssl_context,
                        standard_compatible=self.standard_compatible,
                    )
            except BaseException as exc:
                await self.handle_handshake_error(exc, stream)
            else:
                await handler(wrapped_stream)

        await self.listener.serve(handler_wrapper, task_group)

    async def aclose(self) -> None:
        await self.listener.aclose()

    @property
    def extra_attributes(self) -> Mapping[Any, Callable[[], Any]]:
        return {
            TLSAttribute.standard_compatible: lambda: self.standard_compatible,
        }
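

# --- Hedged usage sketch (not part of the original module) ---
# Serving TLS connections by wrapping a TCP listener. The certificate paths,
# the port, and the helper name `_example_tls_server` are assumptions for
# illustration only.
async def _example_tls_server() -> None:
    import anyio

    context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
    context.load_cert_chain("cert.pem", "key.pem")  # assumed file names

    async def handle(stream: TLSStream) -> None:
        await stream.send(b"hello over TLS\n")
        await stream.aclose()

    tcp_listener = await anyio.create_tcp_listener(local_port=8443)
    await TLSListener(tcp_listener, context).serve(handle)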
@@ -1,258 +0,0 @@
from __future__ import annotations

import os
import pickle
import subprocess
import sys
from collections import deque
from collections.abc import Callable
from importlib.util import module_from_spec, spec_from_file_location
from typing import TypeVar, cast

from ._core._eventloop import current_time, get_async_backend, get_cancelled_exc_class
from ._core._exceptions import BrokenWorkerProcess
from ._core._subprocesses import open_process
from ._core._synchronization import CapacityLimiter
from ._core._tasks import CancelScope, fail_after
from .abc import ByteReceiveStream, ByteSendStream, Process
from .lowlevel import RunVar, checkpoint_if_cancelled
from .streams.buffered import BufferedByteReceiveStream

if sys.version_info >= (3, 11):
    from typing import TypeVarTuple, Unpack
else:
    from typing_extensions import TypeVarTuple, Unpack

WORKER_MAX_IDLE_TIME = 300  # 5 minutes

T_Retval = TypeVar("T_Retval")
PosArgsT = TypeVarTuple("PosArgsT")

_process_pool_workers: RunVar[set[Process]] = RunVar("_process_pool_workers")
_process_pool_idle_workers: RunVar[deque[tuple[Process, float]]] = RunVar(
    "_process_pool_idle_workers"
)
_default_process_limiter: RunVar[CapacityLimiter] = RunVar("_default_process_limiter")


async def run_sync(
    func: Callable[[Unpack[PosArgsT]], T_Retval],
    *args: Unpack[PosArgsT],
    cancellable: bool = False,
    limiter: CapacityLimiter | None = None,
) -> T_Retval:
    """
    Call the given function with the given arguments in a worker process.

    If the ``cancellable`` option is enabled and the task waiting for its completion is
    cancelled, the worker process running it will be abruptly terminated using SIGKILL
    (or ``terminateProcess()`` on Windows).

    :param func: a callable
    :param args: positional arguments for the callable
    :param cancellable: ``True`` to allow cancellation of the operation while it's
        running
    :param limiter: capacity limiter to use to limit the total amount of processes
        running (if omitted, the default limiter is used)
    :return: an awaitable that yields the return value of the function.

    """

    async def send_raw_command(pickled_cmd: bytes) -> object:
        try:
            await stdin.send(pickled_cmd)
            response = await buffered.receive_until(b"\n", 50)
            status, length = response.split(b" ")
            if status not in (b"RETURN", b"EXCEPTION"):
                raise RuntimeError(
                    f"Worker process returned unexpected response: {response!r}"
                )

            pickled_response = await buffered.receive_exactly(int(length))
        except BaseException as exc:
            workers.discard(process)
            try:
                process.kill()
                with CancelScope(shield=True):
                    await process.aclose()
            except ProcessLookupError:
                pass

            if isinstance(exc, get_cancelled_exc_class()):
                raise
            else:
                raise BrokenWorkerProcess from exc

        retval = pickle.loads(pickled_response)
        if status == b"EXCEPTION":
            assert isinstance(retval, BaseException)
            raise retval
        else:
            return retval

    # First pickle the request before trying to reserve a worker process
    await checkpoint_if_cancelled()
    request = pickle.dumps(("run", func, args), protocol=pickle.HIGHEST_PROTOCOL)

    # If this is the first run in this event loop thread, set up the necessary variables
    try:
        workers = _process_pool_workers.get()
        idle_workers = _process_pool_idle_workers.get()
    except LookupError:
        workers = set()
        idle_workers = deque()
        _process_pool_workers.set(workers)
        _process_pool_idle_workers.set(idle_workers)
        get_async_backend().setup_process_pool_exit_at_shutdown(workers)

    async with limiter or current_default_process_limiter():
        # Pop processes from the pool (starting from the most recently used) until we
        # find one that hasn't exited yet
        process: Process
        while idle_workers:
            process, idle_since = idle_workers.pop()
            if process.returncode is None:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )

                # Prune any other workers that have been idle for WORKER_MAX_IDLE_TIME
                # seconds or longer
                now = current_time()
                killed_processes: list[Process] = []
                while idle_workers:
                    if now - idle_workers[0][1] < WORKER_MAX_IDLE_TIME:
                        break

                    process_to_kill, idle_since = idle_workers.popleft()
                    process_to_kill.kill()
                    workers.remove(process_to_kill)
                    killed_processes.append(process_to_kill)

                with CancelScope(shield=True):
                    for killed_process in killed_processes:
                        await killed_process.aclose()

                break

            workers.remove(process)
        else:
            command = [sys.executable, "-u", "-m", __name__]
            process = await open_process(
                command, stdin=subprocess.PIPE, stdout=subprocess.PIPE
            )
            try:
                stdin = cast(ByteSendStream, process.stdin)
                buffered = BufferedByteReceiveStream(
                    cast(ByteReceiveStream, process.stdout)
                )
                with fail_after(20):
                    message = await buffered.receive(6)

                if message != b"READY\n":
                    raise BrokenWorkerProcess(
                        f"Worker process returned unexpected response: {message!r}"
                    )

                main_module_path = getattr(sys.modules["__main__"], "__file__", None)
                pickled = pickle.dumps(
                    ("init", sys.path, main_module_path),
                    protocol=pickle.HIGHEST_PROTOCOL,
                )
                await send_raw_command(pickled)
            except (BrokenWorkerProcess, get_cancelled_exc_class()):
                raise
            except BaseException as exc:
                process.kill()
                raise BrokenWorkerProcess(
                    "Error during worker process initialization"
                ) from exc

            workers.add(process)

        with CancelScope(shield=not cancellable):
            try:
                return cast(T_Retval, await send_raw_command(request))
            finally:
                if process in workers:
                    idle_workers.append((process, current_time()))
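

# --- Hedged usage sketch (not part of the original module) ---
# Offloading a picklable, CPU-bound callable to a worker process. The helper
# name `_example_offload` and the use of `sum` are assumptions for
# illustration only.
async def _example_offload() -> None:
    result = await run_sync(sum, range(1_000_000))
    print(result)  # 499999500000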


def current_default_process_limiter() -> CapacityLimiter:
    """
    Return the capacity limiter that is used by default to limit the number of worker
    processes.

    :return: a capacity limiter object

    """
    try:
        return _default_process_limiter.get()
    except LookupError:
        limiter = CapacityLimiter(os.cpu_count() or 2)
        _default_process_limiter.set(limiter)
        return limiter
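

# --- Hedged usage sketch (not part of the original module) ---
# Capping concurrent worker processes through the default limiter; the token
# count of 2 and the helper name `_example_limited_offload` are assumptions
# for illustration only.
async def _example_limited_offload() -> None:
    import anyio

    current_default_process_limiter().total_tokens = 2  # at most two workers
    async with anyio.create_task_group() as tg:
        for _ in range(4):
            tg.start_soon(run_sync, len, "anyio")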


def process_worker() -> None:
    # Redirect standard streams to os.devnull so that user code won't interfere with the
    # parent-worker communication
    stdin = sys.stdin
    stdout = sys.stdout
    sys.stdin = open(os.devnull)
    sys.stdout = open(os.devnull, "w")

    stdout.buffer.write(b"READY\n")
    while True:
        retval = exception = None
        try:
            command, *args = pickle.load(stdin.buffer)
        except EOFError:
            return
        except BaseException as exc:
            exception = exc
        else:
            if command == "run":
                func, args = args
                try:
                    retval = func(*args)
                except BaseException as exc:
                    exception = exc
            elif command == "init":
                main_module_path: str | None
                sys.path, main_module_path = args
                del sys.modules["__main__"]
                if main_module_path and os.path.isfile(main_module_path):
                    # Load the parent's main module but as __mp_main__ instead of
                    # __main__ (like multiprocessing does) to avoid infinite recursion
                    try:
                        spec = spec_from_file_location("__mp_main__", main_module_path)
                        if spec and spec.loader:
                            main = module_from_spec(spec)
                            spec.loader.exec_module(main)
                            sys.modules["__main__"] = main
                    except BaseException as exc:
                        exception = exc

        try:
            if exception is not None:
                status = b"EXCEPTION"
                pickled = pickle.dumps(exception, pickle.HIGHEST_PROTOCOL)
            else:
                status = b"RETURN"
                pickled = pickle.dumps(retval, pickle.HIGHEST_PROTOCOL)
        except BaseException as exc:
            exception = exc
            status = b"EXCEPTION"
            pickled = pickle.dumps(exc, pickle.HIGHEST_PROTOCOL)

        stdout.buffer.write(b"%s %d\n" % (status, len(pickled)))
        stdout.buffer.write(pickled)

        # Respect SIGTERM
        if isinstance(exception, SystemExit):
            raise exception


if __name__ == "__main__":
    process_worker()
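

# --- Hedged protocol note (not part of the original module) ---
# Each worker response is a status line, b"RETURN <len>\n" or
# b"EXCEPTION <len>\n", followed by exactly <len> bytes of pickled payload:
#
#     payload = pickle.dumps(42, pickle.HIGHEST_PROTOCOL)
#     frame = b"RETURN %d\n" % len(payload) + payload
#
# send_raw_command() above parses this framing with receive_until() and
# receive_exactly().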