Mirror of https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git
Synced 2025-08-14 00:25:46 +02:00

Commit message: "Все подряд" ("Everything at once")
14
.venv2/Lib/site-packages/telethon/network/__init__.py
Normal file
@@ -0,0 +1,14 @@
"""
This module contains several classes regarding network, low level connection
with Telegram's servers and the protocol used (TCP full, abridged, etc.).
"""
from .mtprotoplainsender import MTProtoPlainSender
from .authenticator import do_authentication
from .mtprotosender import MTProtoSender
from .connection import (
    Connection,
    ConnectionTcpFull, ConnectionTcpIntermediate, ConnectionTcpAbridged,
    ConnectionTcpObfuscated, ConnectionTcpMTProxyAbridged,
    ConnectionTcpMTProxyIntermediate,
    ConnectionTcpMTProxyRandomizedIntermediate, ConnectionHttp, TcpMTProxy
)
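
Aside: these re-exports exist so callers can pick a transport mode. A minimal usage sketch, assuming the standard Telethon client API (the session name, api_id and api_hash below are placeholders):

# Sketch: selecting a transport mode for a Telethon client.
from telethon import TelegramClient
from telethon.network import ConnectionTcpAbridged

api_id, api_hash = 12345, '0123456789abcdef0123456789abcdef'  # placeholders

# The connection class is passed as a type; the client instantiates it.
client = TelegramClient('session', api_id, api_hash,
                        connection=ConnectionTcpAbridged)
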
212
.venv2/Lib/site-packages/telethon/network/authenticator.py
Normal file
@@ -0,0 +1,212 @@
"""
This module contains several functions that authenticate the client machine
with Telegram's servers, effectively creating an authorization key.
"""
import os
import time
from hashlib import sha1

from ..tl.types import (
    ResPQ, PQInnerData, ServerDHParamsFail, ServerDHParamsOk,
    ServerDHInnerData, ClientDHInnerData, DhGenOk, DhGenRetry, DhGenFail
)
from .. import helpers
from ..crypto import AES, AuthKey, Factorization, rsa
from ..errors import SecurityError
from ..extensions import BinaryReader
from ..tl.functions import (
    ReqPqMultiRequest, ReqDHParamsRequest, SetClientDHParamsRequest
)


async def do_authentication(sender):
    """
    Executes the authentication process with the Telegram servers.

    :param sender: a connected `MTProtoPlainSender`.
    :return: an (authorization key, time offset) tuple.
    """
    # Step 1 sending: PQ Request, endianness doesn't matter since it's random
    nonce = int.from_bytes(os.urandom(16), 'big', signed=True)
    res_pq = await sender.send(ReqPqMultiRequest(nonce))
    assert isinstance(res_pq, ResPQ), 'Step 1 answer was %s' % res_pq

    if res_pq.nonce != nonce:
        raise SecurityError('Step 1 invalid nonce from server')

    pq = get_int(res_pq.pq)

    # Step 2 sending: DH Exchange
    p, q = Factorization.factorize(pq)
    p, q = rsa.get_byte_array(p), rsa.get_byte_array(q)
    new_nonce = int.from_bytes(os.urandom(32), 'little', signed=True)

    pq_inner_data = bytes(PQInnerData(
        pq=rsa.get_byte_array(pq), p=p, q=q,
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        new_nonce=new_nonce
    ))

    # sha_digest + data + random_bytes
    cipher_text, target_fingerprint = None, None
    for fingerprint in res_pq.server_public_key_fingerprints:
        cipher_text = rsa.encrypt(fingerprint, pq_inner_data)
        if cipher_text is not None:
            target_fingerprint = fingerprint
            break

    if cipher_text is None:
        # Second attempt, but now we're allowed to use old keys
        for fingerprint in res_pq.server_public_key_fingerprints:
            cipher_text = rsa.encrypt(fingerprint, pq_inner_data, use_old=True)
            if cipher_text is not None:
                target_fingerprint = fingerprint
                break

    if cipher_text is None:
        raise SecurityError(
            'Step 2 could not find a valid key for fingerprints: {}'
            .format(', '.join(
                [str(f) for f in res_pq.server_public_key_fingerprints])
            )
        )

    server_dh_params = await sender.send(ReqDHParamsRequest(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        p=p, q=q,
        public_key_fingerprint=target_fingerprint,
        encrypted_data=cipher_text
    ))

    assert isinstance(
        server_dh_params, (ServerDHParamsOk, ServerDHParamsFail)),\
        'Step 2.1 answer was %s' % server_dh_params

    if server_dh_params.nonce != res_pq.nonce:
        raise SecurityError('Step 2 invalid nonce from server')

    if server_dh_params.server_nonce != res_pq.server_nonce:
        raise SecurityError('Step 2 invalid server nonce from server')

    if isinstance(server_dh_params, ServerDHParamsFail):
        nnh = int.from_bytes(
            sha1(new_nonce.to_bytes(32, 'little', signed=True)).digest()[4:20],
            'little', signed=True
        )
        if server_dh_params.new_nonce_hash != nnh:
            raise SecurityError('Step 2 invalid DH fail nonce from server')

    assert isinstance(server_dh_params, ServerDHParamsOk),\
        'Step 2.2 answer was %s' % server_dh_params

    # Step 3 sending: Complete DH Exchange
    key, iv = helpers.generate_key_data_from_nonce(
        res_pq.server_nonce, new_nonce
    )
    if len(server_dh_params.encrypted_answer) % 16 != 0:
        # See PR#453
        raise SecurityError('Step 3 AES block size mismatch')

    plain_text_answer = AES.decrypt_ige(
        server_dh_params.encrypted_answer, key, iv
    )

    with BinaryReader(plain_text_answer) as reader:
        reader.read(20)  # hash sum
        server_dh_inner = reader.tgread_object()
        assert isinstance(server_dh_inner, ServerDHInnerData),\
            'Step 3 answer was %s' % server_dh_inner

    if server_dh_inner.nonce != res_pq.nonce:
        raise SecurityError('Step 3 Invalid nonce in encrypted answer')

    if server_dh_inner.server_nonce != res_pq.server_nonce:
        raise SecurityError('Step 3 Invalid server nonce in encrypted answer')

    dh_prime = get_int(server_dh_inner.dh_prime, signed=False)
    g = server_dh_inner.g
    g_a = get_int(server_dh_inner.g_a, signed=False)
    time_offset = server_dh_inner.server_time - int(time.time())

    b = get_int(os.urandom(256), signed=False)
    g_b = pow(g, b, dh_prime)
    gab = pow(g_a, b, dh_prime)

    # IMPORTANT: Apart from the conditions on the Diffie-Hellman prime
    # dh_prime and generator g, both sides are to check that g, g_a and
    # g_b are greater than 1 and less than dh_prime - 1. We recommend
    # checking that g_a and g_b are between 2^{2048-64} and
    # dh_prime - 2^{2048-64} as well.
    # (https://core.telegram.org/mtproto/auth_key#dh-key-exchange-complete)
    if not (1 < g < (dh_prime - 1)):
        raise SecurityError('g is not within (1, dh_prime - 1)')

    if not (1 < g_a < (dh_prime - 1)):
        raise SecurityError('g_a is not within (1, dh_prime - 1)')

    if not (1 < g_b < (dh_prime - 1)):
        raise SecurityError('g_b is not within (1, dh_prime - 1)')

    safety_range = 2 ** (2048 - 64)
    if not (safety_range <= g_a <= (dh_prime - safety_range)):
        raise SecurityError('g_a is not within (2^{2048-64}, dh_prime - 2^{2048-64})')

    if not (safety_range <= g_b <= (dh_prime - safety_range)):
        raise SecurityError('g_b is not within (2^{2048-64}, dh_prime - 2^{2048-64})')

    # Prepare client DH Inner Data
    client_dh_inner = bytes(ClientDHInnerData(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        retry_id=0,  # TODO Actual retry ID
        g_b=rsa.get_byte_array(g_b)
    ))

    client_dh_inner_hashed = sha1(client_dh_inner).digest() + client_dh_inner

    # Encryption
    client_dh_encrypted = AES.encrypt_ige(client_dh_inner_hashed, key, iv)

    # Prepare Set client DH params
    dh_gen = await sender.send(SetClientDHParamsRequest(
        nonce=res_pq.nonce,
        server_nonce=res_pq.server_nonce,
        encrypted_data=client_dh_encrypted,
    ))

    nonce_types = (DhGenOk, DhGenRetry, DhGenFail)
    assert isinstance(dh_gen, nonce_types), 'Step 3.1 answer was %s' % dh_gen
    name = dh_gen.__class__.__name__
    if dh_gen.nonce != res_pq.nonce:
        raise SecurityError('Step 3 invalid {} nonce from server'.format(name))

    if dh_gen.server_nonce != res_pq.server_nonce:
        raise SecurityError(
            'Step 3 invalid {} server nonce from server'.format(name))

    auth_key = AuthKey(rsa.get_byte_array(gab))
    nonce_number = 1 + nonce_types.index(type(dh_gen))
    new_nonce_hash = auth_key.calc_new_nonce_hash(new_nonce, nonce_number)

    dh_hash = getattr(dh_gen, 'new_nonce_hash{}'.format(nonce_number))
    if dh_hash != new_nonce_hash:
        raise SecurityError('Step 3 invalid new nonce hash')

    if not isinstance(dh_gen, DhGenOk):
        raise AssertionError('Step 3.2 answer was %s' % dh_gen)

    return auth_key, time_offset


def get_int(byte_array, signed=True):
    """
    Gets the specified integer from its byte array.
    This should be used by this module alone, as it works with big endian.

    :param byte_array: the byte array representing the integer.
    :param signed: whether the number is signed or not.
    :return: the integer representing the given byte array.
    """
    return int.from_bytes(byte_array, byteorder='big', signed=signed)
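
Aside: the range checks above guard a standard Diffie-Hellman exchange. The client picks a random b, sends g_b = g^b mod dh_prime, receives g_a, and both sides derive the same g^(ab), which becomes the authorization key. A toy illustration of that math with tiny numbers (the real exchange uses a 2048-bit dh_prime):

# Toy Diffie-Hellman, illustrating the arithmetic in do_authentication.
dh_prime, g = 23, 5            # toy public parameters
a, b = 6, 15                   # server's and client's secret exponents

g_a = pow(g, a, dh_prime)      # sent by the server (in server_dh_inner)
g_b = pow(g, b, dh_prime)      # sent by the client (in client_dh_inner)

# Both sides compute the same shared secret g^(ab) mod dh_prime.
assert pow(g_a, b, dh_prime) == pow(g_b, a, dh_prime)
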
12
.venv2/Lib/site-packages/telethon/network/connection/__init__.py
Normal file
@@ -0,0 +1,12 @@
from .connection import Connection
from .tcpfull import ConnectionTcpFull
from .tcpintermediate import ConnectionTcpIntermediate
from .tcpabridged import ConnectionTcpAbridged
from .tcpobfuscated import ConnectionTcpObfuscated
from .tcpmtproxy import (
    TcpMTProxy,
    ConnectionTcpMTProxyAbridged,
    ConnectionTcpMTProxyIntermediate,
    ConnectionTcpMTProxyRandomizedIntermediate
)
from .http import ConnectionHttp
434
.venv2/Lib/site-packages/telethon/network/connection/connection.py
Normal file
@@ -0,0 +1,434 @@
import abc
import asyncio
import socket
import sys

try:
    import ssl as ssl_mod
except ImportError:
    ssl_mod = None

try:
    import python_socks
except ImportError:
    python_socks = None

from ...errors import InvalidChecksumError, InvalidBufferError
from ... import helpers


class Connection(abc.ABC):
    """
    The `Connection` class is a wrapper around ``asyncio.open_connection``.

    Subclasses will implement different transport modes as atomic operations,
    which this class eases doing since the exposed interface simply puts and
    gets complete data payloads to and from queues.

    The only error that will raise from send and receive methods is
    ``ConnectionError``, which will raise when attempting to send if
    the client is disconnected (includes remote disconnections).
    """
    # this static attribute should be redefined by `Connection` subclasses and
    # should be one of `PacketCodec` implementations
    packet_codec = None

    def __init__(self, ip, port, dc_id, *, loggers, proxy=None, local_addr=None):
        self._ip = ip
        self._port = port
        self._dc_id = dc_id  # only for MTProxy, it's an abstraction leak
        self._log = loggers[__name__]
        self._proxy = proxy
        self._local_addr = local_addr
        self._reader = None
        self._writer = None
        self._connected = False
        self._send_task = None
        self._recv_task = None
        self._codec = None
        self._obfuscation = None  # TcpObfuscated and MTProxy
        self._send_queue = asyncio.Queue(1)
        self._recv_queue = asyncio.Queue(1)

    @staticmethod
    def _wrap_socket_ssl(sock):
        if ssl_mod is None:
            raise RuntimeError(
                'Cannot use proxy that requires SSL '
                'without the SSL module being available'
            )

        return ssl_mod.wrap_socket(
            sock,
            do_handshake_on_connect=True,
            ssl_version=ssl_mod.PROTOCOL_SSLv23,
            ciphers='ADH-AES256-SHA')

    @staticmethod
    def _parse_proxy(proxy_type, addr, port, rdns=True, username=None, password=None):
        if isinstance(proxy_type, str):
            proxy_type = proxy_type.lower()

        # Always prefer `python_socks` when available
        if python_socks:
            from python_socks import ProxyType

            # We do the check for numerical values here
            # to be backwards compatible with PySocks proxy format,
            # (since socks.SOCKS5 == 2, socks.SOCKS4 == 1, socks.HTTP == 3)
            if proxy_type == ProxyType.SOCKS5 or proxy_type == 2 or proxy_type == "socks5":
                protocol = ProxyType.SOCKS5
            elif proxy_type == ProxyType.SOCKS4 or proxy_type == 1 or proxy_type == "socks4":
                protocol = ProxyType.SOCKS4
            elif proxy_type == ProxyType.HTTP or proxy_type == 3 or proxy_type == "http":
                protocol = ProxyType.HTTP
            else:
                raise ValueError("Unknown proxy protocol type: {}".format(proxy_type))

            # This tuple must be compatible with `python_socks`' `Proxy.create()` signature
            return protocol, addr, port, username, password, rdns

        else:
            from socks import SOCKS5, SOCKS4, HTTP

            if proxy_type == 2 or proxy_type == "socks5":
                protocol = SOCKS5
            elif proxy_type == 1 or proxy_type == "socks4":
                protocol = SOCKS4
            elif proxy_type == 3 or proxy_type == "http":
                protocol = HTTP
            else:
                raise ValueError("Unknown proxy protocol type: {}".format(proxy_type))

            # This tuple must be compatible with `PySocks`' `socksocket.set_proxy()` signature
            return protocol, addr, port, rdns, username, password

    async def _proxy_connect(self, timeout=None, local_addr=None):
        if isinstance(self._proxy, (tuple, list)):
            parsed = self._parse_proxy(*self._proxy)
        elif isinstance(self._proxy, dict):
            parsed = self._parse_proxy(**self._proxy)
        else:
            raise TypeError("Proxy of unknown format: {}".format(type(self._proxy)))

        # Always prefer `python_socks` when available
        if python_socks:
            # python_socks internal errors are not inherited from
            # builtin IOError (just from Exception). Instead of adding those
            # in exceptions clauses everywhere through the code, we
            # rather monkey-patch them in place.

            python_socks._errors.ProxyError = ConnectionError
            python_socks._errors.ProxyConnectionError = ConnectionError
            python_socks._errors.ProxyTimeoutError = ConnectionError

            from python_socks.async_.asyncio import Proxy

            proxy = Proxy.create(*parsed)

            # WARNING: If `local_addr` is set we use manual socket creation, because,
            # unfortunately, `Proxy.connect()` does not expose `local_addr`
            # argument, so if we want to bind socket locally, we need to manually
            # create, bind and connect socket, and then pass to `Proxy.connect()` method.

            if local_addr is None:
                sock = await proxy.connect(
                    dest_host=self._ip,
                    dest_port=self._port,
                    timeout=timeout
                )
            else:
                # Here we start manual setup of the socket.
                # The `address` represents the proxy ip and proxy port,
                # not the destination one (!), because the socket
                # connects to the proxy server, not destination server.
                # IPv family is also checked on proxy address.
                if ':' in proxy.proxy_host:
                    mode, address = socket.AF_INET6, (proxy.proxy_host, proxy.proxy_port, 0, 0)
                else:
                    mode, address = socket.AF_INET, (proxy.proxy_host, proxy.proxy_port)

                # Create a non-blocking socket and bind it (if local address is specified).
                sock = socket.socket(mode, socket.SOCK_STREAM)
                sock.setblocking(False)
                sock.bind(local_addr)

                # Actual TCP connection is performed here.
                await asyncio.wait_for(
                    helpers.get_running_loop().sock_connect(sock=sock, address=address),
                    timeout=timeout
                )

                # As our socket is already created and connected,
                # this call sets the destination host/port and
                # starts protocol negotiations with the proxy server.
                sock = await proxy.connect(
                    dest_host=self._ip,
                    dest_port=self._port,
                    timeout=timeout,
                    _socket=sock
                )

        else:
            import socks

            # Here `address` represents destination address (not proxy), because of
            # the `PySocks` implementation of the connection routine.
            # IPv family is checked on proxy address, not destination address.
            if ':' in parsed[1]:
                mode, address = socket.AF_INET6, (self._ip, self._port, 0, 0)
            else:
                mode, address = socket.AF_INET, (self._ip, self._port)

            # Setup socket, proxy, timeout and bind it (if necessary).
            sock = socks.socksocket(mode, socket.SOCK_STREAM)
            sock.set_proxy(*parsed)
            sock.settimeout(timeout)

            if local_addr is not None:
                sock.bind(local_addr)

            # Actual TCP connection and negotiation performed here.
            await asyncio.wait_for(
                helpers.get_running_loop().sock_connect(sock=sock, address=address),
                timeout=timeout
            )

            sock.setblocking(False)

        return sock

    async def _connect(self, timeout=None, ssl=None):
        if self._local_addr is not None:
            # NOTE: If port is not specified, we use 0 port
            # to notify the OS that port should be chosen randomly
            # from the available ones.
            if isinstance(self._local_addr, tuple) and len(self._local_addr) == 2:
                local_addr = self._local_addr
            elif isinstance(self._local_addr, str):
                local_addr = (self._local_addr, 0)
            else:
                raise ValueError("Unknown local address format: {}".format(self._local_addr))
        else:
            local_addr = None

        if not self._proxy:
            self._reader, self._writer = await asyncio.wait_for(
                asyncio.open_connection(
                    host=self._ip,
                    port=self._port,
                    ssl=ssl,
                    local_addr=local_addr
                ), timeout=timeout)
        else:
            # Proxy setup, connection and negotiation is performed here.
            sock = await self._proxy_connect(
                timeout=timeout,
                local_addr=local_addr
            )

            # Wrap socket in SSL context (if provided)
            if ssl:
                sock = self._wrap_socket_ssl(sock)

            self._reader, self._writer = await asyncio.open_connection(sock=sock)

        self._codec = self.packet_codec(self)
        self._init_conn()
        await self._writer.drain()

    async def connect(self, timeout=None, ssl=None):
        """
        Establishes a connection with the server.
        """
        await self._connect(timeout=timeout, ssl=ssl)
        self._connected = True

        loop = helpers.get_running_loop()
        self._send_task = loop.create_task(self._send_loop())
        self._recv_task = loop.create_task(self._recv_loop())

    async def disconnect(self):
        """
        Disconnects from the server, and clears
        pending outgoing and incoming messages.
        """
        if not self._connected:
            return

        self._connected = False

        await helpers._cancel(
            self._log,
            send_task=self._send_task,
            recv_task=self._recv_task
        )

        if self._writer:
            self._writer.close()
            if sys.version_info >= (3, 7):
                try:
                    await asyncio.wait_for(self._writer.wait_closed(), timeout=10)
                except asyncio.TimeoutError:
                    # See issue #3917. For some users, this line was hanging indefinitely.
                    # The hard timeout is not ideal (connection won't be properly closed),
                    # but the code will at least be able to proceed.
                    self._log.warning('Graceful disconnection timed out, forcibly ignoring cleanup')
                except Exception as e:
                    # Disconnecting should never raise. Seen:
                    # * OSError: No route to host and
                    # * OSError: [Errno 32] Broken pipe
                    # * ConnectionResetError
                    self._log.info('%s during disconnect: %s', type(e), e)

    def send(self, data):
        """
        Sends a packet of data through this connection mode.

        This method returns a coroutine.
        """
        if not self._connected:
            raise ConnectionError('Not connected')

        return self._send_queue.put(data)

    async def recv(self):
        """
        Receives a packet of data through this connection mode.

        This method returns a coroutine.
        """
        while self._connected:
            result, err = await self._recv_queue.get()
            if err:
                raise err
            if result:
                return result

        raise ConnectionError('Not connected')

    async def _send_loop(self):
        """
        This loop is constantly popping items off the queue to send them.
        """
        try:
            while self._connected:
                self._send(await self._send_queue.get())
                await self._writer.drain()
        except asyncio.CancelledError:
            pass
        except Exception as e:
            if isinstance(e, IOError):
                self._log.info('The server closed the connection while sending')
            else:
                self._log.exception('Unexpected exception in the send loop')

            await self.disconnect()

    async def _recv_loop(self):
        """
        This loop is constantly putting items on the queue as they're read.
        """
        try:
            while self._connected:
                try:
                    data = await self._recv()
                except asyncio.CancelledError:
                    break
                except (IOError, asyncio.IncompleteReadError) as e:
                    self._log.warning('Server closed the connection: %s', e)
                    await self._recv_queue.put((None, e))
                    await self.disconnect()
                except InvalidChecksumError as e:
                    self._log.warning('Server response had invalid checksum: %s', e)
                    await self._recv_queue.put((None, e))
                except InvalidBufferError as e:
                    self._log.warning('Server response had invalid buffer: %s', e)
                    await self._recv_queue.put((None, e))
                except Exception as e:
                    self._log.exception('Unexpected exception in the receive loop')
                    await self._recv_queue.put((None, e))
                    await self.disconnect()
                else:
                    await self._recv_queue.put((data, None))
        finally:
            await self.disconnect()

    def _init_conn(self):
        """
        This method will be called after `connect` is called.
        After this method finishes, the writer will be drained.

        Subclasses should make use of this if they need to send
        data to Telegram to indicate which connection mode will
        be used.
        """
        if self._codec.tag:
            self._writer.write(self._codec.tag)

    def _send(self, data):
        self._writer.write(self._codec.encode_packet(data))

    async def _recv(self):
        return await self._codec.read_packet(self._reader)

    def __str__(self):
        return '{}:{}/{}'.format(
            self._ip, self._port,
            self.__class__.__name__.replace('Connection', '')
        )


class ObfuscatedConnection(Connection):
    """
    Base class for "obfuscated" connections ("obfuscated2", "mtproto proxy")
    """
    """
    This attribute should be redefined by subclasses
    """
    obfuscated_io = None

    def _init_conn(self):
        self._obfuscation = self.obfuscated_io(self)
        self._writer.write(self._obfuscation.header)

    def _send(self, data):
        self._obfuscation.write(self._codec.encode_packet(data))

    async def _recv(self):
        return await self._codec.read_packet(self._obfuscation)


class PacketCodec(abc.ABC):
    """
    Base class for packet codecs
    """

    """
    This attribute should be re-defined by subclass to define if some
    "magic bytes" should be sent to server right after connection is made to
    signal which protocol will be used
    """
    tag = None

    def __init__(self, connection):
        """
        Codec is created when connection is just made.
        """
        self._conn = connection

    @abc.abstractmethod
    def encode_packet(self, data):
        """
        Encodes single packet and returns encoded bytes.
        """
        raise NotImplementedError

    @abc.abstractmethod
    async def read_packet(self, reader):
        """
        Reads single packet from `reader` object that should have
        `readexactly(n)` method.
        """
        raise NotImplementedError
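
Aside: the `PacketCodec` contract above is small, just `encode_packet` and an async `read_packet`. A minimal sketch of a hypothetical codec (not one Telethon ships) that frames packets with a 4-byte big-endian length prefix, assuming the module path shown in this diff:

import struct

from telethon.network.connection.connection import PacketCodec


class LengthPrefixedCodec(PacketCodec):
    """Hypothetical codec: 4-byte big-endian length prefix, then payload."""
    tag = None  # no magic bytes are sent after connecting

    def encode_packet(self, data):
        return struct.pack('>I', len(data)) + data

    async def read_packet(self, reader):
        # `reader` only needs a `readexactly(n)` method, per the base class.
        length = struct.unpack('>I', await reader.readexactly(4))[0]
        return await reader.readexactly(length)
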
39
.venv2/Lib/site-packages/telethon/network/connection/http.py
Normal file
@@ -0,0 +1,39 @@
import asyncio

from .connection import Connection, PacketCodec


SSL_PORT = 443


class HttpPacketCodec(PacketCodec):
    tag = None
    obfuscate_tag = None

    def encode_packet(self, data):
        return ('POST /api HTTP/1.1\r\n'
                'Host: {}:{}\r\n'
                'Content-Type: application/x-www-form-urlencoded\r\n'
                'Connection: keep-alive\r\n'
                'Keep-Alive: timeout=100000, max=10000000\r\n'
                'Content-Length: {}\r\n\r\n'
                .format(self._conn._ip, self._conn._port, len(data))
                .encode('ascii') + data)

    async def read_packet(self, reader):
        while True:
            line = await reader.readline()
            # Slice to keep a bytes-to-bytes comparison (line[-1] is an int)
            if not line or line[-1:] != b'\n':
                raise asyncio.IncompleteReadError(line, None)

            if line.lower().startswith(b'content-length: '):
                await reader.readexactly(2)
                length = int(line[16:-2])
                return await reader.readexactly(length)


class ConnectionHttp(Connection):
    packet_codec = HttpPacketCodec

    async def connect(self, timeout=None, ssl=None):
        await super().connect(timeout=timeout, ssl=self._port == SSL_PORT)
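
Aside: `read_packet` skips lines until it sees `Content-Length`, consumes the blank line that ends the headers (`readexactly(2)` eats its `\r\n`), and then reads exactly that many body bytes. A self-contained sketch of those same parsing steps, feeding a canned response through an `asyncio.StreamReader`:

import asyncio

async def demo():
    reader = asyncio.StreamReader()
    reader.feed_data(b'HTTP/1.1 200 OK\r\n'
                     b'Content-Length: 5\r\n'
                     b'\r\n'
                     b'hello')
    reader.feed_eof()

    # Same steps as HttpPacketCodec.read_packet:
    while True:
        line = await reader.readline()
        if line.lower().startswith(b'content-length: '):
            await reader.readexactly(2)     # skip the blank header-ending line
            length = int(line[16:-2])       # strip the prefix and trailing \r\n
            body = await reader.readexactly(length)
            break
    assert body == b'hello'

asyncio.run(demo())
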
33
.venv2/Lib/site-packages/telethon/network/connection/tcpabridged.py
Normal file
@@ -0,0 +1,33 @@
import struct

from .connection import Connection, PacketCodec


class AbridgedPacketCodec(PacketCodec):
    tag = b'\xef'
    obfuscate_tag = b'\xef\xef\xef\xef'

    def encode_packet(self, data):
        length = len(data) >> 2
        if length < 127:
            length = struct.pack('B', length)
        else:
            length = b'\x7f' + int.to_bytes(length, 3, 'little')
        return length + data

    async def read_packet(self, reader):
        length = struct.unpack('<B', await reader.readexactly(1))[0]
        if length >= 127:
            length = struct.unpack(
                '<i', await reader.readexactly(3) + b'\0')[0]

        return await reader.readexactly(length << 2)


class ConnectionTcpAbridged(Connection):
    """
    This is the mode with the lowest overhead, as it will
    only require 1 byte if the packet length is less than
    508 bytes (127 << 2, which is very common).
    """
    packet_codec = AbridgedPacketCodec
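
Aside: abridged lengths are counted in 4-byte words. Below 127 words (508 bytes) the header is a single byte; above, it is 0x7f plus the word count as 3 little-endian bytes. Two quick checks with made-up payload sizes (the codec never touches its connection in `encode_packet`, so `None` is passed):

codec = AbridgedPacketCodec(None)

# 128-byte payload -> 32 words -> single header byte 0x20
assert codec.encode_packet(bytes(128))[:1] == b'\x20'

# 1024-byte payload -> 256 words -> b'\x7f' + 256 as 3 little-endian bytes
assert codec.encode_packet(bytes(1024))[:4] == b'\x7f\x00\x01\x00'
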
55
.venv2/Lib/site-packages/telethon/network/connection/tcpfull.py
Normal file
@@ -0,0 +1,55 @@
import struct
from zlib import crc32

from .connection import Connection, PacketCodec
from ...errors import InvalidChecksumError, InvalidBufferError


class FullPacketCodec(PacketCodec):
    tag = None

    def __init__(self, connection):
        super().__init__(connection)
        self._send_counter = 0  # Important or Telegram won't reply

    def encode_packet(self, data):
        # https://core.telegram.org/mtproto#tcp-transport
        # total length, sequence number, packet and checksum (CRC32)
        length = len(data) + 12
        data = struct.pack('<ii', length, self._send_counter) + data
        crc = struct.pack('<I', crc32(data))
        self._send_counter += 1
        return data + crc

    async def read_packet(self, reader):
        packet_len_seq = await reader.readexactly(8)  # 4 and 4
        packet_len, seq = struct.unpack('<ii', packet_len_seq)
        if packet_len < 0 and seq < 0:
            # It has been observed that the length and seq can be -429,
            # followed by the body of 4 bytes also being -429.
            # See https://github.com/LonamiWebs/Telethon/issues/4042.
            body = await reader.readexactly(4)
            raise InvalidBufferError(body)
        elif packet_len < 8:
            # Currently unknown why packet_len may be less than 8 but not negative.
            # Attempting to `readexactly` with less than 0 fails without saying what
            # the number was which is less helpful.
            raise InvalidBufferError(packet_len_seq)

        body = await reader.readexactly(packet_len - 8)
        checksum = struct.unpack('<I', body[-4:])[0]
        body = body[:-4]

        valid_checksum = crc32(packet_len_seq + body)
        if checksum != valid_checksum:
            raise InvalidChecksumError(checksum, valid_checksum)

        return body


class ConnectionTcpFull(Connection):
    """
    Default Telegram mode. Sends 12 additional bytes and
    needs to calculate the CRC value of the packet itself.
    """
    packet_codec = FullPacketCodec
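
Aside: the 12 bytes of overhead are 4 for the total length, 4 for the sequence number and 4 for the CRC32, and the checksum covers length + seq + payload. A worked frame for a made-up 8-byte payload, reproducing the codec's arithmetic for sequence number 0:

import struct
from zlib import crc32

payload = b'\x01\x02\x03\x04\x05\x06\x07\x08'
seq = 0

length = len(payload) + 12                       # 8 + 12 = 20 bytes total
head = struct.pack('<ii', length, seq) + payload
frame = head + struct.pack('<I', crc32(head))    # checksum covers len+seq+body

assert len(frame) == length
assert FullPacketCodec(None).encode_packet(payload) == frame
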
46
.venv2/Lib/site-packages/telethon/network/connection/tcpintermediate.py
Normal file
@@ -0,0 +1,46 @@
import struct
import random
import os

from .connection import Connection, PacketCodec


class IntermediatePacketCodec(PacketCodec):
    tag = b'\xee\xee\xee\xee'
    obfuscate_tag = tag

    def encode_packet(self, data):
        return struct.pack('<i', len(data)) + data

    async def read_packet(self, reader):
        length = struct.unpack('<i', await reader.readexactly(4))[0]
        return await reader.readexactly(length)


class RandomizedIntermediatePacketCodec(IntermediatePacketCodec):
    """
    Data packets are aligned to 4 bytes. This codec adds 0 to 3 random
    padding bytes, which are ignored by the decoder.
    """
    tag = None
    obfuscate_tag = b'\xdd\xdd\xdd\xdd'

    def encode_packet(self, data):
        pad_size = random.randint(0, 3)
        padding = os.urandom(pad_size)
        return super().encode_packet(data + padding)

    async def read_packet(self, reader):
        packet_with_padding = await super().read_packet(reader)
        pad_size = len(packet_with_padding) % 4
        if pad_size > 0:
            return packet_with_padding[:-pad_size]
        return packet_with_padding


class ConnectionTcpIntermediate(Connection):
    """
    Intermediate mode between `ConnectionTcpFull` and `ConnectionTcpAbridged`.
    Always sends 4 extra bytes for the packet length.
    """
    packet_codec = IntermediatePacketCodec
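
Aside: the decoder can strip the padding without any pad-length field because MTProto payloads are always a multiple of 4 bytes, so `len(packet) % 4` is exactly the pad size. A round-trip sketch using a made-up 16-byte payload:

import struct

codec = RandomizedIntermediatePacketCodec(None)

payload = bytes(16)                      # MTProto payloads are 4-byte aligned
packet = codec.encode_packet(payload)    # adds 0-3 random trailing bytes

length = struct.unpack('<i', packet[:4])[0]
body = packet[4:4 + length]
pad = len(body) % 4                      # works because len(payload) % 4 == 0
assert (body[:-pad] if pad else body) == payload
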
152
.venv2/Lib/site-packages/telethon/network/connection/tcpmtproxy.py
Normal file
@@ -0,0 +1,152 @@
import asyncio
import hashlib
import os

from .connection import ObfuscatedConnection
from .tcpabridged import AbridgedPacketCodec
from .tcpintermediate import (
    IntermediatePacketCodec,
    RandomizedIntermediatePacketCodec
)

from ...crypto import AESModeCTR


class MTProxyIO:
    """
    It's very similar to tcpobfuscated.ObfuscatedIO, but the way
    encryption keys, protocol tag and dc_id are encoded is different.
    """
    header = None

    def __init__(self, connection):
        self._reader = connection._reader
        self._writer = connection._writer

        (self.header,
         self._encrypt,
         self._decrypt) = self.init_header(
            connection._secret, connection._dc_id, connection.packet_codec)

    @staticmethod
    def init_header(secret, dc_id, packet_codec):
        # Validate
        is_dd = (len(secret) == 17) and (secret[0] == 0xDD)
        is_rand_codec = issubclass(
            packet_codec, RandomizedIntermediatePacketCodec)
        if is_dd and not is_rand_codec:
            raise ValueError(
                "Only RandomizedIntermediate can be used with dd-secrets")
        secret = secret[1:] if is_dd else secret
        if len(secret) != 16:
            raise ValueError(
                "MTProxy secret must be a hex-string representing 16 bytes")

        # Obfuscated messages secrets cannot start with any of these
        keywords = (b'PVrG', b'GET ', b'POST', b'\xee\xee\xee\xee')
        while True:
            random = os.urandom(64)
            if (random[0] != 0xef and
                    random[:4] not in keywords and
                    random[4:8] != b'\0\0\0\0'):
                break

        random = bytearray(random)
        random_reversed = random[55:7:-1]  # Reversed (8, len=48)

        # Encryption has "continuous buffer" enabled
        encrypt_key = hashlib.sha256(
            bytes(random[8:40]) + secret).digest()
        encrypt_iv = bytes(random[40:56])
        decrypt_key = hashlib.sha256(
            bytes(random_reversed[:32]) + secret).digest()
        decrypt_iv = bytes(random_reversed[32:48])

        encryptor = AESModeCTR(encrypt_key, encrypt_iv)
        decryptor = AESModeCTR(decrypt_key, decrypt_iv)

        random[56:60] = packet_codec.obfuscate_tag

        dc_id_bytes = dc_id.to_bytes(2, "little", signed=True)
        random = random[:60] + dc_id_bytes + random[62:]
        random[56:64] = encryptor.encrypt(bytes(random))[56:64]
        return (random, encryptor, decryptor)

    async def readexactly(self, n):
        return self._decrypt.encrypt(await self._reader.readexactly(n))

    def write(self, data):
        self._writer.write(self._encrypt.encrypt(data))


class TcpMTProxy(ObfuscatedConnection):
    """
    Connector which allows user to connect to the Telegram via proxy servers
    commonly known as MTProxy.
    Implemented very ugly due to the leaky abstractions in Telethon networking
    classes that should be refactored later (TODO).

    .. warning::

        The support for TcpMTProxy classes is **EXPERIMENTAL** and prone to
        be changed. You shouldn't be using this class yet.
    """
    packet_codec = None
    obfuscated_io = MTProxyIO

    # noinspection PyUnusedLocal
    def __init__(self, ip, port, dc_id, *, loggers, proxy=None, local_addr=None):
        # connect to proxy's host and port instead of telegram's ones
        proxy_host, proxy_port = self.address_info(proxy)
        self._secret = bytes.fromhex(proxy[2])
        super().__init__(
            proxy_host, proxy_port, dc_id, loggers=loggers)

    async def _connect(self, timeout=None, ssl=None):
        await super()._connect(timeout=timeout, ssl=ssl)

        # Wait for EOF for 2 seconds (or if _wait_for_data's definition
        # is missing or different, just sleep for 2 seconds). This way
        # we give the proxy a chance to close the connection if the current
        # codec (which the proxy detects with the data we sent) cannot
        # be used for this proxy. This is a work around for #1134.
        # TODO Sleeping for N seconds may not be the best solution
        # TODO This fix could be welcome for HTTP proxies as well
        try:
            await asyncio.wait_for(self._reader._wait_for_data('proxy'), 2)
        except asyncio.TimeoutError:
            pass
        except Exception:
            await asyncio.sleep(2)

        if self._reader.at_eof():
            await self.disconnect()
            raise ConnectionError(
                'Proxy closed the connection after sending initial payload')

    @staticmethod
    def address_info(proxy_info):
        if proxy_info is None:
            raise ValueError("No proxy info specified for MTProxy connection")
        return proxy_info[:2]


class ConnectionTcpMTProxyAbridged(TcpMTProxy):
    """
    Connect to proxy using abridged protocol
    """
    packet_codec = AbridgedPacketCodec


class ConnectionTcpMTProxyIntermediate(TcpMTProxy):
    """
    Connect to proxy using intermediate protocol
    """
    packet_codec = IntermediatePacketCodec


class ConnectionTcpMTProxyRandomizedIntermediate(TcpMTProxy):
    """
    Connect to proxy using randomized intermediate protocol (dd-secrets)
    """
    packet_codec = RandomizedIntermediatePacketCodec
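
Aside: MTProxy secrets arrive as hex strings; once decoded, a leading 0xdd byte marks a "dd-secret" (randomized-padding mode), leaving 16 key bytes either way. A small sketch mirroring the validation in `init_header` (the secret value below is made up):

# Hypothetical dd-secret: 17 bytes once decoded (0xdd marker + 16 key bytes)
secret = bytes.fromhex('dd00112233445566778899aabbccddeeff')

is_dd = len(secret) == 17 and secret[0] == 0xDD
key = secret[1:] if is_dd else secret

assert is_dd and len(key) == 16
# Per init_header, dd-secrets require ConnectionTcpMTProxyRandomizedIntermediate.
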
62
.venv2/Lib/site-packages/telethon/network/connection/tcpobfuscated.py
Normal file
@@ -0,0 +1,62 @@
import os

from .tcpabridged import AbridgedPacketCodec
from .connection import ObfuscatedConnection

from ...crypto import AESModeCTR


class ObfuscatedIO:
    header = None

    def __init__(self, connection):
        self._reader = connection._reader
        self._writer = connection._writer

        (self.header,
         self._encrypt,
         self._decrypt) = self.init_header(connection.packet_codec)

    @staticmethod
    def init_header(packet_codec):
        # Obfuscated messages secrets cannot start with any of these
        keywords = (b'PVrG', b'GET ', b'POST', b'\xee\xee\xee\xee')
        while True:
            random = os.urandom(64)
            if (random[0] != 0xef and
                    random[:4] not in keywords and
                    random[4:8] != b'\0\0\0\0'):
                break

        random = bytearray(random)
        random_reversed = random[55:7:-1]  # Reversed (8, len=48)

        # Encryption has "continuous buffer" enabled
        encrypt_key = bytes(random[8:40])
        encrypt_iv = bytes(random[40:56])
        decrypt_key = bytes(random_reversed[:32])
        decrypt_iv = bytes(random_reversed[32:48])

        encryptor = AESModeCTR(encrypt_key, encrypt_iv)
        decryptor = AESModeCTR(decrypt_key, decrypt_iv)

        random[56:60] = packet_codec.obfuscate_tag
        random[56:64] = encryptor.encrypt(bytes(random))[56:64]
        return (random, encryptor, decryptor)

    async def readexactly(self, n):
        return self._decrypt.encrypt(await self._reader.readexactly(n))

    def write(self, data):
        self._writer.write(self._encrypt.encrypt(data))


class ConnectionTcpObfuscated(ObfuscatedConnection):
    """
    Mode that Telegram defines as "obfuscated2". Encodes the packet
    just like `ConnectionTcpAbridged`, but encrypts every message with
    a randomly generated key using the AES-CTR mode so the packets are
    harder to discern.
    """
    obfuscated_io = ObfuscatedIO
    packet_codec = AbridgedPacketCodec
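
Aside: the `random[55:7:-1]` slice takes header bytes 8 through 55 in reverse order, so the decrypt key/IV come from the byte-reversed mirror of the encrypt key/IV region, exactly as the "Reversed (8, len=48)" comment says. A quick verification of that slice arithmetic:

buf = bytearray(range(64))            # stand-in for the 64 random header bytes

reversed_part = buf[55:7:-1]          # indices 55, 54, ..., 8
assert len(reversed_part) == 48
assert reversed_part == bytes(reversed(buf[8:56]))
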
56
.venv2/Lib/site-packages/telethon/network/mtprotoplainsender.py
Normal file
@@ -0,0 +1,56 @@
"""
This module contains the class used to communicate with Telegram's servers
in plain text, when no authorization key has been created yet.
"""
import struct

from .mtprotostate import MTProtoState
from ..errors import InvalidBufferError
from ..extensions import BinaryReader


class MTProtoPlainSender:
    """
    MTProto Mobile Protocol plain sender
    (https://core.telegram.org/mtproto/description#unencrypted-messages)
    """
    def __init__(self, connection, *, loggers):
        """
        Initializes the MTProto plain sender.

        :param connection: the Connection to be used.
        """
        self._state = MTProtoState(auth_key=None, loggers=loggers)
        self._connection = connection

    async def send(self, request):
        """
        Sends and receives the result for the given request.
        """
        body = bytes(request)
        msg_id = self._state._get_new_msg_id()
        await self._connection.send(
            struct.pack('<qqi', 0, msg_id, len(body)) + body
        )

        body = await self._connection.recv()
        if len(body) < 8:
            raise InvalidBufferError(body)

        with BinaryReader(body) as reader:
            auth_key_id = reader.read_long()
            assert auth_key_id == 0, 'Bad auth_key_id'

            msg_id = reader.read_long()
            assert msg_id != 0, 'Bad msg_id'
            # ^ We should make sure that the read ``msg_id`` is greater
            # than our own ``msg_id``. However, under some circumstances
            # (bad system clock/working behind proxies) this seems to not
            # be the case, which would cause endless assertion errors.

            length = reader.read_int()
            assert length > 0, 'Bad length'
            # We could read length bytes and use those in a new reader to read
            # the next TLObject without including the padding, but since the
            # reader isn't used for anything else after this, it's unnecessary.
            return reader.tgread_object()
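
Aside: an unencrypted MTProto message is just auth_key_id (8 bytes, zero), msg_id (8 bytes) and length (4 bytes) followed by the body, which is what the `struct.pack('<qqi', ...)` call builds. A round-trip sketch with a made-up msg_id and body:

import struct

msg_id = 1234567890            # placeholder; real ids come from MTProtoState
body = b'\xde\xad\xbe\xef'     # placeholder request bytes

packet = struct.pack('<qqi', 0, msg_id, len(body)) + body

auth_key_id, got_msg_id, length = struct.unpack('<qqi', packet[:20])
assert auth_key_id == 0 and got_msg_id == msg_id
assert packet[20:20 + length] == body
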
914
.venv2/Lib/site-packages/telethon/network/mtprotosender.py
Normal file
914
.venv2/Lib/site-packages/telethon/network/mtprotosender.py
Normal file
@@ -0,0 +1,914 @@
|
||||
import asyncio
|
||||
import collections
|
||||
import struct
|
||||
import datetime
|
||||
import time
|
||||
|
||||
from . import authenticator
|
||||
from ..extensions.messagepacker import MessagePacker
|
||||
from .mtprotoplainsender import MTProtoPlainSender
|
||||
from .requeststate import RequestState
|
||||
from .mtprotostate import MTProtoState
|
||||
from ..tl.tlobject import TLRequest
|
||||
from .. import helpers, utils
|
||||
from ..errors import (
|
||||
BadMessageError, InvalidBufferError, AuthKeyNotFound, SecurityError,
|
||||
TypeNotFoundError, rpc_message_to_error
|
||||
)
|
||||
from ..extensions import BinaryReader
|
||||
from ..tl.core import RpcResult, MessageContainer, GzipPacked
|
||||
from ..tl.functions.auth import LogOutRequest
|
||||
from ..tl.functions import PingRequest, DestroySessionRequest, DestroyAuthKeyRequest
|
||||
from ..tl.types import (
|
||||
MsgsAck, Pong, BadServerSalt, BadMsgNotification, FutureSalts,
|
||||
MsgNewDetailedInfo, NewSessionCreated, MsgDetailedInfo, MsgsStateReq,
|
||||
MsgsStateInfo, MsgsAllInfo, MsgResendReq, upload, DestroySessionOk, DestroySessionNone,
|
||||
DestroyAuthKeyOk, DestroyAuthKeyNone, DestroyAuthKeyFail
|
||||
)
|
||||
from ..tl import types as _tl
|
||||
from ..crypto import AuthKey
|
||||
from ..helpers import retry_range
|
||||
|
||||
|
||||
class MTProtoSender:
|
||||
"""
|
||||
MTProto Mobile Protocol sender
|
||||
(https://core.telegram.org/mtproto/description).
|
||||
|
||||
This class is responsible for wrapping requests into `TLMessage`'s,
|
||||
sending them over the network and receiving them in a safe manner.
|
||||
|
||||
Automatic reconnection due to temporary network issues is a concern
|
||||
for this class as well, including retry of messages that could not
|
||||
be sent successfully.
|
||||
|
||||
A new authorization key will be generated on connection if no other
|
||||
key exists yet.
|
||||
"""
|
||||
def __init__(self, auth_key, *, loggers,
|
||||
retries=5, delay=1, auto_reconnect=True, connect_timeout=None,
|
||||
auth_key_callback=None,
|
||||
updates_queue=None, auto_reconnect_callback=None):
|
||||
self._connection = None
|
||||
self._loggers = loggers
|
||||
self._log = loggers[__name__]
|
||||
self._retries = retries
|
||||
self._delay = delay
|
||||
self._auto_reconnect = auto_reconnect
|
||||
self._connect_timeout = connect_timeout
|
||||
self._auth_key_callback = auth_key_callback
|
||||
self._updates_queue = updates_queue
|
||||
self._auto_reconnect_callback = auto_reconnect_callback
|
||||
self._connect_lock = asyncio.Lock()
|
||||
self._ping = None
|
||||
|
||||
# Whether the user has explicitly connected or disconnected.
|
||||
#
|
||||
# If a disconnection happens for any other reason and it
|
||||
# was *not* user action then the pending messages won't
|
||||
# be cleared but on explicit user disconnection all the
|
||||
# pending futures should be cancelled.
|
||||
self._user_connected = False
|
||||
self._reconnecting = False
|
||||
self._disconnected = helpers.get_running_loop().create_future()
|
||||
self._disconnected.set_result(None)
|
||||
|
||||
# We need to join the loops upon disconnection
|
||||
self._send_loop_handle = None
|
||||
self._recv_loop_handle = None
|
||||
|
||||
# Preserving the references of the AuthKey and state is important
|
||||
self.auth_key = auth_key or AuthKey(None)
|
||||
self._state = MTProtoState(self.auth_key, loggers=self._loggers)
|
||||
|
||||
# Outgoing messages are put in a queue and sent in a batch.
|
||||
# Note that here we're also storing their ``_RequestState``.
|
||||
self._send_queue = MessagePacker(self._state, loggers=self._loggers)
|
||||
|
||||
# Sent states are remembered until a response is received.
|
||||
self._pending_state = {}
|
||||
|
||||
# Responses must be acknowledged, and we can also batch these.
|
||||
self._pending_ack = set()
|
||||
|
||||
# Similar to pending_messages but only for the last acknowledges.
|
||||
# These can't go in pending_messages because no acknowledge for them
|
||||
# is received, but we may still need to resend their state on bad salts.
|
||||
self._last_acks = collections.deque(maxlen=10)
|
||||
|
||||
# Jump table from response ID to method that handles it
|
||||
self._handlers = {
|
||||
RpcResult.CONSTRUCTOR_ID: self._handle_rpc_result,
|
||||
MessageContainer.CONSTRUCTOR_ID: self._handle_container,
|
||||
GzipPacked.CONSTRUCTOR_ID: self._handle_gzip_packed,
|
||||
Pong.CONSTRUCTOR_ID: self._handle_pong,
|
||||
BadServerSalt.CONSTRUCTOR_ID: self._handle_bad_server_salt,
|
||||
BadMsgNotification.CONSTRUCTOR_ID: self._handle_bad_notification,
|
||||
MsgDetailedInfo.CONSTRUCTOR_ID: self._handle_detailed_info,
|
||||
MsgNewDetailedInfo.CONSTRUCTOR_ID: self._handle_new_detailed_info,
|
||||
NewSessionCreated.CONSTRUCTOR_ID: self._handle_new_session_created,
|
||||
MsgsAck.CONSTRUCTOR_ID: self._handle_ack,
|
||||
FutureSalts.CONSTRUCTOR_ID: self._handle_future_salts,
|
||||
MsgsStateReq.CONSTRUCTOR_ID: self._handle_state_forgotten,
|
||||
MsgResendReq.CONSTRUCTOR_ID: self._handle_state_forgotten,
|
||||
MsgsAllInfo.CONSTRUCTOR_ID: self._handle_msg_all,
|
||||
DestroySessionOk.CONSTRUCTOR_ID: self._handle_destroy_session,
|
||||
DestroySessionNone.CONSTRUCTOR_ID: self._handle_destroy_session,
|
||||
DestroyAuthKeyOk.CONSTRUCTOR_ID: self._handle_destroy_auth_key,
|
||||
DestroyAuthKeyNone.CONSTRUCTOR_ID: self._handle_destroy_auth_key,
|
||||
DestroyAuthKeyFail.CONSTRUCTOR_ID: self._handle_destroy_auth_key,
|
||||
}
|
||||
|
||||
# Public API
|
||||
|
||||
async def connect(self, connection):
|
||||
"""
|
||||
Connects to the specified given connection using the given auth key.
|
||||
"""
|
||||
async with self._connect_lock:
|
||||
if self._user_connected:
|
||||
self._log.info('User is already connected!')
|
||||
return False
|
||||
|
||||
self._connection = connection
|
||||
await self._connect()
|
||||
self._user_connected = True
|
||||
return True
|
||||
|
||||
def is_connected(self):
|
||||
return self._user_connected
|
||||
|
||||
def _transport_connected(self):
|
||||
return (
|
||||
not self._reconnecting
|
||||
and self._connection is not None
|
||||
and self._connection._connected
|
||||
)
|
||||
|
||||
async def disconnect(self):
|
||||
"""
|
||||
Cleanly disconnects the instance from the network, cancels
|
||||
all pending requests, and closes the send and receive loops.
|
||||
"""
|
||||
await self._disconnect()
|
||||
|
||||
def send(self, request, ordered=False):
|
||||
"""
|
||||
This method enqueues the given request to be sent. Its send
|
||||
state will be saved until a response arrives, and a ``Future``
|
||||
that will be resolved when the response arrives will be returned:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
async def method():
|
||||
# Sending (enqueued for the send loop)
|
||||
future = sender.send(request)
|
||||
# Receiving (waits for the receive loop to read the result)
|
||||
result = await future
|
||||
|
||||
Designed like this because Telegram may send the response at
|
||||
any point, and it can send other items while one waits for it.
|
||||
Once the response for this future arrives, it is set with the
|
||||
received result, quite similar to how a ``receive()`` call
|
||||
would otherwise work.
|
||||
|
||||
Since the receiving part is "built in" the future, it's
|
||||
impossible to await receive a result that was never sent.
|
||||
"""
|
||||
if not self._user_connected:
|
||||
raise ConnectionError('Cannot send requests while disconnected')
|
||||
|
||||
if not utils.is_list_like(request):
|
||||
try:
|
||||
state = RequestState(request)
|
||||
except struct.error as e:
|
||||
# "struct.error: required argument is not an integer" is not
|
||||
# very helpful; log the request to find out what wasn't int.
|
||||
self._log.error('Request caused struct.error: %s: %s', e, request)
|
||||
raise
|
||||
|
||||
self._send_queue.append(state)
|
||||
return state.future
|
||||
else:
|
||||
states = []
|
||||
futures = []
|
||||
state = None
|
||||
for req in request:
|
||||
try:
|
||||
state = RequestState(req, after=ordered and state)
|
||||
except struct.error as e:
|
||||
self._log.error('Request caused struct.error: %s: %s', e, request)
|
||||
raise
|
||||
|
||||
states.append(state)
|
||||
futures.append(state.future)
|
||||
|
||||
self._send_queue.extend(states)
|
||||
return futures
|
||||
|
||||
@property
|
||||
def disconnected(self):
|
||||
"""
|
||||
Future that resolves when the connection to Telegram
|
||||
ends, either by user action or in the background.
|
||||
|
||||
Note that it may resolve in either a ``ConnectionError``
|
||||
or any other unexpected error that could not be handled.
|
||||
"""
|
||||
return asyncio.shield(self._disconnected)
|
||||
|
||||
# Private methods
|
||||
|
||||
async def _connect(self):
|
||||
"""
|
||||
Performs the actual connection, retrying, generating the
|
||||
authorization key if necessary, and starting the send and
|
||||
receive loops.
|
||||
"""
|
||||
self._log.info('Connecting to %s...', self._connection)
|
||||
|
||||
connected = False
|
||||
|
||||
for attempt in retry_range(self._retries):
|
||||
if not connected:
|
||||
connected = await self._try_connect(attempt)
|
||||
if not connected:
|
||||
continue # skip auth key generation until we're connected
|
||||
|
||||
if not self.auth_key:
|
||||
try:
|
||||
if not await self._try_gen_auth_key(attempt):
|
||||
continue # keep retrying until we have the auth key
|
||||
except (IOError, asyncio.TimeoutError) as e:
|
||||
# Sometimes, specially during user-DC migrations,
|
||||
# Telegram may close the connection during auth_key
|
||||
# generation. If that's the case, we will need to
|
||||
# connect again.
|
||||
self._log.warning('Connection error %d during auth_key gen: %s: %s',
|
||||
attempt, type(e).__name__, e)
|
||||
|
||||
# Whatever the IOError was, make sure to disconnect so we can
|
||||
# reconnect cleanly after.
|
||||
await self._connection.disconnect()
|
||||
connected = False
|
||||
await asyncio.sleep(self._delay)
|
||||
continue # next iteration we will try to reconnect
|
||||
|
||||
break # all steps done, break retry loop
|
||||
else:
|
||||
if not connected:
|
||||
raise ConnectionError('Connection to Telegram failed {} time(s)'.format(self._retries))
|
||||
|
||||
e = ConnectionError('auth_key generation failed {} time(s)'.format(self._retries))
|
||||
await self._disconnect(error=e)
|
||||
raise e
|
||||
|
||||
loop = helpers.get_running_loop()
|
||||
self._log.debug('Starting send loop')
|
||||
self._send_loop_handle = loop.create_task(self._send_loop())
|
||||
|
||||
self._log.debug('Starting receive loop')
|
||||
self._recv_loop_handle = loop.create_task(self._recv_loop())
|
||||
|
||||
# _disconnected only completes after manual disconnection
|
||||
# or errors after which the sender cannot continue such
|
||||
# as failing to reconnect or any unexpected error.
|
||||
if self._disconnected.done():
|
||||
self._disconnected = loop.create_future()
|
||||
|
||||
self._log.info('Connection to %s complete!', self._connection)
|
||||
|
||||
async def _try_connect(self, attempt):
|
||||
try:
|
||||
self._log.debug('Connection attempt %d...', attempt)
|
||||
await self._connection.connect(timeout=self._connect_timeout)
|
||||
self._log.debug('Connection success!')
|
||||
return True
|
||||
except (IOError, asyncio.TimeoutError) as e:
|
||||
self._log.warning('Attempt %d at connecting failed: %s: %s',
|
||||
attempt, type(e).__name__, e)
|
||||
await asyncio.sleep(self._delay)
|
||||
return False
|
||||
|
||||
async def _try_gen_auth_key(self, attempt):
|
||||
plain = MTProtoPlainSender(self._connection, loggers=self._loggers)
|
||||
try:
|
||||
self._log.debug('New auth_key attempt %d...', attempt)
|
||||
self.auth_key.key, self._state.time_offset = \
|
||||
await authenticator.do_authentication(plain)
|
||||
|
||||
# This is *EXTREMELY* important since we don't control
|
||||
# external references to the authorization key, we must
|
||||
# notify whenever we change it. This is crucial when we
|
||||
# switch to different data centers.
|
||||
if self._auth_key_callback:
|
||||
self._auth_key_callback(self.auth_key)
|
||||
|
||||
self._log.debug('auth_key generation success!')
|
||||
return True
|
||||
except (SecurityError, AssertionError) as e:
|
||||
self._log.warning('Attempt %d at new auth_key failed: %s', attempt, e)
|
||||
await asyncio.sleep(self._delay)
|
||||
return False
|
||||
|
||||
async def _disconnect(self, error=None):
|
||||
if self._connection is None:
|
||||
self._log.info('Not disconnecting (already have no connection)')
|
||||
return
|
||||
|
||||
self._log.info('Disconnecting from %s...', self._connection)
|
||||
self._user_connected = False
|
||||
try:
|
||||
self._log.debug('Closing current connection...')
|
||||
await self._connection.disconnect()
|
||||
finally:
|
||||
self._log.debug('Cancelling %d pending message(s)...', len(self._pending_state))
|
||||
for state in self._pending_state.values():
|
||||
if error and not state.future.done():
|
||||
state.future.set_exception(error)
|
||||
else:
|
||||
state.future.cancel()
|
||||
|
||||
self._pending_state.clear()
|
||||
await helpers._cancel(
|
||||
self._log,
|
||||
send_loop_handle=self._send_loop_handle,
|
||||
recv_loop_handle=self._recv_loop_handle
|
||||
)
|
||||
|
||||
self._log.info('Disconnection from %s complete!', self._connection)
|
||||
self._connection = None
|
||||
|
||||
if self._disconnected and not self._disconnected.done():
|
||||
if error:
|
||||
self._disconnected.set_exception(error)
|
||||
else:
|
||||
self._disconnected.set_result(None)
|
||||
|
||||
async def _reconnect(self, last_error):
|
||||
"""
|
||||
Cleanly disconnects and then reconnects.
|
||||
"""
|
||||
self._log.info('Closing current connection to begin reconnect...')
|
||||
await self._connection.disconnect()
|
||||
|
||||
await helpers._cancel(
|
||||
self._log,
|
||||
send_loop_handle=self._send_loop_handle,
|
||||
recv_loop_handle=self._recv_loop_handle
|
||||
)
|
||||
|
||||
# TODO See comment in `_start_reconnect`
|
||||
# Perhaps this should be the last thing to do?
|
||||
# But _connect() creates tasks which may run and,
|
||||
# if they see that reconnecting is True, they will end.
|
||||
# Perhaps that task creation should not belong in connect?
|
||||
self._reconnecting = False
|
||||
|
||||
# Start with a clean state (and thus session ID) to avoid old msgs
|
||||
self._state.reset()
|
||||
|
||||
retries = self._retries if self._auto_reconnect else 0
|
||||
|
||||
attempt = 0
|
||||
ok = True
|
||||
# We're already "retrying" to connect, so we don't want to force retries
|
||||
for attempt in retry_range(retries, force_retry=False):
|
||||
try:
|
||||
await self._connect()
|
||||
except (IOError, asyncio.TimeoutError) as e:
|
||||
last_error = e
|
||||
self._log.info('Failed reconnection attempt %d with %s',
|
||||
attempt, e.__class__.__name__)
|
||||
await asyncio.sleep(self._delay)
|
||||
except BufferError as e:
|
||||
# TODO there should probably only be one place to except all these errors
|
||||
if isinstance(e, InvalidBufferError) and e.code == 404:
|
||||
self._log.info('Server does not know about the current auth key; the session may need to be recreated')
|
||||
last_error = AuthKeyNotFound()
|
||||
ok = False
|
||||
break
|
||||
else:
|
||||
self._log.warning('Invalid buffer %s', e)
|
||||
|
||||
except Exception as e:
|
||||
last_error = e
|
||||
self._log.exception('Unexpected exception reconnecting on '
|
||||
'attempt %d', attempt)
|
||||
|
||||
await asyncio.sleep(self._delay)
|
||||
else:
|
||||
self._send_queue.extend(self._pending_state.values())
|
||||
self._pending_state.clear()
|
||||
|
||||
if self._auto_reconnect_callback:
|
||||
helpers.get_running_loop().create_task(self._auto_reconnect_callback())
|
||||
|
||||
break
|
||||
        else:
            ok = False

        if not ok:
            self._log.error('Automatic reconnection failed %d time(s)', attempt)
            # There may be no error (e.g. automatic reconnection was turned off).
            error = last_error.with_traceback(None) if last_error else None
            await self._disconnect(error=error)

    def _start_reconnect(self, error):
        """Starts a reconnection in the background."""
        if self._user_connected and not self._reconnecting:
            # We set reconnecting to True here and not inside the new task
            # because it may happen that the send/recv loop calls this again
            # while the new task hasn't had a chance to run yet. This race
            # condition puts `self.connection` in a bad state with two calls
            # to its `connect` without disconnecting, so it creates a second
            # receive loop. There can't be two tasks receiving data from
            # the reader, since that causes an error, and the library just
            # gets stuck.
            # TODO It still gets stuck? Investigate where and why.
            self._reconnecting = True
            helpers.get_running_loop().create_task(self._reconnect(error))

    def _keepalive_ping(self, rnd_id):
        """
        Send a keep-alive ping. If a pong for the last ping was not received
        yet, this means we're probably not connected.
        """
        # TODO this is ugly, update loop shouldn't worry about this, sender should
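        # If self._ping is still set from the previous round, no pong ever
        # cleared it (`_handle_pong` resets it to None), so assume the link
        # is dead and reconnect.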
        if self._ping is None:
            self._ping = rnd_id
            self.send(PingRequest(rnd_id))
        else:
            self._start_reconnect(None)

    # Loops

    async def _send_loop(self):
        """
        This loop is responsible for popping items off the send
        queue, encrypting them, and sending them over the network.

        Besides `connect`, only this method ever sends data.
        """
        while self._user_connected and not self._reconnecting:
            if self._pending_ack:
                ack = RequestState(MsgsAck(list(self._pending_ack)))
                self._send_queue.append(ack)
                self._last_acks.append(ack)
                self._pending_ack.clear()
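                # Acks piggyback on the next outgoing batch rather than being
                # sent eagerly; `_last_acks` keeps a copy so `_pop_states` can
                # still resolve a bad notification that references an ack.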

            self._log.debug('Waiting for messages to send...')
            # TODO Wait for the connection send queue to be empty?
            # This means that while it's not empty we can wait for
            # more messages to be added to the send queue.
            batch, data = await self._send_queue.get()

            if not data:
                continue

            self._log.debug('Encrypting %d message(s) in %d bytes for sending',
                            len(batch), len(data))

            data = self._state.encrypt_message_data(data)

            # Whether sending succeeds or not, the popped requests are now
            # pending because they're removed from the queue. If a reconnect
            # occurs, they will be removed from pending state and re-enqueued
            # so even if the network fails they won't be lost. If they were
            # never re-enqueued, the future waiting for a response "locks".
            for state in batch:
                if not isinstance(state, list):
                    if isinstance(state.request, TLRequest):
                        self._pending_state[state.msg_id] = state
                else:
                    for s in state:
                        if isinstance(s.request, TLRequest):
                            self._pending_state[s.msg_id] = s

            try:
                await self._connection.send(data)
            except IOError as e:
                self._log.info('Connection closed while sending data')
                self._start_reconnect(e)
                return

            self._log.debug('Encrypted messages put in a queue to be sent')

    async def _recv_loop(self):
        """
        This loop is responsible for reading all incoming responses
        from the network, decrypting and handling or dispatching them.

        Besides `connect`, only this method ever receives data.
        """
        while self._user_connected and not self._reconnecting:
            self._log.debug('Receiving items from the network...')
            try:
                body = await self._connection.recv()
            except asyncio.CancelledError:
                raise  # bypass except Exception
            except (IOError, asyncio.IncompleteReadError) as e:
                self._log.info('Connection closed while receiving data: %s', e)
                self._start_reconnect(e)
                return
            except InvalidBufferError as e:
                if e.code == 429:
                    self._log.warning('Server indicated flood error at transport level: %s', e)
                    await self._disconnect(error=e)
                else:
                    self._log.exception('Server sent invalid buffer')
                    self._start_reconnect(e)
                return
            except Exception as e:
                self._log.exception('Unhandled error while receiving data')
                self._start_reconnect(e)
                return

            try:
                message = self._state.decrypt_message_data(body)
                if message is None:
                    continue  # this message is to be ignored
            except TypeNotFoundError as e:
                # Received object which we don't know how to deserialize
                self._log.info('Type %08x not found, remaining data %r',
                               e.invalid_constructor_id, e.remaining)
                continue
            except SecurityError as e:
                # A step while decoding had the incorrect data. This message
                # should not be considered safe and it should be ignored.
                self._log.warning('Security error while unpacking a '
                                  'received message: %s', e)
                continue
            except BufferError as e:
                if isinstance(e, InvalidBufferError) and e.code == 404:
                    self._log.info('Server does not know about the current auth key; the session may need to be recreated')
                    await self._disconnect(error=AuthKeyNotFound())
                else:
                    self._log.warning('Invalid buffer %s', e)
                    self._start_reconnect(e)
                return
            except Exception as e:
                self._log.exception('Unhandled error while decrypting data')
                self._start_reconnect(e)
                return

            try:
                await self._process_message(message)
            except Exception:
                self._log.exception('Unhandled error while processing msgs')

    # Response Handlers

    async def _process_message(self, message):
        """
        Adds the given message to the list of messages that must be
        acknowledged and dispatches control to a different ``_handle_*``
        method based on its type.
        """
        self._pending_ack.add(message.msg_id)
        handler = self._handlers.get(message.obj.CONSTRUCTOR_ID,
                                     self._handle_update)
        await handler(message)

    def _pop_states(self, msg_id):
        """
        Pops the states known to match the given ID from pending messages.

        This method should be used when the response isn't specific.
        """
        state = self._pending_state.pop(msg_id, None)
        if state:
            return [state]
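        # The ID may belong to a container rather than a single request (e.g.
        # a bad_server_salt raised for a whole batch), in which case every
        # inner message that was sent in that container must be popped too.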
        to_pop = []
        for state in self._pending_state.values():
            if state.container_id == msg_id:
                to_pop.append(state.msg_id)

        if to_pop:
            return [self._pending_state.pop(x) for x in to_pop]

        for ack in self._last_acks:
            if ack.msg_id == msg_id:
                return [ack]

        return []

    async def _handle_rpc_result(self, message):
        """
        Handles the result for Remote Procedure Calls:

            rpc_result#f35c6d01 req_msg_id:long result:bytes = RpcResult;

        This is where the future results for sent requests are set.
        """
        rpc_result = message.obj
        state = self._pending_state.pop(rpc_result.req_msg_id, None)
        self._log.debug('Handling RPC result for message %d',
                        rpc_result.req_msg_id)

        if not state:
            # TODO We should not get responses to things we never sent
            # However receiving a File() with empty bytes is "common".
            # See #658, #759 and #958. They seem to happen in a container
            # which contains the real response right after.
            #
            # But, it might also happen that we get an *error* for no parent request.
            # If that's the case attempting to read from body which is None would fail with:
            # "BufferError: No more data left to read (need 4, got 0: b''); last read None".
            # This seems to be particularly common for "RpcError(error_code=-500, error_message='No workers running')".
            if rpc_result.error:
                self._log.info('Received error without parent request: %s', rpc_result.error)
            else:
                try:
                    with BinaryReader(rpc_result.body) as reader:
                        if not isinstance(reader.tgread_object(), upload.File):
                            raise ValueError('Not an upload.File')
                except (TypeNotFoundError, ValueError):
                    self._log.info('Received response without parent request: %s', rpc_result.body)
            return

        if rpc_result.error:
            error = rpc_message_to_error(rpc_result.error, state.request)
            self._send_queue.append(
                RequestState(MsgsAck([state.msg_id])))

            if not state.future.cancelled():
                state.future.set_exception(error)
        else:
            try:
                with BinaryReader(rpc_result.body) as reader:
                    result = state.request.read_result(reader)
            except Exception as e:
                # e.g. TypeNotFoundError, should be propagated to caller
                if not state.future.cancelled():
                    state.future.set_exception(e)
            else:
                self._store_own_updates(result)
                if not state.future.cancelled():
                    state.future.set_result(result)

    async def _handle_container(self, message):
        """
        Processes the inner messages of a container with many of them:

            msg_container#73f1f8dc messages:vector<%Message> = MessageContainer;
        """
        self._log.debug('Handling container')
        for inner_message in message.obj.messages:
            await self._process_message(inner_message)

    async def _handle_gzip_packed(self, message):
        """
        Unpacks the data from a gzipped object and processes it:

            gzip_packed#3072cfa1 packed_data:bytes = Object;
        """
        self._log.debug('Handling gzipped data')
        with BinaryReader(message.obj.data) as reader:
            message.obj = reader.tgread_object()
            await self._process_message(message)

    async def _handle_update(self, message):
        try:
            assert message.obj.SUBCLASS_OF_ID == 0x8af52aac  # crc32(b'Updates')
        except AssertionError:
            self._log.warning(
                'Note: %s is not an update, not dispatching it %s',
                message.obj.__class__.__name__,
                message.obj
            )
            return

        self._log.debug('Handling update %s', message.obj.__class__.__name__)
        self._updates_queue.put_nowait(message.obj)

    def _store_own_updates(self, obj, *, _update_ids=frozenset((
        _tl.UpdateShortMessage.CONSTRUCTOR_ID,
        _tl.UpdateShortChatMessage.CONSTRUCTOR_ID,
        _tl.UpdateShort.CONSTRUCTOR_ID,
        _tl.UpdatesCombined.CONSTRUCTOR_ID,
        _tl.Updates.CONSTRUCTOR_ID,
        _tl.UpdateShortSentMessage.CONSTRUCTOR_ID,
    )), _update_like_ids=frozenset((
        _tl.messages.AffectedHistory.CONSTRUCTOR_ID,
        _tl.messages.AffectedMessages.CONSTRUCTOR_ID,
        _tl.messages.AffectedFoundMessages.CONSTRUCTOR_ID,
    ))):
        try:
            if obj.CONSTRUCTOR_ID in _update_ids:
                obj._self_outgoing = True  # flag to only process, but not dispatch these
                self._updates_queue.put_nowait(obj)
            elif obj.CONSTRUCTOR_ID in _update_like_ids:
                # Ugly "hack" (?) - otherwise bots reliably detect gaps when deleting messages.
                #
                # Note: the `date` being `None` is used to check for `updatesTooLong`, so epoch
                # is used instead. It is still not read, because `updateShort` has no `seq`.
                #
                # Some requests, such as `readHistory`, also return these types. But the `pts_count`
                # seems to be zero, so while this will produce some bogus `updateDeleteMessages`,
                # it's still one of the "cleaner" approaches to handling the new `pts`.
                # `updateDeleteMessages` is probably the "least-invasive" update that can be used.
                upd = _tl.UpdateShort(
                    _tl.UpdateDeleteMessages([], obj.pts, obj.pts_count),
                    datetime.datetime(*time.gmtime(0)[:6]).replace(tzinfo=datetime.timezone.utc)
                )
                upd._self_outgoing = True
                self._updates_queue.put_nowait(upd)
        except AttributeError:
            pass

    async def _handle_pong(self, message):
        """
        Handles pong results, which don't come inside a ``rpc_result``
        but are still sent through a request:

            pong#347773c5 msg_id:long ping_id:long = Pong;
        """
        pong = message.obj
        self._log.debug('Handling pong for message %d', pong.msg_id)
        if self._ping == pong.ping_id:
            self._ping = None

        state = self._pending_state.pop(pong.msg_id, None)
        if state:
            state.future.set_result(pong)

    async def _handle_bad_server_salt(self, message):
        """
        Corrects the currently used server salt to use the right value
        before enqueuing the rejected message to be re-sent:

            bad_server_salt#edab447b bad_msg_id:long bad_msg_seqno:int
            error_code:int new_server_salt:long = BadMsgNotification;
        """
        bad_salt = message.obj
        self._log.debug('Handling bad salt for message %d', bad_salt.bad_msg_id)
        self._state.salt = bad_salt.new_server_salt
        states = self._pop_states(bad_salt.bad_msg_id)
        self._send_queue.extend(states)

        self._log.debug('%d message(s) will be resent', len(states))

    async def _handle_bad_notification(self, message):
        """
        Adjusts the current state to be correct based on the
        received bad message notification whenever possible:

            bad_msg_notification#a7eff811 bad_msg_id:long bad_msg_seqno:int
            error_code:int = BadMsgNotification;
        """
        bad_msg = message.obj
        states = self._pop_states(bad_msg.bad_msg_id)

        self._log.debug('Handling bad msg %s', bad_msg)
        if bad_msg.error_code in (16, 17):
            # Sent msg_id too low or too high (respectively).
            # Use the current msg_id to determine the right time offset.
            to = self._state.update_time_offset(
                correct_msg_id=message.msg_id)
            self._log.info('System clock is wrong, set time offset to %ds', to)
        elif bad_msg.error_code == 32:
            # msg_seqno too low, so just pump it up by some "large" amount
            # TODO A better fix would be to start with a new fresh session ID
            self._state._sequence += 64
        elif bad_msg.error_code == 33:
            # msg_seqno too high never seems to happen but just in case
            self._state._sequence -= 16
        else:
            for state in states:
                state.future.set_exception(
                    BadMessageError(state.request, bad_msg.error_code))
            return

        # Messages are to be re-sent once we've corrected the issue
        self._send_queue.extend(states)
        self._log.debug('%d messages will be resent due to bad msg',
                        len(states))

    async def _handle_detailed_info(self, message):
        """
        Updates the current status with the received detailed information:

            msg_detailed_info#276d3ec6 msg_id:long answer_msg_id:long
            bytes:int status:int = MsgDetailedInfo;
        """
        # TODO https://goo.gl/VvpCC6
        msg_id = message.obj.answer_msg_id
        self._log.debug('Handling detailed info for message %d', msg_id)
        self._pending_ack.add(msg_id)

    async def _handle_new_detailed_info(self, message):
        """
        Updates the current status with the received detailed information:

            msg_new_detailed_info#809db6df answer_msg_id:long
            bytes:int status:int = MsgDetailedInfo;
        """
        # TODO https://goo.gl/G7DPsR
        msg_id = message.obj.answer_msg_id
        self._log.debug('Handling new detailed info for message %d', msg_id)
        self._pending_ack.add(msg_id)

    async def _handle_new_session_created(self, message):
        """
        Updates the current status with the received session information:

            new_session_created#9ec20908 first_msg_id:long unique_id:long
            server_salt:long = NewSession;
        """
        # TODO https://goo.gl/LMyN7A
        self._log.debug('Handling new session created')
        self._state.salt = message.obj.server_salt

    async def _handle_ack(self, message):
        """
        Handles a server acknowledge about our messages. Normally
        these can be ignored except in the case of ``auth.logOut``:

            auth.logOut#5717da40 = Bool;

        Telegram doesn't seem to send its result so we need to confirm
        it manually. No other request is known to have this behaviour.

        Since the ID of sent messages consisting of a container is
        never returned (unless on a bad notification), this method
        also removes container messages when any of their inner
        messages are acknowledged.
        """
        ack = message.obj
        self._log.debug('Handling acknowledge for %s', str(ack.msg_ids))
        for msg_id in ack.msg_ids:
            state = self._pending_state.get(msg_id)
            if state and isinstance(state.request, LogOutRequest):
                del self._pending_state[msg_id]
                if not state.future.cancelled():
                    state.future.set_result(True)

    async def _handle_future_salts(self, message):
        """
        Handles future salt results, which don't come inside a
        ``rpc_result`` but are still sent through a request:

            future_salts#ae500895 req_msg_id:long now:int
            salts:vector<future_salt> = FutureSalts;
        """
        # TODO save these salts and automatically adjust to the
        # correct one whenever the salt in use expires.
        self._log.debug('Handling future salts for message %d', message.msg_id)
        state = self._pending_state.pop(message.msg_id, None)
        if state:
            state.future.set_result(message.obj)

    async def _handle_state_forgotten(self, message):
        """
        Handles both :tl:`MsgsStateReq` and :tl:`MsgResendReq` by
        enqueuing a :tl:`MsgsStateInfo` to be sent at a later point.
        """
        self._send_queue.append(RequestState(MsgsStateInfo(
            req_msg_id=message.msg_id, info=chr(1) * len(message.obj.msg_ids)
        )))

    async def _handle_msg_all(self, message):
        """
        Handles :tl:`MsgsAllInfo` by doing nothing (yet).
        """

    async def _handle_destroy_session(self, message):
        """
        Handles both :tl:`DestroySessionOk` and :tl:`DestroySessionNone`.
        It behaves pretty much like handling an RPC result.
        """
        for msg_id, state in self._pending_state.items():
            if isinstance(state.request, DestroySessionRequest)\
                    and state.request.session_id == message.obj.session_id:
                break
        else:
            return

        del self._pending_state[msg_id]
        if not state.future.cancelled():
            state.future.set_result(message.obj)

    async def _handle_destroy_auth_key(self, message):
        """
        Handles :tl:`DestroyAuthKeyFail`, :tl:`DestroyAuthKeyNone`, and :tl:`DestroyAuthKeyOk`.

        :tl:`DestroyAuthKey` is not intended for users to use, but they still
        might, and the response won't come in `rpc_result`, so that's worked
        around here.
        """
        self._log.debug('Handling destroy auth key %s', message.obj)
        for msg_id, state in list(self._pending_state.items()):
            if isinstance(state.request, DestroyAuthKeyRequest):
                del self._pending_state[msg_id]
                if not state.future.cancelled():
                    state.future.set_result(message.obj)

        # If the auth key has been destroyed, that pretty much means the
        # library can't continue as our auth key will no longer be found
        # on the server.
        # Even if the library didn't disconnect, the server would (and then
        # the library would reconnect and learn about the auth key being invalid).
        if isinstance(message.obj, DestroyAuthKeyOk):
            await self._disconnect(error=AuthKeyNotFound())
279
.venv2/Lib/site-packages/telethon/network/mtprotostate.py
Normal file
@@ -0,0 +1,279 @@
import os
import struct
import time
from hashlib import sha256
from collections import deque

from ..crypto import AES
from ..errors import SecurityError, InvalidBufferError
from ..extensions import BinaryReader
from ..tl.core import TLMessage
from ..tl.tlobject import TLRequest
from ..tl.functions import InvokeAfterMsgRequest
from ..tl.core.gzippacked import GzipPacked
from ..tl.types import BadServerSalt, BadMsgNotification


# N is not specified in https://core.telegram.org/mtproto/security_guidelines#checking-msg-id, but 500 is reasonable
MAX_RECENT_MSG_IDS = 500

MSG_TOO_NEW_DELTA = 30
MSG_TOO_OLD_DELTA = 300

# Something must be wrong if we ignore too many messages at the same time
MAX_CONSECUTIVE_IGNORED = 10


class _OpaqueRequest(TLRequest):
    """
    Wraps a serialized request into a type that can be serialized again.
    """
    def __init__(self, data: bytes):
        self.data = data

    def _bytes(self):
        return self.data


class MTProtoState:
    """
    `telethon.network.mtprotosender.MTProtoSender` needs to hold a state
    in order to be able to encrypt and decrypt incoming/outgoing messages,
    as well as generating the message IDs. Instances of this class hold
    together all the required information.

    It doesn't make sense to use `telethon.sessions.abstract.Session` for
    the sender because the sender should *not* be concerned about storing
    this information to disk, as one may create as many senders as they
    desire to any other data center, or some CDN. Using the same session
    for all these is not a good idea as each needs its own authkey, and
    the concept of "copying" sessions with the unnecessary entities or
    updates state for these connections doesn't make sense.

    While it would be possible to have a `MTProtoPlainState` that does no
    encryption so that it was usable through the `MTProtoLayer` and thus
    avoid the need for a `MTProtoPlainSender`, the `MTProtoLayer` is more
    focused on efficiency and this state is also more advanced (since it
    supports gzipping and invoking after other message IDs). There are too
    many methods that would be needed to make it convenient to use for the
    authentication process, at which point the `MTProtoPlainSender` is better.
    """
    def __init__(self, auth_key, loggers):
        self.auth_key = auth_key
        self._log = loggers[__name__]
        self.time_offset = 0
        self.salt = 0

        self.id = self._sequence = self._last_msg_id = None
        self._recent_remote_ids = deque(maxlen=MAX_RECENT_MSG_IDS)
        self._highest_remote_id = 0
        self._ignore_count = 0
        self.reset()

    def reset(self):
        """
        Resets the state.
        """
        # Session IDs can be random on every connection
        self.id = struct.unpack('q', os.urandom(8))[0]
        self._sequence = 0
        self._last_msg_id = 0
        self._recent_remote_ids.clear()
        self._highest_remote_id = 0
        self._ignore_count = 0

    def update_message_id(self, message):
        """
        Updates the message ID to a new one,
        used when the time offset changed.
        """
        message.msg_id = self._get_new_msg_id()

    @staticmethod
    def _calc_key(auth_key, msg_key, client):
        """
        Calculate the key based on Telegram guidelines for MTProto 2,
        specifying whether it's the client or not. See
        https://core.telegram.org/mtproto/description#defining-aes-key-and-initialization-vector
        """
        x = 0 if client else 8
        sha256a = sha256(msg_key + auth_key[x: x + 36]).digest()
        sha256b = sha256(auth_key[x + 40:x + 76] + msg_key).digest()

        aes_key = sha256a[:8] + sha256b[8:24] + sha256a[24:32]
        aes_iv = sha256b[:8] + sha256a[8:24] + sha256b[24:32]

        return aes_key, aes_iv

    def write_data_as_message(self, buffer, data, content_related,
                              *, after_id=None):
        """
        Writes a message containing the given data into buffer.

        Returns the message id.
        """
        msg_id = self._get_new_msg_id()
        seq_no = self._get_seq_no(content_related)
        if after_id is None:
            body = GzipPacked.gzip_if_smaller(content_related, data)
        else:
            # The `RequestState` stores `bytes(request)`, not the request itself.
            # `invokeAfterMsg` wants a `TLRequest` though, hence the wrapping.
            body = GzipPacked.gzip_if_smaller(content_related,
                bytes(InvokeAfterMsgRequest(after_id, _OpaqueRequest(data))))
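        # Inner-message wire format: msg_id (int64) | seq_no (int32) |
        # byte length (int32) | body, matching the '<qii' pack below.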
        buffer.write(struct.pack('<qii', msg_id, seq_no, len(body)))
        buffer.write(body)
        return msg_id

    def encrypt_message_data(self, data):
        """
        Encrypts the given message data using the current authorization key
        following MTProto 2.0 guidelines core.telegram.org/mtproto/description.
        """
        data = struct.pack('<qq', self.salt, self.id) + data
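        # This draws the smallest padding of at least 12 bytes (12..27) that
        # makes len(data) + len(padding) a multiple of 16, as MTProto 2.0
        # requires before IGE encryption.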
        padding = os.urandom(-(len(data) + 12) % 16 + 12)

        # Being substr(what, offset, length); x = 0 for client
        # "msg_key_large = SHA256(substr(auth_key, 88+x, 32) + pt + padding)"
        msg_key_large = sha256(
            self.auth_key.key[88:88 + 32] + data + padding).digest()

        # "msg_key = substr (msg_key_large, 8, 16)"
        msg_key = msg_key_large[8:24]
        aes_key, aes_iv = self._calc_key(self.auth_key.key, msg_key, True)

        key_id = struct.pack('<Q', self.auth_key.key_id)
        return (key_id + msg_key +
                AES.encrypt_ige(data + padding, aes_key, aes_iv))

    def decrypt_message_data(self, body):
        """
        Inverse of `encrypt_message_data` for incoming server messages.
        """
        now = time.time() + self.time_offset  # get the time as early as possible, even if other checks make it go unused

        if len(body) < 8:
            raise InvalidBufferError(body)

        # TODO Check salt, session_id and sequence_number
        key_id = struct.unpack('<Q', body[:8])[0]
        if key_id != self.auth_key.key_id:
            raise SecurityError('Server replied with an invalid auth key')

        msg_key = body[8:24]
        aes_key, aes_iv = self._calc_key(self.auth_key.key, msg_key, False)
        body = AES.decrypt_ige(body[24:], aes_key, aes_iv)

        # https://core.telegram.org/mtproto/security_guidelines
        # Sections "checking sha256 hash" and "message length"
        our_key = sha256(self.auth_key.key[96:96 + 32] + body)
        if msg_key != our_key.digest()[8:24]:
            raise SecurityError(
                "Received msg_key doesn't match with expected one")

        reader = BinaryReader(body)
        reader.read_long()  # remote_salt
        if reader.read_long() != self.id:
            raise SecurityError('Server replied with a wrong session ID (see FAQ for details)')

        remote_msg_id = reader.read_long()
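        # Server-originated message IDs are always odd (responses are
        # 1 mod 4, server-initiated messages 3 mod 4), so an even ID
        # cannot be genuine.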
        if remote_msg_id % 2 != 1:
            raise SecurityError('Server sent an even msg_id')

        # Only perform the (somewhat expensive) duplicate check if we
        # received an ID lower than the highest seen so far
        if remote_msg_id <= self._highest_remote_id and remote_msg_id in self._recent_remote_ids:
            self._log.warning('Server resent the older message %d, ignoring', remote_msg_id)
            self._count_ignored()
            return None

        remote_sequence = reader.read_int()
        reader.read_int()  # msg_len for the inner object, padding ignored

        # We could read msg_len bytes and use those in a new reader to read
        # the next TLObject without including the padding, but since the
        # reader isn't used for anything else after this, it's unnecessary.
        obj = reader.tgread_object()

        # "Certain client-to-server service messages containing data sent by the client to the
        # server (for example, msg_id of a recent client query) may, nonetheless, be processed
        # on the client even if the time appears to be "incorrect". This is especially true of
        # messages to change server_salt and notifications about invalid time on the client."
        #
        # This means we skip the time check for certain types of messages.
        if obj.CONSTRUCTOR_ID not in (BadServerSalt.CONSTRUCTOR_ID, BadMsgNotification.CONSTRUCTOR_ID):
            remote_msg_time = remote_msg_id >> 32
            time_delta = now - remote_msg_time

            if time_delta > MSG_TOO_OLD_DELTA:
                self._log.warning('Server sent a very old message with ID %d, ignoring (see FAQ for details)', remote_msg_id)
                self._count_ignored()
                return None

            if -time_delta > MSG_TOO_NEW_DELTA:
                self._log.warning('Server sent a very new message with ID %d, ignoring (see FAQ for details)', remote_msg_id)
                self._count_ignored()
                return None

        self._recent_remote_ids.append(remote_msg_id)
        self._highest_remote_id = remote_msg_id
        self._ignore_count = 0

        return TLMessage(remote_msg_id, remote_sequence, obj)

    def _count_ignored(self):
        # It's possible that ignoring a message "bricks" the connection,
        # but this should not happen unless there's something else wrong.
        self._ignore_count += 1
        if self._ignore_count >= MAX_CONSECUTIVE_IGNORED:
            raise SecurityError('Too many messages had to be ignored consecutively')

    def _get_new_msg_id(self):
        """
        Generates a new unique message ID based on the current
        time (in ms) since epoch, applying a known time offset.
        """
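        # Layout: the high 32 bits carry the unix time and the low bits the
        # sub-second fraction, shifted left twice so client message IDs stay
        # divisible by 4 (server IDs are odd; see `decrypt_message_data`).
        # The `+ 4` bump below preserves both monotonicity and divisibility
        # when the clock hasn't advanced.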
        now = time.time() + self.time_offset
        nanoseconds = int((now - int(now)) * 1e+9)
        new_msg_id = (int(now) << 32) | (nanoseconds << 2)

        if self._last_msg_id >= new_msg_id:
            new_msg_id = self._last_msg_id + 4

        self._last_msg_id = new_msg_id
        return new_msg_id

    def update_time_offset(self, correct_msg_id):
        """
        Updates the time offset to the correct
        one given a known valid message ID.
        """
        bad = self._get_new_msg_id()
        old = self.time_offset

        now = int(time.time())
        correct = correct_msg_id >> 32
        self.time_offset = correct - now
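        # E.g. if the server's valid msg_id encodes unix time 1700000100
        # while our clock reads 1700000040, the offset becomes +60 seconds
        # and is applied to every future msg_id.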
        if self.time_offset != old:
            self._last_msg_id = 0
            self._log.debug(
                'Updated time offset (old offset %d, bad %d, good %d, new %d)',
                old, bad, correct_msg_id, self.time_offset
            )

        return self.time_offset

    def _get_seq_no(self, content_related):
        """
        Generates the next sequence number depending on whether
        it should be for a content-related query or not.
        """
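        # Example: starting from _sequence == 0, a content-related message
        # gets seq_no 1, a following ack (not content-related) gets 2, and
        # the next content-related message gets 3: odd numbers mark messages
        # that count towards the sequence.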
        if content_related:
            result = self._sequence * 2 + 1
            self._sequence += 1
            return result
        else:
            return self._sequence * 2
19
.venv2/Lib/site-packages/telethon/network/requeststate.py
Normal file
@@ -0,0 +1,19 @@
import asyncio


class RequestState:
    """
    This request state holds several pieces of information relevant to sent
    messages; in particular, the message ID assigned to the request, the
    container ID it belongs to, the request itself, the request as bytes,
    and the future result that will eventually be resolved.
    """
    __slots__ = ('container_id', 'msg_id', 'request', 'data', 'future', 'after')

    def __init__(self, request, after=None):
        self.container_id = None
        self.msg_id = None
        self.request = request
        self.data = bytes(request)
        self.future = asyncio.Future()
        self.after = after
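
# Life-cycle sketch (for orientation): the sender assigns `msg_id` (and
# `container_id` when batching) while packing, stores the state in its
# `_pending_state` map, and later resolves `future` from `_handle_rpc_result`;
# on reconnect, pending states are re-enqueued so the future still resolves.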