Mirror of https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git (synced 2025-08-14 00:25:46 +02:00)
Initial commit
4
venv/Lib/site-packages/blib2to3/pgen2/__init__.py
Normal file
@@ -0,0 +1,4 @@
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""The pgen2 package."""
BIN
venv/Lib/site-packages/blib2to3/pgen2/conv.cp311-win_amd64.pyd
Normal file
Binary file not shown.
256
venv/Lib/site-packages/blib2to3/pgen2/conv.py
Normal file
@@ -0,0 +1,256 @@
|
||||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
# mypy: ignore-errors
|
||||
|
||||
"""Convert graminit.[ch] spit out by pgen to Python code.
|
||||
|
||||
Pgen is the Python parser generator. It is useful to quickly create a
|
||||
parser from a grammar file in Python's grammar notation. But I don't
|
||||
want my parsers to be written in C (yet), so I'm translating the
|
||||
parsing tables to Python data structures and writing a Python parse
|
||||
engine.
|
||||
|
||||
Note that the token numbers are constants determined by the standard
|
||||
Python tokenizer. The standard token module defines these numbers and
|
||||
their names (the names are not used much). The token numbers are
|
||||
hardcoded into the Python tokenizer and into pgen. A Python
|
||||
implementation of the Python tokenizer is also available, in the
|
||||
standard tokenize module.
|
||||
|
||||
On the other hand, symbol numbers (representing the grammar's
|
||||
non-terminals) are assigned by pgen based on the actual grammar
|
||||
input.
|
||||
|
||||
Note: this module is pretty much obsolete; the pgen module generates
|
||||
equivalent grammar tables directly from the Grammar.txt input file
|
||||
without having to invoke the Python pgen C program.
|
||||
|
||||
"""
|
||||
|
||||
# Python imports
|
||||
import re
|
||||
|
||||
# Local imports
|
||||
from pgen2 import grammar, token
|
||||
|
||||
|
||||
class Converter(grammar.Grammar):
|
||||
"""Grammar subclass that reads classic pgen output files.
|
||||
|
||||
The run() method reads the tables as produced by the pgen parser
|
||||
generator, typically contained in two C files, graminit.h and
|
||||
graminit.c. The other methods are for internal use only.
|
||||
|
||||
See the base class for more documentation.
|
||||
|
||||
"""
|
||||
|
||||
def run(self, graminit_h, graminit_c):
|
||||
"""Load the grammar tables from the text files written by pgen."""
|
||||
self.parse_graminit_h(graminit_h)
|
||||
self.parse_graminit_c(graminit_c)
|
||||
self.finish_off()
|
||||
|
||||
def parse_graminit_h(self, filename):
|
||||
"""Parse the .h file written by pgen. (Internal)
|
||||
|
||||
This file is a sequence of #define statements defining the
|
||||
nonterminals of the grammar as numbers. We build two tables
|
||||
mapping the numbers to names and back.
|
||||
|
||||
"""
|
||||
try:
|
||||
f = open(filename)
|
||||
except OSError as err:
|
||||
print(f"Can't open {filename}: {err}")
|
||||
return False
|
||||
self.symbol2number = {}
|
||||
self.number2symbol = {}
|
||||
lineno = 0
|
||||
for line in f:
|
||||
lineno += 1
|
||||
mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line)
|
||||
if not mo and line.strip():
|
||||
print(f"{filename}({lineno}): can't parse {line.strip()}")
|
||||
else:
|
||||
symbol, number = mo.groups()
|
||||
number = int(number)
|
||||
assert symbol not in self.symbol2number
|
||||
assert number not in self.number2symbol
|
||||
self.symbol2number[symbol] = number
|
||||
self.number2symbol[number] = symbol
|
||||
return True
|
||||
|
||||
def parse_graminit_c(self, filename):
|
||||
"""Parse the .c file written by pgen. (Internal)
|
||||
|
||||
The file looks as follows. The first two lines are always this:
|
||||
|
||||
#include "pgenheaders.h"
|
||||
#include "grammar.h"
|
||||
|
||||
After that come four blocks:
|
||||
|
||||
1) one or more state definitions
|
||||
2) a table defining dfas
|
||||
3) a table defining labels
|
||||
4) a struct defining the grammar
|
||||
|
||||
A state definition has the following form:
|
||||
- one or more arc arrays, each of the form:
|
||||
static arc arcs_<n>_<m>[<k>] = {
|
||||
{<i>, <j>},
|
||||
...
|
||||
};
|
||||
- followed by a state array, of the form:
|
||||
static state states_<s>[<t>] = {
|
||||
{<k>, arcs_<n>_<m>},
|
||||
...
|
||||
};
|
||||
|
||||
"""
|
||||
try:
|
||||
f = open(filename)
|
||||
except OSError as err:
|
||||
print(f"Can't open {filename}: {err}")
|
||||
return False
|
||||
# The code below essentially uses f's iterator-ness!
|
||||
lineno = 0
|
||||
|
||||
# Expect the two #include lines
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == '#include "pgenheaders.h"\n', (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == '#include "grammar.h"\n', (lineno, line)
|
||||
|
||||
# Parse the state definitions
|
||||
lineno, line = lineno + 1, next(f)
|
||||
allarcs = {}
|
||||
states = []
|
||||
while line.startswith("static arc "):
|
||||
while line.startswith("static arc "):
|
||||
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line)
|
||||
assert mo, (lineno, line)
|
||||
n, m, k = list(map(int, mo.groups()))
|
||||
arcs = []
|
||||
for _ in range(k):
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"\s+{(\d+), (\d+)},$", line)
|
||||
assert mo, (lineno, line)
|
||||
i, j = list(map(int, mo.groups()))
|
||||
arcs.append((i, j))
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "};\n", (lineno, line)
|
||||
allarcs[(n, m)] = arcs
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
|
||||
assert mo, (lineno, line)
|
||||
s, t = list(map(int, mo.groups()))
|
||||
assert s == len(states), (lineno, line)
|
||||
state = []
|
||||
for _ in range(t):
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
|
||||
assert mo, (lineno, line)
|
||||
k, n, m = list(map(int, mo.groups()))
|
||||
arcs = allarcs[n, m]
|
||||
assert k == len(arcs), (lineno, line)
|
||||
state.append(arcs)
|
||||
states.append(state)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "};\n", (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
self.states = states
|
||||
|
||||
# Parse the dfas
|
||||
dfas = {}
|
||||
mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
|
||||
assert mo, (lineno, line)
|
||||
ndfas = int(mo.group(1))
|
||||
for i in range(ndfas):
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line)
|
||||
assert mo, (lineno, line)
|
||||
symbol = mo.group(2)
|
||||
number, x, y, z = list(map(int, mo.group(1, 3, 4, 5)))
|
||||
assert self.symbol2number[symbol] == number, (lineno, line)
|
||||
assert self.number2symbol[number] == symbol, (lineno, line)
|
||||
assert x == 0, (lineno, line)
|
||||
state = states[z]
|
||||
assert y == len(state), (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
|
||||
assert mo, (lineno, line)
|
||||
first = {}
|
||||
rawbitset = eval(mo.group(1))
|
||||
for i, c in enumerate(rawbitset):
|
||||
byte = ord(c)
|
||||
for j in range(8):
|
||||
if byte & (1 << j):
|
||||
first[i * 8 + j] = 1
|
||||
dfas[number] = (state, first)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "};\n", (lineno, line)
|
||||
self.dfas = dfas
|
||||
|
||||
# Parse the labels
|
||||
labels = []
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"static label labels\[(\d+)\] = {$", line)
|
||||
assert mo, (lineno, line)
|
||||
nlabels = int(mo.group(1))
|
||||
for i in range(nlabels):
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
|
||||
assert mo, (lineno, line)
|
||||
x, y = mo.groups()
|
||||
x = int(x)
|
||||
if y == "0":
|
||||
y = None
|
||||
else:
|
||||
y = eval(y)
|
||||
labels.append((x, y))
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "};\n", (lineno, line)
|
||||
self.labels = labels
|
||||
|
||||
# Parse the grammar struct
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"\s+(\d+),$", line)
|
||||
assert mo, (lineno, line)
|
||||
ndfas = int(mo.group(1))
|
||||
assert ndfas == len(self.dfas)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "\tdfas,\n", (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"\s+{(\d+), labels},$", line)
|
||||
assert mo, (lineno, line)
|
||||
nlabels = int(mo.group(1))
|
||||
assert nlabels == len(self.labels), (lineno, line)
|
||||
lineno, line = lineno + 1, next(f)
|
||||
mo = re.match(r"\s+(\d+)$", line)
|
||||
assert mo, (lineno, line)
|
||||
start = int(mo.group(1))
|
||||
assert start in self.number2symbol, (lineno, line)
|
||||
self.start = start
|
||||
lineno, line = lineno + 1, next(f)
|
||||
assert line == "};\n", (lineno, line)
|
||||
try:
|
||||
lineno, line = lineno + 1, next(f)
|
||||
except StopIteration:
|
||||
pass
|
||||
else:
|
||||
assert 0, (lineno, line)
|
||||
|
||||
def finish_off(self):
|
||||
"""Create additional useful structures. (Internal)."""
|
||||
self.keywords = {} # map from keyword strings to arc labels
|
||||
self.tokens = {} # map from numeric token values to arc labels
|
||||
for ilabel, (type, value) in enumerate(self.labels):
|
||||
if type == token.NAME and value is not None:
|
||||
self.keywords[value] = ilabel
|
||||
elif value is None:
|
||||
self.tokens[type] = ilabel
|
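As a side note, parse_graminit_h above leans entirely on one regular expression to split each #define line into a symbol name and its number. A standalone check of that pattern (stdlib only, with an illustrative graminit.h line; not part of the vendored file):

>>> import re
>>> mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", "#define single_input 256")
>>> mo.groups()
('single_input', '256')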
BIN
venv/Lib/site-packages/blib2to3/pgen2/driver.cp311-win_amd64.pyd
Normal file
Binary file not shown.
318
venv/Lib/site-packages/blib2to3/pgen2/driver.py
Normal file
@@ -0,0 +1,318 @@
|
||||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
# Modifications:
|
||||
# Copyright 2006 Google, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
"""Parser driver.
|
||||
|
||||
This provides a high-level interface to parse a file into a syntax tree.
|
||||
|
||||
"""
|
||||
|
||||
__author__ = "Guido van Rossum <guido@python.org>"
|
||||
|
||||
__all__ = ["Driver", "load_grammar"]
|
||||
|
||||
# Python imports
|
||||
import io
|
||||
import logging
|
||||
import os
|
||||
import pkgutil
|
||||
import sys
|
||||
from contextlib import contextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from logging import Logger
|
||||
from typing import IO, Any, Iterable, Iterator, Optional, Union, cast
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
||||
from blib2to3.pytree import NL
|
||||
|
||||
# Pgen imports
|
||||
from . import grammar, parse, pgen, token, tokenize
|
||||
|
||||
Path = Union[str, "os.PathLike[str]"]
|
||||
|
||||
|
||||
@dataclass
|
||||
class ReleaseRange:
|
||||
start: int
|
||||
end: Optional[int] = None
|
||||
tokens: list[Any] = field(default_factory=list)
|
||||
|
||||
def lock(self) -> None:
|
||||
total_eaten = len(self.tokens)
|
||||
self.end = self.start + total_eaten
|
||||
|
||||
|
||||
class TokenProxy:
|
||||
def __init__(self, generator: Any) -> None:
|
||||
self._tokens = generator
|
||||
self._counter = 0
|
||||
self._release_ranges: list[ReleaseRange] = []
|
||||
|
||||
@contextmanager
|
||||
def release(self) -> Iterator["TokenProxy"]:
|
||||
release_range = ReleaseRange(self._counter)
|
||||
self._release_ranges.append(release_range)
|
||||
try:
|
||||
yield self
|
||||
finally:
|
||||
# Lock the last release range to the final position that
|
||||
# has been eaten.
|
||||
release_range.lock()
|
||||
|
||||
def eat(self, point: int) -> Any:
|
||||
eaten_tokens = self._release_ranges[-1].tokens
|
||||
if point < len(eaten_tokens):
|
||||
return eaten_tokens[point]
|
||||
else:
|
||||
while point >= len(eaten_tokens):
|
||||
token = next(self._tokens)
|
||||
eaten_tokens.append(token)
|
||||
return token
|
||||
|
||||
def __iter__(self) -> "TokenProxy":
|
||||
return self
|
||||
|
||||
def __next__(self) -> Any:
|
||||
# If the current position is already compromised (looked up)
|
||||
# return the eaten token, if not just go further on the given
|
||||
# token producer.
|
||||
for release_range in self._release_ranges:
|
||||
assert release_range.end is not None
|
||||
|
||||
start, end = release_range.start, release_range.end
|
||||
if start <= self._counter < end:
|
||||
token = release_range.tokens[self._counter - start]
|
||||
break
|
||||
else:
|
||||
token = next(self._tokens)
|
||||
self._counter += 1
|
||||
return token
|
||||
|
||||
def can_advance(self, to: int) -> bool:
|
||||
# Try to eat, fail if it can't. The eat operation is cached
|
||||
# so there won't be any additional cost of eating here
|
||||
try:
|
||||
self.eat(to)
|
||||
except StopIteration:
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
class Driver:
|
||||
def __init__(self, grammar: Grammar, logger: Optional[Logger] = None) -> None:
|
||||
self.grammar = grammar
|
||||
if logger is None:
|
||||
logger = logging.getLogger(__name__)
|
||||
self.logger = logger
|
||||
|
||||
def parse_tokens(self, tokens: Iterable[GoodTokenInfo], debug: bool = False) -> NL:
|
||||
"""Parse a series of tokens and return the syntax tree."""
|
||||
# XXX Move the prefix computation into a wrapper around tokenize.
|
||||
proxy = TokenProxy(tokens)
|
||||
|
||||
p = parse.Parser(self.grammar)
|
||||
p.setup(proxy=proxy)
|
||||
|
||||
lineno = 1
|
||||
column = 0
|
||||
indent_columns: list[int] = []
|
||||
type = value = start = end = line_text = None
|
||||
prefix = ""
|
||||
|
||||
for quintuple in proxy:
|
||||
type, value, start, end, line_text = quintuple
|
||||
if start != (lineno, column):
|
||||
assert (lineno, column) <= start, ((lineno, column), start)
|
||||
s_lineno, s_column = start
|
||||
if lineno < s_lineno:
|
||||
prefix += "\n" * (s_lineno - lineno)
|
||||
lineno = s_lineno
|
||||
column = 0
|
||||
if column < s_column:
|
||||
prefix += line_text[column:s_column]
|
||||
column = s_column
|
||||
if type in (tokenize.COMMENT, tokenize.NL):
|
||||
prefix += value
|
||||
lineno, column = end
|
||||
if value.endswith("\n"):
|
||||
lineno += 1
|
||||
column = 0
|
||||
continue
|
||||
if type == token.OP:
|
||||
type = grammar.opmap[value]
|
||||
if debug:
|
||||
assert type is not None
|
||||
self.logger.debug(
|
||||
"%s %r (prefix=%r)", token.tok_name[type], value, prefix
|
||||
)
|
||||
if type == token.INDENT:
|
||||
indent_columns.append(len(value))
|
||||
_prefix = prefix + value
|
||||
prefix = ""
|
||||
value = ""
|
||||
elif type == token.DEDENT:
|
||||
_indent_col = indent_columns.pop()
|
||||
prefix, _prefix = self._partially_consume_prefix(prefix, _indent_col)
|
||||
if p.addtoken(cast(int, type), value, (prefix, start)):
|
||||
if debug:
|
||||
self.logger.debug("Stop.")
|
||||
break
|
||||
prefix = ""
|
||||
if type in {token.INDENT, token.DEDENT}:
|
||||
prefix = _prefix
|
||||
lineno, column = end
|
||||
# FSTRING_MIDDLE is the only token that can end with a newline, and
|
||||
# `end` will point to the next line. For that case, don't increment lineno.
|
||||
if value.endswith("\n") and type != token.FSTRING_MIDDLE:
|
||||
lineno += 1
|
||||
column = 0
|
||||
else:
|
||||
# We never broke out -- EOF is too soon (how can this happen???)
|
||||
assert start is not None
|
||||
raise parse.ParseError("incomplete input", type, value, (prefix, start))
|
||||
assert p.rootnode is not None
|
||||
return p.rootnode
|
||||
|
||||
def parse_stream_raw(self, stream: IO[str], debug: bool = False) -> NL:
|
||||
"""Parse a stream and return the syntax tree."""
|
||||
tokens = tokenize.generate_tokens(stream.readline, grammar=self.grammar)
|
||||
return self.parse_tokens(tokens, debug)
|
||||
|
||||
def parse_stream(self, stream: IO[str], debug: bool = False) -> NL:
|
||||
"""Parse a stream and return the syntax tree."""
|
||||
return self.parse_stream_raw(stream, debug)
|
||||
|
||||
def parse_file(
|
||||
self, filename: Path, encoding: Optional[str] = None, debug: bool = False
|
||||
) -> NL:
|
||||
"""Parse a file and return the syntax tree."""
|
||||
with open(filename, encoding=encoding) as stream:
|
||||
return self.parse_stream(stream, debug)
|
||||
|
||||
def parse_string(self, text: str, debug: bool = False) -> NL:
|
||||
"""Parse a string and return the syntax tree."""
|
||||
tokens = tokenize.generate_tokens(
|
||||
io.StringIO(text).readline, grammar=self.grammar
|
||||
)
|
||||
return self.parse_tokens(tokens, debug)
|
||||
|
||||
def _partially_consume_prefix(self, prefix: str, column: int) -> tuple[str, str]:
|
||||
lines: list[str] = []
|
||||
current_line = ""
|
||||
current_column = 0
|
||||
wait_for_nl = False
|
||||
for char in prefix:
|
||||
current_line += char
|
||||
if wait_for_nl:
|
||||
if char == "\n":
|
||||
if current_line.strip() and current_column < column:
|
||||
res = "".join(lines)
|
||||
return res, prefix[len(res) :]
|
||||
|
||||
lines.append(current_line)
|
||||
current_line = ""
|
||||
current_column = 0
|
||||
wait_for_nl = False
|
||||
elif char in " \t":
|
||||
current_column += 1
|
||||
elif char == "\n":
|
||||
# unexpected empty line
|
||||
current_column = 0
|
||||
elif char == "\f":
|
||||
current_column = 0
|
||||
else:
|
||||
# indent is finished
|
||||
wait_for_nl = True
|
||||
return "".join(lines), current_line
|
||||
|
||||
|
||||
def _generate_pickle_name(gt: Path, cache_dir: Optional[Path] = None) -> str:
|
||||
head, tail = os.path.splitext(gt)
|
||||
if tail == ".txt":
|
||||
tail = ""
|
||||
name = head + tail + ".".join(map(str, sys.version_info)) + ".pickle"
|
||||
if cache_dir:
|
||||
return os.path.join(cache_dir, os.path.basename(name))
|
||||
else:
|
||||
return name
|
||||
|
||||
|
||||
def load_grammar(
|
||||
gt: str = "Grammar.txt",
|
||||
gp: Optional[str] = None,
|
||||
save: bool = True,
|
||||
force: bool = False,
|
||||
logger: Optional[Logger] = None,
|
||||
) -> Grammar:
|
||||
"""Load the grammar (maybe from a pickle)."""
|
||||
if logger is None:
|
||||
logger = logging.getLogger(__name__)
|
||||
gp = _generate_pickle_name(gt) if gp is None else gp
|
||||
if force or not _newer(gp, gt):
|
||||
g: grammar.Grammar = pgen.generate_grammar(gt)
|
||||
if save:
|
||||
try:
|
||||
g.dump(gp)
|
||||
except OSError:
|
||||
# Ignore error, caching is not vital.
|
||||
pass
|
||||
else:
|
||||
g = grammar.Grammar()
|
||||
g.load(gp)
|
||||
return g
|
||||
|
||||
|
||||
def _newer(a: str, b: str) -> bool:
|
||||
"""Inquire whether file a was written since file b."""
|
||||
if not os.path.exists(a):
|
||||
return False
|
||||
if not os.path.exists(b):
|
||||
return True
|
||||
return os.path.getmtime(a) >= os.path.getmtime(b)
|
||||
|
||||
|
||||
def load_packaged_grammar(
|
||||
package: str, grammar_source: str, cache_dir: Optional[Path] = None
|
||||
) -> grammar.Grammar:
|
||||
"""Normally, loads a pickled grammar by doing
|
||||
pkgutil.get_data(package, pickled_grammar)
|
||||
where *pickled_grammar* is computed from *grammar_source* by adding the
|
||||
Python version and using a ``.pickle`` extension.
|
||||
|
||||
However, if *grammar_source* is an extant file, load_grammar(grammar_source)
|
||||
is called instead. This facilitates using a packaged grammar file when needed
|
||||
but preserves load_grammar's automatic regeneration behavior when possible.
|
||||
|
||||
"""
|
||||
if os.path.isfile(grammar_source):
|
||||
gp = _generate_pickle_name(grammar_source, cache_dir) if cache_dir else None
|
||||
return load_grammar(grammar_source, gp=gp)
|
||||
pickled_name = _generate_pickle_name(os.path.basename(grammar_source), cache_dir)
|
||||
data = pkgutil.get_data(package, pickled_name)
|
||||
assert data is not None
|
||||
g = grammar.Grammar()
|
||||
g.loads(data)
|
||||
return g
|
||||
|
||||
|
||||
def main(*args: str) -> bool:
|
||||
"""Main program, when run as a script: produce grammar pickle files.
|
||||
|
||||
Calls load_grammar for each argument, a path to a grammar text file.
|
||||
"""
|
||||
if not args:
|
||||
args = tuple(sys.argv[1:])
|
||||
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(message)s")
|
||||
for gt in args:
|
||||
load_grammar(gt, save=True, force=True)
|
||||
return True
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(int(not main()))
|
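Putting the pieces of driver.py together, a minimal usage sketch (assuming a pgen-style Grammar.txt exists on disk; the file name here is only an example, not something shipped with this commit):

from blib2to3.pgen2 import driver

grammar = driver.load_grammar("Grammar.txt")   # builds the tables, or loads the pickle cache
d = driver.Driver(grammar)
tree = d.parse_string("x = 1\n")               # returns a blib2to3.pytree node
print(tree)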
Binary file not shown.
228
venv/Lib/site-packages/blib2to3/pgen2/grammar.py
Normal file
@@ -0,0 +1,228 @@
|
||||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
"""This module defines the data structures used to represent a grammar.
|
||||
|
||||
These are a bit arcane because they are derived from the data
|
||||
structures used by Python's 'pgen' parser generator.
|
||||
|
||||
There's also a table here mapping operators to their names in the
|
||||
token module; the Python tokenize module reports all operators as the
|
||||
fallback token code OP, but the parser needs the actual token code.
|
||||
|
||||
"""
|
||||
|
||||
# Python imports
|
||||
import os
|
||||
import pickle
|
||||
import tempfile
|
||||
from typing import Any, Optional, TypeVar, Union
|
||||
|
||||
# Local imports
|
||||
from . import token
|
||||
|
||||
_P = TypeVar("_P", bound="Grammar")
|
||||
Label = tuple[int, Optional[str]]
|
||||
DFA = list[list[tuple[int, int]]]
|
||||
DFAS = tuple[DFA, dict[int, int]]
|
||||
Path = Union[str, "os.PathLike[str]"]
|
||||
|
||||
|
||||
class Grammar:
|
||||
"""Pgen parsing tables conversion class.
|
||||
|
||||
Once initialized, this class supplies the grammar tables for the
|
||||
parsing engine implemented by parse.py. The parsing engine
|
||||
accesses the instance variables directly. The class here does not
|
||||
provide initialization of the tables; several subclasses exist to
|
||||
do this (see the conv and pgen modules).
|
||||
|
||||
The load() method reads the tables from a pickle file, which is
|
||||
much faster than the other ways offered by subclasses. The pickle
|
||||
file is written by calling dump() (after loading the grammar
|
||||
tables using a subclass). The report() method prints a readable
|
||||
representation of the tables to stdout, for debugging.
|
||||
|
||||
The instance variables are as follows:
|
||||
|
||||
symbol2number -- a dict mapping symbol names to numbers. Symbol
|
||||
numbers are always 256 or higher, to distinguish
|
||||
them from token numbers, which are between 0 and
|
||||
255 (inclusive).
|
||||
|
||||
number2symbol -- a dict mapping numbers to symbol names;
|
||||
these two are each other's inverse.
|
||||
|
||||
states -- a list of DFAs, where each DFA is a list of
|
||||
states, each state is a list of arcs, and each
|
||||
arc is a (i, j) pair where i is a label and j is
|
||||
a state number. The DFA number is the index into
|
||||
this list. (This name is slightly confusing.)
|
||||
Final states are represented by a special arc of
|
||||
the form (0, j) where j is its own state number.
|
||||
|
||||
dfas -- a dict mapping symbol numbers to (DFA, first)
|
||||
pairs, where DFA is an item from the states list
|
||||
above, and first is a set of tokens that can
|
||||
begin this grammar rule (represented by a dict
|
||||
whose values are always 1).
|
||||
|
||||
labels -- a list of (x, y) pairs where x is either a token
|
||||
number or a symbol number, and y is either None
|
||||
or a string; the strings are keywords. The label
|
||||
number is the index in this list; label numbers
|
||||
are used to mark state transitions (arcs) in the
|
||||
DFAs.
|
||||
|
||||
start -- the number of the grammar's start symbol.
|
||||
|
||||
keywords -- a dict mapping keyword strings to arc labels.
|
||||
|
||||
tokens -- a dict mapping token numbers to arc labels.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.symbol2number: dict[str, int] = {}
|
||||
self.number2symbol: dict[int, str] = {}
|
||||
self.states: list[DFA] = []
|
||||
self.dfas: dict[int, DFAS] = {}
|
||||
self.labels: list[Label] = [(0, "EMPTY")]
|
||||
self.keywords: dict[str, int] = {}
|
||||
self.soft_keywords: dict[str, int] = {}
|
||||
self.tokens: dict[int, int] = {}
|
||||
self.symbol2label: dict[str, int] = {}
|
||||
self.version: tuple[int, int] = (0, 0)
|
||||
self.start = 256
|
||||
# Python 3.7+ parses async as a keyword, not an identifier
|
||||
self.async_keywords = False
|
||||
|
||||
def dump(self, filename: Path) -> None:
|
||||
"""Dump the grammar tables to a pickle file."""
|
||||
|
||||
# mypyc generates objects that don't have a __dict__, but they
|
||||
# do have __getstate__ methods that will return an equivalent
|
||||
# dictionary
|
||||
if hasattr(self, "__dict__"):
|
||||
d = self.__dict__
|
||||
else:
|
||||
d = self.__getstate__() # type: ignore
|
||||
|
||||
with tempfile.NamedTemporaryFile(
|
||||
dir=os.path.dirname(filename), delete=False
|
||||
) as f:
|
||||
pickle.dump(d, f, pickle.HIGHEST_PROTOCOL)
|
||||
os.replace(f.name, filename)
|
||||
|
||||
def _update(self, attrs: dict[str, Any]) -> None:
|
||||
for k, v in attrs.items():
|
||||
setattr(self, k, v)
|
||||
|
||||
def load(self, filename: Path) -> None:
|
||||
"""Load the grammar tables from a pickle file."""
|
||||
with open(filename, "rb") as f:
|
||||
d = pickle.load(f)
|
||||
self._update(d)
|
||||
|
||||
def loads(self, pkl: bytes) -> None:
|
||||
"""Load the grammar tables from a pickle bytes object."""
|
||||
self._update(pickle.loads(pkl))
|
||||
|
||||
def copy(self: _P) -> _P:
|
||||
"""
|
||||
Copy the grammar.
|
||||
"""
|
||||
new = self.__class__()
|
||||
for dict_attr in (
|
||||
"symbol2number",
|
||||
"number2symbol",
|
||||
"dfas",
|
||||
"keywords",
|
||||
"soft_keywords",
|
||||
"tokens",
|
||||
"symbol2label",
|
||||
):
|
||||
setattr(new, dict_attr, getattr(self, dict_attr).copy())
|
||||
new.labels = self.labels[:]
|
||||
new.states = self.states[:]
|
||||
new.start = self.start
|
||||
new.version = self.version
|
||||
new.async_keywords = self.async_keywords
|
||||
return new
|
||||
|
||||
def report(self) -> None:
|
||||
"""Dump the grammar tables to standard output, for debugging."""
|
||||
from pprint import pprint
|
||||
|
||||
print("s2n")
|
||||
pprint(self.symbol2number)
|
||||
print("n2s")
|
||||
pprint(self.number2symbol)
|
||||
print("states")
|
||||
pprint(self.states)
|
||||
print("dfas")
|
||||
pprint(self.dfas)
|
||||
print("labels")
|
||||
pprint(self.labels)
|
||||
print("start", self.start)
|
||||
|
||||
|
||||
# Map from operator to number (since tokenize doesn't do this)
|
||||
|
||||
opmap_raw = """
|
||||
( LPAR
|
||||
) RPAR
|
||||
[ LSQB
|
||||
] RSQB
|
||||
: COLON
|
||||
, COMMA
|
||||
; SEMI
|
||||
+ PLUS
|
||||
- MINUS
|
||||
* STAR
|
||||
/ SLASH
|
||||
| VBAR
|
||||
& AMPER
|
||||
< LESS
|
||||
> GREATER
|
||||
= EQUAL
|
||||
. DOT
|
||||
% PERCENT
|
||||
` BACKQUOTE
|
||||
{ LBRACE
|
||||
} RBRACE
|
||||
@ AT
|
||||
@= ATEQUAL
|
||||
== EQEQUAL
|
||||
!= NOTEQUAL
|
||||
<> NOTEQUAL
|
||||
<= LESSEQUAL
|
||||
>= GREATEREQUAL
|
||||
~ TILDE
|
||||
^ CIRCUMFLEX
|
||||
<< LEFTSHIFT
|
||||
>> RIGHTSHIFT
|
||||
** DOUBLESTAR
|
||||
+= PLUSEQUAL
|
||||
-= MINEQUAL
|
||||
*= STAREQUAL
|
||||
/= SLASHEQUAL
|
||||
%= PERCENTEQUAL
|
||||
&= AMPEREQUAL
|
||||
|= VBAREQUAL
|
||||
^= CIRCUMFLEXEQUAL
|
||||
<<= LEFTSHIFTEQUAL
|
||||
>>= RIGHTSHIFTEQUAL
|
||||
**= DOUBLESTAREQUAL
|
||||
// DOUBLESLASH
|
||||
//= DOUBLESLASHEQUAL
|
||||
-> RARROW
|
||||
:= COLONEQUAL
|
||||
! BANG
|
||||
"""
|
||||
|
||||
opmap = {}
|
||||
for line in opmap_raw.splitlines():
|
||||
if line:
|
||||
op, name = line.split()
|
||||
opmap[op] = getattr(token, name)
|
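For a quick sanity check of the opmap table built from opmap_raw above: each operator string resolves to the matching constant in the token module (interactive sketch, not part of the vendored file):

>>> from blib2to3.pgen2 import grammar, token
>>> grammar.opmap["->"] == token.RARROW
True
>>> grammar.opmap[":="] == token.COLONEQUAL
True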
Binary file not shown.
66
venv/Lib/site-packages/blib2to3/pgen2/literals.py
Normal file
@@ -0,0 +1,66 @@
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Safely evaluate Python string literals without using eval()."""

import re
from typing import Match

simple_escapes: dict[str, str] = {
    "a": "\a",
    "b": "\b",
    "f": "\f",
    "n": "\n",
    "r": "\r",
    "t": "\t",
    "v": "\v",
    "'": "'",
    '"': '"',
    "\\": "\\",
}


def escape(m: Match[str]) -> str:
    all, tail = m.group(0, 1)
    assert all.startswith("\\")
    esc = simple_escapes.get(tail)
    if esc is not None:
        return esc
    if tail.startswith("x"):
        hexes = tail[1:]
        if len(hexes) < 2:
            raise ValueError("invalid hex string escape ('\\%s')" % tail)
        try:
            i = int(hexes, 16)
        except ValueError:
            raise ValueError("invalid hex string escape ('\\%s')" % tail) from None
    else:
        try:
            i = int(tail, 8)
        except ValueError:
            raise ValueError("invalid octal string escape ('\\%s')" % tail) from None
    return chr(i)


def evalString(s: str) -> str:
    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
    q = s[0]
    if s[:3] == q * 3:
        q = q * 3
    assert s.endswith(q), repr(s[-len(q) :])
    assert len(s) >= 2 * len(q)
    s = s[len(q) : -len(q)]
    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)


def test() -> None:
    for i in range(256):
        c = chr(i)
        s = repr(c)
        e = evalString(s)
        if e != c:
            print(i, c, s, e)


if __name__ == "__main__":
    test()
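evalString above strips the surrounding quotes and expands backslash escapes via re.sub with the escape callback; a short interactive sketch of the intended behaviour (not part of the vendored file):

>>> from blib2to3.pgen2.literals import evalString
>>> evalString("'a\\tb\\x41'")
'a\tbA'
>>> evalString('"hi\\n"')
'hi\n'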
BIN
venv/Lib/site-packages/blib2to3/pgen2/parse.cp311-win_amd64.pyd
Normal file
Binary file not shown.
399
venv/Lib/site-packages/blib2to3/pgen2/parse.py
Normal file
@@ -0,0 +1,399 @@
|
||||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
"""Parser engine for the grammar tables generated by pgen.
|
||||
|
||||
The grammar table must be loaded first.
|
||||
|
||||
See Parser/parser.c in the Python distribution for additional info on
|
||||
how this parsing engine works.
|
||||
|
||||
"""
|
||||
from contextlib import contextmanager
|
||||
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, Union, cast
|
||||
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pytree import NL, Context, Leaf, Node, RawNode, convert
|
||||
|
||||
# Local imports
|
||||
from . import grammar, token, tokenize
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from blib2to3.pgen2.driver import TokenProxy
|
||||
|
||||
|
||||
Results = dict[str, NL]
|
||||
Convert = Callable[[Grammar, RawNode], Union[Node, Leaf]]
|
||||
DFA = list[list[tuple[int, int]]]
|
||||
DFAS = tuple[DFA, dict[int, int]]
|
||||
|
||||
|
||||
def lam_sub(grammar: Grammar, node: RawNode) -> NL:
|
||||
assert node[3] is not None
|
||||
return Node(type=node[0], children=node[3], context=node[2])
|
||||
|
||||
|
||||
# A placeholder node, used when parser is backtracking.
|
||||
DUMMY_NODE = (-1, None, None, None)
|
||||
|
||||
|
||||
def stack_copy(
|
||||
stack: list[tuple[DFAS, int, RawNode]],
|
||||
) -> list[tuple[DFAS, int, RawNode]]:
|
||||
"""Nodeless stack copy."""
|
||||
return [(dfa, label, DUMMY_NODE) for dfa, label, _ in stack]
|
||||
|
||||
|
||||
class Recorder:
|
||||
def __init__(self, parser: "Parser", ilabels: list[int], context: Context) -> None:
|
||||
self.parser = parser
|
||||
self._ilabels = ilabels
|
||||
self.context = context  # does not really matter
|
||||
|
||||
self._dead_ilabels: set[int] = set()
|
||||
self._start_point = self.parser.stack
|
||||
self._points = {ilabel: stack_copy(self._start_point) for ilabel in ilabels}
|
||||
|
||||
@property
|
||||
def ilabels(self) -> set[int]:
|
||||
return self._dead_ilabels.symmetric_difference(self._ilabels)
|
||||
|
||||
@contextmanager
|
||||
def switch_to(self, ilabel: int) -> Iterator[None]:
|
||||
with self.backtrack():
|
||||
self.parser.stack = self._points[ilabel]
|
||||
try:
|
||||
yield
|
||||
except ParseError:
|
||||
self._dead_ilabels.add(ilabel)
|
||||
finally:
|
||||
self.parser.stack = self._start_point
|
||||
|
||||
@contextmanager
|
||||
def backtrack(self) -> Iterator[None]:
|
||||
"""
|
||||
Use the node-level invariant ones for basic parsing operations (push/pop/shift).
|
||||
These will still operate on the stack, but they won't create any new nodes, or
|
||||
modify the contents of any other existing nodes.
|
||||
|
||||
This saves us a ton of time when we are backtracking, since we
|
||||
want to restore to the initial state as quickly as possible, which
|
||||
can only be done with as few mutations as possible.
|
||||
"""
|
||||
is_backtracking = self.parser.is_backtracking
|
||||
try:
|
||||
self.parser.is_backtracking = True
|
||||
yield
|
||||
finally:
|
||||
self.parser.is_backtracking = is_backtracking
|
||||
|
||||
def add_token(self, tok_type: int, tok_val: str, raw: bool = False) -> None:
|
||||
func: Callable[..., Any]
|
||||
if raw:
|
||||
func = self.parser._addtoken
|
||||
else:
|
||||
func = self.parser.addtoken
|
||||
|
||||
for ilabel in self.ilabels:
|
||||
with self.switch_to(ilabel):
|
||||
args = [tok_type, tok_val, self.context]
|
||||
if raw:
|
||||
args.insert(0, ilabel)
|
||||
func(*args)
|
||||
|
||||
def determine_route(
|
||||
self, value: Optional[str] = None, force: bool = False
|
||||
) -> Optional[int]:
|
||||
alive_ilabels = self.ilabels
|
||||
if len(alive_ilabels) == 0:
|
||||
*_, most_successful_ilabel = self._dead_ilabels
|
||||
raise ParseError("bad input", most_successful_ilabel, value, self.context)
|
||||
|
||||
ilabel, *rest = alive_ilabels
|
||||
if force or not rest:
|
||||
return ilabel
|
||||
else:
|
||||
return None
|
||||
|
||||
|
||||
class ParseError(Exception):
|
||||
"""Exception to signal the parser is stuck."""
|
||||
|
||||
def __init__(
|
||||
self, msg: str, type: Optional[int], value: Optional[str], context: Context
|
||||
) -> None:
|
||||
Exception.__init__(
|
||||
self, f"{msg}: type={type!r}, value={value!r}, context={context!r}"
|
||||
)
|
||||
self.msg = msg
|
||||
self.type = type
|
||||
self.value = value
|
||||
self.context = context
|
||||
|
||||
|
||||
class Parser:
|
||||
"""Parser engine.
|
||||
|
||||
The proper usage sequence is:
|
||||
|
||||
p = Parser(grammar, [converter]) # create instance
|
||||
p.setup([start]) # prepare for parsing
|
||||
<for each input token>:
|
||||
if p.addtoken(...): # parse a token; may raise ParseError
|
||||
break
|
||||
root = p.rootnode # root of abstract syntax tree
|
||||
|
||||
A Parser instance may be reused by calling setup() repeatedly.
|
||||
|
||||
A Parser instance contains state pertaining to the current token
|
||||
sequence, and should not be used concurrently by different threads
|
||||
to parse separate token sequences.
|
||||
|
||||
See driver.py for how to get input tokens by tokenizing a file or
|
||||
string.
|
||||
|
||||
Parsing is complete when addtoken() returns True; the root of the
|
||||
abstract syntax tree can then be retrieved from the rootnode
|
||||
instance variable. When a syntax error occurs, addtoken() raises
|
||||
the ParseError exception. There is no error recovery; the parser
|
||||
cannot be used after a syntax error was reported (but it can be
|
||||
reinitialized by calling setup()).
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, grammar: Grammar, convert: Optional[Convert] = None) -> None:
|
||||
"""Constructor.
|
||||
|
||||
The grammar argument is a grammar.Grammar instance; see the
|
||||
grammar module for more information.
|
||||
|
||||
The parser is not ready yet for parsing; you must call the
|
||||
setup() method to get it started.
|
||||
|
||||
The optional convert argument is a function mapping concrete
|
||||
syntax tree nodes to abstract syntax tree nodes. If not
|
||||
given, no conversion is done and the syntax tree produced is
|
||||
the concrete syntax tree. If given, it must be a function of
|
||||
two arguments, the first being the grammar (a grammar.Grammar
|
||||
instance), and the second being the concrete syntax tree node
|
||||
to be converted. The syntax tree is converted from the bottom
|
||||
up.
|
||||
|
||||
**post-note: the convert argument is ignored since for Black's
|
||||
usage, convert will always be blib2to3.pytree.convert. Allowing
|
||||
this to be dynamic hurts mypyc's ability to use early binding.
|
||||
These docs are left for historical and informational value.
|
||||
|
||||
A concrete syntax tree node is a (type, value, context, nodes)
|
||||
tuple, where type is the node type (a token or symbol number),
|
||||
value is None for symbols and a string for tokens, context is
|
||||
None or an opaque value used for error reporting (typically a
|
||||
(lineno, offset) pair), and nodes is a list of children for
|
||||
symbols, and None for tokens.
|
||||
|
||||
An abstract syntax tree node may be anything; this is entirely
|
||||
up to the converter function.
|
||||
|
||||
"""
|
||||
self.grammar = grammar
|
||||
# See note in docstring above. TL;DR this is ignored.
|
||||
self.convert = convert or lam_sub
|
||||
self.is_backtracking = False
|
||||
self.last_token: Optional[int] = None
|
||||
|
||||
def setup(self, proxy: "TokenProxy", start: Optional[int] = None) -> None:
|
||||
"""Prepare for parsing.
|
||||
|
||||
This *must* be called before starting to parse.
|
||||
|
||||
The optional argument is an alternative start symbol; it
|
||||
defaults to the grammar's start symbol.
|
||||
|
||||
You can use a Parser instance to parse any number of programs;
|
||||
each time you call setup() the parser is reset to an initial
|
||||
state determined by the (implicit or explicit) start symbol.
|
||||
|
||||
"""
|
||||
if start is None:
|
||||
start = self.grammar.start
|
||||
# Each stack entry is a tuple: (dfa, state, node).
|
||||
# A node is a tuple: (type, value, context, children),
|
||||
# where children is a list of nodes or None, and context may be None.
|
||||
newnode: RawNode = (start, None, None, [])
|
||||
stackentry = (self.grammar.dfas[start], 0, newnode)
|
||||
self.stack: list[tuple[DFAS, int, RawNode]] = [stackentry]
|
||||
self.rootnode: Optional[NL] = None
|
||||
self.used_names: set[str] = set()
|
||||
self.proxy = proxy
|
||||
self.last_token = None
|
||||
|
||||
def addtoken(self, type: int, value: str, context: Context) -> bool:
|
||||
"""Add a token; return True iff this is the end of the program."""
|
||||
# Map from token to label
|
||||
ilabels = self.classify(type, value, context)
|
||||
assert len(ilabels) >= 1
|
||||
|
||||
# If we have only one state to advance, we'll directly
|
||||
# take it as is.
|
||||
if len(ilabels) == 1:
|
||||
[ilabel] = ilabels
|
||||
return self._addtoken(ilabel, type, value, context)
|
||||
|
||||
# If there are multiple states which we can advance (only
|
||||
# happen under soft-keywords), then we will try all of them
|
||||
# in parallel and as soon as one state can reach further than
|
||||
# the rest, we'll choose that one. This is a pretty hacky
|
||||
# and hopefully temporary algorithm.
|
||||
#
|
||||
# For a more detailed explanation, check out this post:
|
||||
# https://tree.science/what-the-backtracking.html
|
||||
|
||||
with self.proxy.release() as proxy:
|
||||
counter, force = 0, False
|
||||
recorder = Recorder(self, ilabels, context)
|
||||
recorder.add_token(type, value, raw=True)
|
||||
|
||||
next_token_value = value
|
||||
while recorder.determine_route(next_token_value) is None:
|
||||
if not proxy.can_advance(counter):
|
||||
force = True
|
||||
break
|
||||
|
||||
next_token_type, next_token_value, *_ = proxy.eat(counter)
|
||||
if next_token_type in (tokenize.COMMENT, tokenize.NL):
|
||||
counter += 1
|
||||
continue
|
||||
|
||||
if next_token_type == tokenize.OP:
|
||||
next_token_type = grammar.opmap[next_token_value]
|
||||
|
||||
recorder.add_token(next_token_type, next_token_value)
|
||||
counter += 1
|
||||
|
||||
ilabel = cast(int, recorder.determine_route(next_token_value, force=force))
|
||||
assert ilabel is not None
|
||||
|
||||
return self._addtoken(ilabel, type, value, context)
|
||||
|
||||
def _addtoken(self, ilabel: int, type: int, value: str, context: Context) -> bool:
|
||||
# Loop until the token is shifted; may raise exceptions
|
||||
while True:
|
||||
dfa, state, node = self.stack[-1]
|
||||
states, first = dfa
|
||||
arcs = states[state]
|
||||
# Look for a state with this label
|
||||
for i, newstate in arcs:
|
||||
t = self.grammar.labels[i][0]
|
||||
if t >= 256:
|
||||
# See if it's a symbol and if we're in its first set
|
||||
itsdfa = self.grammar.dfas[t]
|
||||
itsstates, itsfirst = itsdfa
|
||||
if ilabel in itsfirst:
|
||||
# Push a symbol
|
||||
self.push(t, itsdfa, newstate, context)
|
||||
break # To continue the outer while loop
|
||||
|
||||
elif ilabel == i:
|
||||
# Look it up in the list of labels
|
||||
# Shift a token; we're done with it
|
||||
self.shift(type, value, newstate, context)
|
||||
# Pop while we are in an accept-only state
|
||||
state = newstate
|
||||
while states[state] == [(0, state)]:
|
||||
self.pop()
|
||||
if not self.stack:
|
||||
# Done parsing!
|
||||
return True
|
||||
dfa, state, node = self.stack[-1]
|
||||
states, first = dfa
|
||||
# Done with this token
|
||||
self.last_token = type
|
||||
return False
|
||||
|
||||
else:
|
||||
if (0, state) in arcs:
|
||||
# An accepting state, pop it and try something else
|
||||
self.pop()
|
||||
if not self.stack:
|
||||
# Done parsing, but another token is input
|
||||
raise ParseError("too much input", type, value, context)
|
||||
else:
|
||||
# No success finding a transition
|
||||
raise ParseError("bad input", type, value, context)
|
||||
|
||||
def classify(self, type: int, value: str, context: Context) -> list[int]:
|
||||
"""Turn a token into a label. (Internal)
|
||||
|
||||
Depending on whether the value is a soft-keyword or not,
|
||||
this function may return multiple labels to choose from."""
|
||||
if type == token.NAME:
|
||||
# Keep a listing of all used names
|
||||
self.used_names.add(value)
|
||||
# Check for reserved words
|
||||
if value in self.grammar.keywords:
|
||||
return [self.grammar.keywords[value]]
|
||||
elif value in self.grammar.soft_keywords:
|
||||
assert type in self.grammar.tokens
|
||||
# Current soft keywords (match, case, type) can only appear at the
|
||||
# beginning of a statement. So as a shortcut, don't try to treat them
|
||||
# like keywords in any other context.
|
||||
# ('_' is also a soft keyword in the real grammar, but for our grammar
|
||||
# it's just an expression, so we don't need to treat it specially.)
|
||||
if self.last_token not in (
|
||||
None,
|
||||
token.INDENT,
|
||||
token.DEDENT,
|
||||
token.NEWLINE,
|
||||
token.SEMI,
|
||||
token.COLON,
|
||||
):
|
||||
return [self.grammar.tokens[type]]
|
||||
return [
|
||||
self.grammar.tokens[type],
|
||||
self.grammar.soft_keywords[value],
|
||||
]
|
||||
|
||||
ilabel = self.grammar.tokens.get(type)
|
||||
if ilabel is None:
|
||||
raise ParseError("bad token", type, value, context)
|
||||
return [ilabel]
|
||||
|
||||
def shift(self, type: int, value: str, newstate: int, context: Context) -> None:
|
||||
"""Shift a token. (Internal)"""
|
||||
if self.is_backtracking:
|
||||
dfa, state, _ = self.stack[-1]
|
||||
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
|
||||
else:
|
||||
dfa, state, node = self.stack[-1]
|
||||
rawnode: RawNode = (type, value, context, None)
|
||||
newnode = convert(self.grammar, rawnode)
|
||||
assert node[-1] is not None
|
||||
node[-1].append(newnode)
|
||||
self.stack[-1] = (dfa, newstate, node)
|
||||
|
||||
def push(self, type: int, newdfa: DFAS, newstate: int, context: Context) -> None:
|
||||
"""Push a nonterminal. (Internal)"""
|
||||
if self.is_backtracking:
|
||||
dfa, state, _ = self.stack[-1]
|
||||
self.stack[-1] = (dfa, newstate, DUMMY_NODE)
|
||||
self.stack.append((newdfa, 0, DUMMY_NODE))
|
||||
else:
|
||||
dfa, state, node = self.stack[-1]
|
||||
newnode: RawNode = (type, None, context, [])
|
||||
self.stack[-1] = (dfa, newstate, node)
|
||||
self.stack.append((newdfa, 0, newnode))
|
||||
|
||||
def pop(self) -> None:
|
||||
"""Pop a nonterminal. (Internal)"""
|
||||
if self.is_backtracking:
|
||||
self.stack.pop()
|
||||
else:
|
||||
popdfa, popstate, popnode = self.stack.pop()
|
||||
newnode = convert(self.grammar, popnode)
|
||||
if self.stack:
|
||||
dfa, state, node = self.stack[-1]
|
||||
assert node[-1] is not None
|
||||
node[-1].append(newnode)
|
||||
else:
|
||||
self.rootnode = newnode
|
||||
self.rootnode.used_names = self.used_names
|
BIN
venv/Lib/site-packages/blib2to3/pgen2/pgen.cp311-win_amd64.pyd
Normal file
Binary file not shown.
417
venv/Lib/site-packages/blib2to3/pgen2/pgen.py
Normal file
@@ -0,0 +1,417 @@
|
||||
# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
|
||||
# Licensed to PSF under a Contributor Agreement.
|
||||
|
||||
import os
|
||||
from typing import IO, Any, Iterator, NoReturn, Optional, Sequence, Union
|
||||
|
||||
from blib2to3.pgen2 import grammar, token, tokenize
|
||||
from blib2to3.pgen2.tokenize import GoodTokenInfo
|
||||
|
||||
Path = Union[str, "os.PathLike[str]"]
|
||||
|
||||
|
||||
class PgenGrammar(grammar.Grammar):
|
||||
pass
|
||||
|
||||
|
||||
class ParserGenerator:
|
||||
filename: Path
|
||||
stream: IO[str]
|
||||
generator: Iterator[GoodTokenInfo]
|
||||
first: dict[str, Optional[dict[str, int]]]
|
||||
|
||||
def __init__(self, filename: Path, stream: Optional[IO[str]] = None) -> None:
|
||||
close_stream = None
|
||||
if stream is None:
|
||||
stream = open(filename, encoding="utf-8")
|
||||
close_stream = stream.close
|
||||
self.filename = filename
|
||||
self.stream = stream
|
||||
self.generator = tokenize.generate_tokens(stream.readline)
|
||||
self.gettoken() # Initialize lookahead
|
||||
self.dfas, self.startsymbol = self.parse()
|
||||
if close_stream is not None:
|
||||
close_stream()
|
||||
self.first = {} # map from symbol name to set of tokens
|
||||
self.addfirstsets()
|
||||
|
||||
def make_grammar(self) -> PgenGrammar:
|
||||
c = PgenGrammar()
|
||||
names = list(self.dfas.keys())
|
||||
names.sort()
|
||||
names.remove(self.startsymbol)
|
||||
names.insert(0, self.startsymbol)
|
||||
for name in names:
|
||||
i = 256 + len(c.symbol2number)
|
||||
c.symbol2number[name] = i
|
||||
c.number2symbol[i] = name
|
||||
for name in names:
|
||||
dfa = self.dfas[name]
|
||||
states = []
|
||||
for state in dfa:
|
||||
arcs = []
|
||||
for label, next in sorted(state.arcs.items()):
|
||||
arcs.append((self.make_label(c, label), dfa.index(next)))
|
||||
if state.isfinal:
|
||||
arcs.append((0, dfa.index(state)))
|
||||
states.append(arcs)
|
||||
c.states.append(states)
|
||||
c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
|
||||
c.start = c.symbol2number[self.startsymbol]
|
||||
return c
|
||||
|
||||
def make_first(self, c: PgenGrammar, name: str) -> dict[int, int]:
|
||||
rawfirst = self.first[name]
|
||||
assert rawfirst is not None
|
||||
first = {}
|
||||
for label in sorted(rawfirst):
|
||||
ilabel = self.make_label(c, label)
|
||||
##assert ilabel not in first # XXX failed on <> ... !=
|
||||
first[ilabel] = 1
|
||||
return first
|
||||
|
||||
def make_label(self, c: PgenGrammar, label: str) -> int:
|
||||
# XXX Maybe this should be a method on a subclass of converter?
|
||||
ilabel = len(c.labels)
|
||||
if label[0].isalpha():
|
||||
# Either a symbol name or a named token
|
||||
if label in c.symbol2number:
|
||||
# A symbol name (a non-terminal)
|
||||
if label in c.symbol2label:
|
||||
return c.symbol2label[label]
|
||||
else:
|
||||
c.labels.append((c.symbol2number[label], None))
|
||||
c.symbol2label[label] = ilabel
|
||||
return ilabel
|
||||
else:
|
||||
# A named token (NAME, NUMBER, STRING)
|
||||
itoken = getattr(token, label, None)
|
||||
assert isinstance(itoken, int), label
|
||||
assert itoken in token.tok_name, label
|
||||
if itoken in c.tokens:
|
||||
return c.tokens[itoken]
|
||||
else:
|
||||
c.labels.append((itoken, None))
|
||||
c.tokens[itoken] = ilabel
|
||||
return ilabel
|
||||
else:
|
||||
# Either a keyword or an operator
|
||||
assert label[0] in ('"', "'"), label
|
||||
value = eval(label)
|
||||
if value[0].isalpha():
|
||||
if label[0] == '"':
|
||||
keywords = c.soft_keywords
|
||||
else:
|
||||
keywords = c.keywords
|
||||
|
||||
# A keyword
|
||||
if value in keywords:
|
||||
return keywords[value]
|
||||
else:
|
||||
c.labels.append((token.NAME, value))
|
||||
keywords[value] = ilabel
|
||||
return ilabel
|
||||
else:
|
||||
# An operator (any non-numeric token)
|
||||
itoken = grammar.opmap[value] # Fails if unknown token
|
||||
if itoken in c.tokens:
|
||||
return c.tokens[itoken]
|
||||
else:
|
||||
c.labels.append((itoken, None))
|
||||
c.tokens[itoken] = ilabel
|
||||
return ilabel
|
||||
|
||||
def addfirstsets(self) -> None:
|
||||
names = list(self.dfas.keys())
|
||||
names.sort()
|
||||
for name in names:
|
||||
if name not in self.first:
|
||||
self.calcfirst(name)
|
||||
# print name, self.first[name].keys()
|
||||
|
||||
def calcfirst(self, name: str) -> None:
|
||||
dfa = self.dfas[name]
|
||||
self.first[name] = None # dummy to detect left recursion
|
||||
state = dfa[0]
|
||||
totalset: dict[str, int] = {}
|
||||
overlapcheck = {}
|
||||
for label in state.arcs:
|
||||
if label in self.dfas:
|
||||
if label in self.first:
|
||||
fset = self.first[label]
|
||||
if fset is None:
|
||||
raise ValueError("recursion for rule %r" % name)
|
||||
else:
|
||||
self.calcfirst(label)
|
||||
fset = self.first[label]
|
||||
assert fset is not None
|
||||
totalset.update(fset)
|
||||
overlapcheck[label] = fset
|
||||
else:
|
||||
totalset[label] = 1
|
||||
overlapcheck[label] = {label: 1}
|
||||
inverse: dict[str, str] = {}
|
||||
for label, itsfirst in overlapcheck.items():
|
||||
for symbol in itsfirst:
|
||||
if symbol in inverse:
|
||||
raise ValueError(
|
||||
"rule %s is ambiguous; %s is in the first sets of %s as well"
|
||||
" as %s" % (name, symbol, label, inverse[symbol])
|
||||
)
|
||||
inverse[symbol] = label
|
||||
self.first[name] = totalset
|
||||
|
||||
def parse(self) -> tuple[dict[str, list["DFAState"]], str]:
|
||||
dfas = {}
|
||||
startsymbol: Optional[str] = None
|
||||
# MSTART: (NEWLINE | RULE)* ENDMARKER
|
||||
while self.type != token.ENDMARKER:
|
||||
while self.type == token.NEWLINE:
|
||||
self.gettoken()
|
||||
# RULE: NAME ':' RHS NEWLINE
|
||||
name = self.expect(token.NAME)
|
||||
self.expect(token.OP, ":")
|
||||
a, z = self.parse_rhs()
|
||||
self.expect(token.NEWLINE)
|
||||
# self.dump_nfa(name, a, z)
|
||||
dfa = self.make_dfa(a, z)
|
||||
# self.dump_dfa(name, dfa)
|
||||
# oldlen = len(dfa)
|
||||
self.simplify_dfa(dfa)
|
||||
# newlen = len(dfa)
|
||||
dfas[name] = dfa
|
||||
# print name, oldlen, newlen
|
||||
if startsymbol is None:
|
||||
startsymbol = name
|
||||
assert startsymbol is not None
|
||||
return dfas, startsymbol
|
||||
|
||||
def make_dfa(self, start: "NFAState", finish: "NFAState") -> list["DFAState"]:
|
||||
# To turn an NFA into a DFA, we define the states of the DFA
|
||||
# to correspond to *sets* of states of the NFA. Then do some
|
||||
# state reduction. Let's represent sets as dicts with 1 for
|
||||
# values.
|
||||
assert isinstance(start, NFAState)
|
||||
assert isinstance(finish, NFAState)
|
||||
|
||||
def closure(state: NFAState) -> dict[NFAState, int]:
|
||||
base: dict[NFAState, int] = {}
|
||||
addclosure(state, base)
|
||||
return base
|
||||
|
||||
def addclosure(state: NFAState, base: dict[NFAState, int]) -> None:
|
||||
assert isinstance(state, NFAState)
|
||||
if state in base:
|
||||
return
|
||||
base[state] = 1
|
||||
for label, next in state.arcs:
|
||||
if label is None:
|
||||
addclosure(next, base)
|
||||
|
||||
states = [DFAState(closure(start), finish)]
|
||||
for state in states: # NB states grows while we're iterating
|
||||
arcs: dict[str, dict[NFAState, int]] = {}
|
||||
for nfastate in state.nfaset:
|
||||
for label, next in nfastate.arcs:
|
||||
if label is not None:
|
||||
addclosure(next, arcs.setdefault(label, {}))
|
||||
for label, nfaset in sorted(arcs.items()):
|
||||
for st in states:
|
||||
if st.nfaset == nfaset:
|
||||
break
|
||||
else:
|
||||
st = DFAState(nfaset, finish)
|
||||
states.append(st)
|
||||
state.addarc(st, label)
|
||||
return states # List of DFAState instances; first one is start
|
||||
|
||||
def dump_nfa(self, name: str, start: "NFAState", finish: "NFAState") -> None:
|
||||
print("Dump of NFA for", name)
|
||||
todo = [start]
|
||||
for i, state in enumerate(todo):
|
||||
print(" State", i, state is finish and "(final)" or "")
|
||||
for label, next in state.arcs:
|
||||
if next in todo:
|
||||
j = todo.index(next)
|
||||
else:
|
||||
j = len(todo)
|
||||
todo.append(next)
|
||||
if label is None:
|
||||
print(" -> %d" % j)
|
||||
else:
|
||||
print(" %s -> %d" % (label, j))
|
||||
|
||||
def dump_dfa(self, name: str, dfa: Sequence["DFAState"]) -> None:
|
||||
print("Dump of DFA for", name)
|
||||
for i, state in enumerate(dfa):
|
||||
print(" State", i, state.isfinal and "(final)" or "")
|
||||
for label, next in sorted(state.arcs.items()):
|
||||
print(" %s -> %d" % (label, dfa.index(next)))
|
||||
|
||||
def simplify_dfa(self, dfa: list["DFAState"]) -> None:
|
||||
# This is not theoretically optimal, but works well enough.
|
||||
# Algorithm: repeatedly look for two states that have the same
|
||||
# set of arcs (same labels pointing to the same nodes) and
|
||||
# unify them, until things stop changing.
|
||||
|
||||
# dfa is a list of DFAState instances
|
||||
changes = True
|
||||
while changes:
|
||||
changes = False
|
||||
for i, state_i in enumerate(dfa):
|
||||
for j in range(i + 1, len(dfa)):
|
||||
state_j = dfa[j]
|
||||
if state_i == state_j:
|
||||
# print " unify", i, j
|
||||
del dfa[j]
|
||||
for state in dfa:
|
||||
state.unifystate(state_j, state_i)
|
||||
changes = True
|
||||
break
|
||||
|
||||
def parse_rhs(self) -> tuple["NFAState", "NFAState"]:
|
||||
# RHS: ALT ('|' ALT)*
|
||||
a, z = self.parse_alt()
|
||||
if self.value != "|":
|
||||
return a, z
|
||||
else:
|
||||
aa = NFAState()
|
||||
zz = NFAState()
|
||||
aa.addarc(a)
|
||||
z.addarc(zz)
|
||||
while self.value == "|":
|
||||
self.gettoken()
|
||||
a, z = self.parse_alt()
|
||||
aa.addarc(a)
|
||||
z.addarc(zz)
|
||||
return aa, zz
|
||||
|
||||
def parse_alt(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ALT: ITEM+
|
||||
a, b = self.parse_item()
|
||||
while self.value in ("(", "[") or self.type in (token.NAME, token.STRING):
|
||||
c, d = self.parse_item()
|
||||
b.addarc(c)
|
||||
b = d
|
||||
return a, b
|
||||
|
||||
def parse_item(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ITEM: '[' RHS ']' | ATOM ['+' | '*']
|
||||
if self.value == "[":
|
||||
self.gettoken()
|
||||
a, z = self.parse_rhs()
|
||||
self.expect(token.OP, "]")
|
||||
a.addarc(z)
|
||||
return a, z
|
||||
else:
|
||||
a, z = self.parse_atom()
|
||||
value = self.value
|
||||
if value not in ("+", "*"):
|
||||
return a, z
|
||||
self.gettoken()
|
||||
z.addarc(a)
|
||||
if value == "+":
|
||||
return a, z
|
||||
else:
|
||||
return a, a
|
||||
|
||||
def parse_atom(self) -> tuple["NFAState", "NFAState"]:
|
||||
# ATOM: '(' RHS ')' | NAME | STRING
|
||||
if self.value == "(":
|
||||
self.gettoken()
|
||||
a, z = self.parse_rhs()
|
||||
self.expect(token.OP, ")")
|
||||
return a, z
|
||||
elif self.type in (token.NAME, token.STRING):
|
||||
a = NFAState()
|
||||
z = NFAState()
|
||||
a.addarc(z, self.value)
|
||||
self.gettoken()
|
||||
return a, z
|
||||
else:
|
||||
self.raise_error(
|
||||
"expected (...) or NAME or STRING, got %s/%s", self.type, self.value
|
||||
)
|
||||
raise AssertionError
|
||||
|
||||
def expect(self, type: int, value: Optional[Any] = None) -> str:
|
||||
if self.type != type or (value is not None and self.value != value):
|
||||
self.raise_error(
|
||||
"expected %s/%s, got %s/%s", type, value, self.type, self.value
|
||||
)
|
||||
value = self.value
|
||||
self.gettoken()
|
||||
return value
|
||||
|
||||
def gettoken(self) -> None:
|
||||
tup = next(self.generator)
|
||||
while tup[0] in (tokenize.COMMENT, tokenize.NL):
|
||||
tup = next(self.generator)
|
||||
self.type, self.value, self.begin, self.end, self.line = tup
|
||||
# print token.tok_name[self.type], repr(self.value)
|
||||
|
||||
def raise_error(self, msg: str, *args: Any) -> NoReturn:
|
||||
if args:
|
||||
try:
|
||||
msg = msg % args
|
||||
except Exception:
|
||||
msg = " ".join([msg] + list(map(str, args)))
|
||||
raise SyntaxError(msg, (self.filename, self.end[0], self.end[1], self.line))
|
||||
|
||||
|
||||
class NFAState:
|
||||
arcs: list[tuple[Optional[str], "NFAState"]]
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.arcs = [] # list of (label, NFAState) pairs
|
||||
|
||||
def addarc(self, next: "NFAState", label: Optional[str] = None) -> None:
|
||||
assert label is None or isinstance(label, str)
|
||||
assert isinstance(next, NFAState)
|
||||
self.arcs.append((label, next))
|
||||
|
||||
|
||||
class DFAState:
|
||||
nfaset: dict[NFAState, Any]
|
||||
isfinal: bool
|
||||
arcs: dict[str, "DFAState"]
|
||||
|
||||
def __init__(self, nfaset: dict[NFAState, Any], final: NFAState) -> None:
|
||||
assert isinstance(nfaset, dict)
|
||||
assert isinstance(next(iter(nfaset)), NFAState)
|
||||
assert isinstance(final, NFAState)
|
||||
self.nfaset = nfaset
|
||||
self.isfinal = final in nfaset
|
||||
self.arcs = {} # map from label to DFAState
|
||||
|
||||
def addarc(self, next: "DFAState", label: str) -> None:
|
||||
assert isinstance(label, str)
|
||||
assert label not in self.arcs
|
||||
assert isinstance(next, DFAState)
|
||||
self.arcs[label] = next
|
||||
|
||||
def unifystate(self, old: "DFAState", new: "DFAState") -> None:
|
||||
for label, next in self.arcs.items():
|
||||
if next is old:
|
||||
self.arcs[label] = new
|
||||
|
||||
def __eq__(self, other: Any) -> bool:
|
||||
# Equality test -- ignore the nfaset instance variable
|
||||
assert isinstance(other, DFAState)
|
||||
if self.isfinal != other.isfinal:
|
||||
return False
|
||||
# Can't just return self.arcs == other.arcs, because that
|
||||
# would invoke this method recursively, with cycles...
|
||||
if len(self.arcs) != len(other.arcs):
|
||||
return False
|
||||
for label, next in self.arcs.items():
|
||||
if next is not other.arcs.get(label):
|
||||
return False
|
||||
return True
|
||||
|
||||
__hash__: Any = None # For Py3 compatibility.
|
||||
|
||||
|
||||
def generate_grammar(filename: Path = "Grammar.txt") -> PgenGrammar:
|
||||
p = ParserGenerator(filename)
|
||||
return p.make_grammar()
|
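generate_grammar above drives the whole pipeline: ParserGenerator tokenizes the grammar file, builds NFAs, converts them to DFAs, and make_grammar packs the result into a PgenGrammar. A hedged usage sketch (again assuming a Grammar.txt file exists; the symbol names depend entirely on that grammar):

from blib2to3.pgen2 import pgen

g = pgen.generate_grammar("Grammar.txt")
print(g.start)                      # number of the grammar's start symbol (>= 256)
print(sorted(g.symbol2number)[:5])  # first few non-terminal names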
BIN
venv/Lib/site-packages/blib2to3/pgen2/token.cp311-win_amd64.pyd
Normal file
Binary file not shown.
92
venv/Lib/site-packages/blib2to3/pgen2/token.py
Normal file
@@ -0,0 +1,92 @@
"""Token constants (from "token.h")."""

from typing import Final

# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize

# --start constants--
ENDMARKER: Final = 0
NAME: Final = 1
NUMBER: Final = 2
STRING: Final = 3
NEWLINE: Final = 4
INDENT: Final = 5
DEDENT: Final = 6
LPAR: Final = 7
RPAR: Final = 8
LSQB: Final = 9
RSQB: Final = 10
COLON: Final = 11
COMMA: Final = 12
SEMI: Final = 13
PLUS: Final = 14
MINUS: Final = 15
STAR: Final = 16
SLASH: Final = 17
VBAR: Final = 18
AMPER: Final = 19
LESS: Final = 20
GREATER: Final = 21
EQUAL: Final = 22
DOT: Final = 23
PERCENT: Final = 24
BACKQUOTE: Final = 25
LBRACE: Final = 26
RBRACE: Final = 27
EQEQUAL: Final = 28
NOTEQUAL: Final = 29
LESSEQUAL: Final = 30
GREATEREQUAL: Final = 31
TILDE: Final = 32
CIRCUMFLEX: Final = 33
LEFTSHIFT: Final = 34
RIGHTSHIFT: Final = 35
DOUBLESTAR: Final = 36
PLUSEQUAL: Final = 37
MINEQUAL: Final = 38
STAREQUAL: Final = 39
SLASHEQUAL: Final = 40
PERCENTEQUAL: Final = 41
AMPEREQUAL: Final = 42
VBAREQUAL: Final = 43
CIRCUMFLEXEQUAL: Final = 44
LEFTSHIFTEQUAL: Final = 45
RIGHTSHIFTEQUAL: Final = 46
DOUBLESTAREQUAL: Final = 47
DOUBLESLASH: Final = 48
DOUBLESLASHEQUAL: Final = 49
AT: Final = 50
ATEQUAL: Final = 51
OP: Final = 52
COMMENT: Final = 53
NL: Final = 54
RARROW: Final = 55
AWAIT: Final = 56
ASYNC: Final = 57
ERRORTOKEN: Final = 58
COLONEQUAL: Final = 59
FSTRING_START: Final = 60
FSTRING_MIDDLE: Final = 61
FSTRING_END: Final = 62
BANG: Final = 63
N_TOKENS: Final = 64
NT_OFFSET: Final = 256
# --end constants--

tok_name: Final[dict[int, str]] = {}
for _name, _value in list(globals().items()):
    if type(_value) is int:
        tok_name[_value] = _name


def ISTERMINAL(x: int) -> bool:
    return x < NT_OFFSET


def ISNONTERMINAL(x: int) -> bool:
    return x >= NT_OFFSET


def ISEOF(x: int) -> bool:
    return x == ENDMARKER
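The constants above double as a reverse lookup table through tok_name, and the helper predicates separate terminals from pgen-assigned non-terminal numbers (interactive sketch, not part of the vendored file):

>>> from blib2to3.pgen2 import token
>>> token.tok_name[token.NAME]
'NAME'
>>> token.ISTERMINAL(token.OP), token.ISNONTERMINAL(token.NT_OFFSET), token.ISEOF(token.ENDMARKER)
(True, True, True)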
Binary file not shown.
1112
venv/Lib/site-packages/blib2to3/pgen2/tokenize.py
Normal file
File diff suppressed because it is too large