mirror of
https://gitlab.com/MoonTestUse1/AdministrationItDepartmens.git
synced 2025-08-14 00:25:46 +02:00
Initial commit
This commit is contained in:
BIN
venv/Lib/site-packages/black/__init__.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/__init__.cp311-win_amd64.pyd
Normal file
Binary file not shown.
1592
venv/Lib/site-packages/black/__init__.py
Normal file
1592
venv/Lib/site-packages/black/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
3
venv/Lib/site-packages/black/__main__.py
Normal file
3
venv/Lib/site-packages/black/__main__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from black import patched_main
|
||||
|
||||
patched_main()
|
BIN
venv/Lib/site-packages/black/_width_table.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/_width_table.cp311-win_amd64.pyd
Normal file
Binary file not shown.
478
venv/Lib/site-packages/black/_width_table.py
Normal file
478
venv/Lib/site-packages/black/_width_table.py
Normal file
@@ -0,0 +1,478 @@
|
||||
# Generated by make_width_table.py
|
||||
# wcwidth 0.2.6
|
||||
# Unicode 15.0.0
|
||||
from typing import Final
|
||||
|
||||
WIDTH_TABLE: Final[list[tuple[int, int, int]]] = [
|
||||
(0, 0, 0),
|
||||
(1, 31, -1),
|
||||
(127, 159, -1),
|
||||
(768, 879, 0),
|
||||
(1155, 1161, 0),
|
||||
(1425, 1469, 0),
|
||||
(1471, 1471, 0),
|
||||
(1473, 1474, 0),
|
||||
(1476, 1477, 0),
|
||||
(1479, 1479, 0),
|
||||
(1552, 1562, 0),
|
||||
(1611, 1631, 0),
|
||||
(1648, 1648, 0),
|
||||
(1750, 1756, 0),
|
||||
(1759, 1764, 0),
|
||||
(1767, 1768, 0),
|
||||
(1770, 1773, 0),
|
||||
(1809, 1809, 0),
|
||||
(1840, 1866, 0),
|
||||
(1958, 1968, 0),
|
||||
(2027, 2035, 0),
|
||||
(2045, 2045, 0),
|
||||
(2070, 2073, 0),
|
||||
(2075, 2083, 0),
|
||||
(2085, 2087, 0),
|
||||
(2089, 2093, 0),
|
||||
(2137, 2139, 0),
|
||||
(2200, 2207, 0),
|
||||
(2250, 2273, 0),
|
||||
(2275, 2306, 0),
|
||||
(2362, 2362, 0),
|
||||
(2364, 2364, 0),
|
||||
(2369, 2376, 0),
|
||||
(2381, 2381, 0),
|
||||
(2385, 2391, 0),
|
||||
(2402, 2403, 0),
|
||||
(2433, 2433, 0),
|
||||
(2492, 2492, 0),
|
||||
(2497, 2500, 0),
|
||||
(2509, 2509, 0),
|
||||
(2530, 2531, 0),
|
||||
(2558, 2558, 0),
|
||||
(2561, 2562, 0),
|
||||
(2620, 2620, 0),
|
||||
(2625, 2626, 0),
|
||||
(2631, 2632, 0),
|
||||
(2635, 2637, 0),
|
||||
(2641, 2641, 0),
|
||||
(2672, 2673, 0),
|
||||
(2677, 2677, 0),
|
||||
(2689, 2690, 0),
|
||||
(2748, 2748, 0),
|
||||
(2753, 2757, 0),
|
||||
(2759, 2760, 0),
|
||||
(2765, 2765, 0),
|
||||
(2786, 2787, 0),
|
||||
(2810, 2815, 0),
|
||||
(2817, 2817, 0),
|
||||
(2876, 2876, 0),
|
||||
(2879, 2879, 0),
|
||||
(2881, 2884, 0),
|
||||
(2893, 2893, 0),
|
||||
(2901, 2902, 0),
|
||||
(2914, 2915, 0),
|
||||
(2946, 2946, 0),
|
||||
(3008, 3008, 0),
|
||||
(3021, 3021, 0),
|
||||
(3072, 3072, 0),
|
||||
(3076, 3076, 0),
|
||||
(3132, 3132, 0),
|
||||
(3134, 3136, 0),
|
||||
(3142, 3144, 0),
|
||||
(3146, 3149, 0),
|
||||
(3157, 3158, 0),
|
||||
(3170, 3171, 0),
|
||||
(3201, 3201, 0),
|
||||
(3260, 3260, 0),
|
||||
(3263, 3263, 0),
|
||||
(3270, 3270, 0),
|
||||
(3276, 3277, 0),
|
||||
(3298, 3299, 0),
|
||||
(3328, 3329, 0),
|
||||
(3387, 3388, 0),
|
||||
(3393, 3396, 0),
|
||||
(3405, 3405, 0),
|
||||
(3426, 3427, 0),
|
||||
(3457, 3457, 0),
|
||||
(3530, 3530, 0),
|
||||
(3538, 3540, 0),
|
||||
(3542, 3542, 0),
|
||||
(3633, 3633, 0),
|
||||
(3636, 3642, 0),
|
||||
(3655, 3662, 0),
|
||||
(3761, 3761, 0),
|
||||
(3764, 3772, 0),
|
||||
(3784, 3790, 0),
|
||||
(3864, 3865, 0),
|
||||
(3893, 3893, 0),
|
||||
(3895, 3895, 0),
|
||||
(3897, 3897, 0),
|
||||
(3953, 3966, 0),
|
||||
(3968, 3972, 0),
|
||||
(3974, 3975, 0),
|
||||
(3981, 3991, 0),
|
||||
(3993, 4028, 0),
|
||||
(4038, 4038, 0),
|
||||
(4141, 4144, 0),
|
||||
(4146, 4151, 0),
|
||||
(4153, 4154, 0),
|
||||
(4157, 4158, 0),
|
||||
(4184, 4185, 0),
|
||||
(4190, 4192, 0),
|
||||
(4209, 4212, 0),
|
||||
(4226, 4226, 0),
|
||||
(4229, 4230, 0),
|
||||
(4237, 4237, 0),
|
||||
(4253, 4253, 0),
|
||||
(4352, 4447, 2),
|
||||
(4957, 4959, 0),
|
||||
(5906, 5908, 0),
|
||||
(5938, 5939, 0),
|
||||
(5970, 5971, 0),
|
||||
(6002, 6003, 0),
|
||||
(6068, 6069, 0),
|
||||
(6071, 6077, 0),
|
||||
(6086, 6086, 0),
|
||||
(6089, 6099, 0),
|
||||
(6109, 6109, 0),
|
||||
(6155, 6157, 0),
|
||||
(6159, 6159, 0),
|
||||
(6277, 6278, 0),
|
||||
(6313, 6313, 0),
|
||||
(6432, 6434, 0),
|
||||
(6439, 6440, 0),
|
||||
(6450, 6450, 0),
|
||||
(6457, 6459, 0),
|
||||
(6679, 6680, 0),
|
||||
(6683, 6683, 0),
|
||||
(6742, 6742, 0),
|
||||
(6744, 6750, 0),
|
||||
(6752, 6752, 0),
|
||||
(6754, 6754, 0),
|
||||
(6757, 6764, 0),
|
||||
(6771, 6780, 0),
|
||||
(6783, 6783, 0),
|
||||
(6832, 6862, 0),
|
||||
(6912, 6915, 0),
|
||||
(6964, 6964, 0),
|
||||
(6966, 6970, 0),
|
||||
(6972, 6972, 0),
|
||||
(6978, 6978, 0),
|
||||
(7019, 7027, 0),
|
||||
(7040, 7041, 0),
|
||||
(7074, 7077, 0),
|
||||
(7080, 7081, 0),
|
||||
(7083, 7085, 0),
|
||||
(7142, 7142, 0),
|
||||
(7144, 7145, 0),
|
||||
(7149, 7149, 0),
|
||||
(7151, 7153, 0),
|
||||
(7212, 7219, 0),
|
||||
(7222, 7223, 0),
|
||||
(7376, 7378, 0),
|
||||
(7380, 7392, 0),
|
||||
(7394, 7400, 0),
|
||||
(7405, 7405, 0),
|
||||
(7412, 7412, 0),
|
||||
(7416, 7417, 0),
|
||||
(7616, 7679, 0),
|
||||
(8203, 8207, 0),
|
||||
(8232, 8238, 0),
|
||||
(8288, 8291, 0),
|
||||
(8400, 8432, 0),
|
||||
(8986, 8987, 2),
|
||||
(9001, 9002, 2),
|
||||
(9193, 9196, 2),
|
||||
(9200, 9200, 2),
|
||||
(9203, 9203, 2),
|
||||
(9725, 9726, 2),
|
||||
(9748, 9749, 2),
|
||||
(9800, 9811, 2),
|
||||
(9855, 9855, 2),
|
||||
(9875, 9875, 2),
|
||||
(9889, 9889, 2),
|
||||
(9898, 9899, 2),
|
||||
(9917, 9918, 2),
|
||||
(9924, 9925, 2),
|
||||
(9934, 9934, 2),
|
||||
(9940, 9940, 2),
|
||||
(9962, 9962, 2),
|
||||
(9970, 9971, 2),
|
||||
(9973, 9973, 2),
|
||||
(9978, 9978, 2),
|
||||
(9981, 9981, 2),
|
||||
(9989, 9989, 2),
|
||||
(9994, 9995, 2),
|
||||
(10024, 10024, 2),
|
||||
(10060, 10060, 2),
|
||||
(10062, 10062, 2),
|
||||
(10067, 10069, 2),
|
||||
(10071, 10071, 2),
|
||||
(10133, 10135, 2),
|
||||
(10160, 10160, 2),
|
||||
(10175, 10175, 2),
|
||||
(11035, 11036, 2),
|
||||
(11088, 11088, 2),
|
||||
(11093, 11093, 2),
|
||||
(11503, 11505, 0),
|
||||
(11647, 11647, 0),
|
||||
(11744, 11775, 0),
|
||||
(11904, 11929, 2),
|
||||
(11931, 12019, 2),
|
||||
(12032, 12245, 2),
|
||||
(12272, 12283, 2),
|
||||
(12288, 12329, 2),
|
||||
(12330, 12333, 0),
|
||||
(12334, 12350, 2),
|
||||
(12353, 12438, 2),
|
||||
(12441, 12442, 0),
|
||||
(12443, 12543, 2),
|
||||
(12549, 12591, 2),
|
||||
(12593, 12686, 2),
|
||||
(12688, 12771, 2),
|
||||
(12784, 12830, 2),
|
||||
(12832, 12871, 2),
|
||||
(12880, 19903, 2),
|
||||
(19968, 42124, 2),
|
||||
(42128, 42182, 2),
|
||||
(42607, 42610, 0),
|
||||
(42612, 42621, 0),
|
||||
(42654, 42655, 0),
|
||||
(42736, 42737, 0),
|
||||
(43010, 43010, 0),
|
||||
(43014, 43014, 0),
|
||||
(43019, 43019, 0),
|
||||
(43045, 43046, 0),
|
||||
(43052, 43052, 0),
|
||||
(43204, 43205, 0),
|
||||
(43232, 43249, 0),
|
||||
(43263, 43263, 0),
|
||||
(43302, 43309, 0),
|
||||
(43335, 43345, 0),
|
||||
(43360, 43388, 2),
|
||||
(43392, 43394, 0),
|
||||
(43443, 43443, 0),
|
||||
(43446, 43449, 0),
|
||||
(43452, 43453, 0),
|
||||
(43493, 43493, 0),
|
||||
(43561, 43566, 0),
|
||||
(43569, 43570, 0),
|
||||
(43573, 43574, 0),
|
||||
(43587, 43587, 0),
|
||||
(43596, 43596, 0),
|
||||
(43644, 43644, 0),
|
||||
(43696, 43696, 0),
|
||||
(43698, 43700, 0),
|
||||
(43703, 43704, 0),
|
||||
(43710, 43711, 0),
|
||||
(43713, 43713, 0),
|
||||
(43756, 43757, 0),
|
||||
(43766, 43766, 0),
|
||||
(44005, 44005, 0),
|
||||
(44008, 44008, 0),
|
||||
(44013, 44013, 0),
|
||||
(44032, 55203, 2),
|
||||
(63744, 64255, 2),
|
||||
(64286, 64286, 0),
|
||||
(65024, 65039, 0),
|
||||
(65040, 65049, 2),
|
||||
(65056, 65071, 0),
|
||||
(65072, 65106, 2),
|
||||
(65108, 65126, 2),
|
||||
(65128, 65131, 2),
|
||||
(65281, 65376, 2),
|
||||
(65504, 65510, 2),
|
||||
(66045, 66045, 0),
|
||||
(66272, 66272, 0),
|
||||
(66422, 66426, 0),
|
||||
(68097, 68099, 0),
|
||||
(68101, 68102, 0),
|
||||
(68108, 68111, 0),
|
||||
(68152, 68154, 0),
|
||||
(68159, 68159, 0),
|
||||
(68325, 68326, 0),
|
||||
(68900, 68903, 0),
|
||||
(69291, 69292, 0),
|
||||
(69373, 69375, 0),
|
||||
(69446, 69456, 0),
|
||||
(69506, 69509, 0),
|
||||
(69633, 69633, 0),
|
||||
(69688, 69702, 0),
|
||||
(69744, 69744, 0),
|
||||
(69747, 69748, 0),
|
||||
(69759, 69761, 0),
|
||||
(69811, 69814, 0),
|
||||
(69817, 69818, 0),
|
||||
(69826, 69826, 0),
|
||||
(69888, 69890, 0),
|
||||
(69927, 69931, 0),
|
||||
(69933, 69940, 0),
|
||||
(70003, 70003, 0),
|
||||
(70016, 70017, 0),
|
||||
(70070, 70078, 0),
|
||||
(70089, 70092, 0),
|
||||
(70095, 70095, 0),
|
||||
(70191, 70193, 0),
|
||||
(70196, 70196, 0),
|
||||
(70198, 70199, 0),
|
||||
(70206, 70206, 0),
|
||||
(70209, 70209, 0),
|
||||
(70367, 70367, 0),
|
||||
(70371, 70378, 0),
|
||||
(70400, 70401, 0),
|
||||
(70459, 70460, 0),
|
||||
(70464, 70464, 0),
|
||||
(70502, 70508, 0),
|
||||
(70512, 70516, 0),
|
||||
(70712, 70719, 0),
|
||||
(70722, 70724, 0),
|
||||
(70726, 70726, 0),
|
||||
(70750, 70750, 0),
|
||||
(70835, 70840, 0),
|
||||
(70842, 70842, 0),
|
||||
(70847, 70848, 0),
|
||||
(70850, 70851, 0),
|
||||
(71090, 71093, 0),
|
||||
(71100, 71101, 0),
|
||||
(71103, 71104, 0),
|
||||
(71132, 71133, 0),
|
||||
(71219, 71226, 0),
|
||||
(71229, 71229, 0),
|
||||
(71231, 71232, 0),
|
||||
(71339, 71339, 0),
|
||||
(71341, 71341, 0),
|
||||
(71344, 71349, 0),
|
||||
(71351, 71351, 0),
|
||||
(71453, 71455, 0),
|
||||
(71458, 71461, 0),
|
||||
(71463, 71467, 0),
|
||||
(71727, 71735, 0),
|
||||
(71737, 71738, 0),
|
||||
(71995, 71996, 0),
|
||||
(71998, 71998, 0),
|
||||
(72003, 72003, 0),
|
||||
(72148, 72151, 0),
|
||||
(72154, 72155, 0),
|
||||
(72160, 72160, 0),
|
||||
(72193, 72202, 0),
|
||||
(72243, 72248, 0),
|
||||
(72251, 72254, 0),
|
||||
(72263, 72263, 0),
|
||||
(72273, 72278, 0),
|
||||
(72281, 72283, 0),
|
||||
(72330, 72342, 0),
|
||||
(72344, 72345, 0),
|
||||
(72752, 72758, 0),
|
||||
(72760, 72765, 0),
|
||||
(72767, 72767, 0),
|
||||
(72850, 72871, 0),
|
||||
(72874, 72880, 0),
|
||||
(72882, 72883, 0),
|
||||
(72885, 72886, 0),
|
||||
(73009, 73014, 0),
|
||||
(73018, 73018, 0),
|
||||
(73020, 73021, 0),
|
||||
(73023, 73029, 0),
|
||||
(73031, 73031, 0),
|
||||
(73104, 73105, 0),
|
||||
(73109, 73109, 0),
|
||||
(73111, 73111, 0),
|
||||
(73459, 73460, 0),
|
||||
(73472, 73473, 0),
|
||||
(73526, 73530, 0),
|
||||
(73536, 73536, 0),
|
||||
(73538, 73538, 0),
|
||||
(78912, 78912, 0),
|
||||
(78919, 78933, 0),
|
||||
(92912, 92916, 0),
|
||||
(92976, 92982, 0),
|
||||
(94031, 94031, 0),
|
||||
(94095, 94098, 0),
|
||||
(94176, 94179, 2),
|
||||
(94180, 94180, 0),
|
||||
(94192, 94193, 2),
|
||||
(94208, 100343, 2),
|
||||
(100352, 101589, 2),
|
||||
(101632, 101640, 2),
|
||||
(110576, 110579, 2),
|
||||
(110581, 110587, 2),
|
||||
(110589, 110590, 2),
|
||||
(110592, 110882, 2),
|
||||
(110898, 110898, 2),
|
||||
(110928, 110930, 2),
|
||||
(110933, 110933, 2),
|
||||
(110948, 110951, 2),
|
||||
(110960, 111355, 2),
|
||||
(113821, 113822, 0),
|
||||
(118528, 118573, 0),
|
||||
(118576, 118598, 0),
|
||||
(119143, 119145, 0),
|
||||
(119163, 119170, 0),
|
||||
(119173, 119179, 0),
|
||||
(119210, 119213, 0),
|
||||
(119362, 119364, 0),
|
||||
(121344, 121398, 0),
|
||||
(121403, 121452, 0),
|
||||
(121461, 121461, 0),
|
||||
(121476, 121476, 0),
|
||||
(121499, 121503, 0),
|
||||
(121505, 121519, 0),
|
||||
(122880, 122886, 0),
|
||||
(122888, 122904, 0),
|
||||
(122907, 122913, 0),
|
||||
(122915, 122916, 0),
|
||||
(122918, 122922, 0),
|
||||
(123023, 123023, 0),
|
||||
(123184, 123190, 0),
|
||||
(123566, 123566, 0),
|
||||
(123628, 123631, 0),
|
||||
(124140, 124143, 0),
|
||||
(125136, 125142, 0),
|
||||
(125252, 125258, 0),
|
||||
(126980, 126980, 2),
|
||||
(127183, 127183, 2),
|
||||
(127374, 127374, 2),
|
||||
(127377, 127386, 2),
|
||||
(127488, 127490, 2),
|
||||
(127504, 127547, 2),
|
||||
(127552, 127560, 2),
|
||||
(127568, 127569, 2),
|
||||
(127584, 127589, 2),
|
||||
(127744, 127776, 2),
|
||||
(127789, 127797, 2),
|
||||
(127799, 127868, 2),
|
||||
(127870, 127891, 2),
|
||||
(127904, 127946, 2),
|
||||
(127951, 127955, 2),
|
||||
(127968, 127984, 2),
|
||||
(127988, 127988, 2),
|
||||
(127992, 128062, 2),
|
||||
(128064, 128064, 2),
|
||||
(128066, 128252, 2),
|
||||
(128255, 128317, 2),
|
||||
(128331, 128334, 2),
|
||||
(128336, 128359, 2),
|
||||
(128378, 128378, 2),
|
||||
(128405, 128406, 2),
|
||||
(128420, 128420, 2),
|
||||
(128507, 128591, 2),
|
||||
(128640, 128709, 2),
|
||||
(128716, 128716, 2),
|
||||
(128720, 128722, 2),
|
||||
(128725, 128727, 2),
|
||||
(128732, 128735, 2),
|
||||
(128747, 128748, 2),
|
||||
(128756, 128764, 2),
|
||||
(128992, 129003, 2),
|
||||
(129008, 129008, 2),
|
||||
(129292, 129338, 2),
|
||||
(129340, 129349, 2),
|
||||
(129351, 129535, 2),
|
||||
(129648, 129660, 2),
|
||||
(129664, 129672, 2),
|
||||
(129680, 129725, 2),
|
||||
(129727, 129733, 2),
|
||||
(129742, 129755, 2),
|
||||
(129760, 129768, 2),
|
||||
(129776, 129784, 2),
|
||||
(131072, 196605, 2),
|
||||
(196608, 262141, 2),
|
||||
(917760, 917999, 0),
|
||||
]
|
BIN
venv/Lib/site-packages/black/brackets.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/brackets.cp311-win_amd64.pyd
Normal file
Binary file not shown.
382
venv/Lib/site-packages/black/brackets.py
Normal file
382
venv/Lib/site-packages/black/brackets.py
Normal file
@@ -0,0 +1,382 @@
|
||||
"""Builds on top of nodes.py to track brackets."""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Final, Iterable, Optional, Sequence, Union
|
||||
|
||||
from black.nodes import (
|
||||
BRACKET,
|
||||
CLOSING_BRACKETS,
|
||||
COMPARATORS,
|
||||
LOGIC_OPERATORS,
|
||||
MATH_OPERATORS,
|
||||
OPENING_BRACKETS,
|
||||
UNPACKING_PARENTS,
|
||||
VARARGS_PARENTS,
|
||||
is_vararg,
|
||||
syms,
|
||||
)
|
||||
from blib2to3.pgen2 import token
|
||||
from blib2to3.pytree import Leaf, Node
|
||||
|
||||
# types
|
||||
LN = Union[Leaf, Node]
|
||||
Depth = int
|
||||
LeafID = int
|
||||
NodeType = int
|
||||
Priority = int
|
||||
|
||||
|
||||
COMPREHENSION_PRIORITY: Final = 20
|
||||
COMMA_PRIORITY: Final = 18
|
||||
TERNARY_PRIORITY: Final = 16
|
||||
LOGIC_PRIORITY: Final = 14
|
||||
STRING_PRIORITY: Final = 12
|
||||
COMPARATOR_PRIORITY: Final = 10
|
||||
MATH_PRIORITIES: Final = {
|
||||
token.VBAR: 9,
|
||||
token.CIRCUMFLEX: 8,
|
||||
token.AMPER: 7,
|
||||
token.LEFTSHIFT: 6,
|
||||
token.RIGHTSHIFT: 6,
|
||||
token.PLUS: 5,
|
||||
token.MINUS: 5,
|
||||
token.STAR: 4,
|
||||
token.SLASH: 4,
|
||||
token.DOUBLESLASH: 4,
|
||||
token.PERCENT: 4,
|
||||
token.AT: 4,
|
||||
token.TILDE: 3,
|
||||
token.DOUBLESTAR: 2,
|
||||
}
|
||||
DOT_PRIORITY: Final = 1
|
||||
|
||||
|
||||
class BracketMatchError(Exception):
|
||||
"""Raised when an opening bracket is unable to be matched to a closing bracket."""
|
||||
|
||||
|
||||
@dataclass
|
||||
class BracketTracker:
|
||||
"""Keeps track of brackets on a line."""
|
||||
|
||||
depth: int = 0
|
||||
bracket_match: dict[tuple[Depth, NodeType], Leaf] = field(default_factory=dict)
|
||||
delimiters: dict[LeafID, Priority] = field(default_factory=dict)
|
||||
previous: Optional[Leaf] = None
|
||||
_for_loop_depths: list[int] = field(default_factory=list)
|
||||
_lambda_argument_depths: list[int] = field(default_factory=list)
|
||||
invisible: list[Leaf] = field(default_factory=list)
|
||||
|
||||
def mark(self, leaf: Leaf) -> None:
|
||||
"""Mark `leaf` with bracket-related metadata. Keep track of delimiters.
|
||||
|
||||
All leaves receive an int `bracket_depth` field that stores how deep
|
||||
within brackets a given leaf is. 0 means there are no enclosing brackets
|
||||
that started on this line.
|
||||
|
||||
If a leaf is itself a closing bracket and there is a matching opening
|
||||
bracket earlier, it receives an `opening_bracket` field with which it forms a
|
||||
pair. This is a one-directional link to avoid reference cycles. Closing
|
||||
bracket without opening happens on lines continued from previous
|
||||
breaks, e.g. `) -> "ReturnType":` as part of a funcdef where we place
|
||||
the return type annotation on its own line of the previous closing RPAR.
|
||||
|
||||
If a leaf is a delimiter (a token on which Black can split the line if
|
||||
needed) and it's on depth 0, its `id()` is stored in the tracker's
|
||||
`delimiters` field.
|
||||
"""
|
||||
if leaf.type == token.COMMENT:
|
||||
return
|
||||
|
||||
if (
|
||||
self.depth == 0
|
||||
and leaf.type in CLOSING_BRACKETS
|
||||
and (self.depth, leaf.type) not in self.bracket_match
|
||||
):
|
||||
return
|
||||
|
||||
self.maybe_decrement_after_for_loop_variable(leaf)
|
||||
self.maybe_decrement_after_lambda_arguments(leaf)
|
||||
if leaf.type in CLOSING_BRACKETS:
|
||||
self.depth -= 1
|
||||
try:
|
||||
opening_bracket = self.bracket_match.pop((self.depth, leaf.type))
|
||||
except KeyError as e:
|
||||
raise BracketMatchError(
|
||||
"Unable to match a closing bracket to the following opening"
|
||||
f" bracket: {leaf}"
|
||||
) from e
|
||||
leaf.opening_bracket = opening_bracket
|
||||
if not leaf.value:
|
||||
self.invisible.append(leaf)
|
||||
leaf.bracket_depth = self.depth
|
||||
if self.depth == 0:
|
||||
delim = is_split_before_delimiter(leaf, self.previous)
|
||||
if delim and self.previous is not None:
|
||||
self.delimiters[id(self.previous)] = delim
|
||||
else:
|
||||
delim = is_split_after_delimiter(leaf)
|
||||
if delim:
|
||||
self.delimiters[id(leaf)] = delim
|
||||
if leaf.type in OPENING_BRACKETS:
|
||||
self.bracket_match[self.depth, BRACKET[leaf.type]] = leaf
|
||||
self.depth += 1
|
||||
if not leaf.value:
|
||||
self.invisible.append(leaf)
|
||||
self.previous = leaf
|
||||
self.maybe_increment_lambda_arguments(leaf)
|
||||
self.maybe_increment_for_loop_variable(leaf)
|
||||
|
||||
def any_open_for_or_lambda(self) -> bool:
|
||||
"""Return True if there is an open for or lambda expression on the line.
|
||||
|
||||
See maybe_increment_for_loop_variable and maybe_increment_lambda_arguments
|
||||
for details."""
|
||||
return bool(self._for_loop_depths or self._lambda_argument_depths)
|
||||
|
||||
def any_open_brackets(self) -> bool:
|
||||
"""Return True if there is an yet unmatched open bracket on the line."""
|
||||
return bool(self.bracket_match)
|
||||
|
||||
def max_delimiter_priority(self, exclude: Iterable[LeafID] = ()) -> Priority:
|
||||
"""Return the highest priority of a delimiter found on the line.
|
||||
|
||||
Values are consistent with what `is_split_*_delimiter()` return.
|
||||
Raises ValueError on no delimiters.
|
||||
"""
|
||||
return max(v for k, v in self.delimiters.items() if k not in exclude)
|
||||
|
||||
def delimiter_count_with_priority(self, priority: Priority = 0) -> int:
|
||||
"""Return the number of delimiters with the given `priority`.
|
||||
|
||||
If no `priority` is passed, defaults to max priority on the line.
|
||||
"""
|
||||
if not self.delimiters:
|
||||
return 0
|
||||
|
||||
priority = priority or self.max_delimiter_priority()
|
||||
return sum(1 for p in self.delimiters.values() if p == priority)
|
||||
|
||||
def maybe_increment_for_loop_variable(self, leaf: Leaf) -> bool:
|
||||
"""In a for loop, or comprehension, the variables are often unpacks.
|
||||
|
||||
To avoid splitting on the comma in this situation, increase the depth of
|
||||
tokens between `for` and `in`.
|
||||
"""
|
||||
if leaf.type == token.NAME and leaf.value == "for":
|
||||
self.depth += 1
|
||||
self._for_loop_depths.append(self.depth)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def maybe_decrement_after_for_loop_variable(self, leaf: Leaf) -> bool:
|
||||
"""See `maybe_increment_for_loop_variable` above for explanation."""
|
||||
if (
|
||||
self._for_loop_depths
|
||||
and self._for_loop_depths[-1] == self.depth
|
||||
and leaf.type == token.NAME
|
||||
and leaf.value == "in"
|
||||
):
|
||||
self.depth -= 1
|
||||
self._for_loop_depths.pop()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def maybe_increment_lambda_arguments(self, leaf: Leaf) -> bool:
|
||||
"""In a lambda expression, there might be more than one argument.
|
||||
|
||||
To avoid splitting on the comma in this situation, increase the depth of
|
||||
tokens between `lambda` and `:`.
|
||||
"""
|
||||
if leaf.type == token.NAME and leaf.value == "lambda":
|
||||
self.depth += 1
|
||||
self._lambda_argument_depths.append(self.depth)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def maybe_decrement_after_lambda_arguments(self, leaf: Leaf) -> bool:
|
||||
"""See `maybe_increment_lambda_arguments` above for explanation."""
|
||||
if (
|
||||
self._lambda_argument_depths
|
||||
and self._lambda_argument_depths[-1] == self.depth
|
||||
and leaf.type == token.COLON
|
||||
):
|
||||
self.depth -= 1
|
||||
self._lambda_argument_depths.pop()
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
def get_open_lsqb(self) -> Optional[Leaf]:
|
||||
"""Return the most recent opening square bracket (if any)."""
|
||||
return self.bracket_match.get((self.depth - 1, token.RSQB))
|
||||
|
||||
|
||||
def is_split_after_delimiter(leaf: Leaf) -> Priority:
|
||||
"""Return the priority of the `leaf` delimiter, given a line break after it.
|
||||
|
||||
The delimiter priorities returned here are from those delimiters that would
|
||||
cause a line break after themselves.
|
||||
|
||||
Higher numbers are higher priority.
|
||||
"""
|
||||
if leaf.type == token.COMMA:
|
||||
return COMMA_PRIORITY
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def is_split_before_delimiter(leaf: Leaf, previous: Optional[Leaf] = None) -> Priority:
|
||||
"""Return the priority of the `leaf` delimiter, given a line break before it.
|
||||
|
||||
The delimiter priorities returned here are from those delimiters that would
|
||||
cause a line break before themselves.
|
||||
|
||||
Higher numbers are higher priority.
|
||||
"""
|
||||
if is_vararg(leaf, within=VARARGS_PARENTS | UNPACKING_PARENTS):
|
||||
# * and ** might also be MATH_OPERATORS but in this case they are not.
|
||||
# Don't treat them as a delimiter.
|
||||
return 0
|
||||
|
||||
if (
|
||||
leaf.type == token.DOT
|
||||
and leaf.parent
|
||||
and leaf.parent.type not in {syms.import_from, syms.dotted_name}
|
||||
and (previous is None or previous.type in CLOSING_BRACKETS)
|
||||
):
|
||||
return DOT_PRIORITY
|
||||
|
||||
if (
|
||||
leaf.type in MATH_OPERATORS
|
||||
and leaf.parent
|
||||
and leaf.parent.type not in {syms.factor, syms.star_expr}
|
||||
):
|
||||
return MATH_PRIORITIES[leaf.type]
|
||||
|
||||
if leaf.type in COMPARATORS:
|
||||
return COMPARATOR_PRIORITY
|
||||
|
||||
if (
|
||||
leaf.type == token.STRING
|
||||
and previous is not None
|
||||
and previous.type == token.STRING
|
||||
):
|
||||
return STRING_PRIORITY
|
||||
|
||||
if leaf.type not in {token.NAME, token.ASYNC}:
|
||||
return 0
|
||||
|
||||
if (
|
||||
leaf.value == "for"
|
||||
and leaf.parent
|
||||
and leaf.parent.type in {syms.comp_for, syms.old_comp_for}
|
||||
or leaf.type == token.ASYNC
|
||||
):
|
||||
if (
|
||||
not isinstance(leaf.prev_sibling, Leaf)
|
||||
or leaf.prev_sibling.value != "async"
|
||||
):
|
||||
return COMPREHENSION_PRIORITY
|
||||
|
||||
if (
|
||||
leaf.value == "if"
|
||||
and leaf.parent
|
||||
and leaf.parent.type in {syms.comp_if, syms.old_comp_if}
|
||||
):
|
||||
return COMPREHENSION_PRIORITY
|
||||
|
||||
if leaf.value in {"if", "else"} and leaf.parent and leaf.parent.type == syms.test:
|
||||
return TERNARY_PRIORITY
|
||||
|
||||
if leaf.value == "is":
|
||||
return COMPARATOR_PRIORITY
|
||||
|
||||
if (
|
||||
leaf.value == "in"
|
||||
and leaf.parent
|
||||
and leaf.parent.type in {syms.comp_op, syms.comparison}
|
||||
and not (
|
||||
previous is not None
|
||||
and previous.type == token.NAME
|
||||
and previous.value == "not"
|
||||
)
|
||||
):
|
||||
return COMPARATOR_PRIORITY
|
||||
|
||||
if (
|
||||
leaf.value == "not"
|
||||
and leaf.parent
|
||||
and leaf.parent.type == syms.comp_op
|
||||
and not (
|
||||
previous is not None
|
||||
and previous.type == token.NAME
|
||||
and previous.value == "is"
|
||||
)
|
||||
):
|
||||
return COMPARATOR_PRIORITY
|
||||
|
||||
if leaf.value in LOGIC_OPERATORS and leaf.parent:
|
||||
return LOGIC_PRIORITY
|
||||
|
||||
return 0
|
||||
|
||||
|
||||
def max_delimiter_priority_in_atom(node: LN) -> Priority:
|
||||
"""Return maximum delimiter priority inside `node`.
|
||||
|
||||
This is specific to atoms with contents contained in a pair of parentheses.
|
||||
If `node` isn't an atom or there are no enclosing parentheses, returns 0.
|
||||
"""
|
||||
if node.type != syms.atom:
|
||||
return 0
|
||||
|
||||
first = node.children[0]
|
||||
last = node.children[-1]
|
||||
if not (first.type == token.LPAR and last.type == token.RPAR):
|
||||
return 0
|
||||
|
||||
bt = BracketTracker()
|
||||
for c in node.children[1:-1]:
|
||||
if isinstance(c, Leaf):
|
||||
bt.mark(c)
|
||||
else:
|
||||
for leaf in c.leaves():
|
||||
bt.mark(leaf)
|
||||
try:
|
||||
return bt.max_delimiter_priority()
|
||||
|
||||
except ValueError:
|
||||
return 0
|
||||
|
||||
|
||||
def get_leaves_inside_matching_brackets(leaves: Sequence[Leaf]) -> set[LeafID]:
|
||||
"""Return leaves that are inside matching brackets.
|
||||
|
||||
The input `leaves` can have non-matching brackets at the head or tail parts.
|
||||
Matching brackets are included.
|
||||
"""
|
||||
try:
|
||||
# Start with the first opening bracket and ignore closing brackets before.
|
||||
start_index = next(
|
||||
i for i, l in enumerate(leaves) if l.type in OPENING_BRACKETS
|
||||
)
|
||||
except StopIteration:
|
||||
return set()
|
||||
bracket_stack = []
|
||||
ids = set()
|
||||
for i in range(start_index, len(leaves)):
|
||||
leaf = leaves[i]
|
||||
if leaf.type in OPENING_BRACKETS:
|
||||
bracket_stack.append((BRACKET[leaf.type], i))
|
||||
if leaf.type in CLOSING_BRACKETS:
|
||||
if bracket_stack and leaf.type == bracket_stack[-1][0]:
|
||||
_, start = bracket_stack.pop()
|
||||
for j in range(start, i + 1):
|
||||
ids.add(id(leaves[j]))
|
||||
else:
|
||||
break
|
||||
return ids
|
BIN
venv/Lib/site-packages/black/cache.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/cache.cp311-win_amd64.pyd
Normal file
Binary file not shown.
149
venv/Lib/site-packages/black/cache.py
Normal file
149
venv/Lib/site-packages/black/cache.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""Caching of formatted files with feature-based invalidation."""
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
import pickle
|
||||
import sys
|
||||
import tempfile
|
||||
from dataclasses import dataclass, field
|
||||
from pathlib import Path
|
||||
from typing import Iterable, NamedTuple
|
||||
|
||||
from platformdirs import user_cache_dir
|
||||
|
||||
from _black_version import version as __version__
|
||||
from black.mode import Mode
|
||||
from black.output import err
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
from typing import Self
|
||||
else:
|
||||
from typing_extensions import Self
|
||||
|
||||
|
||||
class FileData(NamedTuple):
|
||||
st_mtime: float
|
||||
st_size: int
|
||||
hash: str
|
||||
|
||||
|
||||
def get_cache_dir() -> Path:
|
||||
"""Get the cache directory used by black.
|
||||
|
||||
Users can customize this directory on all systems using `BLACK_CACHE_DIR`
|
||||
environment variable. By default, the cache directory is the user cache directory
|
||||
under the black application.
|
||||
|
||||
This result is immediately set to a constant `black.cache.CACHE_DIR` as to avoid
|
||||
repeated calls.
|
||||
"""
|
||||
# NOTE: Function mostly exists as a clean way to test getting the cache directory.
|
||||
default_cache_dir = user_cache_dir("black")
|
||||
cache_dir = Path(os.environ.get("BLACK_CACHE_DIR", default_cache_dir))
|
||||
cache_dir = cache_dir / __version__
|
||||
return cache_dir
|
||||
|
||||
|
||||
CACHE_DIR = get_cache_dir()
|
||||
|
||||
|
||||
def get_cache_file(mode: Mode) -> Path:
|
||||
return CACHE_DIR / f"cache.{mode.get_cache_key()}.pickle"
|
||||
|
||||
|
||||
@dataclass
|
||||
class Cache:
|
||||
mode: Mode
|
||||
cache_file: Path
|
||||
file_data: dict[str, FileData] = field(default_factory=dict)
|
||||
|
||||
@classmethod
|
||||
def read(cls, mode: Mode) -> Self:
|
||||
"""Read the cache if it exists and is well-formed.
|
||||
|
||||
If it is not well-formed, the call to write later should
|
||||
resolve the issue.
|
||||
"""
|
||||
cache_file = get_cache_file(mode)
|
||||
try:
|
||||
exists = cache_file.exists()
|
||||
except OSError as e:
|
||||
# Likely file too long; see #4172 and #4174
|
||||
err(f"Unable to read cache file {cache_file} due to {e}")
|
||||
return cls(mode, cache_file)
|
||||
if not exists:
|
||||
return cls(mode, cache_file)
|
||||
|
||||
with cache_file.open("rb") as fobj:
|
||||
try:
|
||||
data: dict[str, tuple[float, int, str]] = pickle.load(fobj)
|
||||
file_data = {k: FileData(*v) for k, v in data.items()}
|
||||
except (pickle.UnpicklingError, ValueError, IndexError):
|
||||
return cls(mode, cache_file)
|
||||
|
||||
return cls(mode, cache_file, file_data)
|
||||
|
||||
@staticmethod
|
||||
def hash_digest(path: Path) -> str:
|
||||
"""Return hash digest for path."""
|
||||
|
||||
data = path.read_bytes()
|
||||
return hashlib.sha256(data).hexdigest()
|
||||
|
||||
@staticmethod
|
||||
def get_file_data(path: Path) -> FileData:
|
||||
"""Return file data for path."""
|
||||
|
||||
stat = path.stat()
|
||||
hash = Cache.hash_digest(path)
|
||||
return FileData(stat.st_mtime, stat.st_size, hash)
|
||||
|
||||
def is_changed(self, source: Path) -> bool:
|
||||
"""Check if source has changed compared to cached version."""
|
||||
res_src = source.resolve()
|
||||
old = self.file_data.get(str(res_src))
|
||||
if old is None:
|
||||
return True
|
||||
|
||||
st = res_src.stat()
|
||||
if st.st_size != old.st_size:
|
||||
return True
|
||||
if st.st_mtime != old.st_mtime:
|
||||
new_hash = Cache.hash_digest(res_src)
|
||||
if new_hash != old.hash:
|
||||
return True
|
||||
return False
|
||||
|
||||
def filtered_cached(self, sources: Iterable[Path]) -> tuple[set[Path], set[Path]]:
|
||||
"""Split an iterable of paths in `sources` into two sets.
|
||||
|
||||
The first contains paths of files that modified on disk or are not in the
|
||||
cache. The other contains paths to non-modified files.
|
||||
"""
|
||||
changed: set[Path] = set()
|
||||
done: set[Path] = set()
|
||||
for src in sources:
|
||||
if self.is_changed(src):
|
||||
changed.add(src)
|
||||
else:
|
||||
done.add(src)
|
||||
return changed, done
|
||||
|
||||
def write(self, sources: Iterable[Path]) -> None:
|
||||
"""Update the cache file data and write a new cache file."""
|
||||
self.file_data.update(
|
||||
**{str(src.resolve()): Cache.get_file_data(src) for src in sources}
|
||||
)
|
||||
try:
|
||||
CACHE_DIR.mkdir(parents=True, exist_ok=True)
|
||||
with tempfile.NamedTemporaryFile(
|
||||
dir=str(self.cache_file.parent), delete=False
|
||||
) as f:
|
||||
# We store raw tuples in the cache because it's faster.
|
||||
data: dict[str, tuple[float, int, str]] = {
|
||||
k: (*v,) for k, v in self.file_data.items()
|
||||
}
|
||||
pickle.dump(data, f, protocol=4)
|
||||
os.replace(f.name, self.cache_file)
|
||||
except OSError:
|
||||
pass
|
BIN
venv/Lib/site-packages/black/comments.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/comments.cp311-win_amd64.pyd
Normal file
Binary file not shown.
415
venv/Lib/site-packages/black/comments.py
Normal file
415
venv/Lib/site-packages/black/comments.py
Normal file
@@ -0,0 +1,415 @@
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
from functools import lru_cache
|
||||
from typing import Collection, Final, Iterator, Optional, Union
|
||||
|
||||
from black.mode import Mode, Preview
|
||||
from black.nodes import (
|
||||
CLOSING_BRACKETS,
|
||||
STANDALONE_COMMENT,
|
||||
WHITESPACE,
|
||||
container_of,
|
||||
first_leaf_of,
|
||||
make_simple_prefix,
|
||||
preceding_leaf,
|
||||
syms,
|
||||
)
|
||||
from blib2to3.pgen2 import token
|
||||
from blib2to3.pytree import Leaf, Node
|
||||
|
||||
# types
|
||||
LN = Union[Leaf, Node]
|
||||
|
||||
FMT_OFF: Final = {"# fmt: off", "# fmt:off", "# yapf: disable"}
|
||||
FMT_SKIP: Final = {"# fmt: skip", "# fmt:skip"}
|
||||
FMT_ON: Final = {"# fmt: on", "# fmt:on", "# yapf: enable"}
|
||||
|
||||
COMMENT_EXCEPTIONS = " !:#'"
|
||||
_COMMENT_PREFIX = "# "
|
||||
_COMMENT_LIST_SEPARATOR = ";"
|
||||
|
||||
|
||||
@dataclass
|
||||
class ProtoComment:
|
||||
"""Describes a piece of syntax that is a comment.
|
||||
|
||||
It's not a :class:`blib2to3.pytree.Leaf` so that:
|
||||
|
||||
* it can be cached (`Leaf` objects should not be reused more than once as
|
||||
they store their lineno, column, prefix, and parent information);
|
||||
* `newlines` and `consumed` fields are kept separate from the `value`. This
|
||||
simplifies handling of special marker comments like ``# fmt: off/on``.
|
||||
"""
|
||||
|
||||
type: int # token.COMMENT or STANDALONE_COMMENT
|
||||
value: str # content of the comment
|
||||
newlines: int # how many newlines before the comment
|
||||
consumed: int # how many characters of the original leaf's prefix did we consume
|
||||
form_feed: bool # is there a form feed before the comment
|
||||
leading_whitespace: str # leading whitespace before the comment, if any
|
||||
|
||||
|
||||
def generate_comments(leaf: LN) -> Iterator[Leaf]:
|
||||
"""Clean the prefix of the `leaf` and generate comments from it, if any.
|
||||
|
||||
Comments in lib2to3 are shoved into the whitespace prefix. This happens
|
||||
in `pgen2/driver.py:Driver.parse_tokens()`. This was a brilliant implementation
|
||||
move because it does away with modifying the grammar to include all the
|
||||
possible places in which comments can be placed.
|
||||
|
||||
The sad consequence for us though is that comments don't "belong" anywhere.
|
||||
This is why this function generates simple parentless Leaf objects for
|
||||
comments. We simply don't know what the correct parent should be.
|
||||
|
||||
No matter though, we can live without this. We really only need to
|
||||
differentiate between inline and standalone comments. The latter don't
|
||||
share the line with any code.
|
||||
|
||||
Inline comments are emitted as regular token.COMMENT leaves. Standalone
|
||||
are emitted with a fake STANDALONE_COMMENT token identifier.
|
||||
"""
|
||||
total_consumed = 0
|
||||
for pc in list_comments(leaf.prefix, is_endmarker=leaf.type == token.ENDMARKER):
|
||||
total_consumed = pc.consumed
|
||||
prefix = make_simple_prefix(pc.newlines, pc.form_feed)
|
||||
yield Leaf(pc.type, pc.value, prefix=prefix)
|
||||
normalize_trailing_prefix(leaf, total_consumed)
|
||||
|
||||
|
||||
@lru_cache(maxsize=4096)
|
||||
def list_comments(prefix: str, *, is_endmarker: bool) -> list[ProtoComment]:
|
||||
"""Return a list of :class:`ProtoComment` objects parsed from the given `prefix`."""
|
||||
result: list[ProtoComment] = []
|
||||
if not prefix or "#" not in prefix:
|
||||
return result
|
||||
|
||||
consumed = 0
|
||||
nlines = 0
|
||||
ignored_lines = 0
|
||||
form_feed = False
|
||||
for index, full_line in enumerate(re.split("\r?\n", prefix)):
|
||||
consumed += len(full_line) + 1 # adding the length of the split '\n'
|
||||
match = re.match(r"^(\s*)(\S.*|)$", full_line)
|
||||
assert match
|
||||
whitespace, line = match.groups()
|
||||
if not line:
|
||||
nlines += 1
|
||||
if "\f" in full_line:
|
||||
form_feed = True
|
||||
if not line.startswith("#"):
|
||||
# Escaped newlines outside of a comment are not really newlines at
|
||||
# all. We treat a single-line comment following an escaped newline
|
||||
# as a simple trailing comment.
|
||||
if line.endswith("\\"):
|
||||
ignored_lines += 1
|
||||
continue
|
||||
|
||||
if index == ignored_lines and not is_endmarker:
|
||||
comment_type = token.COMMENT # simple trailing comment
|
||||
else:
|
||||
comment_type = STANDALONE_COMMENT
|
||||
comment = make_comment(line)
|
||||
result.append(
|
||||
ProtoComment(
|
||||
type=comment_type,
|
||||
value=comment,
|
||||
newlines=nlines,
|
||||
consumed=consumed,
|
||||
form_feed=form_feed,
|
||||
leading_whitespace=whitespace,
|
||||
)
|
||||
)
|
||||
form_feed = False
|
||||
nlines = 0
|
||||
return result
|
||||
|
||||
|
||||
def normalize_trailing_prefix(leaf: LN, total_consumed: int) -> None:
|
||||
"""Normalize the prefix that's left over after generating comments.
|
||||
|
||||
Note: don't use backslashes for formatting or you'll lose your voting rights.
|
||||
"""
|
||||
remainder = leaf.prefix[total_consumed:]
|
||||
if "\\" not in remainder:
|
||||
nl_count = remainder.count("\n")
|
||||
form_feed = "\f" in remainder and remainder.endswith("\n")
|
||||
leaf.prefix = make_simple_prefix(nl_count, form_feed)
|
||||
return
|
||||
|
||||
leaf.prefix = ""
|
||||
|
||||
|
||||
def make_comment(content: str) -> str:
|
||||
"""Return a consistently formatted comment from the given `content` string.
|
||||
|
||||
All comments (except for "##", "#!", "#:", '#'") should have a single
|
||||
space between the hash sign and the content.
|
||||
|
||||
If `content` didn't start with a hash sign, one is provided.
|
||||
"""
|
||||
content = content.rstrip()
|
||||
if not content:
|
||||
return "#"
|
||||
|
||||
if content[0] == "#":
|
||||
content = content[1:]
|
||||
NON_BREAKING_SPACE = " "
|
||||
if (
|
||||
content
|
||||
and content[0] == NON_BREAKING_SPACE
|
||||
and not content.lstrip().startswith("type:")
|
||||
):
|
||||
content = " " + content[1:] # Replace NBSP by a simple space
|
||||
if content and content[0] not in COMMENT_EXCEPTIONS:
|
||||
content = " " + content
|
||||
return "#" + content
|
||||
|
||||
|
||||
def normalize_fmt_off(
|
||||
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
|
||||
) -> None:
|
||||
"""Convert content between `# fmt: off`/`# fmt: on` into standalone comments."""
|
||||
try_again = True
|
||||
while try_again:
|
||||
try_again = convert_one_fmt_off_pair(node, mode, lines)
|
||||
|
||||
|
||||
def convert_one_fmt_off_pair(
|
||||
node: Node, mode: Mode, lines: Collection[tuple[int, int]]
|
||||
) -> bool:
|
||||
"""Convert content of a single `# fmt: off`/`# fmt: on` into a standalone comment.
|
||||
|
||||
Returns True if a pair was converted.
|
||||
"""
|
||||
for leaf in node.leaves():
|
||||
previous_consumed = 0
|
||||
for comment in list_comments(leaf.prefix, is_endmarker=False):
|
||||
is_fmt_off = comment.value in FMT_OFF
|
||||
is_fmt_skip = _contains_fmt_skip_comment(comment.value, mode)
|
||||
if (not is_fmt_off and not is_fmt_skip) or (
|
||||
# Invalid use when `# fmt: off` is applied before a closing bracket.
|
||||
is_fmt_off
|
||||
and leaf.type in CLOSING_BRACKETS
|
||||
):
|
||||
previous_consumed = comment.consumed
|
||||
continue
|
||||
# We only want standalone comments. If there's no previous leaf or
|
||||
# the previous leaf is indentation, it's a standalone comment in
|
||||
# disguise.
|
||||
if comment.type != STANDALONE_COMMENT:
|
||||
prev = preceding_leaf(leaf)
|
||||
if prev:
|
||||
if is_fmt_off and prev.type not in WHITESPACE:
|
||||
continue
|
||||
if is_fmt_skip and prev.type in WHITESPACE:
|
||||
continue
|
||||
|
||||
ignored_nodes = list(generate_ignored_nodes(leaf, comment, mode))
|
||||
if not ignored_nodes:
|
||||
continue
|
||||
|
||||
first = ignored_nodes[0] # Can be a container node with the `leaf`.
|
||||
parent = first.parent
|
||||
prefix = first.prefix
|
||||
if comment.value in FMT_OFF:
|
||||
first.prefix = prefix[comment.consumed :]
|
||||
if is_fmt_skip:
|
||||
first.prefix = ""
|
||||
standalone_comment_prefix = prefix
|
||||
else:
|
||||
standalone_comment_prefix = (
|
||||
prefix[:previous_consumed] + "\n" * comment.newlines
|
||||
)
|
||||
hidden_value = "".join(str(n) for n in ignored_nodes)
|
||||
comment_lineno = leaf.lineno - comment.newlines
|
||||
if comment.value in FMT_OFF:
|
||||
fmt_off_prefix = ""
|
||||
if len(lines) > 0 and not any(
|
||||
line[0] <= comment_lineno <= line[1] for line in lines
|
||||
):
|
||||
# keeping indentation of comment by preserving original whitespaces.
|
||||
fmt_off_prefix = prefix.split(comment.value)[0]
|
||||
if "\n" in fmt_off_prefix:
|
||||
fmt_off_prefix = fmt_off_prefix.split("\n")[-1]
|
||||
standalone_comment_prefix += fmt_off_prefix
|
||||
hidden_value = comment.value + "\n" + hidden_value
|
||||
if is_fmt_skip:
|
||||
hidden_value += (
|
||||
comment.leading_whitespace
|
||||
if Preview.no_normalize_fmt_skip_whitespace in mode
|
||||
else " "
|
||||
) + comment.value
|
||||
if hidden_value.endswith("\n"):
|
||||
# That happens when one of the `ignored_nodes` ended with a NEWLINE
|
||||
# leaf (possibly followed by a DEDENT).
|
||||
hidden_value = hidden_value[:-1]
|
||||
first_idx: Optional[int] = None
|
||||
for ignored in ignored_nodes:
|
||||
index = ignored.remove()
|
||||
if first_idx is None:
|
||||
first_idx = index
|
||||
assert parent is not None, "INTERNAL ERROR: fmt: on/off handling (1)"
|
||||
assert first_idx is not None, "INTERNAL ERROR: fmt: on/off handling (2)"
|
||||
parent.insert_child(
|
||||
first_idx,
|
||||
Leaf(
|
||||
STANDALONE_COMMENT,
|
||||
hidden_value,
|
||||
prefix=standalone_comment_prefix,
|
||||
fmt_pass_converted_first_leaf=first_leaf_of(first),
|
||||
),
|
||||
)
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def generate_ignored_nodes(
|
||||
leaf: Leaf, comment: ProtoComment, mode: Mode
|
||||
) -> Iterator[LN]:
|
||||
"""Starting from the container of `leaf`, generate all leaves until `# fmt: on`.
|
||||
|
||||
If comment is skip, returns leaf only.
|
||||
Stops at the end of the block.
|
||||
"""
|
||||
if _contains_fmt_skip_comment(comment.value, mode):
|
||||
yield from _generate_ignored_nodes_from_fmt_skip(leaf, comment)
|
||||
return
|
||||
container: Optional[LN] = container_of(leaf)
|
||||
while container is not None and container.type != token.ENDMARKER:
|
||||
if is_fmt_on(container):
|
||||
return
|
||||
|
||||
# fix for fmt: on in children
|
||||
if children_contains_fmt_on(container):
|
||||
for index, child in enumerate(container.children):
|
||||
if isinstance(child, Leaf) and is_fmt_on(child):
|
||||
if child.type in CLOSING_BRACKETS:
|
||||
# This means `# fmt: on` is placed at a different bracket level
|
||||
# than `# fmt: off`. This is an invalid use, but as a courtesy,
|
||||
# we include this closing bracket in the ignored nodes.
|
||||
# The alternative is to fail the formatting.
|
||||
yield child
|
||||
return
|
||||
if (
|
||||
child.type == token.INDENT
|
||||
and index < len(container.children) - 1
|
||||
and children_contains_fmt_on(container.children[index + 1])
|
||||
):
|
||||
# This means `# fmt: on` is placed right after an indentation
|
||||
# level, and we shouldn't swallow the previous INDENT token.
|
||||
return
|
||||
if children_contains_fmt_on(child):
|
||||
return
|
||||
yield child
|
||||
else:
|
||||
if container.type == token.DEDENT and container.next_sibling is None:
|
||||
# This can happen when there is no matching `# fmt: on` comment at the
|
||||
# same level as `# fmt: on`. We need to keep this DEDENT.
|
||||
return
|
||||
yield container
|
||||
container = container.next_sibling
|
||||
|
||||
|
||||
def _generate_ignored_nodes_from_fmt_skip(
|
||||
leaf: Leaf, comment: ProtoComment
|
||||
) -> Iterator[LN]:
|
||||
"""Generate all leaves that should be ignored by the `# fmt: skip` from `leaf`."""
|
||||
prev_sibling = leaf.prev_sibling
|
||||
parent = leaf.parent
|
||||
# Need to properly format the leaf prefix to compare it to comment.value,
|
||||
# which is also formatted
|
||||
comments = list_comments(leaf.prefix, is_endmarker=False)
|
||||
if not comments or comment.value != comments[0].value:
|
||||
return
|
||||
if prev_sibling is not None:
|
||||
leaf.prefix = ""
|
||||
siblings = [prev_sibling]
|
||||
while "\n" not in prev_sibling.prefix and prev_sibling.prev_sibling is not None:
|
||||
prev_sibling = prev_sibling.prev_sibling
|
||||
siblings.insert(0, prev_sibling)
|
||||
yield from siblings
|
||||
elif (
|
||||
parent is not None and parent.type == syms.suite and leaf.type == token.NEWLINE
|
||||
):
|
||||
# The `# fmt: skip` is on the colon line of the if/while/def/class/...
|
||||
# statements. The ignored nodes should be previous siblings of the
|
||||
# parent suite node.
|
||||
leaf.prefix = ""
|
||||
ignored_nodes: list[LN] = []
|
||||
parent_sibling = parent.prev_sibling
|
||||
while parent_sibling is not None and parent_sibling.type != syms.suite:
|
||||
ignored_nodes.insert(0, parent_sibling)
|
||||
parent_sibling = parent_sibling.prev_sibling
|
||||
# Special case for `async_stmt` where the ASYNC token is on the
|
||||
# grandparent node.
|
||||
grandparent = parent.parent
|
||||
if (
|
||||
grandparent is not None
|
||||
and grandparent.prev_sibling is not None
|
||||
and grandparent.prev_sibling.type == token.ASYNC
|
||||
):
|
||||
ignored_nodes.insert(0, grandparent.prev_sibling)
|
||||
yield from iter(ignored_nodes)
|
||||
|
||||
|
||||
def is_fmt_on(container: LN) -> bool:
|
||||
"""Determine whether formatting is switched on within a container.
|
||||
Determined by whether the last `# fmt:` comment is `on` or `off`.
|
||||
"""
|
||||
fmt_on = False
|
||||
for comment in list_comments(container.prefix, is_endmarker=False):
|
||||
if comment.value in FMT_ON:
|
||||
fmt_on = True
|
||||
elif comment.value in FMT_OFF:
|
||||
fmt_on = False
|
||||
return fmt_on
|
||||
|
||||
|
||||
def children_contains_fmt_on(container: LN) -> bool:
|
||||
"""Determine if children have formatting switched on."""
|
||||
for child in container.children:
|
||||
leaf = first_leaf_of(child)
|
||||
if leaf is not None and is_fmt_on(leaf):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def contains_pragma_comment(comment_list: list[Leaf]) -> bool:
|
||||
"""
|
||||
Returns:
|
||||
True iff one of the comments in @comment_list is a pragma used by one
|
||||
of the more common static analysis tools for python (e.g. mypy, flake8,
|
||||
pylint).
|
||||
"""
|
||||
for comment in comment_list:
|
||||
if comment.value.startswith(("# type:", "# noqa", "# pylint:")):
|
||||
return True
|
||||
|
||||
return False
|
||||
|
||||
|
||||
def _contains_fmt_skip_comment(comment_line: str, mode: Mode) -> bool:
|
||||
"""
|
||||
Checks if the given comment contains FMT_SKIP alone or paired with other comments.
|
||||
Matching styles:
|
||||
# fmt:skip <-- single comment
|
||||
# noqa:XXX # fmt:skip # a nice line <-- multiple comments (Preview)
|
||||
# pylint:XXX; fmt:skip <-- list of comments (; separated, Preview)
|
||||
"""
|
||||
semantic_comment_blocks = [
|
||||
comment_line,
|
||||
*[
|
||||
_COMMENT_PREFIX + comment.strip()
|
||||
for comment in comment_line.split(_COMMENT_PREFIX)[1:]
|
||||
],
|
||||
*[
|
||||
_COMMENT_PREFIX + comment.strip()
|
||||
for comment in comment_line.strip(_COMMENT_PREFIX).split(
|
||||
_COMMENT_LIST_SEPARATOR
|
||||
)
|
||||
],
|
||||
]
|
||||
|
||||
return any(comment in FMT_SKIP for comment in semantic_comment_blocks)
|
190
venv/Lib/site-packages/black/concurrency.py
Normal file
190
venv/Lib/site-packages/black/concurrency.py
Normal file
@@ -0,0 +1,190 @@
|
||||
"""
|
||||
Formatting many files at once via multiprocessing. Contains entrypoint and utilities.
|
||||
|
||||
NOTE: this module is only imported if we need to format several files at once.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
import traceback
|
||||
from concurrent.futures import Executor, ProcessPoolExecutor, ThreadPoolExecutor
|
||||
from multiprocessing import Manager
|
||||
from pathlib import Path
|
||||
from typing import Any, Iterable, Optional
|
||||
|
||||
from mypy_extensions import mypyc_attr
|
||||
|
||||
from black import WriteBack, format_file_in_place
|
||||
from black.cache import Cache
|
||||
from black.mode import Mode
|
||||
from black.output import err
|
||||
from black.report import Changed, Report
|
||||
|
||||
|
||||
def maybe_install_uvloop() -> None:
|
||||
"""If our environment has uvloop installed we use it.
|
||||
|
||||
This is called only from command-line entry points to avoid
|
||||
interfering with the parent process if Black is used as a library.
|
||||
"""
|
||||
try:
|
||||
import uvloop
|
||||
|
||||
uvloop.install()
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def cancel(tasks: Iterable["asyncio.Future[Any]"]) -> None:
|
||||
"""asyncio signal handler that cancels all `tasks` and reports to stderr."""
|
||||
err("Aborted!")
|
||||
for task in tasks:
|
||||
task.cancel()
|
||||
|
||||
|
||||
def shutdown(loop: asyncio.AbstractEventLoop) -> None:
|
||||
"""Cancel all pending tasks on `loop`, wait for them, and close the loop."""
|
||||
try:
|
||||
# This part is borrowed from asyncio/runners.py in Python 3.7b2.
|
||||
to_cancel = [task for task in asyncio.all_tasks(loop) if not task.done()]
|
||||
if not to_cancel:
|
||||
return
|
||||
|
||||
for task in to_cancel:
|
||||
task.cancel()
|
||||
loop.run_until_complete(asyncio.gather(*to_cancel, return_exceptions=True))
|
||||
finally:
|
||||
# `concurrent.futures.Future` objects cannot be cancelled once they
|
||||
# are already running. There might be some when the `shutdown()` happened.
|
||||
# Silence their logger's spew about the event loop being closed.
|
||||
cf_logger = logging.getLogger("concurrent.futures")
|
||||
cf_logger.setLevel(logging.CRITICAL)
|
||||
loop.close()
|
||||
|
||||
|
||||
# diff-shades depends on being to monkeypatch this function to operate. I know it's
|
||||
# not ideal, but this shouldn't cause any issues ... hopefully. ~ichard26
|
||||
@mypyc_attr(patchable=True)
|
||||
def reformat_many(
|
||||
sources: set[Path],
|
||||
fast: bool,
|
||||
write_back: WriteBack,
|
||||
mode: Mode,
|
||||
report: Report,
|
||||
workers: Optional[int],
|
||||
) -> None:
|
||||
"""Reformat multiple files using a ProcessPoolExecutor."""
|
||||
maybe_install_uvloop()
|
||||
|
||||
executor: Executor
|
||||
if workers is None:
|
||||
workers = int(os.environ.get("BLACK_NUM_WORKERS", 0))
|
||||
workers = workers or os.cpu_count() or 1
|
||||
if sys.platform == "win32":
|
||||
# Work around https://bugs.python.org/issue26903
|
||||
workers = min(workers, 60)
|
||||
try:
|
||||
executor = ProcessPoolExecutor(max_workers=workers)
|
||||
except (ImportError, NotImplementedError, OSError):
|
||||
# we arrive here if the underlying system does not support multi-processing
|
||||
# like in AWS Lambda or Termux, in which case we gracefully fallback to
|
||||
# a ThreadPoolExecutor with just a single worker (more workers would not do us
|
||||
# any good due to the Global Interpreter Lock)
|
||||
executor = ThreadPoolExecutor(max_workers=1)
|
||||
|
||||
loop = asyncio.new_event_loop()
|
||||
asyncio.set_event_loop(loop)
|
||||
try:
|
||||
loop.run_until_complete(
|
||||
schedule_formatting(
|
||||
sources=sources,
|
||||
fast=fast,
|
||||
write_back=write_back,
|
||||
mode=mode,
|
||||
report=report,
|
||||
loop=loop,
|
||||
executor=executor,
|
||||
)
|
||||
)
|
||||
finally:
|
||||
try:
|
||||
shutdown(loop)
|
||||
finally:
|
||||
asyncio.set_event_loop(None)
|
||||
if executor is not None:
|
||||
executor.shutdown()
|
||||
|
||||
|
||||
async def schedule_formatting(
|
||||
sources: set[Path],
|
||||
fast: bool,
|
||||
write_back: WriteBack,
|
||||
mode: Mode,
|
||||
report: "Report",
|
||||
loop: asyncio.AbstractEventLoop,
|
||||
executor: "Executor",
|
||||
) -> None:
|
||||
"""Run formatting of `sources` in parallel using the provided `executor`.
|
||||
|
||||
(Use ProcessPoolExecutors for actual parallelism.)
|
||||
|
||||
`write_back`, `fast`, and `mode` options are passed to
|
||||
:func:`format_file_in_place`.
|
||||
"""
|
||||
cache = Cache.read(mode)
|
||||
if write_back not in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
|
||||
sources, cached = cache.filtered_cached(sources)
|
||||
for src in sorted(cached):
|
||||
report.done(src, Changed.CACHED)
|
||||
if not sources:
|
||||
return
|
||||
|
||||
cancelled = []
|
||||
sources_to_cache = []
|
||||
lock = None
|
||||
if write_back in (WriteBack.DIFF, WriteBack.COLOR_DIFF):
|
||||
# For diff output, we need locks to ensure we don't interleave output
|
||||
# from different processes.
|
||||
manager = Manager()
|
||||
lock = manager.Lock()
|
||||
tasks = {
|
||||
asyncio.ensure_future(
|
||||
loop.run_in_executor(
|
||||
executor, format_file_in_place, src, fast, mode, write_back, lock
|
||||
)
|
||||
): src
|
||||
for src in sorted(sources)
|
||||
}
|
||||
pending = tasks.keys()
|
||||
try:
|
||||
loop.add_signal_handler(signal.SIGINT, cancel, pending)
|
||||
loop.add_signal_handler(signal.SIGTERM, cancel, pending)
|
||||
except NotImplementedError:
|
||||
# There are no good alternatives for these on Windows.
|
||||
pass
|
||||
while pending:
|
||||
done, _ = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED)
|
||||
for task in done:
|
||||
src = tasks.pop(task)
|
||||
if task.cancelled():
|
||||
cancelled.append(task)
|
||||
elif exc := task.exception():
|
||||
if report.verbose:
|
||||
traceback.print_exception(type(exc), exc, exc.__traceback__)
|
||||
report.failed(src, str(exc))
|
||||
else:
|
||||
changed = Changed.YES if task.result() else Changed.NO
|
||||
# If the file was written back or was successfully checked as
|
||||
# well-formatted, store this information in the cache.
|
||||
if write_back is WriteBack.YES or (
|
||||
write_back is WriteBack.CHECK and changed is Changed.NO
|
||||
):
|
||||
sources_to_cache.append(src)
|
||||
report.done(src, changed)
|
||||
if cancelled:
|
||||
await asyncio.gather(*cancelled, return_exceptions=True)
|
||||
if sources_to_cache:
|
||||
cache.write(sources_to_cache)
|
BIN
venv/Lib/site-packages/black/const.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/const.cp311-win_amd64.pyd
Normal file
Binary file not shown.
4
venv/Lib/site-packages/black/const.py
Normal file
4
venv/Lib/site-packages/black/const.py
Normal file
@@ -0,0 +1,4 @@
|
||||
DEFAULT_LINE_LENGTH = 88
|
||||
DEFAULT_EXCLUDES = r"/(\.direnv|\.eggs|\.git|\.hg|\.ipynb_checkpoints|\.mypy_cache|\.nox|\.pytest_cache|\.ruff_cache|\.tox|\.svn|\.venv|\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/" # noqa: B950
|
||||
DEFAULT_INCLUDES = r"(\.pyi?|\.ipynb)$"
|
||||
STDIN_PLACEHOLDER = "__BLACK_STDIN_FILENAME__"
|
54
venv/Lib/site-packages/black/debug.py
Normal file
54
venv/Lib/site-packages/black/debug.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Iterator, TypeVar, Union
|
||||
|
||||
from black.nodes import Visitor
|
||||
from black.output import out
|
||||
from black.parsing import lib2to3_parse
|
||||
from blib2to3.pgen2 import token
|
||||
from blib2to3.pytree import Leaf, Node, type_repr
|
||||
|
||||
LN = Union[Leaf, Node]
|
||||
T = TypeVar("T")
|
||||
|
||||
|
||||
@dataclass
|
||||
class DebugVisitor(Visitor[T]):
|
||||
tree_depth: int = 0
|
||||
list_output: list[str] = field(default_factory=list)
|
||||
print_output: bool = True
|
||||
|
||||
def out(self, message: str, *args: Any, **kwargs: Any) -> None:
|
||||
self.list_output.append(message)
|
||||
if self.print_output:
|
||||
out(message, *args, **kwargs)
|
||||
|
||||
def visit_default(self, node: LN) -> Iterator[T]:
|
||||
indent = " " * (2 * self.tree_depth)
|
||||
if isinstance(node, Node):
|
||||
_type = type_repr(node.type)
|
||||
self.out(f"{indent}{_type}", fg="yellow")
|
||||
self.tree_depth += 1
|
||||
for child in node.children:
|
||||
yield from self.visit(child)
|
||||
|
||||
self.tree_depth -= 1
|
||||
self.out(f"{indent}/{_type}", fg="yellow", bold=False)
|
||||
else:
|
||||
_type = token.tok_name.get(node.type, str(node.type))
|
||||
self.out(f"{indent}{_type}", fg="blue", nl=False)
|
||||
if node.prefix:
|
||||
# We don't have to handle prefixes for `Node` objects since
|
||||
# that delegates to the first child anyway.
|
||||
self.out(f" {node.prefix!r}", fg="green", bold=False, nl=False)
|
||||
self.out(f" {node.value!r}", fg="blue", bold=False)
|
||||
|
||||
@classmethod
|
||||
def show(cls, code: Union[str, Leaf, Node]) -> None:
|
||||
"""Pretty-print the lib2to3 AST of a given string of `code`.
|
||||
|
||||
Convenience method for debugging.
|
||||
"""
|
||||
v: DebugVisitor[None] = DebugVisitor()
|
||||
if isinstance(code, str):
|
||||
code = lib2to3_parse(code)
|
||||
list(v.visit(code))
|
433
venv/Lib/site-packages/black/files.py
Normal file
433
venv/Lib/site-packages/black/files.py
Normal file
@@ -0,0 +1,433 @@
|
||||
import io
|
||||
import os
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import (
|
||||
TYPE_CHECKING,
|
||||
Any,
|
||||
Iterable,
|
||||
Iterator,
|
||||
Optional,
|
||||
Pattern,
|
||||
Sequence,
|
||||
Union,
|
||||
)
|
||||
|
||||
from mypy_extensions import mypyc_attr
|
||||
from packaging.specifiers import InvalidSpecifier, Specifier, SpecifierSet
|
||||
from packaging.version import InvalidVersion, Version
|
||||
from pathspec import PathSpec
|
||||
from pathspec.patterns.gitwildmatch import GitWildMatchPatternError
|
||||
|
||||
if sys.version_info >= (3, 11):
|
||||
try:
|
||||
import tomllib
|
||||
except ImportError:
|
||||
# Help users on older alphas
|
||||
if not TYPE_CHECKING:
|
||||
import tomli as tomllib
|
||||
else:
|
||||
import tomli as tomllib
|
||||
|
||||
from black.handle_ipynb_magics import jupyter_dependencies_are_installed
|
||||
from black.mode import TargetVersion
|
||||
from black.output import err
|
||||
from black.report import Report
|
||||
|
||||
if TYPE_CHECKING:
|
||||
import colorama # noqa: F401
|
||||
|
||||
|
||||
@lru_cache
|
||||
def _load_toml(path: Union[Path, str]) -> dict[str, Any]:
|
||||
with open(path, "rb") as f:
|
||||
return tomllib.load(f)
|
||||
|
||||
|
||||
@lru_cache
|
||||
def _cached_resolve(path: Path) -> Path:
|
||||
return path.resolve()
|
||||
|
||||
|
||||
@lru_cache
|
||||
def find_project_root(
|
||||
srcs: Sequence[str], stdin_filename: Optional[str] = None
|
||||
) -> tuple[Path, str]:
|
||||
"""Return a directory containing .git, .hg, or pyproject.toml.
|
||||
|
||||
pyproject.toml files are only considered if they contain a [tool.black]
|
||||
section and are ignored otherwise.
|
||||
|
||||
That directory will be a common parent of all files and directories
|
||||
passed in `srcs`.
|
||||
|
||||
If no directory in the tree contains a marker that would specify it's the
|
||||
project root, the root of the file system is returned.
|
||||
|
||||
Returns a two-tuple with the first element as the project root path and
|
||||
the second element as a string describing the method by which the
|
||||
project root was discovered.
|
||||
"""
|
||||
if stdin_filename is not None:
|
||||
srcs = tuple(stdin_filename if s == "-" else s for s in srcs)
|
||||
if not srcs:
|
||||
srcs = [str(_cached_resolve(Path.cwd()))]
|
||||
|
||||
path_srcs = [_cached_resolve(Path(Path.cwd(), src)) for src in srcs]
|
||||
|
||||
# A list of lists of parents for each 'src'. 'src' is included as a
|
||||
# "parent" of itself if it is a directory
|
||||
src_parents = [
|
||||
list(path.parents) + ([path] if path.is_dir() else []) for path in path_srcs
|
||||
]
|
||||
|
||||
common_base = max(
|
||||
set.intersection(*(set(parents) for parents in src_parents)),
|
||||
key=lambda path: path.parts,
|
||||
)
|
||||
|
||||
for directory in (common_base, *common_base.parents):
|
||||
if (directory / ".git").exists():
|
||||
return directory, ".git directory"
|
||||
|
||||
if (directory / ".hg").is_dir():
|
||||
return directory, ".hg directory"
|
||||
|
||||
if (directory / "pyproject.toml").is_file():
|
||||
pyproject_toml = _load_toml(directory / "pyproject.toml")
|
||||
if "black" in pyproject_toml.get("tool", {}):
|
||||
return directory, "pyproject.toml"
|
||||
|
||||
return directory, "file system root"
|
||||
|
||||
|
||||
def find_pyproject_toml(
|
||||
path_search_start: tuple[str, ...], stdin_filename: Optional[str] = None
|
||||
) -> Optional[str]:
|
||||
"""Find the absolute filepath to a pyproject.toml if it exists"""
|
||||
path_project_root, _ = find_project_root(path_search_start, stdin_filename)
|
||||
path_pyproject_toml = path_project_root / "pyproject.toml"
|
||||
if path_pyproject_toml.is_file():
|
||||
return str(path_pyproject_toml)
|
||||
|
||||
try:
|
||||
path_user_pyproject_toml = find_user_pyproject_toml()
|
||||
return (
|
||||
str(path_user_pyproject_toml)
|
||||
if path_user_pyproject_toml.is_file()
|
||||
else None
|
||||
)
|
||||
except (PermissionError, RuntimeError) as e:
|
||||
# We do not have access to the user-level config directory, so ignore it.
|
||||
err(f"Ignoring user configuration directory due to {e!r}")
|
||||
return None
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
|
||||
def parse_pyproject_toml(path_config: str) -> dict[str, Any]:
|
||||
"""Parse a pyproject toml file, pulling out relevant parts for Black.
|
||||
|
||||
If parsing fails, will raise a tomllib.TOMLDecodeError.
|
||||
"""
|
||||
pyproject_toml = _load_toml(path_config)
|
||||
config: dict[str, Any] = pyproject_toml.get("tool", {}).get("black", {})
|
||||
config = {k.replace("--", "").replace("-", "_"): v for k, v in config.items()}
|
||||
|
||||
if "target_version" not in config:
|
||||
inferred_target_version = infer_target_version(pyproject_toml)
|
||||
if inferred_target_version is not None:
|
||||
config["target_version"] = [v.name.lower() for v in inferred_target_version]
|
||||
|
||||
return config
|
||||
|
||||
|
||||
def infer_target_version(
|
||||
pyproject_toml: dict[str, Any],
|
||||
) -> Optional[list[TargetVersion]]:
|
||||
"""Infer Black's target version from the project metadata in pyproject.toml.
|
||||
|
||||
Supports the PyPA standard format (PEP 621):
|
||||
https://packaging.python.org/en/latest/specifications/declaring-project-metadata/#requires-python
|
||||
|
||||
If the target version cannot be inferred, returns None.
|
||||
"""
|
||||
project_metadata = pyproject_toml.get("project", {})
|
||||
requires_python = project_metadata.get("requires-python", None)
|
||||
if requires_python is not None:
|
||||
try:
|
||||
return parse_req_python_version(requires_python)
|
||||
except InvalidVersion:
|
||||
pass
|
||||
try:
|
||||
return parse_req_python_specifier(requires_python)
|
||||
except (InvalidSpecifier, InvalidVersion):
|
||||
pass
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def parse_req_python_version(requires_python: str) -> Optional[list[TargetVersion]]:
|
||||
"""Parse a version string (i.e. ``"3.7"``) to a list of TargetVersion.
|
||||
|
||||
If parsing fails, will raise a packaging.version.InvalidVersion error.
|
||||
If the parsed version cannot be mapped to a valid TargetVersion, returns None.
|
||||
"""
|
||||
version = Version(requires_python)
|
||||
if version.release[0] != 3:
|
||||
return None
|
||||
try:
|
||||
return [TargetVersion(version.release[1])]
|
||||
except (IndexError, ValueError):
|
||||
return None
|
||||
|
||||
|
||||
def parse_req_python_specifier(requires_python: str) -> Optional[list[TargetVersion]]:
|
||||
"""Parse a specifier string (i.e. ``">=3.7,<3.10"``) to a list of TargetVersion.
|
||||
|
||||
If parsing fails, will raise a packaging.specifiers.InvalidSpecifier error.
|
||||
If the parsed specifier cannot be mapped to a valid TargetVersion, returns None.
|
||||
"""
|
||||
specifier_set = strip_specifier_set(SpecifierSet(requires_python))
|
||||
if not specifier_set:
|
||||
return None
|
||||
|
||||
target_version_map = {f"3.{v.value}": v for v in TargetVersion}
|
||||
compatible_versions: list[str] = list(specifier_set.filter(target_version_map))
|
||||
if compatible_versions:
|
||||
return [target_version_map[v] for v in compatible_versions]
|
||||
return None
|
||||
|
||||
|
||||
def strip_specifier_set(specifier_set: SpecifierSet) -> SpecifierSet:
    """Strip minor versions for some specifiers in the specifier set.

    For background on version specifiers, see PEP 440:
    https://peps.python.org/pep-0440/#version-specifiers
    """
    kept = []
    for spec in specifier_set:
        if "*" in str(spec):
            # Wildcard specifiers are kept verbatim.
            kept.append(spec)
            continue
        if spec.operator in ("~=", "==", ">=", "==="):
            # Truncate to major.minor so micro releases don't affect matching.
            v = Version(spec.version)
            kept.append(Specifier(f"{spec.operator}{v.major}.{v.minor}"))
        elif spec.operator == ">":
            v = Version(spec.version)
            if len(v.release) > 2:
                # ">3.7.1" still admits other 3.7.x releases, so relax to ">=3.7".
                kept.append(Specifier(f">={v.major}.{v.minor}"))
            else:
                kept.append(spec)
        else:
            kept.append(spec)

    return SpecifierSet(",".join(str(s) for s in kept))
@lru_cache
def find_user_pyproject_toml() -> Path:
    r"""Return the path to the top-level user configuration for black.

    This looks for ~\.black on Windows and ~/.config/black on Linux and other
    Unix systems.

    May raise:
    - RuntimeError: if the current user has no homedir
    - PermissionError: if the current process cannot access the user's homedir
    """
    if sys.platform == "win32":
        config_path = Path.home() / ".black"
    else:
        # Honor XDG_CONFIG_HOME when set; fall back to ~/.config otherwise.
        xdg_root = Path(os.environ.get("XDG_CONFIG_HOME", "~/.config"))
        config_path = xdg_root.expanduser() / "black"
    return _cached_resolve(config_path)
@lru_cache
def get_gitignore(root: Path) -> PathSpec:
    """Return a PathSpec matching gitignore content if present."""
    gitignore_file = root / ".gitignore"
    if gitignore_file.is_file():
        with gitignore_file.open(encoding="utf-8") as handle:
            patterns = handle.readlines()
    else:
        patterns = []
    try:
        return PathSpec.from_lines("gitwildmatch", patterns)
    except GitWildMatchPatternError as exc:
        # Surface the parse failure to the user before propagating it.
        err(f"Could not parse {gitignore_file}: {exc}")
        raise
def resolves_outside_root_or_cannot_stat(
    path: Path,
    root: Path,
    report: Optional[Report] = None,
) -> bool:
    """
    Returns whether the path is a symbolic link that points outside the
    root directory. Also returns True if we failed to resolve the path.
    """
    try:
        resolved = _cached_resolve(path)
    except OSError as exc:
        if report:
            report.path_ignored(path, f"cannot be read because {exc}")
        return True
    if not resolved.is_relative_to(root):
        if report:
            report.path_ignored(path, f"is a symbolic link that points outside {root}")
        return True
    return False
def best_effort_relative_path(path: Path, root: Path) -> Path:
    # Precondition: resolves_outside_root_or_cannot_stat(path, root) is False
    try:
        return path.absolute().relative_to(root)
    except ValueError:
        pass
    # The absolute path isn't lexically under root; look for an ancestor that
    # resolves to root (e.g. a symlinked working directory).
    for parent in path.parents:
        if _cached_resolve(parent) == root:
            return path.relative_to(parent)
    # something adversarial, fallback to path guaranteed by precondition
    return _cached_resolve(path).relative_to(root)
def _path_is_ignored(
    root_relative_path: str,
    root: Path,
    gitignore_dict: dict[Path, PathSpec],
) -> bool:
    """Return True if any of the .gitignore specs matches the given path."""
    path = root / root_relative_path
    # Note that this logic is sensitive to the ordering of gitignore_dict. Callers must
    # ensure that gitignore_dict is ordered from least specific to most specific.
    for gitignore_path, pattern in gitignore_dict.items():
        try:
            rel = path.relative_to(gitignore_path).as_posix()
        except ValueError:
            # Not under this .gitignore's directory; deeper ones can't apply either.
            break
        if path.is_dir():
            rel += "/"
        if pattern.match_file(rel):
            return True
    return False
def path_is_excluded(
    normalized_path: str,
    pattern: Optional[Pattern[str]],
) -> bool:
    """Whether ``pattern`` matches a non-empty span of ``normalized_path``.

    A missing pattern or an empty-string match both count as "not excluded".
    """
    if pattern is None:
        return False
    match = pattern.search(normalized_path)
    return bool(match and match.group(0))
def gen_python_files(
    paths: Iterable[Path],
    root: Path,
    include: Pattern[str],
    exclude: Pattern[str],
    extend_exclude: Optional[Pattern[str]],
    force_exclude: Optional[Pattern[str]],
    report: Report,
    gitignore_dict: Optional[dict[Path, PathSpec]],
    *,
    verbose: bool,
    quiet: bool,
) -> Iterator[Path]:
    """Generate all files under `path` whose paths are not excluded by the
    `exclude_regex`, `extend_exclude`, or `force_exclude` regexes,
    but are included by the `include` regex.

    Symbolic links pointing outside of the `root` directory are ignored.

    `report` is where output about exclusions goes.
    """

    assert root.is_absolute(), f"INTERNAL ERROR: `root` must be absolute but is {root}"
    for child in paths:
        assert child.is_absolute()
        root_relative_path = child.relative_to(root).as_posix()

        # First ignore files matching .gitignore, if passed
        if gitignore_dict and _path_is_ignored(
            root_relative_path, root, gitignore_dict
        ):
            report.path_ignored(child, "matches a .gitignore file content")
            continue

        # Then ignore with `--exclude` `--extend-exclude` and `--force-exclude` options.
        # The leading "/" and trailing "/" (for directories) mirror gitwildmatch-style
        # anchoring so the user regexes see the same shape of path.
        root_relative_path = "/" + root_relative_path
        if child.is_dir():
            root_relative_path += "/"

        if path_is_excluded(root_relative_path, exclude):
            report.path_ignored(child, "matches the --exclude regular expression")
            continue

        if path_is_excluded(root_relative_path, extend_exclude):
            report.path_ignored(
                child, "matches the --extend-exclude regular expression"
            )
            continue

        if path_is_excluded(root_relative_path, force_exclude):
            report.path_ignored(child, "matches the --force-exclude regular expression")
            continue

        # Symlinks escaping root (or unreadable entries) are skipped entirely.
        if resolves_outside_root_or_cannot_stat(child, root, report):
            continue

        if child.is_dir():
            # If gitignore is None, gitignore usage is disabled, while a Falsey
            # gitignore is when the directory doesn't have a .gitignore file.
            if gitignore_dict is not None:
                # NOTE(review): `child` is asserted absolute above, so `root / child`
                # evaluates to `child` itself (pathlib drops the left operand when
                # joining with an absolute path) — confirm this is intended.
                new_gitignore_dict = {
                    **gitignore_dict,
                    root / child: get_gitignore(child),
                }
            else:
                new_gitignore_dict = None
            # Recurse, layering this directory's .gitignore on top of inherited ones.
            yield from gen_python_files(
                child.iterdir(),
                root,
                include,
                exclude,
                extend_exclude,
                force_exclude,
                report,
                new_gitignore_dict,
                verbose=verbose,
                quiet=quiet,
            )

        elif child.is_file():
            # Notebooks are only yielded when the optional Jupyter deps are present.
            if child.suffix == ".ipynb" and not jupyter_dependencies_are_installed(
                warn=verbose or not quiet
            ):
                continue
            include_match = include.search(root_relative_path) if include else True
            if include_match:
                yield child
def wrap_stream_for_windows(
    f: io.TextIOWrapper,
) -> Union[io.TextIOWrapper, "colorama.AnsiToWin32"]:
    """
    Wrap stream with colorama's wrap_stream so colors are shown on Windows.

    If `colorama` is unavailable, the original stream is returned unmodified.
    Otherwise, the `wrap_stream()` function determines whether the stream needs
    to be wrapped for a Windows environment and will accordingly either return
    an `AnsiToWin32` wrapper or the original stream.
    """
    try:
        from colorama.initialise import wrap_stream
    except ImportError:
        return f
    # Set `strip=False` to avoid needing to modify test_express_diff_with_color.
    return wrap_stream(f, convert=None, strip=False, autoreset=False, wrap=True)
Binary file not shown.
497
venv/Lib/site-packages/black/handle_ipynb_magics.py
Normal file
497
venv/Lib/site-packages/black/handle_ipynb_magics.py
Normal file
@@ -0,0 +1,497 @@
|
||||
"""Functions to process IPython magics with."""
|
||||
|
||||
import ast
|
||||
import collections
|
||||
import dataclasses
|
||||
import re
|
||||
import secrets
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from importlib.util import find_spec
|
||||
from typing import Optional
|
||||
|
||||
if sys.version_info >= (3, 10):
|
||||
from typing import TypeGuard
|
||||
else:
|
||||
from typing_extensions import TypeGuard
|
||||
|
||||
from black.mode import Mode
|
||||
from black.output import out
|
||||
from black.report import NothingChanged
|
||||
|
||||
# Calls emitted by IPython's TransformerManager when it rewrites magics.  If a
# cell already contains one of these, it cannot be safely round-tripped.
TRANSFORMED_MAGICS = frozenset((
    "get_ipython().run_cell_magic",
    "get_ipython().system",
    "get_ipython().getoutput",
    "get_ipython().run_line_magic",
))
# Token names that are irrelevant when scanning for a trailing semicolon.
TOKENS_TO_IGNORE = frozenset((
    "ENDMARKER",
    "NL",
    "NEWLINE",
    "COMMENT",
    "DEDENT",
    "UNIMPORTANT_WS",
    "ESCAPED_NL",
))
# Cell magics whose bodies are Python source and can therefore be formatted.
PYTHON_CELL_MAGICS = frozenset((
    "capture",
    "prun",
    "pypy",
    "python",
    "python3",
    "time",
    "timeit",
))
# Alias used to generate random mask tokens for magics.
TOKEN_HEX = secrets.token_hex
@dataclasses.dataclass(frozen=True)
class Replacement:
    """A token that temporarily stands in for an IPython magic in a cell."""

    # The randomly generated token that replaces the magic while formatting.
    mask: str
    # The original magic source the mask replaces.
    src: str
@lru_cache
def jupyter_dependencies_are_installed(*, warn: bool) -> bool:
    """Return True if both ``tokenize_rt`` and ``IPython`` are importable.

    When ``warn`` is True and a dependency is missing, print a hint once
    (the result is cached, so the warning is not repeated).
    """
    installed = all(
        find_spec(mod) is not None for mod in ("tokenize_rt", "IPython")
    )
    if warn and not installed:
        out(
            "Skipping .ipynb files as Jupyter dependencies are not installed.\n"
            'You can fix this by running ``pip install "black[jupyter]"``'
        )
    return installed
def validate_cell(src: str, mode: Mode) -> None:
    """Check that cell does not already contain TransformerManager transformations,
    or non-Python cell magics, which might cause tokenizer_rt to break because of
    indentations.

    If a cell contains ``!ls``, it is transformed to ``get_ipython().system('ls')``.
    A cell that *originally* contained ``get_ipython().system('ls')`` transforms to
    the exact same output, so such cells cannot be safely round-tripped and are
    ignored by raising ``NothingChanged``.
    """
    # Cells that already contain transformed magic calls are ambiguous.
    for transformed_magic in TRANSFORMED_MAGICS:
        if transformed_magic in src:
            raise NothingChanged

    first_line = _get_code_start(src)
    if first_line.startswith("%%"):
        magic_name = first_line.split(maxsplit=1)[0][2:]
        # Only cell magics whose bodies are Python can be formatted.
        if magic_name not in PYTHON_CELL_MAGICS | mode.python_cell_magics:
            raise NothingChanged
def remove_trailing_semicolon(src: str) -> tuple[str, bool]:
    """Remove trailing semicolon from Jupyter notebook cell.

    For example,

        fig, ax = plt.subplots()
        ax.plot(x_data, y_data);  # plot data

    would become

        fig, ax = plt.subplots()
        ax.plot(x_data, y_data)  # plot data

    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.

    Returns the (possibly modified) source and whether a semicolon was removed.
    """
    from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src

    tokens = src_to_tokens(src)
    # Walk backwards to the last significant token; only a trailing ";" counts.
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        if token.name == "OP" and token.src == ";":
            del tokens[idx]
            return tokens_to_src(tokens), True
        break
    return src, False
def put_trailing_semicolon_back(src: str, has_trailing_semicolon: bool) -> str:
    """Put trailing semicolon back if cell originally had it.

    Mirrors the logic in `quiet` from `IPython.core.displayhook`, but uses
    ``tokenize_rt`` so that round-tripping works fine.
    """
    if not has_trailing_semicolon:
        return src
    from tokenize_rt import reversed_enumerate, src_to_tokens, tokens_to_src

    tokens = src_to_tokens(src)
    reinstated = False
    # Append ";" to the last significant token.
    for idx, token in reversed_enumerate(tokens):
        if token.name in TOKENS_TO_IGNORE:
            continue
        tokens[idx] = token._replace(src=token.src + ";")
        reinstated = True
        break
    if not reinstated:  # pragma: nocover
        raise AssertionError(
            "INTERNAL ERROR: Was not able to reinstate trailing semicolon. "
            "Please report a bug on https://github.com/psf/black/issues. "
        ) from None
    return str(tokens_to_src(tokens))
def mask_cell(src: str) -> tuple[str, list[Replacement]]:
    """Mask IPython magics so content becomes parseable Python code.

    For example,

        %matplotlib inline
        'foo'

    becomes

        "25716f358c32750e"
        'foo'

    The replacements are returned, along with the transformed code.
    """
    replacements: list[Replacement] = []
    try:
        ast.parse(src)
    except SyntaxError:
        # Likely contains IPython magics; mask them below.
        pass
    else:
        # Already valid Python — nothing to mask.
        return src, replacements

    from IPython.core.inputtransformer2 import TransformerManager

    manager = TransformerManager()
    transformed = manager.transform_cell(src)
    transformed, cell_magic_replacements = replace_cell_magics(transformed)
    replacements.extend(cell_magic_replacements)
    transformed = manager.transform_cell(transformed)
    transformed, magic_replacements = replace_magics(transformed)
    if len(transformed.splitlines()) != len(src.splitlines()):
        # Multi-line magic, not supported.
        raise NothingChanged
    replacements.extend(magic_replacements)
    return transformed, replacements
def get_token(src: str, magic: str) -> str:
    """Return randomly generated token to mask IPython magic with.

    For example, if 'magic' was `%matplotlib inline`, then a possible
    token to mask it with would be `"43fdd17f7e5ddc83"`. The token
    will be the same length as the magic, and we make sure that it was
    not already present anywhere else in the cell.
    """
    assert magic
    nbytes = max(len(magic) // 2 - 1, 1)
    token = TOKEN_HEX(nbytes)
    regenerations = 0
    # Regenerate on (extremely unlikely) collisions with existing cell content.
    while token in src:
        token = TOKEN_HEX(nbytes)
        regenerations += 1
        if regenerations > 100:
            raise AssertionError(
                "INTERNAL ERROR: Black was not able to replace IPython magic. "
                "Please report a bug on https://github.com/psf/black/issues. "
                f"The magic might be helpful: {magic}"
            ) from None
    if len(token) + 2 < len(magic):
        # Pad with "." so the mask's length tracks the magic's length.
        token = f"{token}."
    return f'"{token}"'
def replace_cell_magics(src: str) -> tuple[str, list[Replacement]]:
    """Replace cell magic with token.

    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.

    Example,

        get_ipython().run_cell_magic('t', '-n1', 'ls =!ls\\n')

    becomes

        "a794."
        ls =!ls

    The replacement, along with the transformed code, is returned.
    """
    finder = CellMagicFinder()
    finder.visit(ast.parse(src))
    if finder.cell_magic is None:
        return src, []
    header = finder.cell_magic.header
    mask = get_token(src, header)
    return (
        f"{mask}\n{finder.cell_magic.body}",
        [Replacement(mask=mask, src=header)],
    )
def replace_magics(src: str) -> tuple[str, list[Replacement]]:
    """Replace magics within body of cell.

    Note that 'src' will already have been processed by IPython's
    TransformerManager().transform_cell.

    Example, this

        get_ipython().run_line_magic('matplotlib', 'inline')
        'foo'

    becomes

        "5e67db56d490fd39"
        'foo'

    The replacement, along with the transformed code, are returned.
    """
    finder = MagicFinder()
    finder.visit(ast.parse(src))
    replacements: list[Replacement] = []
    new_lines = []
    for lineno, line in enumerate(src.splitlines(), start=1):
        offsets_and_magics = finder.magics.get(lineno)
        if offsets_and_magics is not None:
            if len(offsets_and_magics) != 1:  # pragma: nocover
                raise AssertionError(
                    f"Expecting one magic per line, got: {offsets_and_magics}\n"
                    "Please report a bug on https://github.com/psf/black/issues."
                )
            entry = offsets_and_magics[0]
            mask = get_token(src, entry.magic)
            replacements.append(Replacement(mask=mask, src=entry.magic))
            # Keep everything before the magic; the mask replaces the rest.
            line = line[: entry.col_offset] + mask
        new_lines.append(line)
    return "\n".join(new_lines), replacements
def unmask_cell(src: str, replacements: list[Replacement]) -> str:
    """Remove replacements from cell.

    For example

        "9b20"
        foo = bar

    becomes

        %%time
        foo = bar
    """
    result = src
    for replacement in replacements:
        result = result.replace(replacement.mask, replacement.src)
    return result
def _get_code_start(src: str) -> str:
    """Provides the first line where the code starts.

    Iterates over lines of code until it finds the first line that doesn't
    contain only empty spaces and comments. It removes any empty spaces at the
    start of the line and returns it. If such line doesn't exist, it returns an
    empty string.
    """
    # ".+" matches each maximal run of non-newline characters, i.e. each
    # non-empty line.
    for match in re.finditer(".+", src):
        candidate = match.group(0).lstrip()
        if candidate and not candidate.startswith("#"):
            return candidate
    return ""
def _is_ipython_magic(node: ast.expr) -> TypeGuard[ast.Attribute]:
    """Check if attribute is IPython magic.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.
    """
    if not isinstance(node, ast.Attribute):
        return False
    call = node.value
    return (
        isinstance(call, ast.Call)
        and isinstance(call.func, ast.Name)
        and call.func.id == "get_ipython"
    )
def _get_str_args(args: list[ast.expr]) -> list[str]:
    """Extract the literal string value of each call argument.

    The transformed magic calls only ever carry string constants, so anything
    else is an internal invariant violation.
    """
    values: list[str] = []
    for arg in args:
        assert isinstance(arg, ast.Constant) and isinstance(arg.value, str)
        values.append(arg.value)
    return values
@dataclasses.dataclass(frozen=True)
class CellMagic:
    """A parsed ``%%magic`` cell: its name, optional parameters, and body."""

    name: str
    params: Optional[str]
    body: str

    @property
    def header(self) -> str:
        """Reconstructed header line, e.g. ``%%timeit -n1``."""
        if not self.params:
            return f"%%{self.name}"
        return f"%%{self.name} {self.params}"
# ast.NodeVisitor + dataclass = breakage under mypyc.
class CellMagicFinder(ast.NodeVisitor):
    """Find cell magics.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.

    For example,

        %%time\n
        foo()

    would have been transformed to

        get_ipython().run_cell_magic('time', '', 'foo()\\n')

    and we look for instances of the latter.
    """

    def __init__(self, cell_magic: Optional[CellMagic] = None) -> None:
        self.cell_magic = cell_magic

    def visit_Expr(self, node: ast.Expr) -> None:
        """Find cell magic, extract header and body."""
        call = node.value
        if (
            isinstance(call, ast.Call)
            and _is_ipython_magic(call.func)
            and call.func.attr == "run_cell_magic"
        ):
            # run_cell_magic(name, params, body)
            args = _get_str_args(call.args)
            self.cell_magic = CellMagic(name=args[0], params=args[1], body=args[2])
        self.generic_visit(node)
@dataclasses.dataclass(frozen=True)
class OffsetAndMagic:
    """A magic found on a source line, with the column where it starts."""

    # Column at which the magic begins on its line.
    col_offset: int
    # Reconstructed magic source, e.g. "%matplotlib inline" or "!ls".
    magic: str
# Unsurprisingly, subclassing ast.NodeVisitor means we can't use dataclasses here
# as mypyc will generate broken code.
class MagicFinder(ast.NodeVisitor):
    """Visit cell to look for get_ipython calls.

    Note that the source of the abstract syntax tree
    will already have been processed by IPython's
    TransformerManager().transform_cell.

    For example,

        %matplotlib inline

    would have been transformed to

        get_ipython().run_line_magic('matplotlib', 'inline')

    and we look for instances of the latter (and likewise for other
    types of magics).
    """

    def __init__(self) -> None:
        # Maps line number -> magics found on that line (offset + source).
        self.magics: dict[int, list[OffsetAndMagic]] = collections.defaultdict(list)

    def visit_Assign(self, node: ast.Assign) -> None:
        """Look for system assign magics.

        For example,

            black_version = !black --version
            env = %env var

        would have been (respectively) transformed to

            black_version = get_ipython().getoutput('black --version')
            env = get_ipython().run_line_magic('env', 'var')

        and we look for instances of any of the latter.
        """
        if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
            args = _get_str_args(node.value.args)
            if node.value.func.attr == "getoutput":
                src = f"!{args[0]}"
            elif node.value.func.attr == "run_line_magic":
                src = f"%{args[0]}"
                # run_line_magic(name, rest): append the arguments if present.
                if args[1]:
                    src += f" {args[1]}"
            else:
                # Only the two magics above can appear on an assignment RHS.
                raise AssertionError(
                    f"Unexpected IPython magic {node.value.func.attr!r} found. "
                    "Please report a bug on https://github.com/psf/black/issues."
                ) from None
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)

    def visit_Expr(self, node: ast.Expr) -> None:
        """Look for magics in body of cell.

        For examples,

            !ls
            !!ls
            ?ls
            ??ls

        would (respectively) get transformed to

            get_ipython().system('ls')
            get_ipython().getoutput('ls')
            get_ipython().run_line_magic('pinfo', 'ls')
            get_ipython().run_line_magic('pinfo2', 'ls')

        and we look for instances of any of the latter.
        """
        if isinstance(node.value, ast.Call) and _is_ipython_magic(node.value.func):
            args = _get_str_args(node.value.args)
            if node.value.func.attr == "run_line_magic":
                # "pinfo"/"pinfo2" reconstruct the help syntax; anything else
                # is an ordinary "%magic [args]" line magic.
                if args[0] == "pinfo":
                    src = f"?{args[1]}"
                elif args[0] == "pinfo2":
                    src = f"??{args[1]}"
                else:
                    src = f"%{args[0]}"
                    if args[1]:
                        src += f" {args[1]}"
            elif node.value.func.attr == "system":
                src = f"!{args[0]}"
            elif node.value.func.attr == "getoutput":
                src = f"!!{args[0]}"
            else:
                raise NothingChanged  # unsupported magic.
            self.magics[node.value.lineno].append(
                OffsetAndMagic(node.value.col_offset, src)
            )
        self.generic_visit(node)
BIN
venv/Lib/site-packages/black/linegen.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/linegen.cp311-win_amd64.pyd
Normal file
Binary file not shown.
1814
venv/Lib/site-packages/black/linegen.py
Normal file
1814
venv/Lib/site-packages/black/linegen.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
venv/Lib/site-packages/black/lines.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/lines.cp311-win_amd64.pyd
Normal file
Binary file not shown.
1067
venv/Lib/site-packages/black/lines.py
Normal file
1067
venv/Lib/site-packages/black/lines.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
venv/Lib/site-packages/black/mode.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/mode.cp311-win_amd64.pyd
Normal file
Binary file not shown.
296
venv/Lib/site-packages/black/mode.py
Normal file
296
venv/Lib/site-packages/black/mode.py
Normal file
@@ -0,0 +1,296 @@
|
||||
"""Data structures configuring Black behavior.
|
||||
|
||||
Mostly around Python language feature support per version and Black configuration
|
||||
chosen by the user.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
from enum import Enum, auto
|
||||
from hashlib import sha256
|
||||
from operator import attrgetter
|
||||
from typing import Final
|
||||
|
||||
from black.const import DEFAULT_LINE_LENGTH
|
||||
|
||||
|
||||
class TargetVersion(Enum):
    """Python versions Black can target; the value is the 3.x minor number."""

    PY33 = 3
    PY34 = 4
    PY35 = 5
    PY36 = 6
    PY37 = 7
    PY38 = 8
    PY39 = 9
    PY310 = 10
    PY311 = 11
    PY312 = 12
    PY313 = 13

    def pretty(self) -> str:
        """Human-readable form, e.g. ``PY310`` -> ``"Python 3.10"``."""
        name = self.name
        assert name.startswith("PY")
        return f"Python {name[2]}.{name[3:]}"
class Feature(Enum):
    """Python language features whose availability depends on the target version."""

    F_STRINGS = 2
    NUMERIC_UNDERSCORES = 3
    TRAILING_COMMA_IN_CALL = 4
    TRAILING_COMMA_IN_DEF = 5
    # The following two feature-flags are mutually exclusive, and exactly one should be
    # set for every version of python.
    ASYNC_IDENTIFIERS = 6
    ASYNC_KEYWORDS = 7
    ASSIGNMENT_EXPRESSIONS = 8
    POS_ONLY_ARGUMENTS = 9
    RELAXED_DECORATORS = 10
    PATTERN_MATCHING = 11
    UNPACKING_ON_FLOW = 12
    ANN_ASSIGN_EXTENDED_RHS = 13
    EXCEPT_STAR = 14
    VARIADIC_GENERICS = 15
    DEBUG_F_STRINGS = 16
    PARENTHESIZED_CONTEXT_MANAGERS = 17
    TYPE_PARAMS = 18
    FSTRING_PARSING = 19
    TYPE_PARAM_DEFAULTS = 20
    FORCE_OPTIONAL_PARENTHESES = 50

    # __future__ flags
    FUTURE_ANNOTATIONS = 51
# Maps ``from __future__ import <flag>`` names to the Feature they enable.
FUTURE_FLAG_TO_FEATURE: Final = {
    "annotations": Feature.FUTURE_ANNOTATIONS,
}
# Language features available in each supported target version; queried via
# ``supports_feature`` below. Each newer version's set is a superset of the
# previous one's (plus that release's additions).
VERSION_TO_FEATURES: dict[TargetVersion, set[Feature]] = {
    TargetVersion.PY33: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY34: {Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY35: {Feature.TRAILING_COMMA_IN_CALL, Feature.ASYNC_IDENTIFIERS},
    TargetVersion.PY36: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_IDENTIFIERS,
    },
    TargetVersion.PY37: {
        Feature.F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
    },
    TargetVersion.PY38: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
    },
    TargetVersion.PY39: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
    },
    TargetVersion.PY310: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
    },
    TargetVersion.PY311: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
    },
    TargetVersion.PY312: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
        Feature.TYPE_PARAMS,
        Feature.FSTRING_PARSING,
    },
    TargetVersion.PY313: {
        Feature.F_STRINGS,
        Feature.DEBUG_F_STRINGS,
        Feature.NUMERIC_UNDERSCORES,
        Feature.TRAILING_COMMA_IN_CALL,
        Feature.TRAILING_COMMA_IN_DEF,
        Feature.ASYNC_KEYWORDS,
        Feature.FUTURE_ANNOTATIONS,
        Feature.ASSIGNMENT_EXPRESSIONS,
        Feature.RELAXED_DECORATORS,
        Feature.POS_ONLY_ARGUMENTS,
        Feature.UNPACKING_ON_FLOW,
        Feature.ANN_ASSIGN_EXTENDED_RHS,
        Feature.PARENTHESIZED_CONTEXT_MANAGERS,
        Feature.PATTERN_MATCHING,
        Feature.EXCEPT_STAR,
        Feature.VARIADIC_GENERICS,
        Feature.TYPE_PARAMS,
        Feature.FSTRING_PARSING,
        Feature.TYPE_PARAM_DEFAULTS,
    },
}
def supports_feature(target_versions: set[TargetVersion], feature: Feature) -> bool:
    """Return True if every targeted version supports ``feature``.

    Vacuously True for an empty set of target versions.
    """
    for version in target_versions:
        if feature not in VERSION_TO_FEATURES[version]:
            return False
    return True
class Preview(Enum):
    """Individual preview style features.

    Membership is queried with ``Preview.x in mode`` (see ``Mode.__contains__``);
    features listed in UNSTABLE_FEATURES are excluded in plain preview mode.
    """

    hex_codes_in_unicode_sequences = auto()
    # NOTE: string_processing requires wrap_long_dict_values_in_parens
    # for https://github.com/psf/black/issues/3117 to be fixed.
    string_processing = auto()
    hug_parens_with_braces_and_square_brackets = auto()
    unify_docstring_detection = auto()
    no_normalize_fmt_skip_whitespace = auto()
    wrap_long_dict_values_in_parens = auto()
    multiline_string_handling = auto()
    typed_params_trailing_comma = auto()
    is_simple_lookup_for_doublestar_expression = auto()
    docstring_check_for_newline = auto()
    remove_redundant_guard_parens = auto()
    parens_for_long_if_clauses_in_case_block = auto()
    pep646_typed_star_arg_type_var_tuple = auto()
|
||||
|
||||
|
||||
# Preview features with known problems; Mode.__contains__ enables these only
# in unstable mode, not when `preview` alone is set.
UNSTABLE_FEATURES: set[Preview] = {
    # Many issues, see summary in https://github.com/psf/black/issues/4042
    Preview.string_processing,
    # See issues #3452 and #4158
    Preview.wrap_long_dict_values_in_parens,
    # See issue #4159
    Preview.multiline_string_handling,
    # See issue #4036 (crash), #4098, #4099 (proposed tweaks)
    Preview.hug_parens_with_braces_and_square_brackets,
}
|
||||
|
||||
|
||||
class Deprecated(UserWarning):
    """Visible deprecation warning.

    Derived from UserWarning (not DeprecationWarning) so it is displayed
    by default rather than silenced by the interpreter's filters.
    """


# Cache-key components longer than this are replaced by a SHA-256 digest
# truncated to this length (see Mode.get_cache_key).
_MAX_CACHE_KEY_PART_LENGTH: Final = 32
|
||||
|
||||
|
||||
@dataclass
class Mode:
    """Formatting configuration.

    All fields that influence formatting output participate in
    ``get_cache_key`` so cached results are invalidated on any change.
    """

    # Python versions to target; empty set means no explicit target.
    target_versions: set[TargetVersion] = field(default_factory=set)
    line_length: int = DEFAULT_LINE_LENGTH
    string_normalization: bool = True
    is_pyi: bool = False
    is_ipynb: bool = False
    skip_source_first_line: bool = False
    magic_trailing_comma: bool = True
    python_cell_magics: set[str] = field(default_factory=set)
    preview: bool = False
    unstable: bool = False
    # Preview features force-enabled regardless of the `preview` flag.
    enabled_features: set[Preview] = field(default_factory=set)

    def __contains__(self, feature: Preview) -> bool:
        """
        Provide `Preview.FEATURE in Mode` syntax that mirrors the ``preview`` flag.

        In unstable mode, all features are enabled. In preview mode, all features
        except those in UNSTABLE_FEATURES are enabled. Any features in
        `self.enabled_features` are also enabled.
        """
        if self.unstable:
            return True
        if feature in self.enabled_features:
            return True
        return self.preview and feature not in UNSTABLE_FEATURES

    def get_cache_key(self) -> str:
        """Return a dotted string uniquely identifying this mode for the cache."""
        if self.target_versions:
            version_str = ",".join(
                str(version.value)
                for version in sorted(self.target_versions, key=attrgetter("value"))
            )
        else:
            version_str = "-"
        # Unbounded parts are hashed so the key stays a bounded length.
        if len(version_str) > _MAX_CACHE_KEY_PART_LENGTH:
            version_str = sha256(version_str.encode()).hexdigest()[
                :_MAX_CACHE_KEY_PART_LENGTH
            ]
        features_and_magics = (
            ",".join(sorted(f.name for f in self.enabled_features))
            + "@"
            + ",".join(sorted(self.python_cell_magics))
        )
        if len(features_and_magics) > _MAX_CACHE_KEY_PART_LENGTH:
            features_and_magics = sha256(features_and_magics.encode()).hexdigest()[
                :_MAX_CACHE_KEY_PART_LENGTH
            ]
        parts = [
            version_str,
            str(self.line_length),
            str(int(self.string_normalization)),
            str(int(self.is_pyi)),
            str(int(self.is_ipynb)),
            str(int(self.skip_source_first_line)),
            str(int(self.magic_trailing_comma)),
            str(int(self.preview)),
            str(int(self.unstable)),
            features_and_magics,
        ]
        return ".".join(parts)
|
BIN
venv/Lib/site-packages/black/nodes.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/nodes.cp311-win_amd64.pyd
Normal file
Binary file not shown.
1033
venv/Lib/site-packages/black/nodes.py
Normal file
1033
venv/Lib/site-packages/black/nodes.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
venv/Lib/site-packages/black/numerics.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/numerics.cp311-win_amd64.pyd
Normal file
Binary file not shown.
61
venv/Lib/site-packages/black/numerics.py
Normal file
61
venv/Lib/site-packages/black/numerics.py
Normal file
@@ -0,0 +1,61 @@
|
||||
"""
|
||||
Formatting numeric literals.
|
||||
"""
|
||||
|
||||
from blib2to3.pytree import Leaf
|
||||
|
||||
|
||||
def format_hex(text: str) -> str:
    """Uppercase the digit portion of a hexadecimal literal like "0x12B3".

    The two-character "0x" prefix is preserved as-is.
    """
    prefix = text[:2]
    digits = text[2:]
    return prefix + digits.upper()
|
||||
|
||||
|
||||
def format_scientific_notation(text: str) -> str:
    """Normalize a scientific-notation literal such as "1.e-2".

    The mantissa is normalized via format_float_or_int_string; an explicit
    "+" exponent sign is dropped while "-" is kept.
    """
    mantissa, exponent = text.split("e")
    exponent_sign = ""
    if exponent.startswith("-"):
        exponent_sign = "-"
        exponent = exponent[1:]
    elif exponent.startswith("+"):
        exponent = exponent[1:]
    normalized_mantissa = format_float_or_int_string(mantissa)
    return f"{normalized_mantissa}e{exponent_sign}{exponent}"
|
||||
|
||||
|
||||
def format_complex_number(text: str) -> str:
    """Normalize a complex literal like `10j` via format_float_or_int_string."""
    magnitude, imaginary_suffix = text[:-1], text[-1]
    return f"{format_float_or_int_string(magnitude)}{imaginary_suffix}"
|
||||
|
||||
|
||||
def format_float_or_int_string(text: str) -> str:
    """Normalize a float literal such as ".5" or "1." to "0.5" / "1.0".

    Strings without a decimal point pass through unchanged.
    """
    if "." not in text:
        return text

    integer_part, _, fractional_part = text.partition(".")
    return f"{integer_part or 0}.{fractional_part or 0}"
|
||||
|
||||
|
||||
def normalize_numeric_literal(leaf: Leaf) -> None:
    """Normalizes numeric (float, int, and complex) literals.

    All letters used in the representation are normalized to lowercase.
    """
    lowered = leaf.value.lower()
    if lowered.startswith(("0o", "0b")):
        # Octal and binary literals need no further formatting.
        leaf.value = lowered
        return
    if lowered.startswith("0x"):
        leaf.value = format_hex(lowered)
    elif "e" in lowered:
        # Checked before the "j" suffix so "1e2j" is handled as scientific
        # notation with a complex exponent part, matching the split on "e".
        leaf.value = format_scientific_notation(lowered)
    elif lowered.endswith("j"):
        leaf.value = format_complex_number(lowered)
    else:
        leaf.value = format_float_or_int_string(lowered)
|
122
venv/Lib/site-packages/black/output.py
Normal file
122
venv/Lib/site-packages/black/output.py
Normal file
@@ -0,0 +1,122 @@
|
||||
"""Nice output for Black.
|
||||
|
||||
The double calls are for patching purposes in tests.
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import tempfile
|
||||
from typing import Any, Optional
|
||||
|
||||
from click import echo, style
|
||||
from mypy_extensions import mypyc_attr
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
def _out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    """Write *message* to stderr, styled bold unless overridden."""
    if message is not None:
        styles.setdefault("bold", True)
        message = style(message, **styles)
    echo(message, nl=nl, err=True)
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
def _err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    """Write *message* to stderr, styled red unless overridden."""
    if message is not None:
        styles.setdefault("fg", "red")
        message = style(message, **styles)
    echo(message, nl=nl, err=True)
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
def out(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    """Print a (bold by default) message to stderr; patchable for tests."""
    _out(message, nl=nl, **styles)
|
||||
|
||||
|
||||
def err(message: Optional[str] = None, nl: bool = True, **styles: Any) -> None:
    """Print a (red by default) error message to stderr."""
    _err(message, nl=nl, **styles)
|
||||
|
||||
|
||||
def ipynb_diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between each cell in notebooks `a` and `b`.

    Only code cells are diffed; cells are paired by position, indexing `b`
    with `a`'s cell numbers (assumes both notebooks have matching cell
    structure — TODO confirm against callers).
    """
    a_nb = json.loads(a)
    b_nb = json.loads(b)
    diff_lines = [
        diff(
            "".join(a_nb["cells"][cell_number]["source"]) + "\n",
            "".join(b_nb["cells"][cell_number]["source"]) + "\n",
            f"{a_name}:cell_{cell_number}",
            f"{b_name}:cell_{cell_number}",
        )
        for cell_number, cell in enumerate(a_nb["cells"])
        if cell["cell_type"] == "code"
    ]
    return "".join(diff_lines)
|
||||
|
||||
|
||||
_line_pattern = re.compile(r"(.*?(?:\r\n|\n|\r|$))")
|
||||
|
||||
|
||||
def _splitlines_no_ff(source: str) -> list[str]:
|
||||
"""Split a string into lines ignoring form feed and other chars.
|
||||
|
||||
This mimics how the Python parser splits source code.
|
||||
|
||||
A simplified version of the function with the same name in Lib/ast.py
|
||||
"""
|
||||
result = [match[0] for match in _line_pattern.finditer(source)]
|
||||
if result[-1] == "":
|
||||
result.pop(-1)
|
||||
return result
|
||||
|
||||
|
||||
def diff(a: str, b: str, a_name: str, b_name: str) -> str:
    """Return a unified diff string between strings `a` and `b`."""
    import difflib

    result: list[str] = []
    for raw_line in difflib.unified_diff(
        _splitlines_no_ff(a), _splitlines_no_ff(b), fromfile=a_name, tofile=b_name, n=5
    ):
        # Work around https://bugs.python.org/issue2142 by marking lines
        # that lack a trailing newline the way GNU diff does. See:
        # https://www.gnu.org/software/diffutils/manual/html_node/Incomplete-Lines.html
        if raw_line[-1] == "\n":
            result.append(raw_line)
        else:
            result.append(raw_line + "\n")
            result.append("\\ No newline at end of file\n")
    return "".join(result)
|
||||
|
||||
|
||||
def color_diff(contents: str) -> str:
    """Inject the ANSI color codes to the diff."""
    bold, cyan, green, red = "\033[1m", "\033[36m", "\033[32m", "\033[31m"
    reset = "\033[0m"
    colored = []
    for line in contents.split("\n"):
        if line.startswith(("+++", "---")):
            colored.append(bold + line + reset)
        elif line.startswith("@@"):
            colored.append(cyan + line + reset)
        elif line.startswith("+"):
            colored.append(green + line + reset)
        elif line.startswith("-"):
            colored.append(red + line + reset)
        else:
            colored.append(line)
    return "\n".join(colored)
|
||||
|
||||
|
||||
@mypyc_attr(patchable=True)
def dump_to_file(*output: str, ensure_final_newline: bool = True) -> str:
    """Dump `output` to a temporary file. Return path to the file."""
    with tempfile.NamedTemporaryFile(
        mode="w", prefix="blk_", suffix=".log", delete=False, encoding="utf8"
    ) as f:
        for chunk in output:
            f.write(chunk)
            # Keep each chunk on its own line unless it already ends with one.
            if ensure_final_newline and chunk and not chunk.endswith("\n"):
                f.write("\n")
        return f.name
|
BIN
venv/Lib/site-packages/black/parsing.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/parsing.cp311-win_amd64.pyd
Normal file
Binary file not shown.
252
venv/Lib/site-packages/black/parsing.py
Normal file
252
venv/Lib/site-packages/black/parsing.py
Normal file
@@ -0,0 +1,252 @@
|
||||
"""
|
||||
Parse Python code and perform AST validation.
|
||||
"""
|
||||
|
||||
import ast
|
||||
import sys
|
||||
import warnings
|
||||
from typing import Collection, Iterator
|
||||
|
||||
from black.mode import VERSION_TO_FEATURES, Feature, TargetVersion, supports_feature
|
||||
from black.nodes import syms
|
||||
from blib2to3 import pygram
|
||||
from blib2to3.pgen2 import driver
|
||||
from blib2to3.pgen2.grammar import Grammar
|
||||
from blib2to3.pgen2.parse import ParseError
|
||||
from blib2to3.pgen2.tokenize import TokenError
|
||||
from blib2to3.pytree import Leaf, Node
|
||||
|
||||
|
||||
class InvalidInput(ValueError):
    """Raised when input source code fails all parse attempts.

    Constructed with a user-facing message including the line/column of the
    failure (see lib2to3_parse).
    """
|
||||
|
||||
|
||||
def get_grammars(target_versions: set[TargetVersion]) -> list[Grammar]:
    """Return the blib2to3 grammars to try for the given target versions.

    With no target versions, every grammar is tried in a fixed order.
    Otherwise only grammars compatible with the requested versions are
    returned, async-as-keyword first when both async styles must be tried.
    """
    if not target_versions:
        # No target_version specified, so try all grammars.
        return [
            # Python 3.7-3.9
            pygram.python_grammar_async_keywords,
            # Python 3.0-3.6
            pygram.python_grammar,
            # Python 3.10+
            pygram.python_grammar_soft_keywords,
        ]

    grammars = []
    # If we have to parse both, try to parse async as a keyword first
    if not supports_feature(
        target_versions, Feature.ASYNC_IDENTIFIERS
    ) and not supports_feature(target_versions, Feature.PATTERN_MATCHING):
        # Python 3.7-3.9
        grammars.append(pygram.python_grammar_async_keywords)
    if not supports_feature(target_versions, Feature.ASYNC_KEYWORDS):
        # Python 3.0-3.6
        grammars.append(pygram.python_grammar)
    if any(Feature.PATTERN_MATCHING in VERSION_TO_FEATURES[v] for v in target_versions):
        # Python 3.10+
        grammars.append(pygram.python_grammar_soft_keywords)

    # At least one of the above branches must have been taken, because every Python
    # version has exactly one of the two 'ASYNC_*' flags
    return grammars
|
||||
|
||||
|
||||
def lib2to3_parse(
    src_txt: str, target_versions: Collection[TargetVersion] = ()
) -> Node:
    """Given a string with source, return the lib2to3 Node.

    Tries each candidate grammar in turn (see get_grammars); if every
    grammar fails, raises the InvalidInput recorded for the newest grammar
    version tried.
    """
    # The tokenizer requires a trailing newline.
    if not src_txt.endswith("\n"):
        src_txt += "\n"

    grammars = get_grammars(set(target_versions))
    if target_versions:
        max_tv = max(target_versions, key=lambda tv: tv.value)
        tv_str = f" for target version {max_tv.pretty()}"
    else:
        tv_str = ""

    # Maps grammar version -> the InvalidInput produced by that grammar.
    errors = {}
    for grammar in grammars:
        drv = driver.Driver(grammar)
        try:
            result = drv.parse_string(src_txt, True)
            break

        except ParseError as pe:
            lineno, column = pe.context[1]
            lines = src_txt.splitlines()
            try:
                faulty_line = lines[lineno - 1]
            except IndexError:
                faulty_line = "<line number missing in source>"
            errors[grammar.version] = InvalidInput(
                f"Cannot parse{tv_str}: {lineno}:{column}: {faulty_line}"
            )

        except TokenError as te:
            # In edge cases these are raised; and typically don't have a "faulty_line".
            lineno, column = te.args[1]
            errors[grammar.version] = InvalidInput(
                f"Cannot parse{tv_str}: {lineno}:{column}: {te.args[0]}"
            )

    else:
        # Choose the latest version when raising the actual parsing error.
        assert len(errors) >= 1
        exc = errors[max(errors)]
        raise exc from None

    if isinstance(result, Leaf):
        # Normalize a bare leaf into a file_input node so callers always
        # receive a Node.
        result = Node(syms.file_input, [result])
    return result
|
||||
|
||||
|
||||
def matches_grammar(src_txt: str, grammar: Grammar) -> bool:
    """Return True if `src_txt` parses cleanly under `grammar`."""
    drv = driver.Driver(grammar)
    try:
        drv.parse_string(src_txt, True)
    except (ParseError, TokenError, IndentationError):
        return False
    return True
|
||||
|
||||
|
||||
def lib2to3_unparse(node: Node) -> str:
    """Given a lib2to3 node, return its string representation."""
    return str(node)
|
||||
|
||||
|
||||
class ASTSafetyError(Exception):
    """Raised when Black's generated code is not equivalent to the old AST.

    Signals that writing the reformatted output would change program
    semantics and must be aborted.
    """
|
||||
|
||||
|
||||
def _parse_single_version(
|
||||
src: str, version: tuple[int, int], *, type_comments: bool
|
||||
) -> ast.AST:
|
||||
filename = "<unknown>"
|
||||
with warnings.catch_warnings():
|
||||
warnings.simplefilter("ignore", SyntaxWarning)
|
||||
warnings.simplefilter("ignore", DeprecationWarning)
|
||||
return ast.parse(
|
||||
src, filename, feature_version=version, type_comments=type_comments
|
||||
)
|
||||
|
||||
|
||||
def parse_ast(src: str) -> ast.AST:
    """Parse *src*, trying the newest supported feature versions first.

    Falls back to parsing without type comments; if everything fails,
    re-raises a SyntaxError carrying the first error message seen.
    """
    # TODO: support Python 4+ ;)
    candidate_versions = sorted(
        ((3, minor) for minor in range(3, sys.version_info[1] + 1)), reverse=True
    )

    first_error = ""
    for candidate in candidate_versions:
        try:
            return _parse_single_version(src, candidate, type_comments=True)
        except SyntaxError as exc:
            first_error = first_error or str(exc)

    # Try to parse without type comments
    for candidate in candidate_versions:
        try:
            return _parse_single_version(src, candidate, type_comments=False)
        except SyntaxError:
            continue

    raise SyntaxError(first_error)
|
||||
|
||||
|
||||
def _normalize(lineend: str, value: str) -> str:
|
||||
# To normalize, we strip any leading and trailing space from
|
||||
# each line...
|
||||
stripped: list[str] = [i.strip() for i in value.splitlines()]
|
||||
normalized = lineend.join(stripped)
|
||||
# ...and remove any blank lines at the beginning and end of
|
||||
# the whole string
|
||||
return normalized.strip()
|
||||
|
||||
|
||||
def stringify_ast(node: ast.AST) -> Iterator[str]:
    """Simple visitor generating strings to compare ASTs by content.

    Entry point over _stringify_ast with an empty parent stack.
    """
    return _stringify_ast(node, [])
|
||||
|
||||
|
||||
def _stringify_ast_with_new_parent(
    node: ast.AST, parent_stack: list[ast.AST], new_parent: ast.AST
) -> Iterator[str]:
    """Yield _stringify_ast(node) with *new_parent* pushed on the stack.

    The stack is mutated in place and popped after the child is exhausted.
    """
    parent_stack.append(new_parent)
    yield from _stringify_ast(node, parent_stack)
    parent_stack.pop()
|
||||
|
||||
|
||||
def _stringify_ast(node: ast.AST, parent_stack: list[ast.AST]) -> Iterator[str]:
    """Yield a normalized, indented textual rendering of *node*.

    Two ASTs are considered equivalent when these streams match; several
    fields are normalized (docstring whitespace, type-comment trailing
    space, the legacy "u" string kind) so formatting-only changes compare
    equal.
    """
    if (
        isinstance(node, ast.Constant)
        and isinstance(node.value, str)
        and node.kind == "u"
    ):
        # It's a quirk of history that we strip the u prefix over here. We used to
        # rewrite the AST nodes for Python version compatibility and we never copied
        # over the kind
        node.kind = None

    yield f"{' ' * len(parent_stack)}{node.__class__.__name__}("

    for field in sorted(node._fields):  # noqa: F402
        # TypeIgnore has only one field 'lineno' which breaks this comparison
        if isinstance(node, ast.TypeIgnore):
            break

        try:
            value: object = getattr(node, field)
        except AttributeError:
            continue

        yield f"{' ' * (len(parent_stack) + 1)}{field}="

        if isinstance(value, list):
            for item in value:
                # Ignore nested tuples within del statements, because we may insert
                # parentheses and they change the AST.
                if (
                    field == "targets"
                    and isinstance(node, ast.Delete)
                    and isinstance(item, ast.Tuple)
                ):
                    for elt in item.elts:
                        yield from _stringify_ast_with_new_parent(
                            elt, parent_stack, node
                        )

                elif isinstance(item, ast.AST):
                    yield from _stringify_ast_with_new_parent(item, parent_stack, node)

        elif isinstance(value, ast.AST):
            yield from _stringify_ast_with_new_parent(value, parent_stack, node)

        else:
            normalized: object
            if (
                isinstance(node, ast.Constant)
                and field == "value"
                and isinstance(value, str)
                and len(parent_stack) >= 2
                # Any standalone string, ideally this would
                # exactly match black.nodes.is_docstring
                and isinstance(parent_stack[-1], ast.Expr)
            ):
                # Constant strings may be indented across newlines, if they are
                # docstrings; fold spaces after newlines when comparing. Similarly,
                # trailing and leading space may be removed.
                normalized = _normalize("\n", value)
            elif field == "type_comment" and isinstance(value, str):
                # Trailing whitespace in type comments is removed.
                normalized = value.rstrip()
            else:
                normalized = value
            yield (
                f"{' ' * (len(parent_stack) + 1)}{normalized!r}, #"
                f" {value.__class__.__name__}"
            )

    yield f"{' ' * len(parent_stack)}) # /{node.__class__.__name__}"
|
0
venv/Lib/site-packages/black/py.typed
Normal file
0
venv/Lib/site-packages/black/py.typed
Normal file
BIN
venv/Lib/site-packages/black/ranges.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/ranges.cp311-win_amd64.pyd
Normal file
Binary file not shown.
521
venv/Lib/site-packages/black/ranges.py
Normal file
521
venv/Lib/site-packages/black/ranges.py
Normal file
@@ -0,0 +1,521 @@
|
||||
"""Functions related to Black's formatting by line ranges feature."""
|
||||
|
||||
import difflib
|
||||
from dataclasses import dataclass
|
||||
from typing import Collection, Iterator, Sequence, Union
|
||||
|
||||
from black.nodes import (
|
||||
LN,
|
||||
STANDALONE_COMMENT,
|
||||
Leaf,
|
||||
Node,
|
||||
Visitor,
|
||||
first_leaf,
|
||||
furthest_ancestor_with_last_leaf,
|
||||
last_leaf,
|
||||
syms,
|
||||
)
|
||||
from blib2to3.pgen2.token import ASYNC, NEWLINE
|
||||
|
||||
|
||||
def parse_line_ranges(line_ranges: Sequence[str]) -> list[tuple[int, int]]:
    """Parse --line-ranges strings like "1-5" into (start, end) tuples.

    Raises ValueError with a user-facing message on malformed input.
    """
    parsed: list[tuple[int, int]] = []
    for range_str in line_ranges:
        pieces = range_str.split("-")
        if len(pieces) != 2:
            raise ValueError(
                "Incorrect --line-ranges format, expect 'START-END', found"
                f" {range_str!r}"
            )
        try:
            parsed.append((int(pieces[0]), int(pieces[1])))
        except ValueError:
            raise ValueError(
                "Incorrect --line-ranges value, expect integer ranges, found"
                f" {range_str!r}"
            ) from None
    return parsed
|
||||
|
||||
|
||||
def is_valid_line_range(lines: tuple[int, int]) -> bool:
    """Returns whether the line range is valid (empty, or start <= end)."""
    if not lines:
        return True
    start, end = lines
    return start <= end
|
||||
|
||||
|
||||
def sanitized_lines(
    lines: Collection[tuple[int, int]], src_contents: str
) -> Collection[tuple[int, int]]:
    """Returns the valid line ranges for the given source.

    Ranges that lie entirely outside the source are dropped; the rest are
    clamped so starts are at least 1 and ends at most the (1-based) index
    of the last source line.
    """
    if not src_contents:
        return []
    last_line = src_contents.count("\n")
    if not src_contents.endswith("\n"):
        # A final line without a terminating newline still counts.
        last_line += 1
    sanitized = []
    for start, end in lines:
        if start > last_line:
            continue
        # line-ranges are 1-based
        clamped_start = max(start, 1)
        if end < clamped_start:
            continue
        sanitized.append((clamped_start, min(end, last_line)))
    return sanitized
|
||||
|
||||
|
||||
def adjusted_lines(
    lines: Collection[tuple[int, int]],
    original_source: str,
    modified_source: str,
) -> list[tuple[int, int]]:
    """Returns the adjusted line ranges based on edits from the original code.

    This computes the new line ranges by diffing original_source and
    modified_source, and adjust each range based on how the range overlaps with
    the diffs.

    Note the diff can contain lines outside of the original line ranges. This can
    happen when the formatting has to be done in adjacent to maintain consistent
    local results. For example:

    1. def my_func(arg1, arg2,
    2.             arg3,):
    3.   pass

    If it restricts to line 2-2, it can't simply reformat line 2, it also has
    to reformat line 1:

    1. def my_func(
    2.     arg1,
    3.     arg2,
    4.     arg3,
    5. ):
    6.   pass

    In this case, we will expand the line ranges to also include the whole diff
    block.

    Args:
      lines: a collection of line ranges.
      original_source: the original source.
      modified_source: the modified source.
    """
    lines_mappings = _calculate_lines_mappings(original_source, modified_source)

    new_lines = []
    # Keep an index of the current search. Since the lines and lines_mappings are
    # sorted, this makes the search complexity linear.
    current_mapping_index = 0
    for start, end in sorted(lines):
        start_mapping_index = _find_lines_mapping_index(
            start,
            lines_mappings,
            current_mapping_index,
        )
        end_mapping_index = _find_lines_mapping_index(
            end,
            lines_mappings,
            start_mapping_index,
        )
        current_mapping_index = start_mapping_index
        if start_mapping_index >= len(lines_mappings) or end_mapping_index >= len(
            lines_mappings
        ):
            # Protect against invalid inputs.
            continue
        start_mapping = lines_mappings[start_mapping_index]
        end_mapping = lines_mappings[end_mapping_index]
        if start_mapping.is_changed_block:
            # When the line falls into a changed block, expands to the whole block.
            new_start = start_mapping.modified_start
        else:
            # Unchanged block: shift by the block's original->modified offset.
            new_start = (
                start - start_mapping.original_start + start_mapping.modified_start
            )
        if end_mapping.is_changed_block:
            # When the line falls into a changed block, expands to the whole block.
            new_end = end_mapping.modified_end
        else:
            new_end = end - end_mapping.original_start + end_mapping.modified_start
        new_range = (new_start, new_end)
        if is_valid_line_range(new_range):
            new_lines.append(new_range)
    return new_lines
|
||||
|
||||
|
||||
def convert_unchanged_lines(src_node: Node, lines: Collection[tuple[int, int]]) -> None:
    """Converts unchanged lines to STANDALONE_COMMENT.

    The idea is similar to how `# fmt: on/off` is implemented. It also converts the
    nodes between those markers as a single `STANDALONE_COMMENT` leaf node with
    the unformatted code as its value. `STANDALONE_COMMENT` is a "fake" token
    that will be formatted as-is with its prefix normalized.

    Here we perform two passes:

    1. Visit the top-level statements, and convert them to a single
       `STANDALONE_COMMENT` when unchanged. This speeds up formatting when some
       of the top-level statements aren't changed.
    2. Convert unchanged "unwrapped lines" to `STANDALONE_COMMENT` nodes line by
       line. "unwrapped lines" are divided by the `NEWLINE` token. e.g. a
       multi-line statement is *one* "unwrapped line" that ends with `NEWLINE`,
       even though this statement itself can span multiple lines, and the
       tokenizer only sees the last '\n' as the `NEWLINE` token.

    NOTE: During pass (2), comment prefixes and indentations are ALWAYS
    normalized even when the lines aren't changed. This is fixable by moving
    more formatting to pass (1). However, it's hard to get it correct when
    incorrect indentations are used. So we defer this to future optimizations.
    """
    # Expand the (start, end) ranges into a flat set of changed line numbers.
    lines_set: set[int] = set()
    for start, end in lines:
        lines_set.update(range(start, end + 1))
    # Pass 1: collapse unchanged top-level statements.
    visitor = _TopLevelStatementsVisitor(lines_set)
    _ = list(visitor.visit(src_node))  # Consume all results.
    # Pass 2: collapse remaining unchanged unwrapped lines one by one.
    _convert_unchanged_line_by_line(src_node, lines_set)
|
||||
|
||||
|
||||
def _contains_standalone_comment(node: LN) -> bool:
    """Return True if any leaf under *node* is a STANDALONE_COMMENT."""
    if isinstance(node, Leaf):
        return node.type == STANDALONE_COMMENT
    return any(_contains_standalone_comment(child) for child in node.children)
|
||||
|
||||
|
||||
class _TopLevelStatementsVisitor(Visitor[None]):
    """
    A node visitor that converts unchanged top-level statements to
    STANDALONE_COMMENT.

    This is used in addition to _convert_unchanged_line_by_line, to
    speed up formatting when there are unchanged top-level
    classes/functions/statements.
    """

    def __init__(self, lines_set: set[int]):
        # Set of 1-based line numbers that were changed (may be reformatted).
        self._lines_set = lines_set

    def visit_simple_stmt(self, node: Node) -> Iterator[None]:
        # This is only called for top-level statements, since `visit_suite`
        # won't visit its children nodes.
        # `yield from []` keeps this a generator as Visitor requires, while
        # producing no values.
        yield from []
        newline_leaf = last_leaf(node)
        if not newline_leaf:
            return
        assert (
            newline_leaf.type == NEWLINE
        ), f"Unexpectedly found leaf.type={newline_leaf.type}"
        # We need to find the furthest ancestor with the NEWLINE as the last
        # leaf, since a `suite` can simply be a `simple_stmt` when it puts
        # its body on the same line. Example: `if cond: pass`.
        ancestor = furthest_ancestor_with_last_leaf(newline_leaf)
        if not _get_line_range(ancestor).intersection(self._lines_set):
            _convert_node_to_standalone_comment(ancestor)

    def visit_suite(self, node: Node) -> Iterator[None]:
        yield from []
        # If there is a STANDALONE_COMMENT node, it means parts of the node tree
        # have fmt on/off/skip markers. Those STANDALONE_COMMENT nodes can't
        # be simply converted by calling str(node). So we just don't convert
        # here.
        if _contains_standalone_comment(node):
            return
        # Find the semantic parent of this suite. For `async_stmt` and
        # `async_funcdef`, the ASYNC token is defined on a separate level by the
        # grammar.
        semantic_parent = node.parent
        if semantic_parent is not None:
            if (
                semantic_parent.prev_sibling is not None
                and semantic_parent.prev_sibling.type == ASYNC
            ):
                semantic_parent = semantic_parent.parent
        if semantic_parent is not None and not _get_line_range(
            semantic_parent
        ).intersection(self._lines_set):
            _convert_node_to_standalone_comment(semantic_parent)
|
||||
|
||||
|
||||
def _convert_unchanged_line_by_line(node: Node, lines_set: set[int]) -> None:
    """Converts unchanged to STANDALONE_COMMENT line by line.

    An "unwrapped line" is each region ending at a NEWLINE token; regions
    whose line range doesn't intersect *lines_set* are frozen as-is.
    """
    for leaf in node.leaves():
        if leaf.type != NEWLINE:
            # We only consider "unwrapped lines", which are divided by the NEWLINE
            # token.
            continue
        if leaf.parent and leaf.parent.type == syms.match_stmt:
            # The `suite` node is defined as:
            #   match_stmt: "match" subject_expr ':' NEWLINE INDENT case_block+ DEDENT
            # Here we need to check `subject_expr`. The `case_block+` will be
            # checked by their own NEWLINEs.
            nodes_to_ignore: list[LN] = []
            prev_sibling = leaf.prev_sibling
            while prev_sibling:
                nodes_to_ignore.insert(0, prev_sibling)
                prev_sibling = prev_sibling.prev_sibling
            if not _get_line_range(nodes_to_ignore).intersection(lines_set):
                _convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
        elif leaf.parent and leaf.parent.type == syms.suite:
            # The `suite` node is defined as:
            #   suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
            # We will check `simple_stmt` and `stmt+` separately against the lines set
            parent_sibling = leaf.parent.prev_sibling
            nodes_to_ignore = []
            while parent_sibling and not parent_sibling.type == syms.suite:
                # NOTE: Multiple suite nodes can exist as siblings in e.g. `if_stmt`.
                nodes_to_ignore.insert(0, parent_sibling)
                parent_sibling = parent_sibling.prev_sibling
            # Special case for `async_stmt` and `async_funcdef` where the ASYNC
            # token is on the grandparent node.
            grandparent = leaf.parent.parent
            if (
                grandparent is not None
                and grandparent.prev_sibling is not None
                and grandparent.prev_sibling.type == ASYNC
            ):
                nodes_to_ignore.insert(0, grandparent.prev_sibling)
            if not _get_line_range(nodes_to_ignore).intersection(lines_set):
                _convert_nodes_to_standalone_comment(nodes_to_ignore, newline=leaf)
        else:
            ancestor = furthest_ancestor_with_last_leaf(leaf)
            # Consider multiple decorators as a whole block, as their
            # newlines have different behaviors than the rest of the grammar.
            if (
                ancestor.type == syms.decorator
                and ancestor.parent
                and ancestor.parent.type == syms.decorators
            ):
                ancestor = ancestor.parent
            if not _get_line_range(ancestor).intersection(lines_set):
                _convert_node_to_standalone_comment(ancestor)
|
||||
|
||||
|
||||
def _convert_node_to_standalone_comment(node: LN) -> None:
    """Convert `node` to a STANDALONE_COMMENT leaf by modifying the tree inline.

    The node is removed from its parent and replaced, at the same child index,
    by a single STANDALONE_COMMENT leaf whose value is the node's original
    source text (minus its trailing newline). This effectively freezes the
    node's formatting. No-op if the node has no parent or no leaves.
    """
    parent = node.parent
    if not parent:
        return
    first = first_leaf(node)
    last = last_leaf(node)
    if not first or not last:
        return
    if first is last:
        # This can happen on the following edge cases:
        # 1. A block of `# fmt: off/on` code except the `# fmt: on` is placed
        #    on the end of the last line instead of on a new line.
        # 2. A single backslash on its own line followed by a comment line.
        # Ideally we don't want to format them when not requested, but fixing
        # isn't easy. These cases are also badly formatted code, so it isn't
        # too bad we reformat them.
        return
    # The prefix contains comments and indentation whitespaces. They are
    # reformatted accordingly to the correct indentation level.
    # This also means the indentation will be changed on the unchanged lines, and
    # this is actually required to not break incremental reformatting.
    prefix = first.prefix
    first.prefix = ""
    index = node.remove()
    if index is not None:
        # Remove the '\n', as STANDALONE_COMMENT will have '\n' appended when
        # generating the formatted code.
        value = str(node)[:-1]
        parent.insert_child(
            index,
            Leaf(
                STANDALONE_COMMENT,
                value,
                prefix=prefix,
                # Keep a handle on the original first leaf so later passes can
                # still inspect the pre-conversion token.
                fmt_pass_converted_first_leaf=first,
            ),
        )
|
||||
|
||||
|
||||
def _convert_nodes_to_standalone_comment(nodes: Sequence[LN], *, newline: Leaf) -> None:
    """Convert a run of sibling `nodes` to one STANDALONE_COMMENT leaf inline.

    All nodes are removed from their shared parent and replaced, at the first
    node's child index, by a single STANDALONE_COMMENT leaf containing their
    concatenated source text. `newline` is the NEWLINE leaf that terminates
    the statement; its prefix (the trailing comment, if any) is folded into
    the standalone comment as well. No-op for an empty list or parentless
    nodes.
    """
    if not nodes:
        return
    parent = nodes[0].parent
    first = first_leaf(nodes[0])
    if not parent or not first:
        return
    # Hoist the leading prefix (indentation + comments) onto the replacement.
    prefix = first.prefix
    first.prefix = ""
    value = "".join(str(node) for node in nodes)
    # The prefix comment on the NEWLINE leaf is the trailing comment of the statement.
    if newline.prefix:
        value += newline.prefix
        newline.prefix = ""
    index = nodes[0].remove()
    for node in nodes[1:]:
        node.remove()
    if index is not None:
        parent.insert_child(
            index,
            Leaf(
                STANDALONE_COMMENT,
                value,
                prefix=prefix,
                fmt_pass_converted_first_leaf=first,
            ),
        )
|
||||
|
||||
|
||||
def _leaf_line_end(leaf: Leaf) -> int:
    """Returns the line number of the leaf node's last line."""
    # A NEWLINE token never spans extra lines; any other leaf (e.g. a
    # multiline string) may, so count the embedded line breaks.
    extra = 0 if leaf.type == NEWLINE else str(leaf).count("\n")
    return leaf.lineno + extra
|
||||
|
||||
|
||||
def _get_line_range(node_or_nodes: Union[LN, list[LN]]) -> set[int]:
    """Returns the set of 1-based line numbers covered by the node or nodes.

    An empty list, or a node without leaves, yields the empty set.
    """
    if isinstance(node_or_nodes, list):
        if not node_or_nodes:
            return set()
        first = first_leaf(node_or_nodes[0])
        last = last_leaf(node_or_nodes[-1])
        if not first or not last:
            return set()
        return set(range(first.lineno, _leaf_line_end(last) + 1))

    node = node_or_nodes
    if isinstance(node, Leaf):
        # A leaf is its own first and last leaf.
        first = last = node
    else:
        first = first_leaf(node)
        last = last_leaf(node)
    if not first or not last:
        return set()
    return set(range(first.lineno, _leaf_line_end(last) + 1))
|
||||
|
||||
|
||||
@dataclass
class _LinesMapping:
    """1-based lines mapping from original source to modified source.

    Lines [original_start, original_end] from original source
    are mapped to [modified_start, modified_end].

    The ranges are inclusive on both ends.
    """

    # First and last line of the range in the original source (inclusive).
    original_start: int
    original_end: int
    # First and last line of the corresponding range in the modified source.
    modified_start: int
    modified_end: int
    # Whether this range corresponds to a changed block, or an unchanged block.
    is_changed_block: bool
|
||||
|
||||
|
||||
def _calculate_lines_mappings(
    original_source: str,
    modified_source: str,
) -> Sequence[_LinesMapping]:
    """Returns a sequence of _LinesMapping by diffing the sources.

    For example, given the following diff:
        import re
      - def func(arg1,
      -     arg2, arg3):
      + def func(arg1, arg2, arg3):
          pass
    It returns the following mappings:
      original -> modified
       (1, 1)  ->  (1, 1), is_changed_block=False (the "import re" line)
       (2, 3)  ->  (2, 2), is_changed_block=True (the diff)
       (4, 4)  ->  (3, 3), is_changed_block=False (the "pass" line)

    You can think of this visually as if it brings up a side-by-side diff, and
    tries to map the line ranges from the left side to the right side:

      (1, 1)->(1, 1)  1. import re          1. import re
      (2, 3)->(2, 2)  2. def func(arg1,     2. def func(arg1, arg2, arg3):
                      3.     arg2, arg3):
      (4, 4)->(3, 3)  4.     pass           3.     pass

    Args:
      original_source: the original source.
      modified_source: the modified source.
    """
    # Diff line-by-line; keepends so the split is loss-free.
    matcher = difflib.SequenceMatcher(
        None,
        original_source.splitlines(keepends=True),
        modified_source.splitlines(keepends=True),
    )
    matching_blocks = matcher.get_matching_blocks()
    lines_mappings: list[_LinesMapping] = []
    # matching_blocks is a sequence of "same block of code ranges", see
    # https://docs.python.org/3/library/difflib.html#difflib.SequenceMatcher.get_matching_blocks
    # Each block corresponds to a _LinesMapping with is_changed_block=False,
    # and the ranges between two blocks corresponds to a _LinesMapping with
    # is_changed_block=True,
    # NOTE: matching_blocks is 0-based, but _LinesMapping is 1-based.
    for i, block in enumerate(matching_blocks):
        if i == 0:
            if block.a != 0 or block.b != 0:
                # Region before the first matching block.
                # NOTE(review): this leading (non-matching) region is flagged
                # is_changed_block=False — confirm that is intentional.
                lines_mappings.append(
                    _LinesMapping(
                        original_start=1,
                        original_end=block.a,
                        modified_start=1,
                        modified_end=block.b,
                        is_changed_block=False,
                    )
                )
        else:
            # Gap between the previous matching block and this one: changed.
            previous_block = matching_blocks[i - 1]
            lines_mappings.append(
                _LinesMapping(
                    original_start=previous_block.a + previous_block.size + 1,
                    original_end=block.a,
                    modified_start=previous_block.b + previous_block.size + 1,
                    modified_end=block.b,
                    is_changed_block=True,
                )
            )
        if i < len(matching_blocks) - 1:
            # The matching block itself: unchanged. (The final block is
            # difflib's zero-length sentinel and is not emitted.)
            lines_mappings.append(
                _LinesMapping(
                    original_start=block.a + 1,
                    original_end=block.a + block.size,
                    modified_start=block.b + 1,
                    modified_end=block.b + block.size,
                    is_changed_block=False,
                )
            )
    return lines_mappings
|
||||
|
||||
|
||||
def _find_lines_mapping_index(
|
||||
original_line: int,
|
||||
lines_mappings: Sequence[_LinesMapping],
|
||||
start_index: int,
|
||||
) -> int:
|
||||
"""Returns the original index of the lines mappings for the original line."""
|
||||
index = start_index
|
||||
while index < len(lines_mappings):
|
||||
mapping = lines_mappings[index]
|
||||
if mapping.original_start <= original_line <= mapping.original_end:
|
||||
return index
|
||||
index += 1
|
||||
return index
|
107
venv/Lib/site-packages/black/report.py
Normal file
107
venv/Lib/site-packages/black/report.py
Normal file
@@ -0,0 +1,107 @@
|
||||
"""
|
||||
Summarize Black runs to users.
|
||||
"""
|
||||
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
from pathlib import Path
|
||||
|
||||
from click import style
|
||||
|
||||
from black.output import err, out
|
||||
|
||||
|
||||
class Changed(Enum):
    """Outcome of processing a single file (consumed by `Report.done`)."""

    NO = 0  # already well formatted; nothing to do
    CACHED = 1  # not modified on disk since the last run
    YES = 2  # reformatted (or would be, under --check/--diff)
|
||||
|
||||
|
||||
class NothingChanged(UserWarning):
    """Raised when reformatted code is the same as source."""

    # NOTE(review): subclasses UserWarning; presumably raised/caught as a
    # control-flow signal during formatting — confirm against call sites.
|
||||
|
||||
|
||||
@dataclass
class Report:
    """Provides a reformatting counter. Can be rendered with `str(report)`.

    Accumulates per-file outcomes (changed / unchanged / failed) and renders
    a user-facing summary plus the process exit code.
    """

    # Mirror of the CLI flags that affect wording and exit code.
    check: bool = False
    diff: bool = False
    quiet: bool = False
    verbose: bool = False
    # Running tallies, one per outcome.
    change_count: int = 0
    same_count: int = 0
    failure_count: int = 0

    def done(self, src: Path, changed: Changed) -> None:
        """Increment the counter for successful reformatting. Write out a message."""
        if changed is Changed.YES:
            # Under --check/--diff nothing was written, so use conditional wording.
            reformatted = "would reformat" if self.check or self.diff else "reformatted"
            if self.verbose or not self.quiet:
                out(f"{reformatted} {src}")
            self.change_count += 1
        else:
            # Changed.NO and Changed.CACHED both count as "same"; only the
            # message differs, and only --verbose prints it.
            if self.verbose:
                if changed is Changed.NO:
                    msg = f"{src} already well formatted, good job."
                else:
                    msg = f"{src} wasn't modified on disk since last run."
                out(msg, bold=False)
            self.same_count += 1

    def failed(self, src: Path, message: str) -> None:
        """Increment the counter for failed reformatting. Write out a message."""
        err(f"error: cannot format {src}: {message}")
        self.failure_count += 1

    def path_ignored(self, path: Path, message: str) -> None:
        """Report (only under --verbose) a path skipped by exclusion rules."""
        if self.verbose:
            out(f"{path} ignored: {message}", bold=False)

    @property
    def return_code(self) -> int:
        """Return the exit code that the app should use.

        This considers the current state of changed files and failures:
        - if there were any failures, return 123;
        - if any files were changed and --check is being used, return 1;
        - otherwise return 0.
        """
        # According to http://tldp.org/LDP/abs/html/exitcodes.html starting with
        # 126 we have special return codes reserved by the shell.
        if self.failure_count:
            return 123

        elif self.change_count and self.check:
            return 1

        return 0

    def __str__(self) -> str:
        """Render a color report of the current state.

        Use `click.unstyle` to remove colors.
        """
        # Wording matches done()/failed(): conditional under --check/--diff.
        if self.check or self.diff:
            reformatted = "would be reformatted"
            unchanged = "would be left unchanged"
            failed = "would fail to reformat"
        else:
            reformatted = "reformatted"
            unchanged = "left unchanged"
            failed = "failed to reformat"
        report: list[str] = []
        if self.change_count:
            s = "s" if self.change_count > 1 else ""
            report.append(
                style(f"{self.change_count} file{s} ", bold=True, fg="blue")
                + style(f"{reformatted}", bold=True)
            )

        if self.same_count:
            s = "s" if self.same_count > 1 else ""
            report.append(style(f"{self.same_count} file{s} ", fg="blue") + unchanged)
        if self.failure_count:
            s = "s" if self.failure_count > 1 else ""
            report.append(style(f"{self.failure_count} file{s} {failed}", fg="red"))
        return ", ".join(report) + "."
|
Binary file not shown.
0
venv/Lib/site-packages/black/resources/__init__.py
Normal file
0
venv/Lib/site-packages/black/resources/__init__.py
Normal file
155
venv/Lib/site-packages/black/resources/black.schema.json
Normal file
155
venv/Lib/site-packages/black/resources/black.schema.json
Normal file
@@ -0,0 +1,155 @@
|
||||
{
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"$id": "https://github.com/psf/black/blob/main/src/black/resources/black.schema.json",
|
||||
"$comment": "tool.black table in pyproject.toml",
|
||||
"type": "object",
|
||||
"additionalProperties": false,
|
||||
"properties": {
|
||||
"code": {
|
||||
"type": "string",
|
||||
"description": "Format the code passed in as a string."
|
||||
},
|
||||
"line-length": {
|
||||
"type": "integer",
|
||||
"description": "How many characters per line to allow.",
|
||||
"default": 88
|
||||
},
|
||||
"target-version": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"enum": [
|
||||
"py33",
|
||||
"py34",
|
||||
"py35",
|
||||
"py36",
|
||||
"py37",
|
||||
"py38",
|
||||
"py39",
|
||||
"py310",
|
||||
"py311",
|
||||
"py312",
|
||||
"py313"
|
||||
]
|
||||
},
|
||||
"description": "Python versions that should be supported by Black's output. You should include all versions that your code supports. By default, Black will infer target versions from the project metadata in pyproject.toml. If this does not yield conclusive results, Black will use per-file auto-detection."
|
||||
},
|
||||
"pyi": {
|
||||
"type": "boolean",
|
||||
"description": "Format all input files like typing stubs regardless of file extension. This is useful when piping source on standard input.",
|
||||
"default": false
|
||||
},
|
||||
"ipynb": {
|
||||
"type": "boolean",
|
||||
"description": "Format all input files like Jupyter Notebooks regardless of file extension. This is useful when piping source on standard input.",
|
||||
"default": false
|
||||
},
|
||||
"python-cell-magics": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "When processing Jupyter Notebooks, add the given magic to the list of known python-magics (capture, prun, pypy, python, python3, time, timeit). Useful for formatting cells with custom python magics."
|
||||
},
|
||||
"skip-source-first-line": {
|
||||
"type": "boolean",
|
||||
"description": "Skip the first line of the source code.",
|
||||
"default": false
|
||||
},
|
||||
"skip-string-normalization": {
|
||||
"type": "boolean",
|
||||
"description": "Don't normalize string quotes or prefixes.",
|
||||
"default": false
|
||||
},
|
||||
"skip-magic-trailing-comma": {
|
||||
"type": "boolean",
|
||||
"description": "Don't use trailing commas as a reason to split lines.",
|
||||
"default": false
|
||||
},
|
||||
"preview": {
|
||||
"type": "boolean",
|
||||
"description": "Enable potentially disruptive style changes that may be added to Black's main functionality in the next major release.",
|
||||
"default": false
|
||||
},
|
||||
"unstable": {
|
||||
"type": "boolean",
|
||||
"description": "Enable potentially disruptive style changes that have known bugs or are not currently expected to make it into the stable style Black's next major release. Implies --preview.",
|
||||
"default": false
|
||||
},
|
||||
"enable-unstable-feature": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"enum": [
|
||||
"hex_codes_in_unicode_sequences",
|
||||
"string_processing",
|
||||
"hug_parens_with_braces_and_square_brackets",
|
||||
"unify_docstring_detection",
|
||||
"no_normalize_fmt_skip_whitespace",
|
||||
"wrap_long_dict_values_in_parens",
|
||||
"multiline_string_handling",
|
||||
"typed_params_trailing_comma",
|
||||
"is_simple_lookup_for_doublestar_expression",
|
||||
"docstring_check_for_newline",
|
||||
"remove_redundant_guard_parens",
|
||||
"parens_for_long_if_clauses_in_case_block",
|
||||
"pep646_typed_star_arg_type_var_tuple"
|
||||
]
|
||||
},
|
||||
"description": "Enable specific features included in the `--unstable` style. Requires `--preview`. No compatibility guarantees are provided on the behavior or existence of any unstable features."
|
||||
},
|
||||
"check": {
|
||||
"type": "boolean",
|
||||
"description": "Don't write the files back, just return the status. Return code 0 means nothing would change. Return code 1 means some files would be reformatted. Return code 123 means there was an internal error.",
|
||||
"default": false
|
||||
},
|
||||
"diff": {
|
||||
"type": "boolean",
|
||||
"description": "Don't write the files back, just output a diff to indicate what changes Black would've made. They are printed to stdout so capturing them is simple.",
|
||||
"default": false
|
||||
},
|
||||
"color": {
|
||||
"type": "boolean",
|
||||
"description": "Show (or do not show) colored diff. Only applies when --diff is given.",
|
||||
"default": false
|
||||
},
|
||||
"fast": {
|
||||
"type": "boolean",
|
||||
"description": "By default, Black performs an AST safety check after formatting your code. The --fast flag turns off this check and the --safe flag explicitly enables it. [default: --safe]",
|
||||
"default": false
|
||||
},
|
||||
"required-version": {
|
||||
"type": "string",
|
||||
"description": "Require a specific version of Black to be running. This is useful for ensuring that all contributors to your project are using the same version, because different versions of Black may format code a little differently. This option can be set in a configuration file for consistent results across environments."
|
||||
},
|
||||
"exclude": {
|
||||
"type": "string",
|
||||
"description": "A regular expression that matches files and directories that should be excluded on recursive searches. An empty value means no paths are excluded. Use forward slashes for directories on all platforms (Windows, too). By default, Black also ignores all paths listed in .gitignore. Changing this value will override all default exclusions. [default: /(\\.direnv|\\.eggs|\\.git|\\.hg|\\.ipynb_checkpoints|\\.mypy_cache|\\.nox|\\.pytest_cache|\\.ruff_cache|\\.tox|\\.svn|\\.venv|\\.vscode|__pypackages__|_build|buck-out|build|dist|venv)/]"
|
||||
},
|
||||
"extend-exclude": {
|
||||
"type": "string",
|
||||
"description": "Like --exclude, but adds additional files and directories on top of the default values instead of overriding them."
|
||||
},
|
||||
"force-exclude": {
|
||||
"type": "string",
|
||||
"description": "Like --exclude, but files and directories matching this regex will be excluded even when they are passed explicitly as arguments. This is useful when invoking Black programmatically on changed files, such as in a pre-commit hook or editor plugin."
|
||||
},
|
||||
"include": {
|
||||
"type": "string",
|
||||
"description": "A regular expression that matches files and directories that should be included on recursive searches. An empty value means all files are included regardless of the name. Use forward slashes for directories on all platforms (Windows, too). Overrides all exclusions, including from .gitignore and command line options.",
|
||||
"default": "(\\.pyi?|\\.ipynb)$"
|
||||
},
|
||||
"workers": {
|
||||
"type": "integer",
|
||||
"description": "When Black formats multiple files, it may use a process pool to speed up formatting. This option controls the number of parallel workers. This can also be specified via the BLACK_NUM_WORKERS environment variable. Defaults to the number of CPUs in the system."
|
||||
},
|
||||
"quiet": {
|
||||
"type": "boolean",
|
||||
"description": "Stop emitting all non-critical output. Error messages will still be emitted (which can silenced by 2>/dev/null).",
|
||||
"default": false
|
||||
},
|
||||
"verbose": {
|
||||
"type": "boolean",
|
||||
"description": "Emit messages about files that were not changed or were ignored due to exclusion patterns. If Black is using a configuration file, a message detailing which one it is using will be emitted.",
|
||||
"default": false
|
||||
}
|
||||
}
|
||||
}
|
BIN
venv/Lib/site-packages/black/rusty.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/rusty.cp311-win_amd64.pyd
Normal file
Binary file not shown.
28
venv/Lib/site-packages/black/rusty.py
Normal file
28
venv/Lib/site-packages/black/rusty.py
Normal file
@@ -0,0 +1,28 @@
|
||||
"""An error-handling model influenced by that used by the Rust programming language
|
||||
|
||||
See https://doc.rust-lang.org/book/ch09-00-error-handling.html.
|
||||
"""
|
||||
|
||||
from typing import Generic, TypeVar, Union
|
||||
|
||||
T = TypeVar("T")
|
||||
E = TypeVar("E", bound=Exception)
|
||||
|
||||
|
||||
class Ok(Generic[T]):
|
||||
def __init__(self, value: T) -> None:
|
||||
self._value = value
|
||||
|
||||
def ok(self) -> T:
|
||||
return self._value
|
||||
|
||||
|
||||
class Err(Generic[E]):
|
||||
def __init__(self, e: E) -> None:
|
||||
self._e = e
|
||||
|
||||
def err(self) -> E:
|
||||
return self._e
|
||||
|
||||
|
||||
Result = Union[Ok[T], Err[E]]
|
BIN
venv/Lib/site-packages/black/schema.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/schema.cp311-win_amd64.pyd
Normal file
Binary file not shown.
15
venv/Lib/site-packages/black/schema.py
Normal file
15
venv/Lib/site-packages/black/schema.py
Normal file
@@ -0,0 +1,15 @@
|
||||
import importlib.resources
|
||||
import json
|
||||
from typing import Any
|
||||
|
||||
|
||||
def get_schema(tool_name: str = "black") -> Any:
    """Get the stored complete schema for black's settings.

    Loads and parses the JSON schema shipped in the `black.resources` package.
    Only the "black" tool is supported.
    """
    assert tool_name == "black", "Only black is supported."

    resource = importlib.resources.files("black.resources").joinpath(
        "black.schema.json"
    )
    with resource.open(encoding="utf-8") as f:
        return json.load(f)
|
BIN
venv/Lib/site-packages/black/strings.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/strings.cp311-win_amd64.pyd
Normal file
Binary file not shown.
390
venv/Lib/site-packages/black/strings.py
Normal file
390
venv/Lib/site-packages/black/strings.py
Normal file
@@ -0,0 +1,390 @@
|
||||
"""
|
||||
Simple formatting on strings. Further string formatting code is in trans.py.
|
||||
"""
|
||||
|
||||
import re
|
||||
import sys
|
||||
from functools import lru_cache
|
||||
from typing import Final, Match, Pattern
|
||||
|
||||
from black._width_table import WIDTH_TABLE
|
||||
from blib2to3.pytree import Leaf
|
||||
|
||||
STRING_PREFIX_CHARS: Final = "furbFURB" # All possible string prefix characters.
|
||||
STRING_PREFIX_RE: Final = re.compile(
|
||||
r"^([" + STRING_PREFIX_CHARS + r"]*)(.*)$", re.DOTALL
|
||||
)
|
||||
UNICODE_ESCAPE_RE: Final = re.compile(
|
||||
r"(?P<backslashes>\\+)(?P<body>"
|
||||
r"(u(?P<u>[a-fA-F0-9]{4}))" # Character with 16-bit hex value xxxx
|
||||
r"|(U(?P<U>[a-fA-F0-9]{8}))" # Character with 32-bit hex value xxxxxxxx
|
||||
r"|(x(?P<x>[a-fA-F0-9]{2}))" # Character with hex value hh
|
||||
r"|(N\{(?P<N>[a-zA-Z0-9 \-]{2,})\})" # Character named name in the Unicode database
|
||||
r")",
|
||||
re.VERBOSE,
|
||||
)
|
||||
|
||||
|
||||
def sub_twice(regex: Pattern[str], replacement: str, original: str) -> str:
    """Replace `regex` with `replacement` twice on `original`.

    String normalization uses this to catch matches that overlap a region
    already consumed by the first substitution pass.
    """
    once = regex.sub(replacement, original)
    return regex.sub(replacement, once)
|
||||
|
||||
|
||||
def has_triple_quotes(string: str) -> bool:
    """
    Returns:
        True iff @string (after any string prefix) opens with three quote
        characters (''' or \"\"\").
    """
    body = string.lstrip(STRING_PREFIX_CHARS)
    return body.startswith(('"""', "'''"))
|
||||
|
||||
|
||||
def lines_with_leading_tabs_expanded(s: str) -> list[str]:
    """
    Splits string into lines and expands only leading tabs (following the
    normal Python tab rules); tabs after the first non-space character are
    left alone.
    """
    result: list[str] = []
    for line in s.splitlines():
        body = line.lstrip()
        if body and body != line:
            indent = line[: len(line) - len(body)]
            result.append(indent.expandtabs() + body)
        else:
            # Whitespace-only line, or no leading whitespace: keep verbatim.
            result.append(line)
    if s.endswith("\n"):
        # splitlines() drops the final newline; preserve it as an empty line.
        result.append("")
    return result
|
||||
|
||||
|
||||
def fix_docstring(docstring: str, prefix: str) -> str:
    """Re-indent `docstring` so every continuation line starts with `prefix`.

    Implements the PEP 257 indentation-trimming algorithm:
    https://www.python.org/dev/peps/pep-0257/#handling-docstring-indentation
    """
    if not docstring:
        return ""
    lines = lines_with_leading_tabs_expanded(docstring)
    # Determine minimum indentation (first line doesn't count):
    indent = sys.maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent < sys.maxsize:
        # Index, within lines[1:], of the final line; it is re-indented with
        # `prefix` even when blank (so the closing quotes stay aligned), while
        # other blank lines collapse to "".
        last_line_idx = len(lines) - 2
        for i, line in enumerate(lines[1:]):
            stripped_line = line[indent:].rstrip()
            if stripped_line or i == last_line_idx:
                trimmed.append(prefix + stripped_line)
            else:
                trimmed.append("")
    return "\n".join(trimmed)
|
||||
|
||||
|
||||
def get_string_prefix(string: str) -> str:
    """
    Pre-conditions:
        * assert_is_leaf_string(@string)

    Returns:
        @string's prefix (e.g. '', 'r', 'f', or 'rf').
    """
    assert_is_leaf_string(string)

    # Advance past every prefix character, then slice it off in one go.
    idx = 0
    while string[idx] in STRING_PREFIX_CHARS:
        idx += 1
    return string[:idx]
|
||||
|
||||
|
||||
def assert_is_leaf_string(string: str) -> None:
    """
    Checks that @string has the format expected of `leaf.value` where `leaf`
    is some Leaf such that `leaf.type == token.STRING`.

    Pre-conditions:
        * @string starts with either ', ", <prefix>', or <prefix>" where
          `set(<prefix>)` is some subset of `set(STRING_PREFIX_CHARS)`.
        * @string ends with a quote character (' or ").

    Raises:
        AssertionError(...) if the pre-conditions listed above are not
        satisfied.
    """
    # Position of the opening quote: if one quote kind is absent (-1), take
    # the other via max(); otherwise the earlier of the two via min().
    positions = [string.find('"'), string.find("'")]
    if -1 in positions:
        quote_idx = max(positions)
    else:
        quote_idx = min(positions)

    assert (
        0 <= quote_idx < len(string) - 1
    ), f"{string!r} is missing a starting quote character (' or \")."
    assert string[-1] in (
        "'",
        '"',
    ), f"{string!r} is missing an ending quote character (' or \")."
    assert set(string[:quote_idx]).issubset(
        set(STRING_PREFIX_CHARS)
    ), f"{set(string[:quote_idx])} is NOT a subset of {set(STRING_PREFIX_CHARS)}."
|
||||
|
||||
|
||||
def normalize_string_prefix(s: str) -> str:
    """Make all string prefixes lowercase (dropping the legacy u/U entirely)."""
    match = STRING_PREFIX_RE.match(s)
    assert match is not None, f"failed to match string {s!r}"
    orig_prefix = match.group(1)
    # Lowercase F and B; the redundant unicode prefix is removed altogether.
    mapping = {"F": "f", "B": "b", "U": "", "u": ""}
    new_prefix = "".join(mapping.get(char, char) for char in orig_prefix)

    # Python syntax guarantees max 2 prefixes and that one of them is "r"
    if len(new_prefix) == 2 and new_prefix[0].lower() != "r":
        new_prefix = new_prefix[::-1]
    return f"{new_prefix}{match.group(2)}"
|
||||
|
||||
|
||||
# Re(gex) does actually cache patterns internally but this still improves
|
||||
# performance on a long list literal of strings by 5-9% since lru_cache's
|
||||
# caching overhead is much lower.
|
||||
@lru_cache(maxsize=64)
|
||||
def _cached_compile(pattern: str) -> Pattern[str]:
|
||||
return re.compile(pattern)
|
||||
|
||||
|
||||
def normalize_string_quotes(s: str) -> str:
    """Prefer double quotes but only if it doesn't cause more escaping.

    Adds or removes backslashes as appropriate. Returns `s` unchanged when
    conversion would increase escaping, when the string already uses double
    quotes with equal escaping, or when conversion is impossible (raw string
    containing the target quote, backslashes inside f-string expressions).
    """
    value = s.lstrip(STRING_PREFIX_CHARS)
    if value[:3] == '"""':
        return s

    elif value[:3] == "'''":
        orig_quote = "'''"
        new_quote = '"""'
    elif value[0] == '"':
        orig_quote = '"'
        new_quote = "'"
    else:
        orig_quote = "'"
        new_quote = '"'
    first_quote_pos = s.find(orig_quote)
    if first_quote_pos == -1:
        return s  # There's an internal error

    prefix = s[:first_quote_pos]
    # `sub_twice` is used below because replacements can overlap.
    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
    escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
    escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){orig_quote}")
    body = s[first_quote_pos + len(orig_quote) : -len(orig_quote)]
    if "r" in prefix.casefold():
        if unescaped_new_quote.search(body):
            # There's at least one unescaped new_quote in this raw string
            # so converting is impossible
            return s

        # Do not introduce or remove backslashes in raw strings
        new_body = body
    else:
        # remove unnecessary escapes
        new_body = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", body)
        if body != new_body:
            # Consider the string without unnecessary escapes as the original
            body = new_body
            s = f"{prefix}{orig_quote}{body}{orig_quote}"
        new_body = sub_twice(escaped_orig_quote, rf"\1\2{orig_quote}", new_body)
        new_body = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_body)

    if "f" in prefix.casefold():
        matches = re.findall(
            r"""
            (?:(?<!\{)|^)\{  # start of the string or a non-{ followed by a single {
                ([^{].*?)  # contents of the brackets except if begins with {{
            \}(?:(?!\})|$)  # A } followed by end of the string or a non-}
            """,
            new_body,
            re.VERBOSE,
        )
        for m in matches:
            if "\\" in str(m):
                # Do not introduce backslashes in interpolated expressions
                return s

    if new_quote == '"""' and new_body[-1:] == '"':
        # edge case: a trailing " would close the new triple quote early.
        new_body = new_body[:-1] + '\\"'
    orig_escape_count = body.count("\\")
    new_escape_count = new_body.count("\\")
    if new_escape_count > orig_escape_count:
        return s  # Do not introduce more escaping

    if new_escape_count == orig_escape_count and orig_quote == '"':
        return s  # Prefer double quotes

    return f"{prefix}{new_quote}{new_body}{new_quote}"
|
||||
|
||||
|
||||
def normalize_fstring_quotes(
    quote: str,
    middles: list[Leaf],
    is_raw_fstring: bool,
) -> tuple[list[Leaf], str]:
    """Prefer double quotes but only if it doesn't cause more escaping.

    Adds or removes backslashes as appropriate. `middles` holds the f-string's
    literal segments (the text between interpolations); their `.value`s are
    updated in place when conversion is safe. Returns the (possibly mutated)
    segments and the quote to use.
    """
    if quote == '"""':
        return middles, quote

    elif quote == "'''":
        new_quote = '"""'
    elif quote == '"':
        new_quote = "'"
    else:
        new_quote = '"'

    # `sub_twice` is used below because replacements can overlap.
    unescaped_new_quote = _cached_compile(rf"(([^\\]|^)(\\\\)*){new_quote}")
    escaped_new_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){new_quote}")
    escaped_orig_quote = _cached_compile(rf"([^\\]|^)\\((?:\\\\)*){quote}")
    if is_raw_fstring:
        for middle in middles:
            if unescaped_new_quote.search(middle.value):
                # There's at least one unescaped new_quote in this raw string
                # so converting is impossible
                return middles, quote

        # Do not introduce or remove backslashes in raw strings, just use double quote
        return middles, '"'

    new_segments: list[str] = []
    for middle in middles:
        segment = middle.value
        # remove unnecessary escapes
        new_segment = sub_twice(escaped_new_quote, rf"\1\2{new_quote}", segment)
        if segment != new_segment:
            # Consider the string without unnecessary escapes as the original
            middle.value = new_segment

        new_segment = sub_twice(escaped_orig_quote, rf"\1\2{quote}", new_segment)
        new_segment = sub_twice(unescaped_new_quote, rf"\1\\{new_quote}", new_segment)
        new_segments.append(new_segment)

    if new_quote == '"""' and new_segments[-1].endswith('"'):
        # edge case: a trailing " would close the new triple quote early.
        new_segments[-1] = new_segments[-1][:-1] + '\\"'

    # Bail out (leaving `middles` as already partially normalized above) if
    # any segment would gain escaping, or if double quotes are already in use.
    for middle, new_segment in zip(middles, new_segments):
        orig_escape_count = middle.value.count("\\")
        new_escape_count = new_segment.count("\\")

        if new_escape_count > orig_escape_count:
            return middles, quote  # Do not introduce more escaping

        if new_escape_count == orig_escape_count and quote == '"':
            return middles, quote  # Prefer double quotes

    for middle, new_segment in zip(middles, new_segments):
        middle.value = new_segment

    return middles, new_quote
|
||||
|
||||
|
||||
def normalize_unicode_escape_sequences(leaf: Leaf) -> None:
    """Replace hex codes in Unicode escape sequences with lowercase representation."""
    original = leaf.value
    if "r" in get_string_prefix(original).lower():
        # Raw strings keep their backslashes verbatim; nothing to normalize.
        return

    def _normalize(m: Match[str]) -> str:
        parts = m.groupdict()
        slashes = parts["backslashes"]

        # An even number of backslashes means the backslash before the
        # escape body is itself escaped, so this is not an escape sequence.
        if len(slashes) % 2 == 0:
            return slashes + parts["body"]

        if parts["u"]:
            # \uXXXX -> lowercase hex digits
            return slashes + "u" + parts["u"].lower()
        if parts["U"]:
            # \UXXXXXXXX -> lowercase hex digits
            return slashes + "U" + parts["U"].lower()
        if parts["x"]:
            # \xXX -> lowercase hex digits
            return slashes + "x" + parts["x"].lower()
        assert parts["N"], f"Unexpected match: {m}"
        # \N{...} named escapes are normalized to uppercase names instead.
        return slashes + "N{" + parts["N"].upper() + "}"

    leaf.value = re.sub(UNICODE_ESCAPE_RE, _normalize, original)
|
||||
|
||||
|
||||
@lru_cache(maxsize=4096)
def char_width(char: str) -> int:
    """Return the width of a single character as it would be displayed in a
    terminal or editor (which respects Unicode East Asian Width).

    Full width characters are counted as 2, while half width characters are
    counted as 1.  Also control characters are counted as 0.

    Characters not covered by WIDTH_TABLE default to width 1.
    """
    cp = ord(char)
    lo = 0
    hi = len(WIDTH_TABLE) - 1
    # Binary search WIDTH_TABLE (sorted, disjoint codepoint ranges) for the
    # entry whose [first, last] range contains the codepoint.
    while lo <= hi:
        mid = (lo + hi) // 2
        first, last, width = WIDTH_TABLE[mid]
        if cp < first:
            hi = mid - 1
        elif cp > last:
            lo = mid + 1
        else:
            # A negative width marks control characters; render them as 0.
            return max(width, 0)
    return 1
|
||||
|
||||
|
||||
def str_width(line_str: str) -> int:
    """Return the width of `line_str` as it would be displayed in a terminal
    or editor (which respects Unicode East Asian Width).

    You could utilize this function to determine, for example, if a string
    is too wide to display in a terminal or editor.
    """
    if line_str.isascii():
        # ASCII characters are always one column wide; skip the per-char
        # table lookups entirely.
        return len(line_str)
    return sum(char_width(ch) for ch in line_str)
|
||||
|
||||
|
||||
def count_chars_in_width(line_str: str, max_width: int) -> int:
    """Count the number of characters in `line_str` that would fit in a
    terminal or editor of `max_width` (which respects Unicode East Asian
    Width).
    """
    used = 0
    for index, ch in enumerate(line_str):
        w = char_width(ch)
        if used + w > max_width:
            # This character would overflow the budget; everything before
            # it fits exactly.
            return index
        used += w
    # The entire string fits within max_width columns.
    return len(line_str)
|
BIN
venv/Lib/site-packages/black/trans.cp311-win_amd64.pyd
Normal file
BIN
venv/Lib/site-packages/black/trans.cp311-win_amd64.pyd
Normal file
Binary file not shown.
2530
venv/Lib/site-packages/black/trans.py
Normal file
2530
venv/Lib/site-packages/black/trans.py
Normal file
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user