from __future__ import annotations

import argparse
import collections
import os.path
import re
import subprocess
import sys
import tempfile
from collections.abc import Sequence
from re import Match

import tokenize_rt

Tokens = list[tokenize_rt.Token]
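
# NOQA_FILE_RE matches a file-level "# flake8: noqa" comment; NOQA_RE
# matches an inline "# noqa" comment with an optional list of codes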
NOQA_FILE_RE = re.compile(r'^# flake8[:=]\s*noqa', re.I)
_code = '[a-z]{1,3}[0-9]+'
_sep = r'[,\s]+'
NOQA_RE = re.compile(f'# noqa(: ?{_code}({_sep}{_code})*)?', re.I)
SEP_RE = re.compile(_sep)
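

# run flake8 on `filename` and collect its output into a mapping of
# {line number: {error codes reported on that line}}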
def _run_flake8(filename: str) -> dict[int, set[str]]:
cmd = (
sys.executable,
'-mflake8',
'--format=%(row)d\t%(code)s',
'--no-show-source',
filename,
)
out, _ = subprocess.Popen(cmd, stdout=subprocess.PIPE).communicate()
ret: dict[int, set[str]] = collections.defaultdict(set)
for line in out.decode().splitlines():
lineno, code = line.split('\t')
ret[int(lineno)].add(code)
return ret
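

# delete the comment token at index `i` along with any preceding
# unimportant-whitespace token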
def _remove_comment(tokens: Tokens, i: int) -> None:
if i > 0 and tokens[i - 1].name == tokenize_rt.UNIMPORTANT_WS:
del tokens[i - 1:i + 1]
else:
del tokens[i]
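

# return a copy of `tokens` with file-level noqa comments removed and
# inline noqa comments masked in place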
def _remove_comments(tokens: Tokens) -> Tokens:
tokens = list(tokens)
for i, token in tokenize_rt.reversed_enumerate(tokens):
if token.name == 'COMMENT':
if NOQA_RE.search(token.src):
_mask_noqa_comment(tokens, i)
elif NOQA_FILE_RE.search(token.src):
_remove_comment(tokens, i)
return tokens
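

# overwrite the noqa comment with a placeholder of the same length
# ("# ....") so line lengths and columns are unchanged for flake8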
def _mask_noqa_comment(tokens: Tokens, i: int) -> None:
token = tokens[i]
match = NOQA_RE.search(token.src)
assert match is not None

    def _sub(match: Match[str]) -> str:
        return f'# {"." * (len(match.group()) - 2)}'

    src = NOQA_RE.sub(_sub, token.src)
    tokens[i] = token._replace(src=src)
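

# decide the fate of one noqa comment: drop it if no lints fire on the
# lines it covers, or narrow its code list to the codes still reported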
def _rewrite_noqa_comment(
tokens: Tokens,
i: int,
flake8_results: dict[int, set[str]],
) -> None:
# find logical lines that this noqa comment may affect
lines: set[int] = set()
j = i
while j >= 0 and tokens[j].name not in {'NL', 'NEWLINE'}:
t = tokens[j]
        if t.line is not None:  # pragma: no branch (tokenize-rt<4.2.1)
lines.update(range(t.line, t.line + t.src.count('\n') + 1))
j -= 1

    lints = set()
    for line in lines:
        lints.update(flake8_results[line])

    token = tokens[i]
    match = NOQA_RE.search(token.src)
    assert match is not None

    def _remove_noqa() -> None:
        assert match is not None
        if match.group() == token.src:
            _remove_comment(tokens, i)
        else:
            src = NOQA_RE.sub('', token.src).strip()
            if not src.startswith('#'):
                src = f'# {src}'
            tokens[i] = token._replace(src=src)

    # the comment excludes all lints on the line, but flake8 reported none
if not lints:
_remove_noqa()
elif match.group().lower() != '# noqa':
codes = set(SEP_RE.split(match.group(1)[1:]))
expected_codes = codes & lints
if not expected_codes:
_remove_noqa()
elif expected_codes != codes:
comment = f'# noqa: {", ".join(sorted(expected_codes))}'
tokens[i] = token._replace(src=NOQA_RE.sub(comment, token.src))
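

# check one file, rewriting it if any noqa comment is unnecessary;
# returns 1 if the file was rewritten or could not be processed, else 0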
def fix_file(filename: str) -> int:
with open(filename, 'rb') as f:
contents_bytes = f.read()
try:
contents_text = contents_bytes.decode()
except UnicodeDecodeError:
print(f'{filename} is non-utf8 (not supported)')
return 1

    tokens = tokenize_rt.src_to_tokens(contents_text)

    tokens_no_comments = _remove_comments(tokens)
    src_no_comments = tokenize_rt.tokens_to_src(tokens_no_comments)

    if src_no_comments == contents_text:
        return 0
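
    # run flake8 on a temporary copy of the file (noqa comments masked) in
    # the same directory, so the results show which lints actually fire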
fd, path = tempfile.mkstemp(
dir=os.path.dirname(filename),
prefix=os.path.basename(filename),
suffix='.py',
)
try:
with open(fd, 'wb') as f:
f.write(src_no_comments.encode())
flake8_results = _run_flake8(path)
finally:
os.remove(path)

    if any('E999' in v for v in flake8_results.values()):
print(f'{filename}: syntax error (skipping)')
return 0
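
    # prune or narrow each remaining noqa comment against those results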
for i, token in tokenize_rt.reversed_enumerate(tokens):
if token.name != 'COMMENT':
continue
if NOQA_RE.search(token.src):
_rewrite_noqa_comment(tokens, i, flake8_results)
elif NOQA_FILE_RE.match(token.src) and not flake8_results:
if i == 0 or tokens[i - 1].name == 'NEWLINE':
del tokens[i: i + 2]
else:
_remove_comment(tokens, i)

    newsrc = tokenize_rt.tokens_to_src(tokens)
if newsrc != contents_text:
print(f'Rewriting {filename}')
with open(filename, 'wb') as f:
f.write(newsrc.encode())
return 1
else:
return 0
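

# parse filenames from the command line and fix each one; the exit code
# is nonzero if any file was rewritten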
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='*')
args = parser.parse_args(argv)

    retv = 0
for filename in args.filenames:
retv |= fix_file(filename)
return retv


if __name__ == '__main__':
raise SystemExit(main())