
Commit 0fdd5b9

Merge branch 'main' into clang/decomposed-var-free-function-get
2 parents: 57966ea + 0e417a7

1,713 files changed: +80,302 / -38,693 lines

.ci/metrics/metrics.py

Lines changed: 95 additions & 15 deletions
@@ -12,7 +12,7 @@
     "https://influx-prod-13-prod-us-east-0.grafana.net/api/v1/push/influx/write"
 )
 GITHUB_PROJECT = "llvm/llvm-project"
-WORKFLOWS_TO_TRACK = ["Check code formatting", "LLVM Premerge Checks"]
+WORKFLOWS_TO_TRACK = ["LLVM Premerge Checks"]
 SCRAPE_INTERVAL_SECONDS = 5 * 60


@@ -26,7 +26,67 @@ class JobMetrics:
     workflow_id: int


-def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, int]):
+@dataclass
+class GaugeMetric:
+    name: str
+    value: int
+    time_ns: int
+
+
+def get_sampled_workflow_metrics(github_repo: github.Repository):
+    """Gets global statistics about the Github workflow queue
+
+    Args:
+      github_repo: A github repo object to use to query the relevant information.
+
+    Returns:
+      Returns a list of GaugeMetric objects, containing the relevant metrics about
+      the workflow
+    """
+
+    # Other states are available (pending, waiting, etc), but the meaning
+    # is not documented (See #70540).
+    # "queued" seems to be the info we want.
+    queued_workflow_count = len(
+        [
+            x
+            for x in github_repo.get_workflow_runs(status="queued")
+            if x.name in WORKFLOWS_TO_TRACK
+        ]
+    )
+    running_workflow_count = len(
+        [
+            x
+            for x in github_repo.get_workflow_runs(status="in_progress")
+            if x.name in WORKFLOWS_TO_TRACK
+        ]
+    )
+
+    workflow_metrics = []
+    workflow_metrics.append(
+        GaugeMetric(
+            "workflow_queue_size",
+            queued_workflow_count,
+            time.time_ns(),
+        )
+    )
+    workflow_metrics.append(
+        GaugeMetric(
+            "running_workflow_count",
+            running_workflow_count,
+            time.time_ns(),
+        )
+    )
+    # Always send a heartbeat metric so we can monitor if this container is still able to log to Grafana.
+    workflow_metrics.append(
+        GaugeMetric("metrics_container_heartbeat", 1, time.time_ns())
+    )
+    return workflow_metrics
+
+
+def get_per_workflow_metrics(
+    github_repo: github.Repository, workflows_to_track: dict[str, int]
+):
     """Gets the metrics for specified Github workflows.

     This function takes in a list of workflows to track, and optionally the
@@ -43,14 +103,14 @@ def get_metrics(github_repo: github.Repository, workflows_to_track: dict[str, in
       Returns a list of JobMetrics objects, containing the relevant metrics about
       the workflow.
     """
-    workflow_runs = iter(github_repo.get_workflow_runs())
-
     workflow_metrics = []

     workflows_to_include = set(workflows_to_track.keys())

-    while len(workflows_to_include) > 0:
-        workflow_run = next(workflow_runs)
+    for workflow_run in iter(github_repo.get_workflow_runs()):
+        if len(workflows_to_include) == 0:
+            break
+
         if workflow_run.status != "completed":
             continue

@@ -139,12 +199,27 @@ def upload_metrics(workflow_metrics, metrics_userid, api_key):
       metrics_userid: The userid to use for the upload.
       api_key: The API key to use for the upload.
     """
+
+    if len(workflow_metrics) == 0:
+        print("No metrics found to upload.", file=sys.stderr)
+        return
+
     metrics_batch = []
     for workflow_metric in workflow_metrics:
-        workflow_formatted_name = workflow_metric.job_name.lower().replace(" ", "_")
-        metrics_batch.append(
-            f"{workflow_formatted_name} queue_time={workflow_metric.queue_time},run_time={workflow_metric.run_time},status={workflow_metric.status} {workflow_metric.created_at_ns}"
-        )
+        if isinstance(workflow_metric, GaugeMetric):
+            name = workflow_metric.name.lower().replace(" ", "_")
+            metrics_batch.append(
+                f"{name} value={workflow_metric.value} {workflow_metric.time_ns}"
+            )
+        elif isinstance(workflow_metric, JobMetrics):
+            name = workflow_metric.job_name.lower().replace(" ", "_")
+            metrics_batch.append(
+                f"{name} queue_time={workflow_metric.queue_time},run_time={workflow_metric.run_time},status={workflow_metric.status} {workflow_metric.created_at_ns}"
+            )
+        else:
+            raise ValueError(
+                f"Unsupported object type {type(workflow_metric)}: {str(workflow_metric)}"
+            )

     request_data = "\n".join(metrics_batch)
     response = requests.post(
@@ -176,16 +251,21 @@ def main():
     # Enter the main loop. Every five minutes we wake up and dump metrics for
     # the relevant jobs.
     while True:
-        current_metrics = get_metrics(github_repo, workflows_to_track)
-        if len(current_metrics) == 0:
-            print("No metrics found to upload.", file=sys.stderr)
-            continue
+        current_metrics = get_per_workflow_metrics(github_repo, workflows_to_track)
+        current_metrics += get_sampled_workflow_metrics(github_repo)
+        # Always send a heartbeat metric so we can monitor if this container is still able to log to Grafana.
+        current_metrics.append(
+            GaugeMetric("metrics_container_heartbeat", 1, time.time_ns())
+        )

         upload_metrics(current_metrics, grafana_metrics_userid, grafana_api_key)
         print(f"Uploaded {len(current_metrics)} metrics", file=sys.stderr)

         for workflow_metric in reversed(current_metrics):
-            workflows_to_track[workflow_metric.job_name] = workflow_metric.workflow_id
+            if isinstance(workflow_metric, JobMetrics):
+                workflows_to_track[
+                    workflow_metric.job_name
+                ] = workflow_metric.workflow_id

         time.sleep(SCRAPE_INTERVAL_SECONDS)

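A note on the wire format: the rewritten upload_metrics serializes every metric into InfluxDB line protocol before POSTing the batch to the Grafana endpoint above. Below is a minimal sketch of what the GaugeMetric branch emits, with invented values; it mirrors the f-string in the diff rather than reproducing the whole script.

    from dataclasses import dataclass

    @dataclass
    class GaugeMetric:
        name: str
        value: int
        time_ns: int

    # Gauges serialize to "<name> value=<value> <timestamp_ns>".
    gauge = GaugeMetric("workflow_queue_size", 3, 1700000000000000000)
    line = f"{gauge.name.lower().replace(' ', '_')} value={gauge.value} {gauge.time_ns}"
    print(line)  # workflow_queue_size value=3 1700000000000000000

JobMetrics entries follow the same pattern with comma-separated fields (queue_time, run_time, status), which is why any other object type raises ValueError instead of silently emitting a malformed batch line.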

.github/CODEOWNERS

Lines changed: 0 additions & 1 deletion
@@ -27,7 +27,6 @@
 /llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp @nikic
 /llvm/lib/Transforms/InstCombine/ @nikic

-/clang/include/clang/Sema/Sema.h @Endilll
 /clang/test/CXX/drs/ @Endilll
 /clang/www/cxx_dr_status.html @Endilll
 /clang/www/make_cxx_dr_status @Endilll

clang-tools-extra/clang-reorder-fields/ReorderFieldsAction.cpp

Lines changed: 33 additions & 8 deletions
@@ -63,7 +63,9 @@ getNewFieldsOrder(const RecordDecl *Definition,
     NameToIndex[Field->getName()] = Field->getFieldIndex();

   if (DesiredFieldsOrder.size() != NameToIndex.size()) {
-    llvm::errs() << "Number of provided fields doesn't match definition.\n";
+    llvm::errs() << "Number of provided fields (" << DesiredFieldsOrder.size()
+                 << ") doesn't match definition (" << NameToIndex.size()
+                 << ").\n";
     return {};
   }
   SmallVector<unsigned, 4> NewFieldsOrder;
@@ -116,26 +118,49 @@ findMembersUsedInInitExpr(const CXXCtorInitializer *Initializer,
   return Results;
 }

-/// Returns the full source range for the field declaration up to (not
-/// including) the trailing semicolon, including potential macro invocations,
-/// e.g. `int a GUARDED_BY(mu);`.
+/// Returns the end of the trailing comments after `Loc`.
+static SourceLocation getEndOfTrailingComment(SourceLocation Loc,
+                                              const SourceManager &SM,
+                                              const LangOptions &LangOpts) {
+  // We consider any following comment token that is indented more than the
+  // first comment to be part of the trailing comment.
+  const unsigned Column = SM.getPresumedColumnNumber(Loc);
+  std::optional<Token> Tok =
+      Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true);
+  while (Tok && Tok->is(tok::comment) &&
+         SM.getPresumedColumnNumber(Tok->getLocation()) > Column) {
+    Loc = Tok->getEndLoc();
+    Tok = Lexer::findNextToken(Loc, SM, LangOpts, /*IncludeComments=*/true);
+  }
+  return Loc;
+}
+
+/// Returns the full source range for the field declaration up to (including)
+/// the trailing semicolon, including potential macro invocations,
+/// e.g. `int a GUARDED_BY(mu);`. If there is a trailing comment, include it.
 static SourceRange getFullFieldSourceRange(const FieldDecl &Field,
                                            const ASTContext &Context) {
-  SourceRange Range = Field.getSourceRange();
+  const SourceRange Range = Field.getSourceRange();
+  SourceLocation Begin = Range.getBegin();
   SourceLocation End = Range.getEnd();
   const SourceManager &SM = Context.getSourceManager();
   const LangOptions &LangOpts = Context.getLangOpts();
   while (true) {
     std::optional<Token> CurrentToken = Lexer::findNextToken(End, SM, LangOpts);

-    if (!CurrentToken || CurrentToken->is(tok::semi))
-      break;
+    if (!CurrentToken)
+      return SourceRange(Begin, End);

     if (CurrentToken->is(tok::eof))
       return Range; // Something is wrong, return the original range.
+
     End = CurrentToken->getLastLoc();
+
+    if (CurrentToken->is(tok::semi))
+      break;
   }
-  return SourceRange(Range.getBegin(), End);
+  End = getEndOfTrailingComment(End, SM, LangOpts);
+  return SourceRange(Begin, End);
 }

 /// Reorders fields in the definition of a struct/class.
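
The column heuristic in getEndOfTrailingComment is easy to restate outside Clang: starting from the end of the declaration, keep consuming comment tokens for as long as each one begins to the right of the anchor column. A small Python sketch over a hypothetical (kind, column) token stream; this token model is invented for illustration and is not Clang's lexer API.

    # Hypothetical token stream: (kind, start_column) pairs, in source order.
    def trailing_comment_token_count(tokens, anchor_column):
        # Consume comments that start past the anchor column; stop at the
        # first non-comment token or the first comment that is outdented.
        count = 0
        for kind, column in tokens:
            if kind != "comment" or column <= anchor_column:
                break
            count += 1
        return count

    # `int a;  // comment`, a further-indented continuation comment on the
    # next line, then the next declaration:
    tokens = [("comment", 10), ("comment", 12), ("identifier", 1)]
    print(trailing_comment_token_count(tokens, 7))  # 2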

clang-tools-extra/clang-tidy/bugprone/NarrowingConversionsCheck.cpp

Lines changed: 3 additions & 1 deletion
@@ -513,7 +513,9 @@ void NarrowingConversionsCheck::handleFloatingCast(const ASTContext &Context,
       return;
     }
     const BuiltinType *FromType = getBuiltinType(Rhs);
-    if (ToType->getKind() < FromType->getKind())
+    if (!llvm::APFloatBase::isRepresentableBy(
+            Context.getFloatTypeSemantics(FromType->desugar()),
+            Context.getFloatTypeSemantics(ToType->desugar())))
       diagNarrowType(SourceLoc, Lhs, Rhs);
   }
 }
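
For intuition: llvm::APFloatBase::isRepresentableBy asks whether every value of the source float semantics fits in the destination semantics, instead of comparing BuiltinType kinds, whose enum order is not a reliable proxy (for example, neither half nor bfloat16 can represent all values of the other, which no linear ordering captures). A rough value-level analogue for the double-to-float case, as a Python sketch of the idea rather than the APFloat logic:

    import struct

    def loses_precision_in_binary32(value: float) -> bool:
        # Round-trip a Python float (IEEE binary64) through binary32; if the
        # value comes back changed, the conversion would be narrowing.
        return struct.unpack("f", struct.pack("f", value))[0] != value

    print(loses_precision_in_binary32(0.5))  # False: exactly representable
    print(loses_precision_in_binary32(0.1))  # True: binary64 tail is dropped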

clang-tools-extra/clang-tidy/modernize/UseAutoCheck.cpp

Lines changed: 2 additions & 2 deletions
@@ -342,7 +342,7 @@ static void ignoreTypeLocClasses(
     Loc = Loc.getNextTypeLoc();
 }

-static bool isMutliLevelPointerToTypeLocClasses(
+static bool isMultiLevelPointerToTypeLocClasses(
     TypeLoc Loc,
     std::initializer_list<TypeLoc::TypeLocClass> const &LocClasses) {
   ignoreTypeLocClasses(Loc, {TypeLoc::Paren, TypeLoc::Qualified});
@@ -424,7 +424,7 @@ void UseAutoCheck::replaceExpr(
   auto Diag = diag(Range.getBegin(), Message);

-  bool ShouldReplenishVariableName = isMutliLevelPointerToTypeLocClasses(
+  bool ShouldReplenishVariableName = isMultiLevelPointerToTypeLocClasses(
       TSI->getTypeLoc(), {TypeLoc::FunctionProto, TypeLoc::ConstantArray});

   // Space after 'auto' to handle cases where the '*' in the pointer type is

clang-tools-extra/clang-tidy/tool/clang-tidy-diff.py

Lines changed: 35 additions & 1 deletion
@@ -35,6 +35,7 @@
 import tempfile
 import threading
 import traceback
+from pathlib import Path

 try:
     import yaml
@@ -124,6 +125,23 @@ def merge_replacement_files(tmpdir, mergefile):
     open(mergefile, "w").close()


+def get_compiling_files(args):
+    """Read a compile_commands.json database and return a set of file paths"""
+    current_dir = Path.cwd()
+    compile_commands_json = (
+        (current_dir / args.build_path) if args.build_path else current_dir
+    )
+    compile_commands_json = compile_commands_json / "compile_commands.json"
+    files = set()
+    with open(compile_commands_json) as db_file:
+        db_json = json.load(db_file)
+        for entry in db_json:
+            if "file" not in entry:
+                continue
+            files.add(Path(entry["file"]))
+    return files
+
+
 def main():
     parser = argparse.ArgumentParser(
         description="Run clang-tidy against changed files, and "
@@ -234,6 +252,13 @@ def main():
         action="store_true",
         help="Allow empty enabled checks.",
     )
+    parser.add_argument(
+        "-only-check-in-db",
+        dest="skip_non_compiling",
+        default=False,
+        action="store_true",
+        help="Only check files in the compilation database",
+    )

     clang_tidy_args = []
     argv = sys.argv[1:]
@@ -243,11 +268,13 @@ def main():

     args = parser.parse_args(argv)

+    compiling_files = get_compiling_files(args) if args.skip_non_compiling else None
+
     # Extract changed lines for each file.
     filename = None
     lines_by_file = {}
     for line in sys.stdin:
-        match = re.search('^\\+\\+\\+\\ "?(.*?/){%s}([^ \t\n"]*)' % args.p, line)
+        match = re.search(r'^\+\+\+\ "?(.*?/){%s}([^ \t\n"]*)' % args.p, line)
         if match:
             filename = match.group(2)
         if filename is None:
@@ -260,6 +287,13 @@ def main():
         if not re.match("^%s$" % args.iregex, filename, re.IGNORECASE):
             continue

+        # Skip any files not in the compiling list
+        if (
+            compiling_files is not None
+            and (Path.cwd() / filename) not in compiling_files
+        ):
+            continue
+
         match = re.search(r"^@@.*\+(\d+)(,(\d+))?", line)
         if match:
             start_line = int(match.group(1))
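
How the new -only-check-in-db filter behaves: each changed filename from the diff is joined to the current working directory and looked up in the set returned by get_compiling_files. A hedged sketch of that matching with an invented database entry (real entries typically come from CMAKE_EXPORT_COMPILE_COMMANDS=ON):

    import json
    from pathlib import Path

    # Invented compile_commands.json contents, for illustration only.
    db = json.loads('[{"file": "/src/llvm/lib/IR/Core.cpp"}, {"command": "cc ..."}]')
    files = {Path(entry["file"]) for entry in db if "file" in entry}

    # A diff touching llvm/lib/IR/Core.cpp, with the script run from /src:
    changed = "llvm/lib/IR/Core.cpp"
    print((Path("/src") / changed) in files)  # True -> clang-tidy checks it

Because the comparison is purely path-based, it assumes the script runs from the source root and that the database records absolute paths; entries without a "file" key are skipped when the set is built.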

clang-tools-extra/clang-tidy/utils/LexerUtils.cpp

Lines changed: 0 additions & 23 deletions
@@ -86,29 +86,6 @@ SourceLocation findNextTerminator(SourceLocation Start, const SourceManager &SM,
   return findNextAnyTokenKind(Start, SM, LangOpts, tok::comma, tok::semi);
 }

-std::optional<Token>
-findNextTokenIncludingComments(SourceLocation Start, const SourceManager &SM,
-                               const LangOptions &LangOpts) {
-  // `Lexer::findNextToken` will ignore comment
-  if (Start.isMacroID())
-    return std::nullopt;
-  Start = Lexer::getLocForEndOfToken(Start, 0, SM, LangOpts);
-  // Break down the source location.
-  std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Start);
-  bool InvalidTemp = false;
-  StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
-  if (InvalidTemp)
-    return std::nullopt;
-  // Lex from the start of the given location.
-  Lexer L(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
-          File.data() + LocInfo.second, File.end());
-  L.SetCommentRetentionState(true);
-  // Find the token.
-  Token Tok;
-  L.LexFromRawLexer(Tok);
-  return Tok;
-}
-
 std::optional<Token>
 findNextTokenSkippingComments(SourceLocation Start, const SourceManager &SM,
                               const LangOptions &LangOpts) {

clang-tools-extra/clang-tidy/utils/LexerUtils.h

Lines changed: 4 additions & 2 deletions
@@ -89,9 +89,11 @@ SourceLocation findNextAnyTokenKind(SourceLocation Start,
   }
 }

-std::optional<Token>
+inline std::optional<Token>
 findNextTokenIncludingComments(SourceLocation Start, const SourceManager &SM,
-                               const LangOptions &LangOpts);
+                               const LangOptions &LangOpts) {
+  return Lexer::findNextToken(Start, SM, LangOpts, true);
+}

 // Finds next token that's not a comment.
 std::optional<Token> findNextTokenSkippingComments(SourceLocation Start,
