This repository was archived by the owner on Apr 28, 2025. It is now read-only.

Updates to CI directives #539

Merged

merged 3 commits on Apr 18, 2025
17 changes: 11 additions & 6 deletions .github/workflows/main.yaml
@@ -239,6 +239,9 @@ jobs:
name: Calculate job matrix
runs-on: ubuntu-24.04
timeout-minutes: 10
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
PR_NUMBER: ${{ github.event.pull_request.number }}
outputs:
matrix: ${{ steps.script.outputs.matrix }}
steps:
@@ -258,7 +261,7 @@
- clippy
- calculate_extensive_matrix
runs-on: ubuntu-24.04
timeout-minutes: 180
timeout-minutes: 240 # 4 hours
strategy:
matrix:
# Use the output from `calculate_extensive_matrix` to calculate the matrix
@@ -267,7 +270,7 @@
# this is not currently possible https://github.com/actions/runner/issues/1985.
include: ${{ fromJSON(needs.calculate_extensive_matrix.outputs.matrix).matrix }}
env:
CHANGED: ${{ matrix.changed }}
TO_TEST: ${{ matrix.to_test }}
steps:
- uses: actions/checkout@v4
with:
@@ -279,16 +282,18 @@
- uses: Swatinem/rust-cache@v2
- name: Run extensive tests
run: |
echo "Changed: '$CHANGED'"
if [ -z "$CHANGED" ]; then
echo "Tests to run: '$TO_TEST'"
if [ -z "$TO_TEST" ]; then
echo "No tests to run, exiting."
exit
fi

set -x

# Run the non-extensive tests first to catch any easy failures
cargo t --profile release-checked -- "$CHANGED"
cargo t --profile release-checked -- "$TO_TEST"

LIBM_EXTENSIVE_TESTS="$CHANGED" cargo t \
LIBM_EXTENSIVE_TESTS="$TO_TEST" cargo t \
--features build-mpfr,unstable,force-soft-floats \
--profile release-checked \
-- extensive
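For reference, the matrix entries consumed above are the JSON that `ci/ci-util.py` builds in `make_workflow_output` (exposed via `steps.script.outputs.matrix`). A minimal sketch of its shape, pretty-printed here with illustrative type names and routine lists (the script itself emits it compactly on one line):

{
  "matrix": [
    {"ty": "f32", "changed": "cosf,sinf", "to_test": "cosf,sinf"},
    {"ty": "f64", "changed": "", "to_test": ""}
  ]
}

Each entry becomes one extensive job: `matrix.to_test` is exported as `TO_TEST`, and a job whose `TO_TEST` is empty exits early without running any tests.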
108 changes: 84 additions & 24 deletions ci/ci-util.py
@@ -6,14 +6,15 @@
"""

import json
import os
import subprocess as sp
import sys
from dataclasses import dataclass
from glob import glob, iglob
from inspect import cleandoc
from os import getenv
from pathlib import Path
from typing import TypedDict
from typing import TypedDict, Self

USAGE = cleandoc(
"""
@@ -51,6 +52,13 @@
ARTIFACT_GLOB = "baseline-icount*"
# Place this in a PR body to skip regression checks (must be at the start of a line).
REGRESSION_DIRECTIVE = "ci: allow-regressions"
# Place this in a PR body to skip extensive tests
SKIP_EXTENSIVE_DIRECTIVE = "ci: skip-extensive"
# Place this in a PR body to allow running a large number of extensive tests. If not
# set, this script will error out if a threshold is exceeded in order to avoid
# accidentally spending huge amounts of CI time.
ALLOW_MANY_EXTENSIVE_DIRECTIVE = "ci: allow-many-extensive"
MANY_EXTENSIVE_THRESHOLD = 20

# Don't run exhaustive tests if these files change, even if they contain a function
# definition.
@@ -68,6 +76,39 @@ def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)


@dataclass
class PrInfo:
"""GitHub response for PR query"""

body: str
commits: list[str]
created_at: str
number: int

@classmethod
def load(cls, pr_number: int | str) -> Self:
"""For a given PR number, query the body and commit list"""
pr_info = sp.check_output(
[
"gh",
"pr",
"view",
str(pr_number),
"--json=number,commits,body,createdAt",
# Flatten the commit list to only hashes, change a key to snake naming
"--jq=.commits |= map(.oid) | .created_at = .createdAt | del(.createdAt)",
],
text=True,
)
eprint("PR info:", json.dumps(pr_info, indent=4))
return cls(**json.loads(pr_info))

def contains_directive(self, directive: str) -> bool:
"""Return true if the provided directive is on a line in the PR body"""
lines = self.body.splitlines()
return any(line.startswith(directive) for line in lines)


class FunctionDef(TypedDict):
"""Type for an entry in `function-definitions.json`"""

@@ -149,7 +190,7 @@ def changed_routines(self) -> dict[str, list[str]]:
eprint(f"changed files for {name}: {changed}")
routines.add(name)

ret = {}
ret: dict[str, list[str]] = {}
for r in sorted(routines):
ret.setdefault(self.defs[r]["type"], []).append(r)

@@ -159,17 +200,48 @@ def make_workflow_output(self) -> str:
"""Create a JSON object a list items for each type's changed files, if any
did change, and the routines that were affected by the change.
"""

pr_number = os.environ.get("PR_NUMBER")
skip_tests = False
error_on_many_tests = False

if pr_number is not None:
pr = PrInfo.load(pr_number)
skip_tests = pr.contains_directive(SKIP_EXTENSIVE_DIRECTIVE)
error_on_many_tests = not pr.contains_directive(
ALLOW_MANY_EXTENSIVE_DIRECTIVE
)

if skip_tests:
eprint("Skipping all extensive tests")

changed = self.changed_routines()
ret = []
total_to_test = 0

for ty in TYPES:
ty_changed = changed.get(ty, [])
ty_to_test = [] if skip_tests else ty_changed
total_to_test += len(ty_to_test)

item = {
"ty": ty,
"changed": ",".join(ty_changed),
"to_test": ",".join(ty_to_test),
}

ret.append(item)
output = json.dumps({"matrix": ret}, separators=(",", ":"))
eprint(f"output: {output}")
eprint(f"total extensive tests: {total_to_test}")

if error_on_many_tests and total_to_test > MANY_EXTENSIVE_THRESHOLD:
eprint(
f"More than {MANY_EXTENSIVE_THRESHOLD} tests would be run; add"
f" `{ALLOW_MANY_EXTENSIVE_DIRECTIVE}` to the PR body if this is intentional"
)
exit(1)

return output


@@ -266,13 +338,13 @@ def check_iai_regressions(args: list[str]):
found.
"""

iai_home = "iai-home"
pr_number = False
iai_home_str = "iai-home"
pr_number = None

while len(args) > 0:
match args:
case ["--home", home, *rest]:
iai_home = home
iai_home_str = home
args = rest
case ["--allow-pr-override", pr_num, *rest]:
pr_number = pr_num
@@ -281,18 +353,20 @@
eprint(USAGE)
exit(1)

iai_home = Path(iai_home)
iai_home = Path(iai_home_str)

found_summaries = False
regressions = []
regressions: list[dict] = []
for summary_path in iglob("**/summary.json", root_dir=iai_home, recursive=True):
found_summaries = True
with open(iai_home / summary_path, "r") as f:
summary = json.load(f)

summary_regs = []
run = summary["callgrind_summary"]["callgrind_run"]
name_entry = {"name": f"{summary["function_name"]}.{summary["id"]}"}
fname = summary["function_name"]
id = summary["id"]
name_entry = {"name": f"{fname}.{id}"}

for segment in run["segments"]:
summary_regs.extend(segment["regressions"])
@@ -312,22 +386,8 @@
eprint("Found regressions:", json.dumps(regressions, indent=4))

if pr_number is not None:
pr_info = sp.check_output(
[
"gh",
"pr",
"view",
str(pr_number),
"--json=number,commits,body,createdAt",
"--jq=.commits |= map(.oid)",
],
text=True,
)
pr = json.loads(pr_info)
eprint("PR info:", json.dumps(pr, indent=4))

lines = pr["body"].splitlines()
if any(line.startswith(REGRESSION_DIRECTIVE) for line in lines):
pr = PrInfo.load(pr_number)
if pr.contains_directive(REGRESSION_DIRECTIVE):
eprint("PR allows regressions, returning")
return

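As a usage note: `contains_directive` matches with `str.startswith` on each line of the PR body, so these directives only take effect when they begin a line of the description. A hypothetical PR body using them might look like this (the first line is just illustrative prose):

Rework the square root implementation.

ci: allow-regressions
ci: skip-extensive

Replacing `ci: skip-extensive` with `ci: allow-many-extensive` would keep the extensive jobs but lift the threshold of 20 extensive tests that otherwise makes `calculate_extensive_matrix` exit with an error.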