-
Notifications
You must be signed in to change notification settings - Fork 1.2k
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
1 parent
3a7bdb3
commit d5700d7
Showing
8 changed files
with
362 additions
and
6 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.
Oops, something went wrong.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,31 @@ | ||
# Manifest for the Ruff micro-benchmark crate (see the crate README for usage).
[package]
name = "ruff_benchmark"
version = "0.0.0"
publish = false        # internal benchmark crate, never published to crates.io
edition.workspace = true
authors.workspace = true
homepage.workspace = true
documentation.workspace = true
repository.workspace = true
description = "Ruff Micro-benchmarks"

[lib]
# Disable the default libtest bench harness on the library target so that
# `cargo bench` only runs the explicit Criterion bench targets below.
bench = false

[[bench]]
name = "linter"
harness = false  # Criterion supplies its own `main` via `criterion_main!`

[dependencies]
ruff.path = "../ruff"
url = "2.3.1"
ureq = "2.6.2"

[dev-dependencies]
criterion = { version = "0.4.0"}

# Global-allocator backends: mimalloc on Windows ...
[target.'cfg(target_os = "windows")'.dependencies]
mimalloc = "0.1.34"

# ... and jemalloc on the remaining platforms/architectures where
# tikv-jemallocator is known to build.
[target.'cfg(all(not(target_os = "windows"), not(target_os = "openbsd"), any(target_arch = "x86_64", target_arch = "aarch64", target_arch = "powerpc64")))'.dependencies]
tikv-jemallocator = "0.5.0"
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,87 @@ | ||
## Ruff Micro-benchmarks | ||
|
||
Benchmarks for the different Ruff tools. | ||
|
||
### Run Benchmark | ||
|
||
You can run the benchmarks with | ||
|
||
```shell | ||
cargo benchmark | ||
``` | ||
|
||
### Benchmark-driven Development | ||
|
||
You can use `--save-baseline=<name>` to store an initial baseline benchmark (e.g. on `main`) and then use | ||
`--baseline=<name>` to compare against that baseline. Criterion will print a message telling you whether the benchmark improved or regressed compared to that baseline. | ||
|
||
```shell | ||
# Run once on your "baseline" code | ||
cargo benchmark --save-baseline=main | ||
|
||
# Then iterate with | ||
cargo benchmark --baseline=main | ||
``` | ||
|
||
### PR Summary | ||
You can use `--save-baseline` and `critcmp` to get a pretty comparison between two recordings. | ||
This is useful to illustrate the improvements of a PR. | ||
|
||
```shell | ||
# On main | ||
cargo benchmark --save-baseline=main | ||
|
||
# After applying your changes | ||
cargo benchmark --save-baseline=pr | ||
|
||
critcmp main pr | ||
``` | ||
|
||
You must install [`critcmp`](https://github.com/BurntSushi/critcmp) for the comparison. | ||
|
||
```bash | ||
cargo install critcmp | ||
``` | ||
|
||
### Tips | ||
|
||
* Use `cargo benchmark <filter>` to only run specific benchmarks. For example: `cargo benchmark linter/pydantic` to only run the pydantic tests. | ||
* Use `cargo benchmark --quiet` for a more cleaned up output (without statistical relevance) | ||
* Use `cargo benchmark --quick` to get faster results (more prone to noise) | ||
|
||
## Profiling | ||
|
||
### Linux | ||
|
||
Install `perf` and build `ruff_benchmark` with the `release-debug` profile and then run it with perf | ||
|
||
```shell | ||
cargo bench -p ruff_benchmark --no-run --profile=release-debug && perf record -g -F 9999 cargo bench -p ruff_benchmark --profile=release-debug -- --profile-time=1 | ||
``` | ||
|
||
Then convert the recorded profile | ||
|
||
```shell | ||
perf script -F +pid > /tmp/test.perf | ||
``` | ||
|
||
You can now view the converted file with [firefox profiler](https://profiler.firefox.com/) | ||
|
||
You can find a more in-depth guide [here](https://profiler.firefox.com/docs/#/./guide-perf-profiling) | ||
|
||
### Mac | ||
|
||
Install [`cargo-instruments`](https://crates.io/crates/cargo-instruments): | ||
|
||
```shell | ||
cargo install cargo-instruments | ||
``` | ||
|
||
Then run the profiler with | ||
|
||
```shell | ||
cargo instruments -t time --bench linter --profile release-debug -p ruff_benchmark -- --profile-time=1 | ||
``` | ||
|
||
* `-t`: Specifies what to profile. Useful options are `time` to profile the wall time and `alloc` for profiling the allocations. | ||
* You may want to pass an additional filter to run a single test file |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,71 @@ | ||
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; | ||
use ruff::linter::lint_only; | ||
use ruff::settings::{flags, Settings}; | ||
use ruff_benchmark::{TestCase, TestCaseSpeed, TestFile, TestFileDownloadError}; | ||
use std::time::Duration; | ||
|
||
// On Windows, use mimalloc as the global allocator.
#[cfg(target_os = "windows")]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

// Elsewhere (except OpenBSD), on architectures where tikv-jemallocator is
// available, use jemalloc as the global allocator. The two cfg conditions are
// mutually exclusive, so at most one `GLOBAL` is ever defined; on platforms
// matching neither, the system allocator is used.
#[cfg(all(
    not(target_os = "windows"),
    not(target_os = "openbsd"),
    any(
        target_arch = "x86_64",
        target_arch = "aarch64",
        target_arch = "powerpc64"
    )
))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
|
||
fn create_test_cases() -> Result<Vec<TestCase>, TestFileDownloadError> { | ||
Ok(vec![ | ||
TestCase::fast(TestFile::try_download("numpy/globals.py", "https://github.com/numpy/numpy/blob/89d64415e349ca75a25250f22b874aa16e5c0973/numpy/_globals.py")?), | ||
TestCase::normal(TestFile::try_download( | ||
"pydantic/types.py", | ||
"https://raw.githubusercontent.com/pydantic/pydantic/main/pydantic/types.py", | ||
)?), | ||
TestCase::normal(TestFile::try_download("numpy/ctypeslib.py", "https://github.com/numpy/numpy/blob/main/numpy/ctypeslib.py")?), | ||
TestCase::slow(TestFile::try_download( | ||
"large/dataset.py", | ||
"https://raw.githubusercontent.com/DHI/mikeio/b7d26418f4db2909b0aa965253dbe83194d7bb5b/tests/test_dataset.py", | ||
)?), | ||
]) | ||
} | ||
|
||
fn benchmark_linter(criterion: &mut Criterion) { | ||
let test_cases = create_test_cases().unwrap(); | ||
let mut group = criterion.benchmark_group("linter"); | ||
|
||
for case in test_cases { | ||
group.throughput(Throughput::Bytes(case.code().len() as u64)); | ||
group.measurement_time(match case.speed() { | ||
TestCaseSpeed::Fast => Duration::from_secs(10), | ||
TestCaseSpeed::Normal => Duration::from_secs(20), | ||
TestCaseSpeed::Slow => Duration::from_secs(30), | ||
}); | ||
group.bench_with_input( | ||
BenchmarkId::from_parameter(case.name()), | ||
&case, | ||
|b, case| { | ||
b.iter(|| { | ||
lint_only( | ||
case.code(), | ||
&case.path(), | ||
None, | ||
&black_box(Settings::default()), | ||
flags::Noqa::Enabled, | ||
flags::Autofix::Enabled, | ||
) | ||
}); | ||
}, | ||
); | ||
} | ||
|
||
group.finish(); | ||
} | ||
|
||
// Register the benchmark group and generate the `main` entry point
// (the `[[bench]]` target is declared with `harness = false`).
criterion_group!(benches, benchmark_linter);
criterion_main!(benches);
Oops, something went wrong.