Skip to content

feat: improve tracing spans, add conversion benchmarks #394

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 4 commits on Mar 27, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/bencher_on_pr_or_fork_closed.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,4 +14,5 @@ jobs:
bencher archive \
--project bms \
--token '${{ secrets.BENCHER_API_TOKEN }}' \
--branch "$GITHUB_HEAD_REF"
--branch "$GITHUB_HEAD_REF"
continue-on-error: true
1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,7 @@ script_integration_test_harness = { workspace = true }
test_utils = { workspace = true }
libtest-mimic = "0.8"
tracing-tracy = "0.11"
regex = "1.11"

[workspace]
members = [
Expand Down
163 changes: 154 additions & 9 deletions benches/benchmarks.rs
Original file line number Diff line number Diff line change
@@ -1,8 +1,19 @@
use bevy::log::tracing_subscriber;
use bevy::log::tracing_subscriber::layer::SubscriberExt;
use bevy::utils::{tracing, HashMap};
use bevy::log::{tracing_subscriber, Level};
use bevy::reflect::Reflect;
use bevy::utils::tracing;
use bevy::utils::tracing::span;
use bevy_mod_scripting_core::bindings::{
FromScript, IntoScript, Mut, Ref, ReflectReference, ScriptValue, Val,
};
use criterion::{criterion_main, measurement::Measurement, BenchmarkGroup, Criterion};
use script_integration_test_harness::{run_lua_benchmark, run_rhai_benchmark};
use criterion::{BatchSize, BenchmarkFilter};
use regex::Regex;
use script_integration_test_harness::test_functions::rand::Rng;
use script_integration_test_harness::{
perform_benchmark_with_generator, run_lua_benchmark, run_rhai_benchmark,
};
use std::collections::HashMap;
use std::{path::PathBuf, sync::LazyLock, time::Duration};
use test_utils::{discover_all_tests, Test};

Expand Down Expand Up @@ -65,10 +76,24 @@ impl BenchmarkExecutor for Test {
}
}

fn script_benchmarks(criterion: &mut Criterion) {
fn script_benchmarks(criterion: &mut Criterion, filter: Option<Regex>) {
// find manifest dir
let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
let tests = discover_all_tests(manifest_dir, |p| p.starts_with("benchmarks"));
let tests = discover_all_tests(manifest_dir, |p| {
p.path.starts_with("benchmarks")
&& if let Some(filter) = &filter {
let matching = filter.is_match(&p.benchmark_name());
if !matching {
println!(
"Skipping benchmark: '{}'. due to filter: '{filter}'",
p.benchmark_name()
);
};
matching
} else {
true
}
});

// group by benchmark group
let mut grouped: HashMap<String, Vec<Test>> =
Expand All @@ -83,9 +108,16 @@ fn script_benchmarks(criterion: &mut Criterion) {
}

for (group, tests) in grouped {
println!("Running benchmarks for group: {}", group);
let mut benchmark_group = criterion.benchmark_group(group);

for t in tests {
println!("Running benchmark: {}", t.benchmark_name());
span!(
Level::INFO,
"Benchmark harness for test",
test_name = &t.benchmark_name()
);
t.execute(&mut benchmark_group);
}

Expand All @@ -104,22 +136,135 @@ fn maybe_with_profiler(f: impl Fn(bool)) {

tracing::subscriber::set_global_default(subscriber).unwrap();

let _ = tracing_tracy::client::span!("test2");
tracing::info_span!("test");

f(true);
} else {
f(false);
}
}

/// Benchmarks measuring conversion time for script values and other things
/// (round-tripping values through the `IntoScript`/`FromScript` conversions).
fn conversion_benchmarks(criterion: &mut Criterion) {
    // All conversion benchmarks are registered under a single criterion group.
    let mut group = criterion.benchmark_group("conversions");

    // Minimal reflectable payload type used by the reference/val benchmarks below.
    #[derive(Reflect)]
    struct ReflectyVal(pub u32);

    // Round-trip a 10-element list of random integers:
    // ScriptValue::List -> script representation -> Vec<ScriptValue>.
    perform_benchmark_with_generator(
        "ScriptValue::List",
        &|rng, _| {
            let mut array = Vec::new();
            for _ in 0..10 {
                array.push(ScriptValue::Integer(rng.random()));
            }
            ScriptValue::List(array)
        },
        &|w, i| {
            // `w` is cloned so it can be used again by `from_script` below.
            let i = i.into_script(w.clone()).unwrap();
            let _ = Vec::<ScriptValue>::from_script(i, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );

    // Round-trip a 10-entry map keyed by random stringified u32s.
    perform_benchmark_with_generator(
        "ScriptValue::Map",
        &|rng, _| {
            let mut map = HashMap::default();
            for _ in 0..10 {
                map.insert(
                    rng.random::<u32>().to_string(),
                    ScriptValue::Integer(rng.random()),
                );
            }
            ScriptValue::Map(map)
        },
        &|w, i| {
            let i = i.into_script(w.clone()).unwrap();
            let _ = HashMap::<String, ScriptValue>::from_script(i, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );

    // Round-trip an allocated reflect reference. The generator takes the
    // world allocator's write lock and allocates the value up-front, so only
    // the conversion itself is timed in the benchmark closure.
    perform_benchmark_with_generator(
        "ScriptValue::Reference::from_into",
        &|rng, world| {
            let allocator = world.allocator();
            let mut allocator = allocator.write();
            ReflectReference::new_allocated(ReflectyVal(rng.random()), &mut allocator)
        },
        &|w, i| {
            let i = i.into_script(w.clone()).unwrap();
            let _ = ReflectReference::from_script(i, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );

    // Round-trip a typed `Val<T>` wrapper.
    perform_benchmark_with_generator(
        "Val<T>::from_into",
        &|rng, _| Val::new(ReflectyVal(rng.random::<u32>())),
        &|w, i| {
            let v = i.into_script(w.clone()).unwrap();
            Val::<ReflectyVal>::from_script(v, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );

    // One-way conversion into an immutable typed reference; the generator
    // pre-converts the value into its script representation so only the
    // `from_script` direction is measured.
    perform_benchmark_with_generator(
        "Ref<T>::from",
        &|rng, w| {
            Val::new(ReflectyVal(rng.random::<u32>()))
                .into_script(w)
                .unwrap()
        },
        &|w, i| {
            Ref::<ReflectyVal>::from_script(i, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );

    // Same as above, but into a mutable typed reference.
    perform_benchmark_with_generator(
        "Mut<T>::from",
        &|rng, w| {
            Val::new(ReflectyVal(rng.random::<u32>()))
                .into_script(w)
                .unwrap()
        },
        &|w, i| {
            Mut::<ReflectyVal>::from_script(i, w).unwrap();
        },
        &mut group,
        BatchSize::SmallInput,
    );
}

/// Entry point for the benchmark harness.
///
/// Configures criterion from CLI arguments, treats the first positional
/// (non-flag) argument as a `.*<val>.*` regex filter for benchmark names,
/// mirrors that filter into criterion itself, and passes it to
/// `script_benchmarks` so skipped benchmarks can be reported consistently.
/// The whole run is wrapped in `maybe_with_profiler` so a profiler can be
/// attached when enabled.
pub fn benches() {
    maybe_with_profiler(|_profiler| {
        let mut criterion: criterion::Criterion<_> = (criterion::Criterion::default())
            .configure_from_args()
            .measurement_time(Duration::from_secs(10));

        // Positional (non-flag) arguments only.
        // NOTE(review): this also keeps values of space-separated options
        // (`--flag value`); assumes criterion options are passed as
        // `--flag=value` — confirm against how the bench is invoked.
        let arguments = std::env::args()
            .skip(1) // the executable name
            .filter(|a| !a.starts_with('-'))
            .collect::<Vec<String>>();

        // take first argument as .*<val>.* regex for benchmarks
        // criterion will already have that as a filter, but we want to make
        // sure we're on the same page
        let filter = if let Some(n) = arguments.first() {
            println!("using filter: '{n}'");
            // User-supplied pattern: fail with a descriptive message rather
            // than a bare unwrap panic.
            let regex = Regex::new(n).expect("benchmark filter is not a valid regex");
            criterion = criterion.with_benchmark_filter(BenchmarkFilter::Regex(regex.clone()));
            Some(regex)
        } else {
            None
        };

        script_benchmarks(&mut criterion, filter);
        conversion_benchmarks(&mut criterion);
    });
}
criterion_main!(benches);
2 changes: 1 addition & 1 deletion crates/bevy_mod_scripting_core/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@ bevy = { workspace = true, default-features = false, features = ["bevy_asset"] }
thiserror = "1.0.31"
parking_lot = "0.12.1"
dashmap = "6"
smallvec = "1.11"
smallvec = { version = "1.11", features = ["union"] }
itertools = "0.13"
derivative = "2.2"
profiling = { workspace = true }
Expand Down
8 changes: 8 additions & 0 deletions crates/bevy_mod_scripting_core/src/asset.rs
Original file line number Diff line number Diff line change
Expand Up @@ -73,6 +73,7 @@ pub struct ScriptAssetLoader {
pub preprocessor: Option<Box<dyn Fn(&mut [u8]) -> Result<(), ScriptError> + Send + Sync>>,
}

#[profiling::all_functions]
impl AssetLoader for ScriptAssetLoader {
type Asset = ScriptAsset;

Expand Down Expand Up @@ -121,6 +122,7 @@ pub struct ScriptAssetSettings {
pub supported_extensions: &'static [&'static str],
}

#[profiling::all_functions]
impl ScriptAssetSettings {
/// Selects the language for a given asset path
pub fn select_script_language(&self, path: &AssetPath) -> Language {
Expand Down Expand Up @@ -178,6 +180,7 @@ pub struct ScriptMetadata {
pub language: Language,
}

#[profiling::all_functions]
impl ScriptMetadataStore {
/// Inserts a new metadata entry
pub fn insert(&mut self, id: AssetId<ScriptAsset>, meta: ScriptMetadata) {
Expand All @@ -202,6 +205,7 @@ impl ScriptMetadataStore {
}

/// Converts incoming asset events, into internal script asset events, also loads and inserts metadata for newly added scripts
#[profiling::function]
pub(crate) fn dispatch_script_asset_events(
mut events: EventReader<AssetEvent<ScriptAsset>>,
mut script_asset_events: EventWriter<ScriptAssetEvent>,
Expand Down Expand Up @@ -256,6 +260,7 @@ pub(crate) fn dispatch_script_asset_events(
}

/// Listens to [`ScriptAssetEvent::Removed`] events and removes the corresponding script metadata.
#[profiling::function]
pub(crate) fn remove_script_metadata(
mut events: EventReader<ScriptAssetEvent>,
mut asset_path_map: ResMut<ScriptMetadataStore>,
Expand All @@ -273,6 +278,7 @@ pub(crate) fn remove_script_metadata(
/// Listens to [`ScriptAssetEvent`] events and dispatches [`CreateOrUpdateScript`] and [`DeleteScript`] commands accordingly.
///
/// Allows for hot-reloading of scripts.
#[profiling::function]
pub(crate) fn sync_script_data<P: IntoScriptPluginParams>(
mut events: EventReader<ScriptAssetEvent>,
script_assets: Res<Assets<ScriptAsset>>,
Expand Down Expand Up @@ -321,6 +327,7 @@ pub(crate) fn sync_script_data<P: IntoScriptPluginParams>(
}

/// Setup all the asset systems for the scripting plugin and the dependencies
#[profiling::function]
pub(crate) fn configure_asset_systems(app: &mut App) -> &mut App {
// these should be in the same set as bevy's asset systems
// currently this is in the PreUpdate set
Expand Down Expand Up @@ -348,6 +355,7 @@ pub(crate) fn configure_asset_systems(app: &mut App) -> &mut App {
}

/// Setup all the asset systems for the scripting plugin and the dependencies
#[profiling::function]
pub(crate) fn configure_asset_systems_for_plugin<P: IntoScriptPluginParams>(
app: &mut App,
) -> &mut App {
Expand Down
Loading
Loading