Skip to content

Commit

Permalink
Improve string formatting
Browse files Browse the repository at this point in the history
* improve performance of `glommio/src/error.rs` by avoiding allocating strings only to print them
* Use clippy to inline all format args, and fix them by hand afterwards:
* remove a few unneeded `&` in `format!("{}", &str_val)` -- the extra `&` actually incurs a minor performance hit (about 6%) due to dynamic dispatch that rustc cannot inline.

```
❯ cargo clippy --workspace --fix --benches --tests --bins -- -A clippy::all -W clippy::uninlined_format_args
```

Note that there are many cases of `panic!` and `assert!` that also have inlinable variables that cannot be inlined under the 2018 edition. In the 2021 edition, their behavior is made consistent, but that requires migrating to 2021 first. `assert_eq!` doesn't have this issue.
  • Loading branch information
nyurik committed Aug 14, 2023
1 parent 517326b commit 2edd140
Show file tree
Hide file tree
Showing 31 changed files with 135 additions and 183 deletions.
6 changes: 3 additions & 3 deletions examples/deadline_writer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -127,7 +127,7 @@ fn competing_cpu_hog(
}

async fn static_writer(how_many: usize, shares: usize, cpuhog_tq: TaskQueueHandle) -> Duration {
let name = format!("shares-{}", shares);
let name = format!("shares-{shares}");
let tq =
glommio::executor().create_task_queue(Shares::Static(shares), Latency::NotImportant, &name);

Expand Down Expand Up @@ -196,7 +196,7 @@ fn main() {
let dur = static_writer(to_write, 1000, cpuhog_tq).await;
println!(
"Finished writing in {}",
Paint::green(format!("{:#.0?}", dur))
Paint::green(format!("{dur:#.0?}"))
);
println!(
"This was using {} shares, and short of reducing the priority of the CPU hog. {}",
Expand Down Expand Up @@ -235,7 +235,7 @@ fn main() {
let dur = deadline.push_work(test).await.unwrap();
println!(
"Finished writing in {}",
Paint::green(format!("{:#.2?}", dur))
Paint::green(format!("{dur:#.2?}"))
);
stop.set(true);
hog.await.unwrap();
Expand Down
4 changes: 2 additions & 2 deletions examples/gate.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,14 +17,14 @@ fn main() {
for i in 0..nr_tasks {
gate.spawn(enclose!((running_tasks, tasks_to_complete) async move {
running_tasks.signal(1);
println!("[Task {}] started, running tasks: {}", i, running_tasks.available());
println!("[Task {i}] started, running tasks: {}", running_tasks.available());
tasks_to_complete.acquire(1).await.unwrap();
}))
.unwrap()
.detach();
}

println!("Main: waiting for {} tasks", nr_tasks);
println!("Main: waiting for {nr_tasks} tasks");
running_tasks.acquire(nr_tasks).await.unwrap();

println!("Main: closing gate");
Expand Down
4 changes: 2 additions & 2 deletions examples/hyper.rs
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ mod hyper_compat {
let _permit = conn_control.acquire_permit(1).await;
if let Err(x) = Http::new().with_executor(HyperExecutor).serve_connection(HyperStream(stream), service_fn(service)).await {
if !x.is_incomplete_message() {
eprintln!("Stream from {:?} failed with error {:?}", addr, x);
eprintln!("Stream from {addr:?} failed with error {x:?}");
}
}
}}).detach();
Expand Down Expand Up @@ -132,7 +132,7 @@ fn main() {
))
.on_all_shards(|| async move {
let id = glommio::executor().id();
println!("Starting executor {}", id);
println!("Starting executor {id}");
hyper_compat::serve_http(([0, 0, 0, 0], 8000), hyper_demo, 1024)
.await
.unwrap();
Expand Down
4 changes: 2 additions & 2 deletions examples/sharding.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ fn main() {

impl Handler<i32> for RequestHandler {
fn handle(&self, msg: Msg, _src_shard: usize, cur_shard: usize) -> HandlerResult {
println!("shard {} received {}", cur_shard, msg);
println!("shard {cur_shard} received {msg}");
assert_eq!(get_shard_for(&msg, self.nr_shards), cur_shard);
ready(()).boxed_local()
}
Expand All @@ -38,7 +38,7 @@ fn main() {
let handler = RequestHandler { nr_shards };
let mut sharded = Sharded::new(mesh, get_shard_for, handler).await.unwrap();
let me = sharded.shard_id();
let messages = repeat_with(|| fastrand::i32(0..100)).take(10).inspect(move |x| println!("shard {} generated {}", me, x));
let messages = repeat_with(|| fastrand::i32(0..100)).take(10).inspect(move |x| println!("shard {me} generated {x}"));
sharded.handle(messages).unwrap();
sharded.close().await;
}))
Expand Down
34 changes: 8 additions & 26 deletions examples/storage.rs
Original file line number Diff line number Diff line change
Expand Up @@ -62,14 +62,11 @@ async fn stream_write<T: AsyncWriteExt + std::marker::Unpin, S: Into<String>>(
let time = start.elapsed();
let bytes = converter::convert(file_size as _);
let rate = converter::convert((file_size as f64 / time.as_secs_f64()) as _);
println!("{}: Wrote {} in {:#?}, {}/s", &name, bytes, time, rate);
println!("{name}: Wrote {bytes} in {time:#?}, {rate}/s");
stream.close().await.unwrap();
let rate = converter::convert((file_size as f64 / start.elapsed().as_secs_f64()) as _);
let time = endw.elapsed();
println!(
"{}: Closed in {:#?}, Amortized total {}/s",
&name, time, rate
);
println!("{name}: Closed in {time:#?}, Amortized total {rate}/s");
}

async fn stream_scan<T: AsyncReadExt + std::marker::Unpin, S: Into<String>>(
Expand Down Expand Up @@ -97,11 +94,7 @@ async fn stream_scan<T: AsyncReadExt + std::marker::Unpin, S: Into<String>>(
let bytes = converter::convert(bytes_read as _);
let rate = converter::convert((bytes_read as f64 / time.as_secs_f64()) as _);
println!(
"{}: Scanned {} in {:#?}, {}/s, {} IOPS",
&name,
bytes,
time,
rate,
"{name}: Scanned {bytes} in {time:#?}, {rate}/s, {} IOPS",
(ops as f64 / time.as_secs_f64()) as usize
);
stream
Expand Down Expand Up @@ -134,11 +127,7 @@ async fn stream_scan_alt_api<S: Into<String>>(
let bytes = converter::convert(bytes_read as _);
let rate = converter::convert((bytes_read as f64 / time.as_secs_f64()) as _);
println!(
"{}: Scanned {} in {:#?}, {}/s, {} IOPS",
&name,
bytes,
time,
rate,
"{name}: Scanned {bytes} in {time:#?}, {rate}/s, {} IOPS",
(ops as f64 / time.as_secs_f64()) as usize
);
stream.close().await.unwrap();
Expand Down Expand Up @@ -236,10 +225,7 @@ async fn random_read<S: Into<String>>(
let bytes = converter::convert(random as _);
let dur = time.elapsed();
println!(
"{}: Random Read (uniform) size span of {}, for {:#?}, {} IOPS",
&name,
bytes,
dur,
"{name}: Random Read (uniform) size span of {bytes}, for {dur:#?}, {} IOPS",
(iops.get() as f64 / dur.as_secs_f64()) as usize
);
}
Expand Down Expand Up @@ -289,12 +275,8 @@ async fn random_many_read<S: Into<String>>(
let max_merged = converter::convert(max_buffer_size as _);
let dur = time.elapsed();
println!(
"{}: Random Bulk Read (uniform) size span of {}, for {:#?} (max merged size of {}), {} \
IOPS",
&name,
bytes,
dur,
max_merged,
"{name}: Random Bulk Read (uniform) size span of {bytes}, for {dur:#?} (max merged size \
of {max_merged}), {} IOPS",
(iops.get() as f64 / dur.as_secs_f64()) as usize
);
}
Expand Down Expand Up @@ -324,7 +306,7 @@ fn main() {
let mut dir = PathBuf::from(path);
assert!(dir.exists());
dir.push("benchfiles");
assert!(!dir.exists(), "{:?} already exists", dir);
assert!(!dir.exists(), "{dir:?} already exists");
let dir = BenchDirectory::new(dir);

let total_memory = sys_info::mem_info().unwrap().total << 10;
Expand Down
2 changes: 2 additions & 0 deletions glommio/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,8 @@ homepage = "https://github.com/DataDog/glommio"
keywords = ["linux", "rust", "async", "iouring", "thread-per-core"]
categories = ["asynchronous", "concurrency", "os", "filesystem", "network-programming"]
readme = "../README.md"
# This is also documented in the README.md under "Supported Rust Versions"
rust-version = "1.58"

[dependencies]
ahash = "0.7"
Expand Down
4 changes: 2 additions & 2 deletions glommio/benches/competing_io.rs
Original file line number Diff line number Diff line change
Expand Up @@ -80,7 +80,7 @@ fn main() {
.spawn_local_into(
enclose!((gate, file)
async move {
run_io(&format!("iteration: {}", x), &file, IO_TO_PERFORM, 4096).await;
run_io(&format!("iteration: {x}"), &file, IO_TO_PERFORM, 4096).await;
gate.replace(false);
}),
lat_tq,
Expand Down Expand Up @@ -125,7 +125,7 @@ async fn run_io(name: &str, file: &ImmutableFile, count: usize, size: usize) {

let hist = Rc::try_unwrap(hist).unwrap().into_inner();

println!("\n --- {} ---", name);
println!("\n --- {name} ---");
println!(
"performed {}k read IO at {}k IOPS (took {:.2}s)",
count / 1_000,
Expand Down
2 changes: 1 addition & 1 deletion glommio/benches/foreign_wake.rs
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ fn main() {

fn test_latency(latency_req: Latency) {
println!();
println!("Latency requirement: {:?}", latency_req);
println!("Latency requirement: {latency_req:?}");

let runtime = Builder::new_multi_thread()
.worker_threads(2)
Expand Down
7 changes: 2 additions & 5 deletions glommio/benches/nop.rs
Original file line number Diff line number Diff line change
Expand Up @@ -64,18 +64,15 @@ fn main() {
.unwrap();
let measurement = ex.run(run_bench_tasks(bench.num_tasks, bench.num_events));

println!("{}", measurement);
println!("{measurement}");
measurements.push(measurement);
}

let sum = measurements.iter().fold(Duration::from_secs(0), |acc, v| {
acc + v.average_task_duration
});
let average = sum / num_bench_runs;
println!(
"Average task duration across {} runs: {:?}\n",
num_bench_runs, average
);
println!("Average task duration across {num_bench_runs} runs: {average:?}\n");
measurements.clear();
}
}
Expand Down
2 changes: 1 addition & 1 deletion glommio/src/channels/channel_mesh.rs
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@ impl<T: Send> Senders<T> {
let msg = if idx < self.nr_consumers() {
"Local message should not be sent via channel mesh".into()
} else {
format!("Shard {} is invalid in the channel mesh", idx)
format!("Shard {idx} is invalid in the channel mesh")
};
Err(GlommioError::IoError(Error::new(
ErrorKind::InvalidInput,
Expand Down
4 changes: 2 additions & 2 deletions glommio/src/channels/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -295,7 +295,7 @@ pub mod channel_mesh;
///
/// impl Handler<i32> for RequestHandler {
/// fn handle(&self, msg: Msg, _src_shard: usize, cur_shard: usize) -> HandlerResult {
/// println!("shard {} received {}", cur_shard, msg);
/// println!("shard {cur_shard} received {msg}");
/// assert_eq!(get_shard_for(&msg, self.nr_shards), cur_shard);
/// ready(()).boxed_local()
/// }
Expand All @@ -308,7 +308,7 @@ pub mod channel_mesh;
/// let handler = RequestHandler { nr_shards };
/// let mut sharded = Sharded::new(mesh, get_shard_for, handler).await.unwrap();
/// let me = sharded.shard_id();
/// let messages = repeat_with(|| fastrand::i32(0..10)).take(1000).inspect(move |x| println!("shard {} generated {}", me, x));
/// let messages = repeat_with(|| fastrand::i32(0..10)).take(1000).inspect(move |x| println!("shard {me} generated {x}"));
/// sharded.handle(messages).unwrap();
/// sharded.close().await;
/// }))
Expand Down
2 changes: 1 addition & 1 deletion glommio/src/channels/spsc_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ impl<T> fmt::Debug for Buffer<T> {
let id_to_str = |id| match id {
0 => "not connected".into(),
usize::MAX => "disconnected".into(),
x => format!("{}", x),
x => format!("{x}"),
};

let consumer_id = id_to_str(self.pcache.consumer_id.load(Ordering::Relaxed));
Expand Down
3 changes: 1 addition & 2 deletions glommio/src/controllers/deadline_queue.rs
Original file line number Diff line number Diff line change
Expand Up @@ -205,8 +205,7 @@ impl<T> SharesManager for InnerQueue<T> {
for (exp, source) in queue.iter() {
let remaining_time = exp.saturating_duration_since(now);
trace!(
"Remaining time for this source: {:#?}, total_units {}",
remaining_time,
"Remaining time for this source: {remaining_time:#?}, total_units {}",
source.total_units()
);
let time_fraction =
Expand Down
Loading

0 comments on commit 2edd140

Please sign in to comment.