
Commit e2168de

chore: add simple h2 benchmark (#762)
This PR adds a simple benchmark to measure the effect of performance changes, e.g. a potential fix for this issue: #531

The benchmark is simple: have a client send `100_000` requests to a server and wait for each response.

Output:

```
cargo bench
H2 running in current-thread runtime at 127.0.0.1:5928:
Overall: 353ms.
Fastest: 91ms
Slowest: 315ms
Avg : 249ms
H2 running in multi-thread runtime at 127.0.0.1:5929:
Overall: 533ms.
Fastest: 88ms
Slowest: 511ms
Avg : 456ms
```
1 parent 51fe05a commit e2168de
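The Fastest/Slowest/Avg figures in the output above are plain aggregates over the per-request durations the benchmark collects. A minimal, self-contained sketch of that aggregation, with made-up durations standing in for the measured ones, might look like this:

```rust
use std::time::Duration;

fn main() {
    // Hypothetical per-request latencies standing in for measured values.
    let timings = vec![
        Duration::from_millis(91),
        Duration::from_millis(230),
        Duration::from_millis(315),
    ];

    // Fastest/slowest are simply min/max; the average divides the total by N.
    let fastest = timings.iter().min().unwrap();
    let slowest = timings.iter().max().unwrap();
    let sum: Duration = timings.iter().sum();
    let avg = sum.div_f64(timings.len() as f64);

    println!("Fastest: {}ms", fastest.as_millis());
    println!("Slowest: {}ms", slowest.as_millis());
    println!("Avg : {}ms", avg.as_millis());
}
```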

File tree: 2 files changed (+152, -0)

Cargo.toml (+4)
@@ -71,3 +71,7 @@ webpki-roots = "0.25"

[package.metadata.docs.rs]
features = ["stream"]
+
+[[bench]]
+name = "main"
+harness = false
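Setting `harness = false` tells Cargo not to wrap this bench target in the default libtest harness: `benches/main.rs` is compiled as an ordinary binary whose own `fn main()` drives the benchmark when `cargo bench` runs. A bare skeleton of such a target (placeholder body only; the real file is added below) would be:

```rust
// benches/main.rs
// Because Cargo.toml declares `harness = false` for this bench target,
// this file builds as a plain binary and `cargo bench` simply runs it.
fn main() {
    // Placeholder: the real file below starts an h2 server and
    // times 100_000 client requests against it.
    println!("benchmark body goes here");
}
```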

benches/main.rs (+148)

@@ -0,0 +1,148 @@
use bytes::Bytes;
use h2::{
    client,
    server::{self, SendResponse},
    RecvStream,
};
use http::Request;

use std::{
    error::Error,
    time::{Duration, Instant},
};

use tokio::net::{TcpListener, TcpStream};

const NUM_REQUESTS_TO_SEND: usize = 100_000;

// The actual server.
async fn server(addr: &str) -> Result<(), Box<dyn Error + Send + Sync>> {
    let listener = TcpListener::bind(addr).await?;

    loop {
        if let Ok((socket, _peer_addr)) = listener.accept().await {
            tokio::spawn(async move {
                if let Err(e) = serve(socket).await {
                    println!(" -> err={:?}", e);
                }
            });
        }
    }
}

// Perform the HTTP/2 handshake on one connection and spawn a handler per request.
async fn serve(socket: TcpStream) -> Result<(), Box<dyn Error + Send + Sync>> {
    let mut connection = server::handshake(socket).await?;
    while let Some(result) = connection.accept().await {
        let (request, respond) = result?;
        tokio::spawn(async move {
            if let Err(e) = handle_request(request, respond).await {
                println!("error while handling request: {}", e);
            }
        });
    }
    Ok(())
}

// Drain the request body, releasing flow-control capacity as data arrives,
// then reply with a small "pong" response.
async fn handle_request(
    mut request: Request<RecvStream>,
    mut respond: SendResponse<Bytes>,
) -> Result<(), Box<dyn Error + Send + Sync>> {
    let body = request.body_mut();
    while let Some(data) = body.data().await {
        let data = data?;
        let _ = body.flow_control().release_capacity(data.len());
    }
    let response = http::Response::new(());
    let mut send = respond.send_response(response, false)?;
    send.send_data(Bytes::from_static(b"pong"), true)?;

    Ok(())
}

// The benchmark: send NUM_REQUESTS_TO_SEND requests and time each round trip.
async fn send_requests(addr: &str) -> Result<(), Box<dyn Error>> {
    let tcp = loop {
        let Ok(tcp) = TcpStream::connect(addr).await else {
            continue;
        };
        break tcp;
    };
    let (client, h2) = client::handshake(tcp).await?;
    // Spawn a task to run the connection...
    tokio::spawn(async move {
        if let Err(e) = h2.await {
            println!("GOT ERR={:?}", e);
        }
    });

    // Spawn one task per request; each task records its own round-trip latency.
    let mut handles = Vec::with_capacity(NUM_REQUESTS_TO_SEND);
    for _i in 0..NUM_REQUESTS_TO_SEND {
        let mut client = client.clone();
        let task = tokio::spawn(async move {
            let request = Request::builder().body(()).unwrap();

            let instant = Instant::now();
            let (response, _) = client.send_request(request, true).unwrap();
            let response = response.await.unwrap();
            let mut body = response.into_body();
            while let Some(_chunk) = body.data().await {}
            instant.elapsed()
        });
        handles.push(task);
    }

    // Wait for every request to finish and aggregate the per-request timings.
    let instant = Instant::now();
    let mut result = Vec::with_capacity(NUM_REQUESTS_TO_SEND);
    for handle in handles {
        result.push(handle.await.unwrap());
    }
    let mut sum = Duration::new(0, 0);
    for r in result.iter() {
        sum = sum.checked_add(*r).unwrap();
    }

    println!("Overall: {}ms.", instant.elapsed().as_millis());
    println!("Fastest: {}ms", result.iter().min().unwrap().as_millis());
    println!("Slowest: {}ms", result.iter().max().unwrap().as_millis());
    println!(
        "Avg : {}ms",
        sum.div_f64(NUM_REQUESTS_TO_SEND as f64).as_millis()
    );
    Ok(())
}

fn main() {
    let _ = env_logger::try_init();
    let addr = "127.0.0.1:5928";
    println!("H2 running in current-thread runtime at {addr}:");
    std::thread::spawn(|| {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()
            .unwrap();
        rt.block_on(server(addr)).unwrap();
    });

    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();
    rt.block_on(send_requests(addr)).unwrap();

    let addr = "127.0.0.1:5929";
    println!("H2 running in multi-thread runtime at {addr}:");
    std::thread::spawn(|| {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .worker_threads(4)
            .enable_all()
            .build()
            .unwrap();
        rt.block_on(server(addr)).unwrap();
    });

    let rt = tokio::runtime::Builder::new_current_thread()
        .enable_all()
        .build()
        .unwrap();
    rt.block_on(send_requests(addr)).unwrap();
}
