first commit

This commit is contained in:
2025-10-18 12:31:20 +02:00
commit 07ca1322bd
5 changed files with 1847 additions and 0 deletions

1652
bench/Cargo.lock generated Normal file

File diff suppressed because it is too large Load Diff

8
bench/Cargo.toml Normal file
View File

@@ -0,0 +1,8 @@
# Manifest for the load-generator binary in bench/src/main.rs.
[package]
name = "bench"
version = "0.1.0"
edition = "2024"
# tokio "full": multi-threaded runtime + timers used for pacing and task spawning.
# reqwest "stream"/"json": HTTP client; only the core GET + body-read path is used here.
[dependencies]
tokio = { version = "1", features = ["full"] }
reqwest = { version = "0.12", features = ["json", "stream"] }

118
bench/src/main.rs Normal file
View File

@@ -0,0 +1,118 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
use tokio::time::sleep;
#[tokio::main]
async fn main() {
    // Open-loop HTTP load generator: ramps the nominal request rate from
    // `start_rps` to `max_rps` and, for each step, reports the average
    // latency of successful requests and the error count. The target is
    // expected to echo back the IP supplied in the X-Forwarded-For header.
    let target = "http://127.0.0.1:6008/"; // adjust your port
    let expected_ip = "1.2.3.4";
    let start_rps = 10_000;
    let max_rps = 100_000;
    let step = 1000;
    let duration_per_step = Duration::from_secs(2);
    println!("# Benchmarking {target}");
    println!("rate(req/s)\tavg_latency(ms)\terrors");
    let client = reqwest::Client::builder()
        .timeout(Duration::from_secs(2))
        .pool_idle_timeout(None) // keep connections warm across rate steps
        .build()
        .unwrap();
    for rate in (start_rps..=max_rps).step_by(step) {
        // NOTE(review): pacing by sleeping 1/rate between spawns is bounded
        // by tokio's timer resolution (~1 ms), so the *achieved* rate can
        // saturate well below the nominal `rate` at high targets. The column
        // header reports the nominal rate, not the measured one.
        let interval = Duration::from_secs_f64(1.0 / rate as f64);
        let end_time = Instant::now() + duration_per_step;
        // Per-step shared counters; Relaxed ordering is fine for counters
        // that are only read after all writers have been joined below.
        let total = Arc::new(AtomicU64::new(0));
        let errors = Arc::new(AtomicU64::new(0));
        let lat_sum = Arc::new(AtomicU64::new(0)); // µs, successful requests only
        let err_kinds = Arc::new(Mutex::new(HashMap::<&'static str, u64>::new()));
        // Keep every task handle so we can drain in-flight requests before
        // reading the counters. A fixed post-step sleep shorter than the
        // client timeout (2 s) would miss late timeouts/slow responses and
        // silently undercount errors.
        let mut handles = Vec::new();
        while Instant::now() < end_time {
            let client = client.clone();
            let total = total.clone();
            let errors = errors.clone();
            let lat_sum = lat_sum.clone();
            let err_kinds = err_kinds.clone();
            handles.push(tokio::spawn(async move {
                // Bump the error counter and tally the failure kind.
                // The std Mutex is never held across an .await, so it is
                // safe to use inside this async task.
                let record_err = |kind: &'static str| {
                    errors.fetch_add(1, Ordering::Relaxed);
                    *err_kinds.lock().unwrap().entry(kind).or_insert(0) += 1;
                };
                let start = Instant::now();
                let result = client
                    .get(target)
                    .header("X-Forwarded-For", expected_ip)
                    .send()
                    .await;
                total.fetch_add(1, Ordering::Relaxed);
                match result {
                    Ok(resp) => {
                        let status = resp.status();
                        match resp.text().await {
                            Ok(body) => {
                                if !status.is_success() {
                                    record_err("bad_status");
                                } else if body.trim() != expected_ip {
                                    record_err("wrong_body");
                                } else {
                                    // Latency includes reading the full body,
                                    // matching what a real client observes.
                                    let latency = start.elapsed().as_micros() as u64;
                                    lat_sum.fetch_add(latency, Ordering::Relaxed);
                                }
                            }
                            Err(_) => record_err("read_fail"),
                        }
                    }
                    Err(e) => {
                        // Classify transport-level failures for the summary.
                        let reason: &'static str = if e.is_connect() {
                            "connect_fail"
                        } else if e.is_timeout() {
                            "timeout"
                        } else if e.is_body() {
                            "body_err"
                        } else {
                            "other_err"
                        };
                        record_err(reason);
                    }
                }
            }));
            sleep(interval).await;
        }
        // Drain all outstanding requests so the stats below are complete;
        // a panicked task (shouldn't happen) is simply ignored.
        for handle in handles {
            let _ = handle.await;
        }
        let total = total.load(Ordering::Relaxed);
        let errors = errors.load(Ordering::Relaxed);
        let ok = total.saturating_sub(errors);
        let lat_sum = lat_sum.load(Ordering::Relaxed);
        // Average only over successful requests; 0.0 when nothing succeeded.
        let avg_ms = if ok > 0 {
            (lat_sum as f64 / ok as f64) / 1000.0
        } else {
            0.0
        };
        println!("{rate}\t\t{avg_ms:.3}\t\t{errors}");
        let map = err_kinds.lock().unwrap();
        if !map.is_empty() {
            for (k, v) in map.iter() {
                println!("  {k}: {v}");
            }
        }
    }
}