photoncloud-monorepo/chainfire/crates/chainfire-storage/benches/storage_bench.rs
centra 5c6eb04a46 T036: Add VM cluster deployment configs for nixos-anywhere
- netboot-base.nix with SSH key auth
- Launch scripts for node01/02/03
- Node configuration.nix and disko.nix
- Nix modules for first-boot automation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2025-12-11 09:59:19 +09:00

123 lines
3.8 KiB
Rust

use chainfire_storage::kv_store::KvStore;
use chainfire_storage::RocksStore;
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use std::time::Duration;
use tempfile::TempDir;
// Size of every value written by the benchmarks; keys are distinct but all
// payloads are VALUE_SIZE bytes of b'x'.
const VALUE_SIZE: usize = 1024; // 1KB
// Number of keys written/read per measured iteration in the throughput
// benchmarks (also used to compute Throughput::Elements).
const NUM_KEYS_THROUGHPUT: usize = 10_000; // 10K for throughput tests
/// Measures bulk write throughput: `NUM_KEYS_THROUGHPUT` sequential `put`s of
/// `VALUE_SIZE`-byte values per iteration, reported by Criterion as
/// elements/sec via `Throughput::Elements`.
///
/// Note: each iteration re-puts the same key set, so after the first
/// iteration this measures overwrites rather than fresh inserts.
fn bench_write_throughput(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let rocks_store = RocksStore::new(temp_dir.path()).unwrap();
    let store = KvStore::new(rocks_store).unwrap();
    let value = vec![b'x'; VALUE_SIZE];
    // Pre-generate keys once so `format!` (string formatting + allocation)
    // is not counted inside the measured write path.
    let keys: Vec<Vec<u8>> = (0..NUM_KEYS_THROUGHPUT)
        .map(|i| format!("bench_key_{:08}", i).into_bytes())
        .collect();
    let mut group = c.benchmark_group("write_throughput");
    group.throughput(Throughput::Elements(NUM_KEYS_THROUGHPUT as u64));
    group.sample_size(10);
    group.measurement_time(Duration::from_secs(20));
    group.bench_function(BenchmarkId::from_parameter(NUM_KEYS_THROUGHPUT), |b| {
        b.iter(|| {
            for key in &keys {
                // `put` takes ownership, so one clone per key remains; that
                // matches the original cost (format! also allocated per key).
                store
                    .put(black_box(key.clone()), black_box(value.clone()), None)
                    .unwrap();
            }
        });
    });
    group.finish();
}
/// Measures bulk read throughput: `NUM_KEYS_THROUGHPUT` sequential `get`s of
/// pre-populated keys per iteration, reported as elements/sec.
fn bench_read_throughput(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let rocks_store = RocksStore::new(temp_dir.path()).unwrap();
    let store = KvStore::new(rocks_store).unwrap();
    let value = vec![b'x'; VALUE_SIZE];
    // Generate the key set once; reused for both population and the measured
    // loop. Formatting keys inside `b.iter` would inflate the measurement —
    // for fast point reads the per-key `format!` allocation is significant.
    let keys: Vec<Vec<u8>> = (0..NUM_KEYS_THROUGHPUT)
        .map(|i| format!("bench_key_{:08}", i).into_bytes())
        .collect();
    // Pre-populate the store so every measured `get` hits an existing key.
    for key in &keys {
        store.put(key.clone(), value.clone(), None).unwrap();
    }
    let mut group = c.benchmark_group("read_throughput");
    group.throughput(Throughput::Elements(NUM_KEYS_THROUGHPUT as u64));
    group.sample_size(10);
    group.measurement_time(Duration::from_secs(20));
    group.bench_function(BenchmarkId::from_parameter(NUM_KEYS_THROUGHPUT), |b| {
        b.iter(|| {
            for key in &keys {
                let _ = store.get(black_box(key)).unwrap();
            }
        });
    });
    group.finish();
}
/// Measures single-write latency with a large sample (1000) so Criterion's
/// tail estimates (p99/p999) are meaningful. Each measured call writes a
/// fresh, never-before-seen key.
///
/// Note: the per-call `format!` is intentionally left inside the measured
/// closure here — unlike the throughput benches, the key stream is unbounded
/// and cannot be pre-generated.
fn bench_write_latency(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let rocks_store = RocksStore::new(temp_dir.path()).unwrap();
    let store = KvStore::new(rocks_store).unwrap();
    let value = vec![b'x'; VALUE_SIZE];
    let mut group = c.benchmark_group("write_latency");
    group.sample_size(1000); // Larger sample for better p99/p999 estimates
    group.measurement_time(Duration::from_secs(30));
    group.bench_function("single_write", |b| {
        // u64, not the inferred i32: Criterion can run millions of iterations
        // over 30s, and in release builds an i32 counter would silently wrap,
        // turning fresh inserts into overwrites of earlier keys.
        let mut key_counter: u64 = 0;
        b.iter(|| {
            let key = format!("latency_key_{:08}", key_counter).into_bytes();
            key_counter += 1;
            store.put(black_box(key), black_box(value.clone()), None).unwrap();
        });
    });
    group.finish();
}
/// Measures single-read latency against a small (1000-key) pre-populated set,
/// cycling through the keys round-robin. Large sample size for p99/p999.
fn bench_read_latency(c: &mut Criterion) {
    // Number of keys pre-inserted; the measured loop cycles over exactly
    // this set so every `get` hits an existing key.
    const NUM_PREPOPULATED: usize = 1000;
    let temp_dir = TempDir::new().unwrap();
    let rocks_store = RocksStore::new(temp_dir.path()).unwrap();
    let store = KvStore::new(rocks_store).unwrap();
    let value = vec![b'x'; VALUE_SIZE];
    // Pre-generate keys once so `format!` is not part of the measured read
    // latency; also reused to populate the store.
    let keys: Vec<Vec<u8>> = (0..NUM_PREPOPULATED)
        .map(|i| format!("read_lat_key_{:08}", i).into_bytes())
        .collect();
    for key in &keys {
        store.put(key.clone(), value.clone(), None).unwrap();
    }
    let mut group = c.benchmark_group("read_latency");
    group.sample_size(1000);
    group.measurement_time(Duration::from_secs(30));
    group.bench_function("single_read", |b| {
        // usize, not the inferred i32 of the original: a wrapped-negative i32
        // counter would make `% 1000` negative and format keys that were
        // never inserted, silently measuring missing-key lookups instead.
        let mut key_counter: usize = 0;
        b.iter(|| {
            let key = &keys[key_counter % NUM_PREPOPULATED];
            key_counter += 1;
            let _ = store.get(black_box(key)).unwrap();
        });
    });
    group.finish();
}
// Register all benchmarks with Criterion and emit the harness `main`.
// Order here only affects report ordering, not measurement.
criterion_group!(
    benches,
    bench_write_throughput,
    bench_read_throughput,
    bench_write_latency,
    bench_read_latency
);
criterion_main!(benches);