photoncloud-monorepo/flaredb/crates/flaredb-server/benches/storage_bench.rs
centra 5c6eb04a46 T036: Add VM cluster deployment configs for nixos-anywhere
- netboot-base.nix with SSH key auth
- Launch scripts for node01/02/03
- Node configuration.nix and disko.nix
- Nix modules for first-boot automation

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
2025-12-11 09:59:19 +09:00

199 lines
5.9 KiB
Rust

use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion, Throughput};
use rocksdb::{Options, DB};
use std::time::Duration;
use tempfile::TempDir;
// Size of every value written by the benchmarks.
const VALUE_SIZE: usize = 1024; // 1KB
// Number of keys written/read per measured batch in the throughput benchmarks.
const NUM_KEYS_THROUGHPUT: usize = 10_000; // 10K for throughput tests
// Number of keys visited per range scan in the scan benchmark.
const SCAN_RANGE_SIZE: usize = 1_000; // 1K keys per scan
/// Open a RocksDB instance inside `temp_dir`, tuned for these benchmarks:
/// 64MB memtables, a 4-level LSM with LZ4 compression, and raised L0
/// back-pressure thresholds so write benchmarks are not stalled early.
///
/// Panics if the database cannot be opened (acceptable in a benchmark).
fn create_test_db(temp_dir: &TempDir) -> DB {
    let mut options = Options::default();
    options.create_if_missing(true);

    // Memtable / flush sizing.
    options.set_write_buffer_size(64 * 1024 * 1024); // 64MB
    options.set_max_write_buffer_number(3);

    // LSM tree shape.
    options.set_num_levels(4);
    options.set_target_file_size_base(64 * 1024 * 1024);
    options.set_max_bytes_for_level_base(512 * 1024 * 1024);
    options.set_max_bytes_for_level_multiplier(8.0);

    // L0 compaction / write back-pressure thresholds.
    options.set_level_zero_file_num_compaction_trigger(8);
    options.set_level_zero_slowdown_writes_trigger(17);
    options.set_level_zero_stop_writes_trigger(24);

    // Performance tuning.
    options.set_compression_type(rocksdb::DBCompressionType::Lz4);
    options.set_allow_concurrent_memtable_write(true);
    options.set_enable_write_thread_adaptive_yield(true);

    DB::open(&options, temp_dir.path()).unwrap()
}
/// Measures bulk write throughput: each measured iteration inserts
/// `NUM_KEYS_THROUGHPUT` values of `VALUE_SIZE` bytes.
///
/// Fix: keys now carry a per-iteration offset so every iteration inserts
/// *fresh* keys. Previously every iteration rewrote the same 10K keys, so
/// from the second iteration onward the benchmark measured overwrites of
/// existing keys rather than new inserts, skewing the reported throughput.
fn bench_write_throughput(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let db = create_test_db(&temp_dir);
    let value = vec![b'x'; VALUE_SIZE];

    let mut group = c.benchmark_group("write_throughput");
    group.throughput(Throughput::Elements(NUM_KEYS_THROUGHPUT as u64));
    group.sample_size(10);
    group.measurement_time(Duration::from_secs(30));
    group.bench_function(BenchmarkId::from_parameter(NUM_KEYS_THROUGHPUT), |b| {
        // Running key offset: advances by one batch per iteration so no key
        // is ever written twice across iterations.
        let mut base = 0usize;
        b.iter(|| {
            for i in base..base + NUM_KEYS_THROUGHPUT {
                let key = format!("bench_key_{:08}", i);
                db.put(black_box(key.as_bytes()), black_box(&value)).unwrap();
            }
            base += NUM_KEYS_THROUGHPUT;
        });
    });
    group.finish();
    // Close the DB before `temp_dir` is removed on drop.
    drop(db);
}
/// Measures point-read throughput: each measured iteration performs one
/// `get` for every one of `NUM_KEYS_THROUGHPUT` pre-written keys.
fn bench_read_throughput(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let db = create_test_db(&temp_dir);
    let value = vec![b'x'; VALUE_SIZE];

    // Seed the store, then flush so reads are served from SST files
    // rather than only the memtable.
    for idx in 0..NUM_KEYS_THROUGHPUT {
        db.put(format!("bench_key_{:08}", idx).as_bytes(), &value)
            .unwrap();
    }
    db.flush().unwrap();

    let mut group = c.benchmark_group("read_throughput");
    group.throughput(Throughput::Elements(NUM_KEYS_THROUGHPUT as u64));
    group.sample_size(10);
    group.measurement_time(Duration::from_secs(30));
    group.bench_function(BenchmarkId::from_parameter(NUM_KEYS_THROUGHPUT), |b| {
        b.iter(|| {
            for idx in 0..NUM_KEYS_THROUGHPUT {
                let key = format!("bench_key_{:08}", idx);
                let _ = db.get(black_box(key.as_bytes())).unwrap();
            }
        });
    });
    group.finish();
    // Close the DB before `temp_dir` is removed on drop.
    drop(db);
}
/// Measures forward range-scan throughput: each iteration walks up to
/// `SCAN_RANGE_SIZE` keys starting from the first `scan_key_` entry.
fn bench_scan_throughput(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let db = create_test_db(&temp_dir);
    let value = vec![b'x'; VALUE_SIZE];

    // Seed the key range, then flush so the scan reads from SST files.
    for idx in 0..NUM_KEYS_THROUGHPUT {
        db.put(format!("scan_key_{:08}", idx).as_bytes(), &value)
            .unwrap();
    }
    db.flush().unwrap();

    let mut group = c.benchmark_group("scan_throughput");
    group.throughput(Throughput::Elements(SCAN_RANGE_SIZE as u64));
    group.sample_size(10);
    group.measurement_time(Duration::from_secs(30));
    group.bench_function(BenchmarkId::from_parameter(SCAN_RANGE_SIZE), |b| {
        b.iter(|| {
            // Zero-padded keys sort lexicographically in numeric order, so
            // the formatted end index works as an exclusive upper bound.
            let start_key = format!("scan_key_{:08}", 0);
            let end_key = format!("scan_key_{:08}", SCAN_RANGE_SIZE);
            let visited = db
                .iterator(rocksdb::IteratorMode::From(
                    start_key.as_bytes(),
                    rocksdb::Direction::Forward,
                ))
                // Iterator errors are skipped, matching the original loop.
                .filter_map(Result::ok)
                .take_while(|(key, _value)| black_box(key.as_ref()) < end_key.as_bytes())
                .take(SCAN_RANGE_SIZE)
                .count();
            black_box(visited);
        });
    });
    group.finish();
    // Close the DB before `temp_dir` is removed on drop.
    drop(db);
}
/// Measures per-operation write latency. Each sample is a single `put` of a
/// `VALUE_SIZE`-byte value under a unique, monotonically increasing key.
fn bench_write_latency(c: &mut Criterion) {
    let temp_dir = TempDir::new().unwrap();
    let db = create_test_db(&temp_dir);
    let value = vec![b'x'; VALUE_SIZE];

    let mut group = c.benchmark_group("write_latency");
    group.sample_size(1000); // Larger sample for better p99/p999 estimates
    group.measurement_time(Duration::from_secs(60));
    group.bench_function("single_write", |b| {
        // Monotonic sequence so each write lands on a fresh key.
        let mut seq: u64 = 0;
        b.iter(|| {
            let key = format!("latency_key_{:08}", seq);
            seq += 1;
            db.put(black_box(key.as_bytes()), black_box(&value)).unwrap();
        });
    });
    group.finish();
    // Close the DB before `temp_dir` is removed on drop.
    drop(db);
}
/// Measures per-operation point-read latency: each sample is a single `get`
/// cycling through a small, pre-flushed key set.
fn bench_read_latency(c: &mut Criterion) {
    // Number of distinct keys the reads cycle through.
    const KEY_COUNT: usize = 1000;

    let temp_dir = TempDir::new().unwrap();
    let db = create_test_db(&temp_dir);
    let value = vec![b'x'; VALUE_SIZE];

    // Seed the key set, then flush so reads are served from SST files.
    for idx in 0..KEY_COUNT {
        db.put(format!("read_lat_key_{:08}", idx).as_bytes(), &value)
            .unwrap();
    }
    db.flush().unwrap();

    let mut group = c.benchmark_group("read_latency");
    group.sample_size(1000);
    group.measurement_time(Duration::from_secs(60));
    group.bench_function("single_read", |b| {
        let mut seq: usize = 0;
        b.iter(|| {
            let key = format!("read_lat_key_{:08}", seq % KEY_COUNT);
            seq += 1;
            let _ = db.get(black_box(key.as_bytes())).unwrap();
        });
    });
    group.finish();
    // Close the DB before `temp_dir` is removed on drop.
    drop(db);
}
// Register every storage benchmark in one Criterion group and generate the
// benchmark harness `main`.
criterion_group!(
benches,
bench_write_throughput,
bench_read_throughput,
bench_scan_throughput,
bench_write_latency,
bench_read_latency
);
criterion_main!(benches);