photoncloud-monorepo/chainfire/crates/chainfire-storage/src/store.rs
centra 8f94aee1fa Fix R8: Convert submodule gitlinks to regular directories
- Remove gitlinks (160000 mode) for chainfire, flaredb, iam
- Add workspace contents as regular tracked files
- Update flake.nix to use simple paths instead of builtins.fetchGit

This resolves the nix build failure where submodule directories
appeared empty in the nix store.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
2025-12-09 16:51:20 +09:00

132 lines
3.9 KiB
Rust

//! RocksDB store management
use crate::cf;
use chainfire_types::error::StorageError;
use rocksdb::{BoundColumnFamily, ColumnFamilyDescriptor, Options, DB};
use std::path::Path;
use std::sync::Arc;
/// RocksDB store wrapper with column families
/// RocksDB store wrapper with column families
pub struct RocksStore {
    // Shared handle to the underlying RocksDB instance.
    // Wrapped in `Arc` so clones of the store share one open database
    // (see the manual `Clone` impl below).
    db: Arc<DB>,
}
impl RocksStore {
/// Open or create a RocksDB database at the given path
pub fn new(path: impl AsRef<Path>) -> Result<Self, StorageError> {
let path = path.as_ref();
let mut db_opts = Options::default();
db_opts.create_if_missing(true);
db_opts.create_missing_column_families(true);
db_opts.set_max_background_jobs(4);
db_opts.set_bytes_per_sync(1024 * 1024); // 1MB
// Define column families
let cf_descriptors = vec![
ColumnFamilyDescriptor::new(cf::LOGS, Self::logs_cf_options()),
ColumnFamilyDescriptor::new(cf::META, Self::meta_cf_options()),
ColumnFamilyDescriptor::new(cf::KV, Self::kv_cf_options()),
ColumnFamilyDescriptor::new(cf::SNAPSHOT, Self::snapshot_cf_options()),
];
let db = DB::open_cf_descriptors(&db_opts, path, cf_descriptors)
.map_err(|e| StorageError::RocksDb(e.to_string()))?;
Ok(Self { db: Arc::new(db) })
}
/// Get the underlying DB handle
pub fn db(&self) -> &Arc<DB> {
&self.db
}
/// Get a column family handle
pub fn cf_handle(&self, name: &str) -> Option<Arc<BoundColumnFamily<'_>>> {
self.db.cf_handle(name)
}
/// Options for the logs column family
fn logs_cf_options() -> Options {
let mut opts = Options::default();
// Optimize for sequential reads/writes
opts.set_write_buffer_size(64 * 1024 * 1024); // 64MB
opts.set_max_write_buffer_number(3);
opts
}
/// Options for the metadata column family
fn meta_cf_options() -> Options {
let mut opts = Options::default();
// Small, frequently updated
opts.set_write_buffer_size(16 * 1024 * 1024); // 16MB
opts
}
/// Options for the KV column family
fn kv_cf_options() -> Options {
let mut opts = Options::default();
// Optimize for point lookups and range scans
opts.set_write_buffer_size(128 * 1024 * 1024); // 128MB
opts.set_max_write_buffer_number(4);
// Enable bloom filters for faster lookups
opts.set_prefix_extractor(rocksdb::SliceTransform::create_fixed_prefix(8));
opts
}
/// Options for the snapshot column family
fn snapshot_cf_options() -> Options {
let mut opts = Options::default();
opts.set_write_buffer_size(32 * 1024 * 1024); // 32MB
opts
}
}
impl Clone for RocksStore {
fn clone(&self) -> Self {
Self {
db: Arc::clone(&self.db),
}
}
}
#[cfg(test)]
mod tests {
    use super::*;
    use tempfile::tempdir;

    /// Opening a fresh store must create every expected column family.
    #[test]
    fn test_create_store() {
        let tmp = tempdir().unwrap();
        let store = RocksStore::new(tmp.path()).unwrap();

        for name in [cf::LOGS, cf::META, cf::KV, cf::SNAPSHOT] {
            assert!(store.cf_handle(name).is_some(), "missing cf: {name}");
        }
    }

    /// Data written before the store is dropped must survive a reopen.
    #[test]
    fn test_reopen_store() {
        let tmp = tempdir().unwrap();

        // First session: write a key into the meta column family, then
        // drop the store (closing the DB) at the end of the scope.
        {
            let store = RocksStore::new(tmp.path()).unwrap();
            let meta = store.cf_handle(cf::META).unwrap();
            store
                .db()
                .put_cf(&meta, b"test_key", b"test_value")
                .unwrap();
        }

        // Second session: reopen the same path and read the key back.
        let store = RocksStore::new(tmp.path()).unwrap();
        let meta = store.cf_handle(cf::META).unwrap();
        let fetched = store.db().get_cf(&meta, b"test_key").unwrap();
        assert_eq!(fetched, Some(b"test_value".to_vec()));
    }
}