// photoncloud-monorepo/lightningstor/crates/lightningstor-distributed/src/config.rs

//! Configuration types for distributed storage backends
use serde::{Deserialize, Serialize};
/// Redundancy strategy for object storage
/// Redundancy strategy for object storage.
///
/// Serialized with serde's internally-tagged representation, e.g.
/// `{"type":"erasure_coded","data_shards":4,"parity_shards":2}` or
/// `{"type":"none"}` for the unit variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum RedundancyMode {
    /// No redundancy (local storage only)
    None,
    /// Reed-Solomon erasure coding
    ErasureCoded {
        /// Number of data shards
        data_shards: usize,
        /// Number of parity shards
        parity_shards: usize,
    },
    /// Simple N-way replication
    Replicated {
        /// Number of replicas (including primary)
        replica_count: usize,
        /// Read quorum (minimum replicas for successful read)
        read_quorum: usize,
        /// Write quorum (minimum replicas for successful write)
        write_quorum: usize,
    },
}
impl Default for RedundancyMode {
fn default() -> Self {
// Default: 4+2 erasure coding (1.5x overhead, tolerates 2 failures)
Self::ErasureCoded {
data_shards: 4,
parity_shards: 2,
}
}
}
impl RedundancyMode {
    /// Create a new erasure coded configuration.
    pub fn erasure_coded(data_shards: usize, parity_shards: usize) -> Self {
        Self::ErasureCoded {
            data_shards,
            parity_shards,
        }
    }

    /// Create a new replicated configuration with default quorums:
    /// read from any single replica, write to a majority.
    pub fn replicated(replica_count: usize) -> Self {
        Self::Replicated {
            replica_count,
            read_quorum: 1,
            // Majority write quorum: 2 of 3, 3 of 5, etc.
            write_quorum: (replica_count / 2) + 1,
        }
    }

    /// Create a new replicated configuration with custom quorums.
    ///
    /// Quorums are not validated here; callers should ensure
    /// `read_quorum <= replica_count` and `write_quorum <= replica_count`.
    pub fn replicated_with_quorum(
        replica_count: usize,
        read_quorum: usize,
        write_quorum: usize,
    ) -> Self {
        Self::Replicated {
            replica_count,
            read_quorum,
            write_quorum,
        }
    }

    /// Get the minimum number of nodes required for this redundancy mode.
    pub fn min_nodes(&self) -> usize {
        match self {
            Self::None => 1,
            Self::ErasureCoded {
                data_shards,
                parity_shards,
            } => data_shards + parity_shards,
            Self::Replicated { replica_count, .. } => *replica_count,
        }
    }

    /// Get the storage overhead factor (1.0 = no overhead).
    ///
    /// For `ErasureCoded`, `data_shards` must be non-zero; a zero value
    /// yields `f64::INFINITY` (or `NaN` for 0/0) rather than panicking,
    /// since float division never traps.
    pub fn overhead_factor(&self) -> f64 {
        match self {
            Self::None => 1.0,
            Self::ErasureCoded {
                data_shards,
                parity_shards,
            } => (*data_shards + *parity_shards) as f64 / *data_shards as f64,
            Self::Replicated { replica_count, .. } => *replica_count as f64,
        }
    }

    /// Get the number of node failures that can be tolerated.
    pub fn fault_tolerance(&self) -> usize {
        match self {
            Self::None => 0,
            Self::ErasureCoded { parity_shards, .. } => *parity_shards,
            // Writes still succeed while `write_quorum` replicas remain,
            // so up to `replica_count - write_quorum` nodes may fail.
            // `saturating_sub` avoids a debug-build underflow panic for a
            // misconfigured quorum (write_quorum > replica_count), which
            // `replicated_with_quorum` does not prevent.
            Self::Replicated {
                replica_count,
                write_quorum,
                ..
            } => replica_count.saturating_sub(*write_quorum),
        }
    }
}
/// Chunk size configuration.
///
/// All three fields fall back to their `ChunkConfig::default_*` values
/// when absent from the serialized form, so a bare `{}` deserializes to
/// the same values as `ChunkConfig::default()`.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ChunkConfig {
    /// Default chunk size in bytes (default: 8 MiB)
    #[serde(default = "ChunkConfig::default_chunk_size")]
    pub chunk_size: usize,
    /// Minimum chunk size in bytes (default: 1 MiB)
    #[serde(default = "ChunkConfig::default_min_chunk_size")]
    pub min_chunk_size: usize,
    /// Maximum chunk size in bytes (default: 64 MiB)
    #[serde(default = "ChunkConfig::default_max_chunk_size")]
    pub max_chunk_size: usize,
}
impl ChunkConfig {
    // The default_* fns below double as serde `default = "..."` providers
    // on the struct fields, so they must stay associated functions.
    const fn default_chunk_size() -> usize {
        8 * 1024 * 1024 // 8 MiB
    }

    const fn default_min_chunk_size() -> usize {
        1024 * 1024 // 1 MiB
    }

    const fn default_max_chunk_size() -> usize {
        64 * 1024 * 1024 // 64 MiB
    }

    /// Create a new chunk configuration with a custom chunk size,
    /// keeping the default minimum/maximum bounds.
    pub fn new(chunk_size: usize) -> Self {
        Self {
            chunk_size,
            ..Self::default()
        }
    }
}
impl Default for ChunkConfig {
fn default() -> Self {
Self {
chunk_size: Self::default_chunk_size(),
min_chunk_size: Self::default_min_chunk_size(),
max_chunk_size: Self::default_max_chunk_size(),
}
}
}
/// Distributed storage configuration.
///
/// Node discovery is either static (`node_endpoints`) or dynamic via a
/// registry (`registry_endpoint`); `registry_endpoint` is an `Option`, so
/// serde treats it as optional and a missing field deserializes to `None`.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct DistributedConfig {
    /// Redundancy mode
    #[serde(default)]
    pub redundancy: RedundancyMode,
    /// Chunk configuration
    #[serde(default)]
    pub chunk: ChunkConfig,
    /// Node endpoints (for static configuration)
    #[serde(default)]
    pub node_endpoints: Vec<String>,
    /// Registry endpoint (for dynamic discovery via ChainFire)
    pub registry_endpoint: Option<String>,
    /// Connection timeout in milliseconds (default: 5000)
    #[serde(default = "DistributedConfig::default_connection_timeout")]
    pub connection_timeout_ms: u64,
    /// Request timeout in milliseconds (default: 300000, i.e. 5 minutes)
    #[serde(default = "DistributedConfig::default_request_timeout")]
    pub request_timeout_ms: u64,
    /// Maximum retries for failed operations (default: 3)
    #[serde(default = "DistributedConfig::default_max_retries")]
    pub max_retries: u32,
}
impl DistributedConfig {
    // These serve as serde `default = "..."` providers for the fields above.

    /// Default connection timeout: 5 seconds.
    const fn default_connection_timeout() -> u64 {
        5_000
    }

    /// Default request timeout: 5 minutes (large transfers take a while).
    const fn default_request_timeout() -> u64 {
        300_000
    }

    /// Default retry budget for failed operations.
    const fn default_max_retries() -> u32 {
        3
    }
}
impl Default for DistributedConfig {
fn default() -> Self {
Self {
redundancy: RedundancyMode::default(),
chunk: ChunkConfig::default(),
node_endpoints: vec![],
registry_endpoint: None,
connection_timeout_ms: Self::default_connection_timeout(),
request_timeout_ms: Self::default_request_timeout(),
max_retries: Self::default_max_retries(),
}
}
}
/// Bucket-level storage configuration override.
///
/// `None` fields mean "inherit the cluster-wide `DistributedConfig` value";
/// `Default` therefore yields a no-op override.
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct BucketStorageConfig {
    /// Override redundancy mode for this bucket
    pub redundancy: Option<RedundancyMode>,
    /// Override chunk size for this bucket
    pub chunk_size: Option<usize>,
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_redundancy_mode_default() {
        let mode = RedundancyMode::default();
        assert!(matches!(
            mode,
            RedundancyMode::ErasureCoded {
                data_shards: 4,
                parity_shards: 2
            }
        ));
    }

    #[test]
    fn test_redundancy_mode_min_nodes() {
        assert_eq!(RedundancyMode::None.min_nodes(), 1);
        assert_eq!(RedundancyMode::erasure_coded(4, 2).min_nodes(), 6);
        assert_eq!(RedundancyMode::replicated(3).min_nodes(), 3);
    }

    #[test]
    fn test_redundancy_mode_overhead() {
        assert!((RedundancyMode::None.overhead_factor() - 1.0).abs() < f64::EPSILON);
        assert!((RedundancyMode::erasure_coded(4, 2).overhead_factor() - 1.5).abs() < f64::EPSILON);
        assert!((RedundancyMode::replicated(3).overhead_factor() - 3.0).abs() < f64::EPSILON);
    }

    #[test]
    fn test_redundancy_mode_fault_tolerance() {
        assert_eq!(RedundancyMode::None.fault_tolerance(), 0);
        assert_eq!(RedundancyMode::erasure_coded(4, 2).fault_tolerance(), 2);
        // replica_count=3, write_quorum=2 -> can tolerate 1 failure
        assert_eq!(RedundancyMode::replicated(3).fault_tolerance(), 1);
    }

    #[test]
    fn test_replicated_with_quorum() {
        // Custom quorums must be honored verbatim.
        let mode = RedundancyMode::replicated_with_quorum(5, 2, 4);
        assert!(matches!(
            mode,
            RedundancyMode::Replicated {
                replica_count: 5,
                read_quorum: 2,
                write_quorum: 4
            }
        ));
        assert_eq!(mode.min_nodes(), 5);
        // replica_count=5, write_quorum=4 -> can tolerate 1 failure
        assert_eq!(mode.fault_tolerance(), 1);
    }

    #[test]
    fn test_chunk_config_default() {
        let config = ChunkConfig::default();
        assert_eq!(config.chunk_size, 8 * 1024 * 1024);
        assert_eq!(config.min_chunk_size, 1024 * 1024);
        assert_eq!(config.max_chunk_size, 64 * 1024 * 1024);
    }

    #[test]
    fn test_chunk_config_new() {
        // Custom chunk size, default min/max bounds.
        let config = ChunkConfig::new(16 * 1024 * 1024);
        assert_eq!(config.chunk_size, 16 * 1024 * 1024);
        assert_eq!(config.min_chunk_size, 1024 * 1024);
        assert_eq!(config.max_chunk_size, 64 * 1024 * 1024);
    }

    #[test]
    fn test_distributed_config_default() {
        let config = DistributedConfig::default();
        assert!(matches!(
            config.redundancy,
            RedundancyMode::ErasureCoded { .. }
        ));
        assert!(config.node_endpoints.is_empty());
        assert!(config.registry_endpoint.is_none());
    }

    #[test]
    fn test_redundancy_mode_serialization() {
        let ec = RedundancyMode::erasure_coded(4, 2);
        let json = serde_json::to_string(&ec).unwrap();
        let parsed: RedundancyMode = serde_json::from_str(&json).unwrap();
        assert_eq!(ec, parsed);

        let rep = RedundancyMode::replicated(3);
        let json = serde_json::to_string(&rep).unwrap();
        let parsed: RedundancyMode = serde_json::from_str(&json).unwrap();
        assert_eq!(rep, parsed);
    }

    #[test]
    fn test_redundancy_none_serialization() {
        // Internally-tagged unit variant serializes to just the tag.
        let json = serde_json::to_string(&RedundancyMode::None).unwrap();
        assert_eq!(json, r#"{"type":"none"}"#);
        let parsed: RedundancyMode = serde_json::from_str(&json).unwrap();
        assert_eq!(parsed, RedundancyMode::None);
    }
}