//! LB metadata storage using FlareDB, PostgreSQL, or SQLite.

use dashmap::DashMap;
use fiberlb_types::{
    Backend, BackendId, BackendStatus, Certificate, CertificateId, HealthCheck, HealthCheckId,
    L7Policy, L7PolicyId, L7Rule, L7RuleId, Listener, ListenerId, LoadBalancer, LoadBalancerId,
    Pool, PoolId,
};
use flaredb_client::RdbClient;
use sqlx::pool::PoolOptions;
use sqlx::{Pool as SqlxPool, Postgres, Sqlite};
use std::sync::Arc;
use tokio::sync::Mutex;

/// Result type for metadata operations
pub type Result<T> = std::result::Result<T, MetadataError>;

/// Metadata operation error
#[derive(Debug, thiserror::Error)]
pub enum MetadataError {
    #[error("Storage error: {0}")]
    Storage(String),
    #[error("Serialization error: {0}")]
    Serialization(String),
    #[error("Not found: {0}")]
    NotFound(String),
    #[error("Invalid argument: {0}")]
    InvalidArgument(String),
}

/// Storage backend enum
enum StorageBackend {
    FlareDB(Arc<Mutex<RdbClient>>),
    Sql(SqlStorageBackend),
    InMemory(Arc<DashMap<String, String>>),
}

enum SqlStorageBackend {
    Postgres(Arc<SqlxPool<Postgres>>),
    Sqlite(Arc<SqlxPool<Sqlite>>),
}

/// LB Metadata store for load balancers, listeners, pools, and backends
pub struct LbMetadataStore {
    backend: StorageBackend,
}

impl LbMetadataStore {
    /// Create a new metadata store with the default (FlareDB) backend
    pub async fn new(endpoint: Option<String>) -> Result<Self> {
        Self::new_flaredb(endpoint).await
    }

    /// Create a new metadata store with FlareDB backend
    pub async fn new_flaredb(endpoint: Option<String>) -> Result<Self> {
        Self::new_flaredb_with_pd(endpoint, None).await
    }

    /// Create a new metadata store with FlareDB backend and explicit PD address.
    pub async fn new_flaredb_with_pd(
        endpoint: Option<String>,
        pd_endpoint: Option<String>,
    ) -> Result<Self> {
        let endpoint = endpoint.unwrap_or_else(|| {
            std::env::var("FIBERLB_FLAREDB_ENDPOINT")
                .unwrap_or_else(|_| "127.0.0.1:2479".to_string())
        });
        let pd_endpoint = pd_endpoint
            .or_else(|| std::env::var("FIBERLB_CHAINFIRE_ENDPOINT").ok())
            .map(|value| normalize_transport_addr(&value))
            .unwrap_or_else(|| endpoint.clone());
        let client = RdbClient::connect_with_pd_namespace(endpoint, pd_endpoint, "fiberlb")
            .await
            .map_err(|e| MetadataError::Storage(format!("Failed to connect to FlareDB: {}", e)))?;
        Ok(Self {
            backend: StorageBackend::FlareDB(Arc::new(Mutex::new(client))),
        })
    }

    /// Create a metadata store backed by PostgreSQL or SQLite.
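    ///
    /// # Example (sketch)
    ///
    /// The URL scheme selects the engine: `postgres://` / `postgresql://` for
    /// PostgreSQL, `sqlite:` for SQLite (single-node mode only). The DSN below
    /// is illustrative, not a real deployment value.
    ///
    /// ```ignore
    /// let store = LbMetadataStore::new_sql("postgres://lb:secret@db.local/fiberlb", false).await?;
    /// ```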
    pub async fn new_sql(database_url: &str, single_node: bool) -> Result<Self> {
        let url = database_url.trim();
        if url.is_empty() {
            return Err(MetadataError::InvalidArgument(
                "metadata database URL is empty".to_string(),
            ));
        }
        if Self::is_postgres_url(url) {
            let pool = PoolOptions::<Postgres>::new()
                .max_connections(10)
                .connect(url)
                .await
                .map_err(|e| {
                    MetadataError::Storage(format!("Failed to connect to Postgres: {}", e))
                })?;
            Self::ensure_sql_schema_postgres(&pool).await?;
            return Ok(Self {
                backend: StorageBackend::Sql(SqlStorageBackend::Postgres(Arc::new(pool))),
            });
        }
        if Self::is_sqlite_url(url) {
            if !single_node {
                return Err(MetadataError::InvalidArgument(
                    "SQLite is allowed only in single-node mode".to_string(),
                ));
            }
            if url.contains(":memory:") {
                return Err(MetadataError::InvalidArgument(
                    "In-memory SQLite is not allowed".to_string(),
                ));
            }
            let pool = PoolOptions::<Sqlite>::new()
                .max_connections(1)
                .connect(url)
                .await
                .map_err(|e| {
                    MetadataError::Storage(format!("Failed to connect to SQLite: {}", e))
                })?;
            Self::ensure_sql_schema_sqlite(&pool).await?;
            return Ok(Self {
                backend: StorageBackend::Sql(SqlStorageBackend::Sqlite(Arc::new(pool))),
            });
        }
        Err(MetadataError::InvalidArgument(
            "Unsupported metadata database URL (use postgres://, postgresql://, or sqlite:)"
                .to_string(),
        ))
    }

    /// Create a new in-memory metadata store (for testing)
    pub fn new_in_memory() -> Self {
        Self {
            backend: StorageBackend::InMemory(Arc::new(DashMap::new())),
        }
    }

    fn is_postgres_url(url: &str) -> bool {
        url.starts_with("postgres://") || url.starts_with("postgresql://")
    }

    fn is_sqlite_url(url: &str) -> bool {
        url.starts_with("sqlite:")
    }

    async fn ensure_sql_schema_postgres(pool: &SqlxPool<Postgres>) -> Result<()> {
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS metadata_kv (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL
            )",
        )
        .execute(pool)
        .await
        .map_err(|e| {
            MetadataError::Storage(format!("Failed to initialize Postgres schema: {}", e))
        })?;
        Ok(())
    }

    async fn ensure_sql_schema_sqlite(pool: &SqlxPool<Sqlite>) -> Result<()> {
        sqlx::query(
            "CREATE TABLE IF NOT EXISTS metadata_kv (
                key TEXT PRIMARY KEY,
                value TEXT NOT NULL
            )",
        )
        .execute(pool)
        .await
        .map_err(|e| {
            MetadataError::Storage(format!("Failed to initialize SQLite schema: {}", e))
        })?;
        Ok(())
    }

    // =========================================================================
    // Internal storage helpers
    // =========================================================================

    async fn put(&self, key: &str, value: &str) -> Result<()> {
        match &self.backend {
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                c.raw_put(key.as_bytes().to_vec(), value.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB put failed: {}", e)))?;
            }
            StorageBackend::Sql(sql) => match sql {
                SqlStorageBackend::Postgres(pool) => {
                    sqlx::query(
                        "INSERT INTO metadata_kv (key, value) VALUES ($1, $2)
                         ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value",
                    )
                    .bind(key)
                    .bind(value)
                    .execute(pool.as_ref())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("Postgres put failed: {}", e)))?;
                }
                SqlStorageBackend::Sqlite(pool) => {
                    sqlx::query(
                        "INSERT INTO metadata_kv (key, value) VALUES (?1, ?2)
                         ON CONFLICT(key) DO UPDATE SET value = excluded.value",
                    )
                    .bind(key)
                    .bind(value)
                    .execute(pool.as_ref())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("SQLite put failed: {}", e)))?;
                }
            },
            StorageBackend::InMemory(map) => {
                map.insert(key.to_string(), value.to_string());
            }
        }
        Ok(())
    }
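    // Round-trip sketch for the helpers below (in-memory backend, hypothetical key):
    //
    //     store.put("/fiberlb/demo", "v1").await?;  // upsert semantics on all backends
    //     assert_eq!(store.get("/fiberlb/demo").await?.as_deref(), Some("v1"));
    //     store.delete_key("/fiberlb/demo").await?; // deleting a missing key is a no-op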
    async fn get(&self, key: &str) -> Result<Option<String>> {
        match &self.backend {
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                let result = c
                    .raw_get(key.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB get failed: {}", e)))?;
                Ok(result.map(|bytes| String::from_utf8_lossy(&bytes).to_string()))
            }
            StorageBackend::Sql(sql) => match sql {
                SqlStorageBackend::Postgres(pool) => {
                    let value: Option<String> =
                        sqlx::query_scalar("SELECT value FROM metadata_kv WHERE key = $1")
                            .bind(key)
                            .fetch_optional(pool.as_ref())
                            .await
                            .map_err(|e| {
                                MetadataError::Storage(format!("Postgres get failed: {}", e))
                            })?;
                    Ok(value)
                }
                SqlStorageBackend::Sqlite(pool) => {
                    let value: Option<String> =
                        sqlx::query_scalar("SELECT value FROM metadata_kv WHERE key = ?1")
                            .bind(key)
                            .fetch_optional(pool.as_ref())
                            .await
                            .map_err(|e| {
                                MetadataError::Storage(format!("SQLite get failed: {}", e))
                            })?;
                    Ok(value)
                }
            },
            StorageBackend::InMemory(map) => Ok(map.get(key).map(|v| v.value().clone())),
        }
    }

    async fn delete_key(&self, key: &str) -> Result<()> {
        match &self.backend {
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                c.raw_delete(key.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB delete failed: {}", e)))?;
            }
            StorageBackend::Sql(sql) => match sql {
                SqlStorageBackend::Postgres(pool) => {
                    sqlx::query("DELETE FROM metadata_kv WHERE key = $1")
                        .bind(key)
                        .execute(pool.as_ref())
                        .await
                        .map_err(|e| {
                            MetadataError::Storage(format!("Postgres delete failed: {}", e))
                        })?;
                }
                SqlStorageBackend::Sqlite(pool) => {
                    sqlx::query("DELETE FROM metadata_kv WHERE key = ?1")
                        .bind(key)
                        .execute(pool.as_ref())
                        .await
                        .map_err(|e| {
                            MetadataError::Storage(format!("SQLite delete failed: {}", e))
                        })?;
                }
            },
            StorageBackend::InMemory(map) => {
                map.remove(key);
            }
        }
        Ok(())
    }

    async fn get_prefix(&self, prefix: &str) -> Result<Vec<(String, String)>> {
        match &self.backend {
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                // Calculate end_key by incrementing the last byte of prefix
                let mut end_key = prefix.as_bytes().to_vec();
                if let Some(last) = end_key.last_mut() {
                    if *last == 0xff {
                        // If last byte is 0xff, append a 0x00
                        end_key.push(0x00);
                    } else {
                        *last += 1;
                    }
                } else {
                    // Empty prefix - scan everything
                    end_key.push(0xff);
                }
                let mut results = Vec::new();
                let mut start_key = prefix.as_bytes().to_vec();
                // Pagination loop to get all results
                loop {
                    let (keys, values, next) = c
                        .raw_scan(
                            start_key.clone(),
                            end_key.clone(),
                            1000, // Batch size
                        )
                        .await
                        .map_err(|e| {
                            MetadataError::Storage(format!("FlareDB scan failed: {}", e))
                        })?;
                    // Convert and add results
                    for (k, v) in keys.iter().zip(values.iter()) {
                        results.push((
                            String::from_utf8_lossy(k).to_string(),
                            String::from_utf8_lossy(v).to_string(),
                        ));
                    }
                    // Check if there are more results
                    if let Some(next_key) = next {
                        start_key = next_key;
                    } else {
                        break;
                    }
                }
                Ok(results)
            }
            StorageBackend::Sql(sql) => {
                let like_pattern = format!("{}%", prefix);
                match sql {
                    SqlStorageBackend::Postgres(pool) => {
                        let rows: Vec<(String, String)> = sqlx::query_as(
                            "SELECT key, value FROM metadata_kv WHERE key LIKE $1 ORDER BY key",
                        )
                        .bind(like_pattern)
                        .fetch_all(pool.as_ref())
                        .await
                        .map_err(|e| {
                            MetadataError::Storage(format!("Postgres scan failed: {}", e))
                        })?;
                        Ok(rows)
                    }
                    SqlStorageBackend::Sqlite(pool) => {
                        let rows: Vec<(String, String)> = sqlx::query_as(
                            "SELECT key, value FROM metadata_kv WHERE key LIKE ?1 ORDER BY key",
                        )
                        .bind(like_pattern)
                        .fetch_all(pool.as_ref())
                        .await
                        .map_err(|e| {
                            MetadataError::Storage(format!("SQLite scan failed: {}", e))
                        })?;
                        Ok(rows)
                    }
                }
            }
            StorageBackend::InMemory(map) => {
                let mut results = Vec::new();
                for entry in map.iter() {
                    if entry.key().starts_with(prefix) {
                        results.push((entry.key().clone(), entry.value().clone()));
                    }
                }
                Ok(results)
            }
        }
    }

    // =========================================================================
    // Key builders
    // =========================================================================
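    // Layout sketch (values are JSON blobs; IDs shown are hypothetical):
    //
    //     /fiberlb/loadbalancers/{org}/{project}/{lb_id} -> LoadBalancer
    //     /fiberlb/lb_ids/{lb_id}                        -> primary key above
    //     /fiberlb/listeners/{lb_id}/{listener_id}       -> Listener
    //     /fiberlb/backends/{pool_id}/{backend_id}       -> Backend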
    fn lb_key(org_id: &str, project_id: &str, lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/loadbalancers/{}/{}/{}", org_id, project_id, lb_id)
    }

    fn lb_id_key(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/lb_ids/{}", lb_id)
    }

    fn listener_key(lb_id: &LoadBalancerId, listener_id: &ListenerId) -> String {
        format!("/fiberlb/listeners/{}/{}", lb_id, listener_id)
    }

    fn listener_id_key(listener_id: &ListenerId) -> String {
        format!("/fiberlb/listener_ids/{}", listener_id)
    }

    fn listener_prefix(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/listeners/{}/", lb_id)
    }

    fn pool_key(lb_id: &LoadBalancerId, pool_id: &PoolId) -> String {
        format!("/fiberlb/pools/{}/{}", lb_id, pool_id)
    }

    fn pool_id_key(pool_id: &PoolId) -> String {
        format!("/fiberlb/pool_ids/{}", pool_id)
    }

    fn pool_prefix(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/pools/{}/", lb_id)
    }

    fn backend_key(pool_id: &PoolId, backend_id: &BackendId) -> String {
        format!("/fiberlb/backends/{}/{}", pool_id, backend_id)
    }

    fn backend_id_key(backend_id: &BackendId) -> String {
        format!("/fiberlb/backend_ids/{}", backend_id)
    }

    fn backend_prefix(pool_id: &PoolId) -> String {
        format!("/fiberlb/backends/{}/", pool_id)
    }

    fn health_check_key(pool_id: &PoolId, hc_id: &HealthCheckId) -> String {
        format!("/fiberlb/healthchecks/{}/{}", pool_id, hc_id)
    }

    fn health_check_prefix(pool_id: &PoolId) -> String {
        format!("/fiberlb/healthchecks/{}/", pool_id)
    }

    fn l7_policy_key(listener_id: &ListenerId, policy_id: &L7PolicyId) -> String {
        format!("/fiberlb/l7policies/{}/{}", listener_id, policy_id)
    }

    fn l7_policy_prefix(listener_id: &ListenerId) -> String {
        format!("/fiberlb/l7policies/{}/", listener_id)
    }

    fn l7_rule_key(policy_id: &L7PolicyId, rule_id: &L7RuleId) -> String {
        format!("/fiberlb/l7rules/{}/{}", policy_id, rule_id)
    }

    fn l7_rule_prefix(policy_id: &L7PolicyId) -> String {
        format!("/fiberlb/l7rules/{}/", policy_id)
    }

    fn certificate_key(lb_id: &LoadBalancerId, cert_id: &CertificateId) -> String {
        format!("/fiberlb/certificates/{}/{}", lb_id, cert_id)
    }

    fn certificate_prefix(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/certificates/{}/", lb_id)
    }

    // =========================================================================
    // LoadBalancer operations
    // =========================================================================

    /// Save load balancer metadata
    pub async fn save_lb(&self, lb: &LoadBalancer) -> Result<()> {
        let key = Self::lb_key(&lb.org_id, &lb.project_id, &lb.id);
        let value = serde_json::to_string(lb)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize LB: {}", e)))?;
        self.put(&key, &value).await?;
        // Also save LB ID mapping
        let id_key = Self::lb_id_key(&lb.id);
        self.put(&id_key, &key).await?;
        Ok(())
    }

    /// Load load balancer by org/project/id
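    ///
    /// # Example (sketch; org/project values are illustrative)
    ///
    /// ```ignore
    /// if let Some(lb) = store.load_lb("acme", "prod", &lb_id).await? {
    ///     println!("found {}", lb.name);
    /// }
    /// ```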
    pub async fn load_lb(
        &self,
        org_id: &str,
        project_id: &str,
        lb_id: &LoadBalancerId,
    ) -> Result<Option<LoadBalancer>> {
        let key = Self::lb_key(org_id, project_id, lb_id);
        if let Some(value) = self.get(&key).await? {
            let lb: LoadBalancer = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize LB: {}", e))
            })?;
            Ok(Some(lb))
        } else {
            Ok(None)
        }
    }

    /// Load load balancer by ID
    pub async fn load_lb_by_id(&self, lb_id: &LoadBalancerId) -> Result<Option<LoadBalancer>> {
        let id_key = Self::lb_id_key(lb_id);
        if let Some(lb_key) = self.get(&id_key).await? {
            if let Some(value) = self.get(&lb_key).await? {
                let lb: LoadBalancer = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize LB: {}", e))
                })?;
                Ok(Some(lb))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// List load balancers for a tenant
    pub async fn list_lbs(
        &self,
        org_id: &str,
        project_id: Option<&str>,
    ) -> Result<Vec<LoadBalancer>> {
        let prefix = if let Some(project_id) = project_id {
            format!("/fiberlb/loadbalancers/{}/{}/", org_id, project_id)
        } else {
            format!("/fiberlb/loadbalancers/{}/", org_id)
        };
        let items = self.get_prefix(&prefix).await?;
        let mut lbs = Vec::new();
        for (_, value) in items {
            if let Ok(lb) = serde_json::from_str::<LoadBalancer>(&value) {
                lbs.push(lb);
            }
        }
        // Sort by name for consistent ordering
        lbs.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(lbs)
    }

    /// List all load balancers (admin scan)
    pub async fn list_all_lbs(&self) -> Result<Vec<LoadBalancer>> {
        let items = self.get_prefix("/fiberlb/loadbalancers/").await?;
        let mut lbs = Vec::new();
        for (_key, value) in items {
            if let Ok(lb) = serde_json::from_str::<LoadBalancer>(&value) {
                lbs.push(lb);
            }
        }
        lbs.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(lbs)
    }

    /// Delete load balancer
    pub async fn delete_lb(&self, lb: &LoadBalancer) -> Result<()> {
        let key = Self::lb_key(&lb.org_id, &lb.project_id, &lb.id);
        let id_key = Self::lb_id_key(&lb.id);
        self.delete_key(&key).await?;
        self.delete_key(&id_key).await?;
        Ok(())
    }

    // =========================================================================
    // Listener operations
    // =========================================================================

    /// Save listener
    pub async fn save_listener(&self, listener: &Listener) -> Result<()> {
        let key = Self::listener_key(&listener.loadbalancer_id, &listener.id);
        let value = serde_json::to_string(listener).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize listener: {}", e))
        })?;
        self.put(&key, &value).await?;
        self.put(&Self::listener_id_key(&listener.id), &key).await
    }

    /// Load listener
    pub async fn load_listener(
        &self,
        lb_id: &LoadBalancerId,
        listener_id: &ListenerId,
    ) -> Result<Option<Listener>> {
        let key = Self::listener_key(lb_id, listener_id);
        if let Some(value) = self.get(&key).await? {
            let listener: Listener = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize listener: {}", e))
            })?;
            Ok(Some(listener))
        } else {
            Ok(None)
        }
    }

    /// Load listener by ID using the global ID index.
    pub async fn load_listener_by_id(&self, listener_id: &ListenerId) -> Result<Option<Listener>> {
        let id_key = Self::listener_id_key(listener_id);
        if let Some(listener_key) = self.get(&id_key).await? {
            if let Some(value) = self.get(&listener_key).await? {
                let listener: Listener = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize listener: {}", e))
                })?;
                Ok(Some(listener))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }
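    // Note on the *_by_id lookups (here and for pools/backends below): they go
    // through a two-step indirection. /fiberlb/<kind>_ids/{id} stores the full
    // primary key, which is then fetched again; a dangling index entry simply
    // resolves to Ok(None).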
    /// List listeners for a load balancer
    pub async fn list_listeners(&self, lb_id: &LoadBalancerId) -> Result<Vec<Listener>> {
        let prefix = Self::listener_prefix(lb_id);
        let items = self.get_prefix(&prefix).await?;
        let mut listeners = Vec::new();
        for (_, value) in items {
            if let Ok(listener) = serde_json::from_str::<Listener>(&value) {
                listeners.push(listener);
            }
        }
        // Sort by port for consistent ordering
        listeners.sort_by(|a, b| a.port.cmp(&b.port));
        Ok(listeners)
    }

    /// Delete listener
    pub async fn delete_listener(&self, listener: &Listener) -> Result<()> {
        let key = Self::listener_key(&listener.loadbalancer_id, &listener.id);
        self.delete_key(&key).await?;
        self.delete_key(&Self::listener_id_key(&listener.id)).await
    }

    /// Delete all listeners for a load balancer
    pub async fn delete_lb_listeners(&self, lb_id: &LoadBalancerId) -> Result<()> {
        let listeners = self.list_listeners(lb_id).await?;
        for listener in listeners {
            self.delete_listener(&listener).await?;
        }
        Ok(())
    }

    // =========================================================================
    // Pool operations
    // =========================================================================

    /// Save pool
    pub async fn save_pool(&self, pool: &Pool) -> Result<()> {
        let key = Self::pool_key(&pool.loadbalancer_id, &pool.id);
        let value = serde_json::to_string(pool).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize pool: {}", e))
        })?;
        self.put(&key, &value).await?;
        self.put(&Self::pool_id_key(&pool.id), &key).await
    }

    /// Load pool
    pub async fn load_pool(
        &self,
        lb_id: &LoadBalancerId,
        pool_id: &PoolId,
    ) -> Result<Option<Pool>> {
        let key = Self::pool_key(lb_id, pool_id);
        if let Some(value) = self.get(&key).await? {
            let pool: Pool = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize pool: {}", e))
            })?;
            Ok(Some(pool))
        } else {
            Ok(None)
        }
    }

    /// Load pool by ID using the global ID index.
    pub async fn load_pool_by_id(&self, pool_id: &PoolId) -> Result<Option<Pool>> {
        let id_key = Self::pool_id_key(pool_id);
        if let Some(pool_key) = self.get(&id_key).await? {
            if let Some(value) = self.get(&pool_key).await? {
                let pool: Pool = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize pool: {}", e))
                })?;
                Ok(Some(pool))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// List pools for a load balancer
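    ///
    /// # Example (sketch)
    ///
    /// ```ignore
    /// for pool in store.list_pools(&lb_id).await? {
    ///     println!("pool: {}", pool.name);
    /// }
    /// ```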
    pub async fn list_pools(&self, lb_id: &LoadBalancerId) -> Result<Vec<Pool>> {
        let prefix = Self::pool_prefix(lb_id);
        let items = self.get_prefix(&prefix).await?;
        let mut pools = Vec::new();
        for (_, value) in items {
            if let Ok(pool) = serde_json::from_str::<Pool>(&value) {
                pools.push(pool);
            }
        }
        // Sort by name for consistent ordering
        pools.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(pools)
    }

    /// Delete pool
    pub async fn delete_pool(&self, pool: &Pool) -> Result<()> {
        let key = Self::pool_key(&pool.loadbalancer_id, &pool.id);
        self.delete_key(&key).await?;
        self.delete_key(&Self::pool_id_key(&pool.id)).await
    }

    /// Delete all pools for a load balancer
    pub async fn delete_lb_pools(&self, lb_id: &LoadBalancerId) -> Result<()> {
        let pools = self.list_pools(lb_id).await?;
        for pool in pools {
            // Delete backends first
            self.delete_pool_backends(&pool.id).await?;
            self.delete_pool(&pool).await?;
        }
        Ok(())
    }

    // =========================================================================
    // Backend operations
    // =========================================================================

    /// Save backend
    pub async fn save_backend(&self, backend: &Backend) -> Result<()> {
        let key = Self::backend_key(&backend.pool_id, &backend.id);
        let value = serde_json::to_string(backend).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize backend: {}", e))
        })?;
        self.put(&key, &value).await?;
        self.put(&Self::backend_id_key(&backend.id), &key).await
    }

    /// Load backend
    pub async fn load_backend(
        &self,
        pool_id: &PoolId,
        backend_id: &BackendId,
    ) -> Result<Option<Backend>> {
        let key = Self::backend_key(pool_id, backend_id);
        if let Some(value) = self.get(&key).await? {
            let backend: Backend = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize backend: {}", e))
            })?;
            Ok(Some(backend))
        } else {
            Ok(None)
        }
    }

    /// Load backend by ID using the global ID index.
    pub async fn load_backend_by_id(&self, backend_id: &BackendId) -> Result<Option<Backend>> {
        let id_key = Self::backend_id_key(backend_id);
        if let Some(backend_key) = self.get(&id_key).await? {
            if let Some(value) = self.get(&backend_key).await? {
                let backend: Backend = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize backend: {}", e))
                })?;
                Ok(Some(backend))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// List backends for a pool
    pub async fn list_backends(&self, pool_id: &PoolId) -> Result<Vec<Backend>> {
        let prefix = Self::backend_prefix(pool_id);
        let items = self.get_prefix(&prefix).await?;
        let mut backends = Vec::new();
        for (_, value) in items {
            if let Ok(backend) = serde_json::from_str::<Backend>(&value) {
                backends.push(backend);
            }
        }
        // Sort by name for consistent ordering
        backends.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(backends)
    }

    /// Delete backend
    pub async fn delete_backend(&self, backend: &Backend) -> Result<()> {
        let key = Self::backend_key(&backend.pool_id, &backend.id);
        self.delete_key(&key).await?;
        self.delete_key(&Self::backend_id_key(&backend.id)).await
    }

    /// Update backend health status
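    ///
    /// # Example (sketch; assumes `BackendStatus` exposes a variant such as `Healthy`)
    ///
    /// ```ignore
    /// store
    ///     .update_backend_health(&pool_id, &backend_id, BackendStatus::Healthy)
    ///     .await?;
    /// ```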
    pub async fn update_backend_health(
        &self,
        pool_id: &PoolId,
        backend_id: &BackendId,
        status: BackendStatus,
    ) -> Result<()> {
        let mut backend = self
            .load_backend(pool_id, backend_id)
            .await?
            .ok_or_else(|| MetadataError::NotFound(format!("backend {} not found", backend_id)))?;
        backend.status = status;
        backend.updated_at = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        self.save_backend(&backend).await
    }

    /// Delete all backends for a pool
    pub async fn delete_pool_backends(&self, pool_id: &PoolId) -> Result<()> {
        let backends = self.list_backends(pool_id).await?;
        for backend in backends {
            self.delete_backend(&backend).await?;
        }
        Ok(())
    }

    // =========================================================================
    // HealthCheck operations
    // =========================================================================

    /// Save health check
    pub async fn save_health_check(&self, hc: &HealthCheck) -> Result<()> {
        let key = Self::health_check_key(&hc.pool_id, &hc.id);
        let value = serde_json::to_string(hc).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize health check: {}", e))
        })?;
        self.put(&key, &value).await
    }

    /// Load health check
    pub async fn load_health_check(
        &self,
        pool_id: &PoolId,
        hc_id: &HealthCheckId,
    ) -> Result<Option<HealthCheck>> {
        let key = Self::health_check_key(pool_id, hc_id);
        if let Some(value) = self.get(&key).await? {
            let hc: HealthCheck = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize health check: {}", e))
            })?;
            Ok(Some(hc))
        } else {
            Ok(None)
        }
    }

    /// List health checks for a pool
    pub async fn list_health_checks(&self, pool_id: &PoolId) -> Result<Vec<HealthCheck>> {
        let prefix = Self::health_check_prefix(pool_id);
        let items = self.get_prefix(&prefix).await?;
        let mut checks = Vec::new();
        for (_, value) in items {
            if let Ok(hc) = serde_json::from_str::<HealthCheck>(&value) {
                checks.push(hc);
            }
        }
        // Sort by name for consistent ordering
        checks.sort_by(|a, b| a.name.cmp(&b.name));
        Ok(checks)
    }

    /// Delete health check
    pub async fn delete_health_check(&self, hc: &HealthCheck) -> Result<()> {
        let key = Self::health_check_key(&hc.pool_id, &hc.id);
        self.delete_key(&key).await
    }

    /// Delete all health checks for a pool
    pub async fn delete_pool_health_checks(&self, pool_id: &PoolId) -> Result<()> {
        let checks = self.list_health_checks(pool_id).await?;
        for hc in checks {
            self.delete_health_check(&hc).await?;
        }
        Ok(())
    }

    // =========================================================================
    // L7 Policy operations
    // =========================================================================

    /// Save L7 policy metadata
    pub async fn save_l7_policy(&self, policy: &L7Policy) -> Result<()> {
        let key = Self::l7_policy_key(&policy.listener_id, &policy.id);
        let value = serde_json::to_string(policy).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize L7Policy: {}", e))
        })?;
        self.put(&key, &value).await
    }

    /// Load L7 policy by listener_id and policy_id
    pub async fn load_l7_policy(
        &self,
        listener_id: &ListenerId,
        policy_id: &L7PolicyId,
    ) -> Result<Option<L7Policy>> {
        let key = Self::l7_policy_key(listener_id, policy_id);
        match self.get(&key).await? {
            Some(value) => {
                let policy = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize L7Policy: {}", e))
                })?;
                Ok(Some(policy))
            }
            None => Ok(None),
        }
    }
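    // Except for find_listener_by_id (which uses the global ID index), the
    // find_*_by_id helpers below fall back to full prefix scans. They are O(n)
    // in the number of stored objects and intended for low-volume admin
    // lookups rather than the data path.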
    /// Find listener by ID (delegates to the global ID index)
    pub async fn find_listener_by_id(&self, listener_id: &ListenerId) -> Result<Option<Listener>> {
        self.load_listener_by_id(listener_id).await
    }

    /// Find L7 policy by policy_id only (scans all listeners)
    pub async fn find_l7_policy_by_id(&self, policy_id: &L7PolicyId) -> Result<Option<L7Policy>> {
        let prefix = "/fiberlb/l7policies/";
        let items = self.get_prefix(prefix).await?;
        for (_key, value) in items {
            let policy: L7Policy = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize L7Policy: {}", e))
            })?;
            if policy.id == *policy_id {
                return Ok(Some(policy));
            }
        }
        Ok(None)
    }

    /// List all L7 policies for a listener
    pub async fn list_l7_policies(&self, listener_id: &ListenerId) -> Result<Vec<L7Policy>> {
        let prefix = Self::l7_policy_prefix(listener_id);
        let items = self.get_prefix(&prefix).await?;
        let mut policies = Vec::new();
        for (_key, value) in items {
            let policy: L7Policy = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize L7Policy: {}", e))
            })?;
            policies.push(policy);
        }
        // Sort by position (lower = higher priority)
        policies.sort_by_key(|p| p.position);
        Ok(policies)
    }

    /// Delete L7 policy
    pub async fn delete_l7_policy(&self, policy: &L7Policy) -> Result<()> {
        // Delete all rules for this policy first
        self.delete_policy_rules(&policy.id).await?;
        let key = Self::l7_policy_key(&policy.listener_id, &policy.id);
        self.delete_key(&key).await
    }

    /// Delete all L7 policies for a listener
    pub async fn delete_listener_policies(&self, listener_id: &ListenerId) -> Result<()> {
        let policies = self.list_l7_policies(listener_id).await?;
        for policy in policies {
            self.delete_l7_policy(&policy).await?;
        }
        Ok(())
    }

    // =========================================================================
    // L7 Rule operations
    // =========================================================================

    /// Save L7 rule metadata
    pub async fn save_l7_rule(&self, rule: &L7Rule) -> Result<()> {
        let key = Self::l7_rule_key(&rule.policy_id, &rule.id);
        let value = serde_json::to_string(rule).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize L7Rule: {}", e))
        })?;
        self.put(&key, &value).await
    }

    /// Load L7 rule by policy_id and rule_id
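    ///
    /// # Example (sketch)
    ///
    /// ```ignore
    /// if let Some(rule) = store.load_l7_rule(&policy_id, &rule_id).await? {
    ///     // inspect the rule
    /// }
    /// ```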
    pub async fn load_l7_rule(
        &self,
        policy_id: &L7PolicyId,
        rule_id: &L7RuleId,
    ) -> Result<Option<L7Rule>> {
        let key = Self::l7_rule_key(policy_id, rule_id);
        match self.get(&key).await? {
            Some(value) => {
                let rule = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!("Failed to deserialize L7Rule: {}", e))
                })?;
                Ok(Some(rule))
            }
            None => Ok(None),
        }
    }

    /// Find L7 rule by rule_id only (scans all policies)
    pub async fn find_l7_rule_by_id(&self, rule_id: &L7RuleId) -> Result<Option<L7Rule>> {
        let prefix = "/fiberlb/l7rules/";
        let items = self.get_prefix(prefix).await?;
        for (_key, value) in items {
            let rule: L7Rule = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize L7Rule: {}", e))
            })?;
            if rule.id == *rule_id {
                return Ok(Some(rule));
            }
        }
        Ok(None)
    }

    /// List all L7 rules for a policy
    pub async fn list_l7_rules(&self, policy_id: &L7PolicyId) -> Result<Vec<L7Rule>> {
        let prefix = Self::l7_rule_prefix(policy_id);
        let items = self.get_prefix(&prefix).await?;
        let mut rules = Vec::new();
        for (_key, value) in items {
            let rule: L7Rule = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize L7Rule: {}", e))
            })?;
            rules.push(rule);
        }
        Ok(rules)
    }

    /// Delete L7 rule
    pub async fn delete_l7_rule(&self, rule: &L7Rule) -> Result<()> {
        let key = Self::l7_rule_key(&rule.policy_id, &rule.id);
        self.delete_key(&key).await
    }

    /// Delete all L7 rules for a policy
    pub async fn delete_policy_rules(&self, policy_id: &L7PolicyId) -> Result<()> {
        let rules = self.list_l7_rules(policy_id).await?;
        for rule in rules {
            self.delete_l7_rule(&rule).await?;
        }
        Ok(())
    }

    // =========================================================================
    // Certificate operations
    // =========================================================================

    /// Save certificate metadata
    pub async fn save_certificate(&self, cert: &Certificate) -> Result<()> {
        let key = Self::certificate_key(&cert.loadbalancer_id, &cert.id);
        let value = serde_json::to_string(cert).map_err(|e| {
            MetadataError::Serialization(format!("Failed to serialize Certificate: {}", e))
        })?;
        self.put(&key, &value).await
    }

    /// Load certificate by lb_id and cert_id
    pub async fn load_certificate(
        &self,
        lb_id: &LoadBalancerId,
        cert_id: &CertificateId,
    ) -> Result<Option<Certificate>> {
        let key = Self::certificate_key(lb_id, cert_id);
        match self.get(&key).await? {
            Some(value) => {
                let cert = serde_json::from_str(&value).map_err(|e| {
                    MetadataError::Serialization(format!(
                        "Failed to deserialize Certificate: {}",
                        e
                    ))
                })?;
                Ok(Some(cert))
            }
            None => Ok(None),
        }
    }
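    // Certificate lookups mirror the other object kinds: direct key access
    // when the owning load balancer is known, prefix scan otherwise, e.g.:
    //
    //     let cert = store.load_certificate(&lb_id, &cert_id).await?;  // direct
    //     let cert = store.find_certificate_by_id(&cert_id).await?;    // O(n) scan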
    /// Find certificate by cert_id only (scans all load balancers)
    pub async fn find_certificate_by_id(
        &self,
        cert_id: &CertificateId,
    ) -> Result<Option<Certificate>> {
        let prefix = "/fiberlb/certificates/";
        let items = self.get_prefix(prefix).await?;
        for (_key, value) in items {
            let cert: Certificate = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize Certificate: {}", e))
            })?;
            if cert.id == *cert_id {
                return Ok(Some(cert));
            }
        }
        Ok(None)
    }

    /// List all certificates for a load balancer
    pub async fn list_certificates(&self, lb_id: &LoadBalancerId) -> Result<Vec<Certificate>> {
        let prefix = Self::certificate_prefix(lb_id);
        let items = self.get_prefix(&prefix).await?;
        let mut certs = Vec::new();
        for (_key, value) in items {
            let cert: Certificate = serde_json::from_str(&value).map_err(|e| {
                MetadataError::Serialization(format!("Failed to deserialize Certificate: {}", e))
            })?;
            certs.push(cert);
        }
        Ok(certs)
    }

    /// Delete certificate
    pub async fn delete_certificate(&self, cert: &Certificate) -> Result<()> {
        let key = Self::certificate_key(&cert.loadbalancer_id, &cert.id);
        self.delete_key(&key).await
    }

    /// Delete all certificates for a load balancer
    pub async fn delete_lb_certificates(&self, lb_id: &LoadBalancerId) -> Result<()> {
        let certs = self.list_certificates(lb_id).await?;
        for cert in certs {
            self.delete_certificate(&cert).await?;
        }
        Ok(())
    }

    // =========================================================================
    // VIP Allocation (MVP: Simple sequential allocation from TEST-NET-3)
    // =========================================================================

    /// Allocate a new VIP from the pool (203.0.113.0/24 - RFC 5737 TEST-NET-3)
    ///
    /// For MVP, uses simple sequential allocation starting from 203.0.113.1.
    /// In production, this would be replaced with a proper IPAM system.
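    ///
    /// # Example (sketch)
    ///
    /// ```ignore
    /// let vip = store.allocate_vip().await?; // "203.0.113.1" on a fresh store
    /// ```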
    pub async fn allocate_vip(&self) -> Result<String> {
        const VIP_COUNTER_KEY: &str = "fiberlb/vip_counter";
        const VIP_BASE: &str = "203.0.113";

        // Read current counter (default to 0 if not exists)
        let counter: u32 = match self.get(VIP_COUNTER_KEY).await? {
            Some(value) => value.parse().unwrap_or(0),
            None => 0,
        };

        // Increment counter
        let next_counter = counter + 1;

        // Check bounds (203.0.113.1 - 203.0.113.254)
        if next_counter > 254 {
            return Err(MetadataError::InvalidArgument(
                "VIP pool exhausted (203.0.113.0/24)".to_string(),
            ));
        }

        // Store incremented counter
        self.put(VIP_COUNTER_KEY, &next_counter.to_string()).await?;

        // Return allocated VIP
        Ok(format!("{}.{}", VIP_BASE, next_counter))
    }
}

fn normalize_transport_addr(endpoint: &str) -> String {
    endpoint
        .trim()
        .trim_start_matches("http://")
        .trim_start_matches("https://")
        .trim_end_matches('/')
        .to_string()
}

#[cfg(test)]
mod tests {
    use super::*;
    use fiberlb_types::{ListenerProtocol, PoolAlgorithm, PoolProtocol};

    #[tokio::test]
    async fn test_lb_crud() {
        let store = LbMetadataStore::new_in_memory();
        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");

        // Save
        store.save_lb(&lb).await.unwrap();

        // Load by org/project/id
        let loaded = store
            .load_lb("test-org", "test-project", &lb.id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, lb.id);
        assert_eq!(loaded.name, "test-lb");

        // Load by ID
        let loaded_by_id = store.load_lb_by_id(&lb.id).await.unwrap().unwrap();
        assert_eq!(loaded_by_id.name, "test-lb");

        // List
        let lbs = store.list_lbs("test-org", None).await.unwrap();
        assert_eq!(lbs.len(), 1);

        // Delete
        store.delete_lb(&lb).await.unwrap();
        let deleted = store
            .load_lb("test-org", "test-project", &lb.id)
            .await
            .unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_listener_crud() {
        let store = LbMetadataStore::new_in_memory();
        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let listener = Listener::new("http-frontend", lb.id, ListenerProtocol::Http, 80);

        // Save
        store.save_listener(&listener).await.unwrap();

        // Load
        let loaded = store
            .load_listener(&lb.id, &listener.id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, listener.id);
        assert_eq!(loaded.port, 80);

        // List
        let listeners = store.list_listeners(&lb.id).await.unwrap();
        assert_eq!(listeners.len(), 1);

        // Delete
        store.delete_listener(&listener).await.unwrap();
        let deleted = store.load_listener(&lb.id, &listener.id).await.unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_pool_crud() {
        let store = LbMetadataStore::new_in_memory();
        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let pool = Pool::new(
            "web-pool",
            lb.id,
            PoolAlgorithm::RoundRobin,
            PoolProtocol::Http,
        );

        // Save
        store.save_pool(&pool).await.unwrap();

        // Load
        let loaded = store.load_pool(&lb.id, &pool.id).await.unwrap().unwrap();
        assert_eq!(loaded.id, pool.id);
        assert_eq!(loaded.name, "web-pool");

        // List
        let pools = store.list_pools(&lb.id).await.unwrap();
        assert_eq!(pools.len(), 1);

        // Delete
        store.delete_pool(&pool).await.unwrap();
        let deleted = store.load_pool(&lb.id, &pool.id).await.unwrap();
        assert!(deleted.is_none());
    }
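    // Additional coverage sketches: these exercise the in-memory backend only
    // and reuse the constructors already used in the tests above.

    #[tokio::test]
    async fn test_vip_allocation_is_sequential() {
        let store = LbMetadataStore::new_in_memory();
        // The counter starts at 0, so the first two VIPs are .1 and .2
        assert_eq!(store.allocate_vip().await.unwrap(), "203.0.113.1");
        assert_eq!(store.allocate_vip().await.unwrap(), "203.0.113.2");
    }

    #[tokio::test]
    async fn test_load_by_id_indexes() {
        let store = LbMetadataStore::new_in_memory();
        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();
        let listener = Listener::new("http", lb.id, ListenerProtocol::Http, 80);
        store.save_listener(&listener).await.unwrap();
        let pool = Pool::new(
            "web-pool",
            lb.id,
            PoolAlgorithm::RoundRobin,
            PoolProtocol::Http,
        );
        store.save_pool(&pool).await.unwrap();
        let backend = Backend::new("web-1", pool.id, "10.0.0.1", 8080);
        store.save_backend(&backend).await.unwrap();

        // Each save_* also writes a global ID index entry
        assert!(store
            .load_listener_by_id(&listener.id)
            .await
            .unwrap()
            .is_some());
        assert!(store.load_pool_by_id(&pool.id).await.unwrap().is_some());
        assert!(store
            .load_backend_by_id(&backend.id)
            .await
            .unwrap()
            .is_some());
    }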
"10.0.0.1"); assert_eq!(loaded.port, 8080); // List let backends = store.list_backends(&pool.id).await.unwrap(); assert_eq!(backends.len(), 1); // Delete store.delete_backend(&backend).await.unwrap(); let deleted = store.load_backend(&pool.id, &backend.id).await.unwrap(); assert!(deleted.is_none()); } #[tokio::test] async fn test_cascade_delete() { let store = LbMetadataStore::new_in_memory(); // Create LB with listener, pool, and backends let lb = LoadBalancer::new("test-lb", "test-org", "test-project"); store.save_lb(&lb).await.unwrap(); let listener = Listener::new("http", lb.id, ListenerProtocol::Http, 80); store.save_listener(&listener).await.unwrap(); let pool = Pool::new( "web-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Http, ); store.save_pool(&pool).await.unwrap(); let backend1 = Backend::new("web-1", pool.id, "10.0.0.1", 8080); let backend2 = Backend::new("web-2", pool.id, "10.0.0.2", 8080); store.save_backend(&backend1).await.unwrap(); store.save_backend(&backend2).await.unwrap(); // Verify all exist assert_eq!(store.list_listeners(&lb.id).await.unwrap().len(), 1); assert_eq!(store.list_pools(&lb.id).await.unwrap().len(), 1); assert_eq!(store.list_backends(&pool.id).await.unwrap().len(), 2); // Delete pool backends store.delete_pool_backends(&pool.id).await.unwrap(); assert_eq!(store.list_backends(&pool.id).await.unwrap().len(), 0); // Delete LB pools (which deletes backends too) store.delete_lb_pools(&lb.id).await.unwrap(); assert_eq!(store.list_pools(&lb.id).await.unwrap().len(), 0); // Delete LB listeners store.delete_lb_listeners(&lb.id).await.unwrap(); assert_eq!(store.list_listeners(&lb.id).await.unwrap().len(), 0); } }