//! LB Metadata storage using ChainFire, FlareDB, or in-memory store
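//!
//! Illustrative usage (a minimal sketch; the crate path in the `use` line is an
//! assumption, and `LoadBalancer::new(name, org_id, project_id)` follows the
//! constructor used in the tests at the bottom of this file):
//!
//! ```ignore
//! use fiberlb_metadata::LbMetadataStore; // assumed crate path
//! use fiberlb_types::LoadBalancer;
//!
//! async fn example() -> Result<(), Box<dyn std::error::Error>> {
//!     // In-memory backend for local testing; `new()` / `new_flaredb()` connect to real stores.
//!     let store = LbMetadataStore::new_in_memory();
//!
//!     let lb = LoadBalancer::new("edge-lb", "org-1", "project-1");
//!     store.save_lb(&lb).await?;
//!
//!     // Look it up either by org/project/id or by ID alone.
//!     assert!(store.load_lb_by_id(&lb.id).await?.is_some());
//!     Ok(())
//! }
//! ```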

use chainfire_client::Client as ChainFireClient;
use dashmap::DashMap;
use flaredb_client::RdbClient;
use fiberlb_types::{
    Backend, BackendId, BackendStatus, HealthCheck, HealthCheckId, Listener, ListenerId,
    LoadBalancer, LoadBalancerId, Pool, PoolId,
};
use std::sync::Arc;
use tokio::sync::Mutex;

/// Result type for metadata operations
pub type Result<T> = std::result::Result<T, MetadataError>;

/// Metadata operation error
#[derive(Debug, thiserror::Error)]
pub enum MetadataError {
    #[error("Storage error: {0}")]
    Storage(String),
    #[error("Serialization error: {0}")]
    Serialization(String),
    #[error("Not found: {0}")]
    NotFound(String),
    #[error("Invalid argument: {0}")]
    InvalidArgument(String),
}

/// Storage backend enum
enum StorageBackend {
    ChainFire(Arc<Mutex<ChainFireClient>>),
    FlareDB(Arc<Mutex<RdbClient>>),
    InMemory(Arc<DashMap<String, String>>),
}

/// LB Metadata store for load balancers, listeners, pools, and backends
pub struct LbMetadataStore {
    backend: StorageBackend,
}

impl LbMetadataStore {
    /// Create a new metadata store with ChainFire backend
    pub async fn new(endpoint: Option<String>) -> Result<Self> {
        let endpoint = endpoint.unwrap_or_else(|| {
            std::env::var("FIBERLB_CHAINFIRE_ENDPOINT")
                .unwrap_or_else(|_| "http://127.0.0.1:50051".to_string())
        });

        let client = ChainFireClient::connect(&endpoint)
            .await
            .map_err(|e| MetadataError::Storage(format!("Failed to connect to ChainFire: {}", e)))?;

        Ok(Self {
            backend: StorageBackend::ChainFire(Arc::new(Mutex::new(client))),
        })
    }

    /// Create a new metadata store with FlareDB backend
    pub async fn new_flaredb(endpoint: Option<String>) -> Result<Self> {
        let endpoint = endpoint.unwrap_or_else(|| {
            std::env::var("FIBERLB_FLAREDB_ENDPOINT")
                .unwrap_or_else(|_| "127.0.0.1:2379".to_string())
        });

        // FlareDB client needs both server and PD address.
        // For now, we use the same endpoint for both (PD address).
        let client = RdbClient::connect_with_pd_namespace(
            endpoint.clone(),
            endpoint.clone(),
            "fiberlb",
        )
        .await
        .map_err(|e| MetadataError::Storage(format!("Failed to connect to FlareDB: {}", e)))?;

        Ok(Self {
            backend: StorageBackend::FlareDB(Arc::new(Mutex::new(client))),
        })
    }

    /// Create a new in-memory metadata store (for testing)
    pub fn new_in_memory() -> Self {
        Self {
            backend: StorageBackend::InMemory(Arc::new(DashMap::new())),
        }
    }

    // =========================================================================
    // Internal storage helpers
    // =========================================================================

    async fn put(&self, key: &str, value: &str) -> Result<()> {
        match &self.backend {
            StorageBackend::ChainFire(client) => {
                let mut c = client.lock().await;
                c.put_str(key, value)
                    .await
                    .map_err(|e| MetadataError::Storage(format!("ChainFire put failed: {}", e)))?;
            }
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                c.raw_put(key.as_bytes().to_vec(), value.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB put failed: {}", e)))?;
            }
            StorageBackend::InMemory(map) => {
                map.insert(key.to_string(), value.to_string());
            }
        }
        Ok(())
    }

    async fn get(&self, key: &str) -> Result<Option<String>> {
        match &self.backend {
            StorageBackend::ChainFire(client) => {
                let mut c = client.lock().await;
                c.get_str(key)
                    .await
                    .map_err(|e| MetadataError::Storage(format!("ChainFire get failed: {}", e)))
            }
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                let result = c
                    .raw_get(key.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB get failed: {}", e)))?;
                Ok(result.map(|bytes| String::from_utf8_lossy(&bytes).to_string()))
            }
            StorageBackend::InMemory(map) => Ok(map.get(key).map(|v| v.value().clone())),
        }
    }

    async fn delete_key(&self, key: &str) -> Result<()> {
        match &self.backend {
            StorageBackend::ChainFire(client) => {
                let mut c = client.lock().await;
                c.delete(key)
                    .await
                    .map_err(|e| MetadataError::Storage(format!("ChainFire delete failed: {}", e)))?;
            }
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;
                c.raw_delete(key.as_bytes().to_vec())
                    .await
                    .map_err(|e| MetadataError::Storage(format!("FlareDB delete failed: {}", e)))?;
            }
            StorageBackend::InMemory(map) => {
                map.remove(key);
            }
        }
        Ok(())
    }

    async fn get_prefix(&self, prefix: &str) -> Result<Vec<(String, String)>> {
        match &self.backend {
            StorageBackend::ChainFire(client) => {
                let mut c = client.lock().await;
                let items = c
                    .get_prefix(prefix)
                    .await
                    .map_err(|e| MetadataError::Storage(format!("ChainFire get_prefix failed: {}", e)))?;
                Ok(items
                    .into_iter()
                    .map(|(k, v)| {
                        (
                            String::from_utf8_lossy(&k).to_string(),
                            String::from_utf8_lossy(&v).to_string(),
                        )
                    })
                    .collect())
            }
            StorageBackend::FlareDB(client) => {
                let mut c = client.lock().await;

                // Calculate end_key by incrementing the last byte of prefix
                let mut end_key = prefix.as_bytes().to_vec();
                if let Some(last) = end_key.last_mut() {
                    if *last == 0xff {
                        // If last byte is 0xff, append a 0x00
                        end_key.push(0x00);
                    } else {
                        *last += 1;
                    }
                } else {
                    // Empty prefix - scan everything
                    end_key.push(0xff);
                }
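                // e.g. a prefix of "/fiberlb/pools/<lb-id>/" yields the end key
                // "/fiberlb/pools/<lb-id>0", since b'/' + 1 == b'0'.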

                let mut results = Vec::new();
                let mut start_key = prefix.as_bytes().to_vec();

                // Pagination loop to get all results
                loop {
                    let (keys, values, next) = c
                        .raw_scan(
                            start_key.clone(),
                            end_key.clone(),
                            1000, // Batch size
                        )
                        .await
                        .map_err(|e| MetadataError::Storage(format!("FlareDB scan failed: {}", e)))?;

                    // Convert and add results
                    for (k, v) in keys.iter().zip(values.iter()) {
                        results.push((
                            String::from_utf8_lossy(k).to_string(),
                            String::from_utf8_lossy(v).to_string(),
                        ));
                    }

                    // Check if there are more results
                    if let Some(next_key) = next {
                        start_key = next_key;
                    } else {
                        break;
                    }
                }

                Ok(results)
            }
            StorageBackend::InMemory(map) => {
                let mut results = Vec::new();
                for entry in map.iter() {
                    if entry.key().starts_with(prefix) {
                        results.push((entry.key().clone(), entry.value().clone()));
                    }
                }
                Ok(results)
            }
        }
    }

    // =========================================================================
    // Key builders
    // =========================================================================

    fn lb_key(org_id: &str, project_id: &str, lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/loadbalancers/{}/{}/{}", org_id, project_id, lb_id)
    }

    fn lb_id_key(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/lb_ids/{}", lb_id)
    }

    fn listener_key(lb_id: &LoadBalancerId, listener_id: &ListenerId) -> String {
        format!("/fiberlb/listeners/{}/{}", lb_id, listener_id)
    }

    fn listener_prefix(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/listeners/{}/", lb_id)
    }

    fn pool_key(lb_id: &LoadBalancerId, pool_id: &PoolId) -> String {
        format!("/fiberlb/pools/{}/{}", lb_id, pool_id)
    }

    fn pool_prefix(lb_id: &LoadBalancerId) -> String {
        format!("/fiberlb/pools/{}/", lb_id)
    }

    fn backend_key(pool_id: &PoolId, backend_id: &BackendId) -> String {
        format!("/fiberlb/backends/{}/{}", pool_id, backend_id)
    }

    fn backend_prefix(pool_id: &PoolId) -> String {
        format!("/fiberlb/backends/{}/", pool_id)
    }

    fn health_check_key(pool_id: &PoolId, hc_id: &HealthCheckId) -> String {
        format!("/fiberlb/healthchecks/{}/{}", pool_id, hc_id)
    }

    fn health_check_prefix(pool_id: &PoolId) -> String {
        format!("/fiberlb/healthchecks/{}/", pool_id)
    }
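
    // Resulting key layout (values are serde_json strings, except the lb_ids
    // entry, which stores the full load balancer key as an ID -> path index):
    //   /fiberlb/loadbalancers/{org_id}/{project_id}/{lb_id}
    //   /fiberlb/lb_ids/{lb_id}
    //   /fiberlb/listeners/{lb_id}/{listener_id}
    //   /fiberlb/pools/{lb_id}/{pool_id}
    //   /fiberlb/backends/{pool_id}/{backend_id}
    //   /fiberlb/healthchecks/{pool_id}/{hc_id}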

    // =========================================================================
    // LoadBalancer operations
    // =========================================================================

    /// Save load balancer metadata
    pub async fn save_lb(&self, lb: &LoadBalancer) -> Result<()> {
        let key = Self::lb_key(&lb.org_id, &lb.project_id, &lb.id);
        let value = serde_json::to_string(lb)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize LB: {}", e)))?;

        self.put(&key, &value).await?;

        // Also save LB ID mapping
        let id_key = Self::lb_id_key(&lb.id);
        self.put(&id_key, &key).await?;

        Ok(())
    }

    /// Load load balancer by org/project/id
    pub async fn load_lb(
        &self,
        org_id: &str,
        project_id: &str,
        lb_id: &LoadBalancerId,
    ) -> Result<Option<LoadBalancer>> {
        let key = Self::lb_key(org_id, project_id, lb_id);

        if let Some(value) = self.get(&key).await? {
            let lb: LoadBalancer = serde_json::from_str(&value)
                .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize LB: {}", e)))?;
            Ok(Some(lb))
        } else {
            Ok(None)
        }
    }

    /// Load load balancer by ID
    pub async fn load_lb_by_id(&self, lb_id: &LoadBalancerId) -> Result<Option<LoadBalancer>> {
        let id_key = Self::lb_id_key(lb_id);

        if let Some(lb_key) = self.get(&id_key).await? {
            if let Some(value) = self.get(&lb_key).await? {
                let lb: LoadBalancer = serde_json::from_str(&value)
                    .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize LB: {}", e)))?;
                Ok(Some(lb))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }

    /// List load balancers for a tenant
    pub async fn list_lbs(&self, org_id: &str, project_id: Option<&str>) -> Result<Vec<LoadBalancer>> {
        let prefix = if let Some(project_id) = project_id {
            format!("/fiberlb/loadbalancers/{}/{}/", org_id, project_id)
        } else {
            format!("/fiberlb/loadbalancers/{}/", org_id)
        };

        let items = self.get_prefix(&prefix).await?;

        let mut lbs = Vec::new();
        for (_, value) in items {
            if let Ok(lb) = serde_json::from_str::<LoadBalancer>(&value) {
                lbs.push(lb);
            }
        }

        // Sort by name for consistent ordering
        lbs.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(lbs)
    }

    /// Delete load balancer
    pub async fn delete_lb(&self, lb: &LoadBalancer) -> Result<()> {
        let key = Self::lb_key(&lb.org_id, &lb.project_id, &lb.id);
        let id_key = Self::lb_id_key(&lb.id);

        self.delete_key(&key).await?;
        self.delete_key(&id_key).await?;

        Ok(())
    }

    // =========================================================================
    // Listener operations
    // =========================================================================

    /// Save listener
    pub async fn save_listener(&self, listener: &Listener) -> Result<()> {
        let key = Self::listener_key(&listener.loadbalancer_id, &listener.id);
        let value = serde_json::to_string(listener)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize listener: {}", e)))?;

        self.put(&key, &value).await
    }

    /// Load listener
    pub async fn load_listener(
        &self,
        lb_id: &LoadBalancerId,
        listener_id: &ListenerId,
    ) -> Result<Option<Listener>> {
        let key = Self::listener_key(lb_id, listener_id);

        if let Some(value) = self.get(&key).await? {
            let listener: Listener = serde_json::from_str(&value)
                .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize listener: {}", e)))?;
            Ok(Some(listener))
        } else {
            Ok(None)
        }
    }

    /// List listeners for a load balancer
    pub async fn list_listeners(&self, lb_id: &LoadBalancerId) -> Result<Vec<Listener>> {
        let prefix = Self::listener_prefix(lb_id);
        let items = self.get_prefix(&prefix).await?;

        let mut listeners = Vec::new();
        for (_, value) in items {
            if let Ok(listener) = serde_json::from_str::<Listener>(&value) {
                listeners.push(listener);
            }
        }

        // Sort by port for consistent ordering
        listeners.sort_by(|a, b| a.port.cmp(&b.port));

        Ok(listeners)
    }

    /// Delete listener
    pub async fn delete_listener(&self, listener: &Listener) -> Result<()> {
        let key = Self::listener_key(&listener.loadbalancer_id, &listener.id);
        self.delete_key(&key).await
    }

    /// Delete all listeners for a load balancer
    pub async fn delete_lb_listeners(&self, lb_id: &LoadBalancerId) -> Result<()> {
        let listeners = self.list_listeners(lb_id).await?;
        for listener in listeners {
            self.delete_listener(&listener).await?;
        }
        Ok(())
    }

    // =========================================================================
    // Pool operations
    // =========================================================================

    /// Save pool
    pub async fn save_pool(&self, pool: &Pool) -> Result<()> {
        let key = Self::pool_key(&pool.loadbalancer_id, &pool.id);
        let value = serde_json::to_string(pool)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize pool: {}", e)))?;

        self.put(&key, &value).await
    }

    /// Load pool
    pub async fn load_pool(&self, lb_id: &LoadBalancerId, pool_id: &PoolId) -> Result<Option<Pool>> {
        let key = Self::pool_key(lb_id, pool_id);

        if let Some(value) = self.get(&key).await? {
            let pool: Pool = serde_json::from_str(&value)
                .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize pool: {}", e)))?;
            Ok(Some(pool))
        } else {
            Ok(None)
        }
    }

    /// List pools for a load balancer
    pub async fn list_pools(&self, lb_id: &LoadBalancerId) -> Result<Vec<Pool>> {
        let prefix = Self::pool_prefix(lb_id);
        let items = self.get_prefix(&prefix).await?;

        let mut pools = Vec::new();
        for (_, value) in items {
            if let Ok(pool) = serde_json::from_str::<Pool>(&value) {
                pools.push(pool);
            }
        }

        // Sort by name for consistent ordering
        pools.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(pools)
    }

    /// Delete pool
    pub async fn delete_pool(&self, pool: &Pool) -> Result<()> {
        let key = Self::pool_key(&pool.loadbalancer_id, &pool.id);
        self.delete_key(&key).await
    }

    /// Delete all pools for a load balancer
    pub async fn delete_lb_pools(&self, lb_id: &LoadBalancerId) -> Result<()> {
        let pools = self.list_pools(lb_id).await?;
        for pool in pools {
            // Delete backends first
            self.delete_pool_backends(&pool.id).await?;
            self.delete_pool(&pool).await?;
        }
        Ok(())
    }

    // =========================================================================
    // Backend operations
    // =========================================================================

    /// Save backend
    pub async fn save_backend(&self, backend: &Backend) -> Result<()> {
        let key = Self::backend_key(&backend.pool_id, &backend.id);
        let value = serde_json::to_string(backend)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize backend: {}", e)))?;

        self.put(&key, &value).await
    }

    /// Load backend
    pub async fn load_backend(
        &self,
        pool_id: &PoolId,
        backend_id: &BackendId,
    ) -> Result<Option<Backend>> {
        let key = Self::backend_key(pool_id, backend_id);

        if let Some(value) = self.get(&key).await? {
            let backend: Backend = serde_json::from_str(&value)
                .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize backend: {}", e)))?;
            Ok(Some(backend))
        } else {
            Ok(None)
        }
    }

    /// List backends for a pool
    pub async fn list_backends(&self, pool_id: &PoolId) -> Result<Vec<Backend>> {
        let prefix = Self::backend_prefix(pool_id);
        let items = self.get_prefix(&prefix).await?;

        let mut backends = Vec::new();
        for (_, value) in items {
            if let Ok(backend) = serde_json::from_str::<Backend>(&value) {
                backends.push(backend);
            }
        }

        // Sort by name for consistent ordering
        backends.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(backends)
    }

    /// Delete backend
    pub async fn delete_backend(&self, backend: &Backend) -> Result<()> {
        let key = Self::backend_key(&backend.pool_id, &backend.id);
        self.delete_key(&key).await
    }

    /// Update backend health status
    pub async fn update_backend_health(
        &self,
        pool_id: &PoolId,
        backend_id: &BackendId,
        status: BackendStatus,
    ) -> Result<()> {
        let mut backend = self
            .load_backend(pool_id, backend_id)
            .await?
            .ok_or_else(|| MetadataError::NotFound(format!("backend {} not found", backend_id)))?;

        backend.status = status;
        backend.updated_at = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();

        self.save_backend(&backend).await
    }

    /// Delete all backends for a pool
    pub async fn delete_pool_backends(&self, pool_id: &PoolId) -> Result<()> {
        let backends = self.list_backends(pool_id).await?;
        for backend in backends {
            self.delete_backend(&backend).await?;
        }
        Ok(())
    }

    // =========================================================================
    // HealthCheck operations
    // =========================================================================

    /// Save health check
    pub async fn save_health_check(&self, hc: &HealthCheck) -> Result<()> {
        let key = Self::health_check_key(&hc.pool_id, &hc.id);
        let value = serde_json::to_string(hc)
            .map_err(|e| MetadataError::Serialization(format!("Failed to serialize health check: {}", e)))?;

        self.put(&key, &value).await
    }

    /// Load health check
    pub async fn load_health_check(
        &self,
        pool_id: &PoolId,
        hc_id: &HealthCheckId,
    ) -> Result<Option<HealthCheck>> {
        let key = Self::health_check_key(pool_id, hc_id);

        if let Some(value) = self.get(&key).await? {
            let hc: HealthCheck = serde_json::from_str(&value)
                .map_err(|e| MetadataError::Serialization(format!("Failed to deserialize health check: {}", e)))?;
            Ok(Some(hc))
        } else {
            Ok(None)
        }
    }

    /// List health checks for a pool
    pub async fn list_health_checks(&self, pool_id: &PoolId) -> Result<Vec<HealthCheck>> {
        let prefix = Self::health_check_prefix(pool_id);
        let items = self.get_prefix(&prefix).await?;

        let mut checks = Vec::new();
        for (_, value) in items {
            if let Ok(hc) = serde_json::from_str::<HealthCheck>(&value) {
                checks.push(hc);
            }
        }

        // Sort by name for consistent ordering
        checks.sort_by(|a, b| a.name.cmp(&b.name));

        Ok(checks)
    }

    /// Delete health check
    pub async fn delete_health_check(&self, hc: &HealthCheck) -> Result<()> {
        let key = Self::health_check_key(&hc.pool_id, &hc.id);
        self.delete_key(&key).await
    }

    /// Delete all health checks for a pool
    pub async fn delete_pool_health_checks(&self, pool_id: &PoolId) -> Result<()> {
        let checks = self.list_health_checks(pool_id).await?;
        for hc in checks {
            self.delete_health_check(&hc).await?;
        }
        Ok(())
    }

    // =========================================================================
    // VIP Allocation (MVP: Simple sequential allocation from TEST-NET-3)
    // =========================================================================

    /// Allocate a new VIP from the pool (203.0.113.0/24 - RFC 5737 TEST-NET-3)
    ///
    /// For MVP, uses simple sequential allocation starting from 203.0.113.1.
    /// In production, this would be replaced with a proper IPAM system.
    pub async fn allocate_vip(&self) -> Result<String> {
        const VIP_COUNTER_KEY: &str = "fiberlb/vip_counter";
        const VIP_BASE: &str = "203.0.113";

        // Read current counter (default to 0 if not exists)
        let counter: u32 = match self.get(VIP_COUNTER_KEY).await? {
            Some(value) => value.parse().unwrap_or(0),
            None => 0,
        };

        // Increment counter
        let next_counter = counter + 1;

        // Check bounds (203.0.113.1 - 203.0.113.254)
        if next_counter > 254 {
            return Err(MetadataError::InvalidArgument(
                "VIP pool exhausted (203.0.113.0/24)".to_string(),
            ));
        }

        // Store incremented counter
        self.put(VIP_COUNTER_KEY, &next_counter.to_string()).await?;

        // Return allocated VIP
        Ok(format!("{}.{}", VIP_BASE, next_counter))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use fiberlb_types::{ListenerProtocol, PoolAlgorithm, PoolProtocol};

    #[tokio::test]
    async fn test_lb_crud() {
        let store = LbMetadataStore::new_in_memory();

        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");

        // Save
        store.save_lb(&lb).await.unwrap();

        // Load by org/project/id
        let loaded = store
            .load_lb("test-org", "test-project", &lb.id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, lb.id);
        assert_eq!(loaded.name, "test-lb");

        // Load by ID
        let loaded_by_id = store.load_lb_by_id(&lb.id).await.unwrap().unwrap();
        assert_eq!(loaded_by_id.name, "test-lb");

        // List
        let lbs = store.list_lbs("test-org", None).await.unwrap();
        assert_eq!(lbs.len(), 1);

        // Delete
        store.delete_lb(&lb).await.unwrap();
        let deleted = store
            .load_lb("test-org", "test-project", &lb.id)
            .await
            .unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_listener_crud() {
        let store = LbMetadataStore::new_in_memory();

        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let listener = Listener::new("http-frontend", lb.id, ListenerProtocol::Http, 80);

        // Save
        store.save_listener(&listener).await.unwrap();

        // Load
        let loaded = store
            .load_listener(&lb.id, &listener.id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, listener.id);
        assert_eq!(loaded.port, 80);

        // List
        let listeners = store.list_listeners(&lb.id).await.unwrap();
        assert_eq!(listeners.len(), 1);

        // Delete
        store.delete_listener(&listener).await.unwrap();
        let deleted = store.load_listener(&lb.id, &listener.id).await.unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_pool_crud() {
        let store = LbMetadataStore::new_in_memory();

        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let pool = Pool::new("web-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Http);

        // Save
        store.save_pool(&pool).await.unwrap();

        // Load
        let loaded = store.load_pool(&lb.id, &pool.id).await.unwrap().unwrap();
        assert_eq!(loaded.id, pool.id);
        assert_eq!(loaded.name, "web-pool");

        // List
        let pools = store.list_pools(&lb.id).await.unwrap();
        assert_eq!(pools.len(), 1);

        // Delete
        store.delete_pool(&pool).await.unwrap();
        let deleted = store.load_pool(&lb.id, &pool.id).await.unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_backend_crud() {
        let store = LbMetadataStore::new_in_memory();

        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let pool = Pool::new("web-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Http);
        store.save_pool(&pool).await.unwrap();

        let backend = Backend::new("web-1", pool.id, "10.0.0.1", 8080);

        // Save
        store.save_backend(&backend).await.unwrap();

        // Load
        let loaded = store
            .load_backend(&pool.id, &backend.id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(loaded.id, backend.id);
        assert_eq!(loaded.address, "10.0.0.1");
        assert_eq!(loaded.port, 8080);

        // List
        let backends = store.list_backends(&pool.id).await.unwrap();
        assert_eq!(backends.len(), 1);

        // Delete
        store.delete_backend(&backend).await.unwrap();
        let deleted = store.load_backend(&pool.id, &backend.id).await.unwrap();
        assert!(deleted.is_none());
    }

    #[tokio::test]
    async fn test_cascade_delete() {
        let store = LbMetadataStore::new_in_memory();

        // Create LB with listener, pool, and backends
        let lb = LoadBalancer::new("test-lb", "test-org", "test-project");
        store.save_lb(&lb).await.unwrap();

        let listener = Listener::new("http", lb.id, ListenerProtocol::Http, 80);
        store.save_listener(&listener).await.unwrap();

        let pool = Pool::new("web-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Http);
        store.save_pool(&pool).await.unwrap();

        let backend1 = Backend::new("web-1", pool.id, "10.0.0.1", 8080);
        let backend2 = Backend::new("web-2", pool.id, "10.0.0.2", 8080);
        store.save_backend(&backend1).await.unwrap();
        store.save_backend(&backend2).await.unwrap();

        // Verify all exist
        assert_eq!(store.list_listeners(&lb.id).await.unwrap().len(), 1);
        assert_eq!(store.list_pools(&lb.id).await.unwrap().len(), 1);
        assert_eq!(store.list_backends(&pool.id).await.unwrap().len(), 2);

        // Delete pool backends
        store.delete_pool_backends(&pool.id).await.unwrap();
        assert_eq!(store.list_backends(&pool.id).await.unwrap().len(), 0);

        // Delete LB pools (which deletes backends too)
        store.delete_lb_pools(&lb.id).await.unwrap();
        assert_eq!(store.list_pools(&lb.id).await.unwrap().len(), 0);

        // Delete LB listeners
        store.delete_lb_listeners(&lb.id).await.unwrap();
        assert_eq!(store.list_listeners(&lb.id).await.unwrap().len(), 0);
    }
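
    // Illustrative test for the MVP VIP allocator: a minimal sketch that relies
    // only on the sequential-counter behaviour of `allocate_vip` above.
    #[tokio::test]
    async fn test_vip_allocation() {
        let store = LbMetadataStore::new_in_memory();

        // The counter starts at 0, so the first two allocations are .1 and .2
        let vip1 = store.allocate_vip().await.unwrap();
        let vip2 = store.allocate_vip().await.unwrap();
        assert_eq!(vip1, "203.0.113.1");
        assert_eq!(vip2, "203.0.113.2");
    }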
}