//! FiberLB Controller - Manages LoadBalancer service VIP allocation
//!
//! This controller watches for Services with type=LoadBalancer and provisions
//! external VIPs by creating LoadBalancer resources in FiberLB.
use std::sync::Arc;
use std::time::Duration;

use anyhow::Result;
use tokio::time::sleep;
use tracing::{debug, info, warn};

use fiberlb_api::load_balancer_service_client::LoadBalancerServiceClient;
use fiberlb_api::{CreateLoadBalancerRequest, DeleteLoadBalancerRequest};
use k8shost_types::{LoadBalancerIngress, LoadBalancerStatus, ServiceStatus};

use crate::storage::Storage;
/// FiberLB controller for managing LoadBalancer service VIPs
pub struct FiberLbController {
    // Shared store used to list Services and write back their updated status.
    storage: Arc<Storage>,
    // Endpoint handed to `LoadBalancerServiceClient::connect` each cycle
    // (gRPC address of the FiberLB control plane).
    fiberlb_addr: String,
    // Delay between reconcile cycles (set to 10s in `new`).
    interval: Duration,
}
|
|
|
|
impl FiberLbController {
|
|
/// Create a new FiberLB controller
|
|
pub fn new(storage: Arc<Storage>, fiberlb_addr: String) -> Self {
|
|
Self {
|
|
storage,
|
|
fiberlb_addr,
|
|
interval: Duration::from_secs(10), // Check every 10 seconds
|
|
}
|
|
}
|
|
|
|
/// Start the controller loop
|
|
pub async fn run(self: Arc<Self>) {
|
|
info!(
|
|
"FiberLB controller started (FiberLB at {}, {}s interval)",
|
|
self.fiberlb_addr,
|
|
self.interval.as_secs()
|
|
);
|
|
|
|
loop {
|
|
if let Err(e) = self.reconcile_loadbalancers().await {
|
|
warn!("FiberLB controller cycle failed: {}", e);
|
|
}
|
|
|
|
sleep(self.interval).await;
|
|
}
|
|
}
|
|
|
|
/// Reconcile LoadBalancer services across all tenants
|
|
async fn reconcile_loadbalancers(&self) -> Result<()> {
|
|
// For MVP, iterate through known tenants
|
|
// In production, would get active tenants from IAM or FlareDB
|
|
let tenants = vec![("default-org".to_string(), "default-project".to_string())];
|
|
|
|
for (org_id, project_id) in tenants {
|
|
if let Err(e) = self.reconcile_tenant_loadbalancers(&org_id, &project_id).await {
|
|
warn!(
|
|
"Failed to reconcile LoadBalancers for tenant {}/{}: {}",
|
|
org_id, project_id, e
|
|
);
|
|
}
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Reconcile LoadBalancer services for a specific tenant
|
|
async fn reconcile_tenant_loadbalancers(&self, org_id: &str, project_id: &str) -> Result<()> {
|
|
// Get all services for this tenant
|
|
let services = self
|
|
.storage
|
|
.list_services(org_id, project_id, None)
|
|
.await?;
|
|
|
|
// Filter for LoadBalancer services that need provisioning
|
|
let lb_services: Vec<_> = services
|
|
.into_iter()
|
|
.filter(|svc| {
|
|
// Service is a LoadBalancer if:
|
|
// 1. type is "LoadBalancer"
|
|
// 2. status is None OR status.load_balancer is None (not yet provisioned)
|
|
svc.spec.r#type.as_deref() == Some("LoadBalancer")
|
|
&& (svc.status.is_none()
|
|
|| svc.status.as_ref().and_then(|s| s.load_balancer.as_ref()).is_none())
|
|
})
|
|
.collect();
|
|
|
|
if lb_services.is_empty() {
|
|
debug!("No LoadBalancer services to provision for tenant {}/{}", org_id, project_id);
|
|
return Ok(());
|
|
}
|
|
|
|
info!(
|
|
"Found {} LoadBalancer service(s) to provision for tenant {}/{}",
|
|
lb_services.len(),
|
|
org_id,
|
|
project_id
|
|
);
|
|
|
|
// Connect to FiberLB
|
|
let mut fiberlb_client =
|
|
match LoadBalancerServiceClient::connect(self.fiberlb_addr.clone()).await {
|
|
Ok(client) => client,
|
|
Err(e) => {
|
|
warn!("Failed to connect to FiberLB at {}: {}", self.fiberlb_addr, e);
|
|
return Ok(());
|
|
}
|
|
};
|
|
|
|
// Provision each LoadBalancer service
|
|
for mut service in lb_services {
|
|
let namespace = service.metadata.namespace.as_deref().unwrap_or("default");
|
|
let name = &service.metadata.name;
|
|
|
|
info!("Provisioning LoadBalancer for service {}/{}", namespace, name);
|
|
|
|
// Create LoadBalancer in FiberLB
|
|
let lb_name = format!("{}.{}", name, namespace);
|
|
let create_req = CreateLoadBalancerRequest {
|
|
name: lb_name.clone(),
|
|
org_id: org_id.to_string(),
|
|
project_id: project_id.to_string(),
|
|
description: format!("k8s service {}/{}", namespace, name),
|
|
};
|
|
|
|
match fiberlb_client.create_load_balancer(create_req).await {
|
|
Ok(response) => {
|
|
let lb = response.into_inner().loadbalancer;
|
|
if let Some(lb) = lb {
|
|
// vip_address is String in proto (defaults to empty if not set)
|
|
let vip = if lb.vip_address.is_empty() {
|
|
warn!("FiberLB returned LoadBalancer without VIP");
|
|
"0.0.0.0".to_string()
|
|
} else {
|
|
lb.vip_address
|
|
};
|
|
|
|
info!(
|
|
"FiberLB allocated VIP {} for service {}/{}",
|
|
vip, namespace, name
|
|
);
|
|
|
|
// Update service status with VIP
|
|
service.status = Some(ServiceStatus {
|
|
load_balancer: Some(LoadBalancerStatus {
|
|
ingress: vec![LoadBalancerIngress {
|
|
ip: Some(vip),
|
|
hostname: None,
|
|
}],
|
|
}),
|
|
});
|
|
|
|
// Increment resource version
|
|
let current_version = service
|
|
.metadata
|
|
.resource_version
|
|
.as_ref()
|
|
.and_then(|v| v.parse::<u64>().ok())
|
|
.unwrap_or(0);
|
|
service.metadata.resource_version = Some((current_version + 1).to_string());
|
|
|
|
// Store LoadBalancer ID in annotations for cleanup on deletion
|
|
service
|
|
.metadata
|
|
.annotations
|
|
.insert("fiberlb.plasmacloud.io/lb-id".to_string(), lb.id);
|
|
|
|
// Save updated service
|
|
if let Err(e) = self.storage.put_service(&service).await {
|
|
warn!(
|
|
"Failed to update service {}/{} with VIP: {}",
|
|
namespace, name, e
|
|
);
|
|
} else {
|
|
info!(
|
|
"Successfully provisioned VIP {} for service {}/{}",
|
|
service
|
|
.status
|
|
.as_ref()
|
|
.and_then(|s| s.load_balancer.as_ref())
|
|
.and_then(|lb| lb.ingress.first())
|
|
.and_then(|i| i.ip.as_ref())
|
|
.unwrap_or(&"<none>".to_string()),
|
|
namespace,
|
|
name
|
|
);
|
|
}
|
|
} else {
|
|
warn!("FiberLB returned empty LoadBalancer response");
|
|
}
|
|
}
|
|
Err(e) => {
|
|
warn!(
|
|
"Failed to create LoadBalancer in FiberLB for service {}/{}: {}",
|
|
namespace, name, e
|
|
);
|
|
}
|
|
}
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
/// Cleanup LoadBalancer when Service is deleted
|
|
///
|
|
/// This should be called when a Service with type=LoadBalancer is deleted.
|
|
/// For MVP, this is not automatically triggered - would need a deletion watch.
|
|
#[allow(dead_code)]
|
|
async fn cleanup_loadbalancer(&self, lb_id: &str) -> Result<()> {
|
|
let mut fiberlb_client = LoadBalancerServiceClient::connect(self.fiberlb_addr.clone())
|
|
.await?;
|
|
|
|
let delete_req = DeleteLoadBalancerRequest {
|
|
id: lb_id.to_string(),
|
|
};
|
|
|
|
fiberlb_client
|
|
.delete_load_balancer(delete_req)
|
|
.await?;
|
|
|
|
info!("Deleted LoadBalancer {} from FiberLB", lb_id);
|
|
Ok(())
|
|
}
|
|
}
|