photoncloud-monorepo/fiberlb/crates/fiberlb-server/tests/integration.rs

//! FiberLB Integration Tests

use std::sync::Arc;
use std::time::Duration;

use fiberlb_server::{DataPlane, HealthChecker, LbMetadataStore};
use fiberlb_types::{
    Backend, BackendStatus, HealthCheck, HealthCheckType, Listener, ListenerProtocol,
    LoadBalancer, Pool, PoolAlgorithm, PoolProtocol,
};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::watch;

/// Test 1: Full lifecycle CRUD for all entities
#[tokio::test]
async fn test_lb_lifecycle() {
    // 1. Create in-memory metadata store
    let metadata = Arc::new(LbMetadataStore::new_in_memory());

    // 2. Create LoadBalancer
    let lb = LoadBalancer::new("test-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.expect("save lb failed");

    // Verify LB retrieval
    let loaded_lb = metadata
        .load_lb("org-1", "proj-1", &lb.id)
        .await
        .expect("load lb failed")
        .expect("lb not found");
    assert_eq!(loaded_lb.name, "test-lb");
    assert_eq!(loaded_lb.org_id, "org-1");

    // 3. Create Listener
    let listener = Listener::new("http-listener", lb.id, ListenerProtocol::Tcp, 8080);
    metadata
        .save_listener(&listener)
        .await
        .expect("save listener failed");

    // Verify Listener retrieval
    let listeners = metadata
        .list_listeners(&lb.id)
        .await
        .expect("list listeners failed");
    assert_eq!(listeners.len(), 1);
    assert_eq!(listeners[0].port, 8080);

    // 4. Create Pool
    let pool = Pool::new("backend-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.expect("save pool failed");

    // Verify Pool retrieval
    let pools = metadata.list_pools(&lb.id).await.expect("list pools failed");
    assert_eq!(pools.len(), 1);
    assert_eq!(pools[0].algorithm, PoolAlgorithm::RoundRobin);

    // 5. Create Backend
    let backend = Backend::new("backend-1", pool.id, "127.0.0.1", 9000);
    metadata
        .save_backend(&backend)
        .await
        .expect("save backend failed");

    // Verify Backend retrieval
    let backends = metadata
        .list_backends(&pool.id)
        .await
        .expect("list backends failed");
    assert_eq!(backends.len(), 1);
    assert_eq!(backends[0].address, "127.0.0.1");
    assert_eq!(backends[0].port, 9000);

    // 6. Test listing LBs with filters
    let all_lbs = metadata
        .list_lbs("org-1", None)
        .await
        .expect("list lbs failed");
    assert_eq!(all_lbs.len(), 1);
    let project_lbs = metadata
        .list_lbs("org-1", Some("proj-1"))
        .await
        .expect("list project lbs failed");
    assert_eq!(project_lbs.len(), 1);

    // 7. Test delete - clean up sub-resources first (cascade delete lives in the service layer)
    metadata
        .delete_backend(&backend)
        .await
        .expect("delete backend failed");
    metadata
        .delete_pool(&pool)
        .await
        .expect("delete pool failed");
    metadata
        .delete_listener(&listener)
        .await
        .expect("delete listener failed");
    metadata.delete_lb(&lb).await.expect("delete lb failed");

    // Verify everything is cleaned up
    let remaining_lbs = metadata
        .list_lbs("org-1", Some("proj-1"))
        .await
        .expect("list failed");
    assert!(remaining_lbs.is_empty());
}
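
// A minimal follow-on sketch, not part of the original suite: the lifecycle
// test above shows that `load_lb` returns `Result<Option<_>>`, so presumably a
// deleted LB loads as `Ok(None)` rather than an error. The test name
// `test_load_after_delete` is our own.
#[tokio::test]
async fn test_load_after_delete() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());
    let lb = LoadBalancer::new("ephemeral-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.expect("save lb failed");
    metadata.delete_lb(&lb).await.expect("delete lb failed");

    // Loading a deleted LB should yield Ok(None), not an Err
    let loaded = metadata
        .load_lb("org-1", "proj-1", &lb.id)
        .await
        .expect("load lb failed");
    assert!(loaded.is_none());
}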

/// Test 2: Multiple backends with round-robin simulation
#[tokio::test]
async fn test_multi_backend_pool() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());

    // Create LB and Pool
    let lb = LoadBalancer::new("multi-backend-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("multi-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();

    // Create multiple backends
    for i in 1..=3 {
        let backend = Backend::new(
            &format!("backend-{}", i),
            pool.id,
            "127.0.0.1",
            9000 + i as u16,
        );
        metadata.save_backend(&backend).await.unwrap();
    }

    // Verify all backends
    let backends = metadata.list_backends(&pool.id).await.unwrap();
    assert_eq!(backends.len(), 3);

    // Verify different ports
    let ports: Vec<u16> = backends.iter().map(|b| b.port).collect();
    assert!(ports.contains(&9001));
    assert!(ports.contains(&9002));
    assert!(ports.contains(&9003));
}
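
// A hedged sketch of what RoundRobin distribution over such a pool could look
// like. The modulo-counter selection below is our own simulation and not
// necessarily how `DataPlane` implements `PoolAlgorithm::RoundRobin`; it only
// illustrates the even-distribution property round-robin is meant to provide.
#[tokio::test]
async fn test_round_robin_simulation() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());
    let lb = LoadBalancer::new("rr-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("rr-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();
    for i in 1..=3 {
        let backend = Backend::new(&format!("rr-backend-{}", i), pool.id, "127.0.0.1", 9100 + i as u16);
        metadata.save_backend(&backend).await.unwrap();
    }
    let backends = metadata.list_backends(&pool.id).await.unwrap();

    // Simulated round-robin: cycle through the list with a wrapping counter.
    // Six picks over three backends should hit each backend exactly twice.
    let mut hits = vec![0usize; backends.len()];
    for counter in 0..6_usize {
        hits[counter % backends.len()] += 1;
    }
    assert!(hits.iter().all(|&h| h == 2));
}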

/// Test 3: Health check status update
#[tokio::test]
async fn test_health_check_status_update() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());

    // Create LB, Pool, Backend
    let lb = LoadBalancer::new("health-test-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("health-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();

    // Create a backend on an unreachable address (192.0.2.0/24 is TEST-NET-1,
    // reserved for documentation, so connects there are expected to fail)
    let mut backend = Backend::new("unhealthy-backend", pool.id, "192.0.2.1", 59999);
    backend.status = BackendStatus::Unknown;
    metadata.save_backend(&backend).await.unwrap();

    // Construct a health checker with a short timeout; its run loop is never
    // started in this test, so the shutdown channel is only a formality
    let (shutdown_tx, shutdown_rx) = watch::channel(false);
    let checker = HealthChecker::new(metadata.clone(), Duration::from_secs(60), shutdown_rx)
        .with_timeout(Duration::from_millis(100));

    // Simulate a single check cycle by probing the backend directly
    let check_result = checker_tcp_check(&backend).await;
    assert!(check_result.is_err(), "Should fail on unreachable address");

    // Update status via metadata, as the checker would after a failed probe
    metadata
        .update_backend_health(&pool.id, &backend.id, BackendStatus::Offline)
        .await
        .unwrap();

    // Verify status was updated
    let loaded = metadata
        .load_backend(&pool.id, &backend.id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(loaded.status, BackendStatus::Offline);

    // Cleanup
    drop(checker);
    let _ = shutdown_tx.send(true);
}
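
// A symmetric sketch, added by us: the same update path should also bring a
// backend back Online, e.g. once a probe starts succeeding again. It reuses
// only `update_backend_health` and `load_backend` as exercised above.
#[tokio::test]
async fn test_backend_recovery_status_update() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());
    let lb = LoadBalancer::new("recovery-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("recovery-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();
    let mut backend = Backend::new("recovery-backend", pool.id, "127.0.0.1", 9200);
    backend.status = BackendStatus::Offline;
    metadata.save_backend(&backend).await.unwrap();

    // Flip Offline -> Online and confirm the stored status follows
    metadata
        .update_backend_health(&pool.id, &backend.id, BackendStatus::Online)
        .await
        .unwrap();
    let loaded = metadata
        .load_backend(&pool.id, &backend.id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(loaded.status, BackendStatus::Online);
}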

/// Helper: simulate the checker's TCP probe (connect with a 100 ms timeout)
async fn checker_tcp_check(backend: &Backend) -> Result<(), String> {
    let addr = format!("{}:{}", backend.address, backend.port);
    tokio::time::timeout(Duration::from_millis(100), TcpStream::connect(&addr))
        .await
        .map_err(|_| "timeout".to_string())?
        .map_err(|e| e.to_string())?;
    Ok(())
}
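
// A positive-path companion for the helper, added by us: the same probe should
// succeed against a live local listener. Binding port 0 lets the OS pick a
// free port; the LB/pool objects exist only to construct a `Backend` and are
// never persisted.
#[tokio::test]
async fn test_checker_tcp_check_reachable() {
    let listener = TcpListener::bind("127.0.0.1:0").await.expect("bind failed");
    let addr = listener.local_addr().expect("local_addr failed");

    // Keep the listener alive in the background so the connect can complete
    let server = tokio::spawn(async move {
        let _ = listener.accept().await;
    });

    let lb = LoadBalancer::new("probe-lb", "org-1", "proj-1");
    let pool = Pool::new("probe-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    let backend = Backend::new("probe-backend", pool.id, "127.0.0.1", addr.port());

    assert!(checker_tcp_check(&backend).await.is_ok());
    server.abort();
}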

/// Test 4: DataPlane TCP proxy (requires real TCP server)
#[tokio::test]
#[ignore = "Integration test requiring TCP server"]
async fn test_dataplane_tcp_proxy() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());

    // 1. Start mock backend server
    let backend_port = 19000u16;
    let backend_server = tokio::spawn(async move {
        let listener = TcpListener::bind(format!("127.0.0.1:{}", backend_port))
            .await
            .expect("backend bind failed");
        let (mut socket, _) = listener.accept().await.expect("accept failed");
        // Echo back with prefix
        let mut buf = [0u8; 1024];
        let n = socket.read(&mut buf).await.expect("read failed");
        socket
            .write_all(format!("ECHO: {}", String::from_utf8_lossy(&buf[..n])).as_bytes())
            .await
            .expect("write failed");
    });
    // Give server time to start
    tokio::time::sleep(Duration::from_millis(50)).await;

    // 2. Setup LB config
    let lb = LoadBalancer::new("proxy-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("proxy-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();
    let mut backend = Backend::new("proxy-backend", pool.id, "127.0.0.1", backend_port);
    backend.status = BackendStatus::Online;
    metadata.save_backend(&backend).await.unwrap();
    let mut listener = Listener::new("proxy-listener", lb.id, ListenerProtocol::Tcp, 18080);
    listener.default_pool_id = Some(pool.id);
    metadata.save_listener(&listener).await.unwrap();

    // 3. Start DataPlane
    let dataplane = DataPlane::new(metadata.clone());
    dataplane
        .start_listener(listener.id)
        .await
        .expect("start listener failed");
    // Give listener time to start
    tokio::time::sleep(Duration::from_millis(50)).await;

    // 4. Connect to VIP and test proxy
    let mut client = TcpStream::connect("127.0.0.1:18080")
        .await
        .expect("client connect failed");
    client.write_all(b"HELLO").await.expect("client write failed");
    let mut response = vec![0u8; 128];
    let n = client.read(&mut response).await.expect("client read failed");
    let response_str = String::from_utf8_lossy(&response[..n]);
    assert!(
        response_str.contains("ECHO: HELLO"),
        "Expected echo response, got: {}",
        response_str
    );

    // 5. Cleanup
    dataplane.stop_listener(&listener.id).await.unwrap();
    backend_server.abort();
}
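
// To run the ignored proxy test above, make sure ports 19000 (mock backend)
// and 18080 (VIP) are free, then select it explicitly:
//
//   cargo test -p fiberlb-server --test integration test_dataplane_tcp_proxy -- --ignored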

/// Test 5: Health check configuration
#[tokio::test]
async fn test_health_check_config() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());

    // Create LB and Pool
    let lb = LoadBalancer::new("hc-config-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("hc-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();

    // Create TCP health check
    let tcp_hc = HealthCheck::new_tcp("tcp-check", pool.id);
    metadata.save_health_check(&tcp_hc).await.unwrap();

    // Verify retrieval (new_tcp defaults to a 30-second interval)
    let hcs = metadata.list_health_checks(&pool.id).await.unwrap();
    assert_eq!(hcs.len(), 1);
    assert_eq!(hcs[0].check_type, HealthCheckType::Tcp);
    assert_eq!(hcs[0].interval_seconds, 30);

    // Create HTTP health check
    let http_hc = HealthCheck::new_http("http-check", pool.id, "/healthz");
    metadata.save_health_check(&http_hc).await.unwrap();
    let hcs = metadata.list_health_checks(&pool.id).await.unwrap();
    assert_eq!(hcs.len(), 2);

    // Find the HTTP check and verify its configured path
    let http = hcs.iter().find(|h| h.check_type == HealthCheckType::Http);
    assert!(http.is_some());
    assert_eq!(
        http.unwrap().http_config.as_ref().unwrap().path,
        "/healthz"
    );
}
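
// A small gap-filler sketch, added by us: it assumes `list_health_checks` on a
// pool with no configured checks returns an empty list, mirroring the
// `remaining_lbs.is_empty()` pattern the lifecycle test uses for `list_lbs`.
#[tokio::test]
async fn test_health_checks_empty_for_new_pool() {
    let metadata = Arc::new(LbMetadataStore::new_in_memory());
    let lb = LoadBalancer::new("empty-hc-lb", "org-1", "proj-1");
    metadata.save_lb(&lb).await.unwrap();
    let pool = Pool::new("empty-hc-pool", lb.id, PoolAlgorithm::RoundRobin, PoolProtocol::Tcp);
    metadata.save_pool(&pool).await.unwrap();

    // No checks were saved, so the listing should come back empty
    let hcs = metadata.list_health_checks(&pool.id).await.unwrap();
    assert!(hcs.is_empty());
}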