photoncloud-monorepo/k8shost/crates/k8shost-server/src/main.rs
centra a7ec7e2158 Add T026 practical test + k8shost to flake + workspace files
- Created T026-practical-test task.yaml for MVP smoke testing
- Added k8shost-server to flake.nix (packages, apps, overlays)
- Staged all workspace directories for nix flake build
- Updated flake.nix shellHook to include k8shost

Resolves: T026.S1 blocker (R8 - nix submodule visibility)
2025-12-09 06:07:50 +09:00

//! k8shost API Server
//!
//! This is the main Kubernetes API server for PlasmaCloud's k8shost component.
//! It provides a subset of the Kubernetes API, aiming for compatibility with
//! kubectl and other k8s tooling, while integrating with PlasmaCloud's
//! infrastructure.
//!
//! Architecture:
//! - gRPC API server implementing k8shost-proto services
//! - RESTful HTTP/JSON API for kubectl compatibility (future)
//! - FlareDB backend for state storage
//! - Integration with IAM for multi-tenant authentication
//! - Scheduler for pod placement on nodes (future)
//! - Controller manager for built-in controllers (future)
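//!
//! Configuration:
//! - Listens on `[::]:6443`
//! - `FLAREDB_PD_ADDR`: FlareDB PD address (default: 127.0.0.1:2379)
//! - `IAM_SERVER_ADDR`: IAM gRPC server address (default: 127.0.0.1:50051)
//!
//! Client example (a hypothetical sketch): assuming `k8shost-proto` generates
//! a `pod_service_client::PodServiceClient` with a `list_pods` RPC and a
//! `ListPodsRequest` message, and that `AuthService` expects an IAM bearer
//! token in the `authorization` metadata key, a call might look like:
//!
//! ```ignore
//! use k8shost_proto::{pod_service_client::PodServiceClient, ListPodsRequest};
//! use tonic::Request;
//!
//! #[tokio::main]
//! async fn main() -> Result<(), Box<dyn std::error::Error>> {
//!     let mut client = PodServiceClient::connect("http://[::1]:6443").await?;
//!     let mut req = Request::new(ListPodsRequest::default());
//!     // Hypothetical auth scheme: bearer token in `authorization` metadata.
//!     req.metadata_mut()
//!         .insert("authorization", "Bearer <token>".parse()?);
//!     let pods = client.list_pods(req).await?.into_inner();
//!     println!("{:?}", pods);
//!     Ok(())
//! }
//! ```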
mod auth;
mod cni;
mod services;
mod storage;

use anyhow::Result;
use auth::AuthService;
use k8shost_proto::{
    deployment_service_server::{DeploymentService, DeploymentServiceServer},
    node_service_server::NodeServiceServer,
    pod_service_server::PodServiceServer,
    service_service_server::ServiceServiceServer,
    *,
};
use services::{node::NodeServiceImpl, pod::PodServiceImpl, service::ServiceServiceImpl};
use std::sync::Arc;
use storage::Storage;
use tonic::{transport::Server, Request, Response, Status};
use tracing::{info, warn};

#[tokio::main]
async fn main() -> Result<()> {
    tracing_subscriber::fmt::init();

    let addr = "[::]:6443".parse()?;
    info!("k8shost API server starting on {}", addr);

    // Initialize FlareDB storage
    let pd_addr =
        std::env::var("FLAREDB_PD_ADDR").unwrap_or_else(|_| "127.0.0.1:2379".to_string());
    info!("Connecting to FlareDB PD at {}", pd_addr);
    let storage = match Storage::new(pd_addr).await {
        Ok(s) => {
            info!("Successfully connected to FlareDB");
            Arc::new(s)
        }
        Err(e) => {
            warn!("Failed to connect to FlareDB: {}", e);
            // Storage is required; refuse to start rather than serve requests
            // that would all fail against a missing backend.
            return Err(anyhow::anyhow!("Failed to connect to FlareDB: {}", e));
        }
    };

    // Initialize IAM authentication service
    let iam_addr =
        std::env::var("IAM_SERVER_ADDR").unwrap_or_else(|_| "127.0.0.1:50051".to_string());
    info!("Connecting to IAM server at {}", iam_addr);
    let auth_service = match AuthService::new(&iam_addr).await {
        Ok(s) => {
            info!("Successfully connected to IAM server");
            Arc::new(s)
        }
        Err(e) => {
            warn!("Failed to connect to IAM server: {}", e);
            // For now, we fail if IAM is unavailable. A more flexible setup
            // might allow unauthenticated operation for development.
            return Err(anyhow::anyhow!("Failed to connect to IAM server: {}", e));
        }
    };

    // Create service implementations with storage
    let pod_service = PodServiceImpl::new(storage.clone());
    let service_service = ServiceServiceImpl::new(storage.clone());
    let node_service = NodeServiceImpl::new(storage.clone());
    let deployment_service = DeploymentServiceImpl::default(); // Still unimplemented
info!("Starting gRPC server with authentication...");
// Build server with authentication layer
// Note: We use separate interceptor closures for each service
Server::builder()
.add_service(
tonic::codegen::InterceptedService::new(
PodServiceServer::new(pod_service),
{
let auth = auth_service.clone();
move |req: Request<()>| -> Result<Request<()>, Status> {
let auth = auth.clone();
let runtime_handle = tokio::runtime::Handle::current();
runtime_handle.block_on(async move {
let tenant_context = auth.authenticate(&req).await?;
let mut req = req;
req.extensions_mut().insert(tenant_context);
Ok::<_, Status>(req)
})
}
},
),
)
.add_service(
tonic::codegen::InterceptedService::new(
ServiceServiceServer::new(service_service),
{
let auth = auth_service.clone();
move |req: Request<()>| -> Result<Request<()>, Status> {
let auth = auth.clone();
let runtime_handle = tokio::runtime::Handle::current();
runtime_handle.block_on(async move {
let tenant_context = auth.authenticate(&req).await?;
let mut req = req;
req.extensions_mut().insert(tenant_context);
Ok::<_, Status>(req)
})
}
},
),
)
.add_service(
tonic::codegen::InterceptedService::new(
NodeServiceServer::new(node_service),
{
let auth = auth_service.clone();
move |req: Request<()>| -> Result<Request<()>, Status> {
let auth = auth.clone();
let runtime_handle = tokio::runtime::Handle::current();
runtime_handle.block_on(async move {
let tenant_context = auth.authenticate(&req).await?;
let mut req = req;
req.extensions_mut().insert(tenant_context);
Ok::<_, Status>(req)
})
}
},
),
)
.add_service(DeploymentServiceServer::new(deployment_service))
.serve(addr)
.await?;
Ok(())
}

// Deployment Service Implementation (placeholder - not part of the MVP)
#[derive(Debug, Default)]
struct DeploymentServiceImpl;

#[tonic::async_trait]
impl DeploymentService for DeploymentServiceImpl {
    async fn create_deployment(
        &self,
        _request: Request<CreateDeploymentRequest>,
    ) -> Result<Response<CreateDeploymentResponse>, Status> {
        Err(Status::unimplemented("create_deployment not yet implemented"))
    }

    async fn get_deployment(
        &self,
        _request: Request<GetDeploymentRequest>,
    ) -> Result<Response<GetDeploymentResponse>, Status> {
        Err(Status::unimplemented("get_deployment not yet implemented"))
    }

    async fn list_deployments(
        &self,
        _request: Request<ListDeploymentsRequest>,
    ) -> Result<Response<ListDeploymentsResponse>, Status> {
        Err(Status::unimplemented("list_deployments not yet implemented"))
    }

    async fn update_deployment(
        &self,
        _request: Request<UpdateDeploymentRequest>,
    ) -> Result<Response<UpdateDeploymentResponse>, Status> {
        Err(Status::unimplemented("update_deployment not yet implemented"))
    }

    async fn delete_deployment(
        &self,
        _request: Request<DeleteDeploymentRequest>,
    ) -> Result<Response<DeleteDeploymentResponse>, Status> {
        Err(Status::unimplemented("delete_deployment not yet implemented"))
    }
}