From 23ec8b5edb3dd658ee7f610299beafde9732aa7e Mon Sep 17 00:00:00 2001
From: centra
Date: Wed, 1 Apr 2026 00:14:40 +0900
Subject: [PATCH] Implement k8shost deployment REST API
---
k8shost/crates/k8shost-server/src/main.rs | 17 +-
k8shost/crates/k8shost-server/src/rest.rs | 702 ++++++++++++++++++++--
nix/modules/k8shost.nix | 7 +
nix/test-cluster/run-cluster.sh | 63 +-
4 files changed, 693 insertions(+), 96 deletions(-)
diff --git a/k8shost/crates/k8shost-server/src/main.rs b/k8shost/crates/k8shost-server/src/main.rs
index a101aa5..0a1bfad 100644
--- a/k8shost/crates/k8shost-server/src/main.rs
+++ b/k8shost/crates/k8shost-server/src/main.rs
@@ -45,6 +45,10 @@ struct Args {
#[arg(long)]
addr: Option<String>,
+ /// Listen address for HTTP REST server (e.g., "127.0.0.1:8085")
+ #[arg(long)]
+ http_addr: Option<String>,
+
/// Log level (e.g., "info", "debug", "trace")
#[arg(long)]
log_level: Option<String>,
@@ -112,7 +116,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.addr
.map(|s| s.parse().unwrap_or(loaded_config.server.addr))
.unwrap_or(loaded_config.server.addr),
- http_addr: loaded_config.server.http_addr,
+ http_addr: args
+ .http_addr
+ .map(|s| s.parse().unwrap_or(loaded_config.server.http_addr))
+ .unwrap_or(loaded_config.server.http_addr),
log_level: args.log_level.unwrap_or(loaded_config.server.log_level),
},
flaredb: config::FlareDbConfig {
@@ -277,7 +284,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
auth_service.clone(),
));
let node_service = Arc::new(NodeServiceImpl::new(storage.clone(), auth_service.clone()));
- let deployment_service = DeploymentServiceImpl::new(storage.clone(), auth_service.clone());
+ let deployment_service = Arc::new(DeploymentServiceImpl::new(
+ storage.clone(),
+ auth_service.clone(),
+ ));
// Start scheduler in background with CreditService integration
let scheduler = Arc::new(scheduler::Scheduler::new_with_credit_service(storage.clone()).await);
@@ -335,7 +345,7 @@ async fn main() -> Result<(), Box> {
make_interceptor(auth_service.clone()),
))
.add_service(tonic::codegen::InterceptedService::new(
- DeploymentServiceServer::new(deployment_service),
+ DeploymentServiceServer::new(deployment_service.as_ref().clone()),
make_interceptor(auth_service.clone()),
))
.serve(config.server.addr);
@@ -343,6 +353,7 @@ async fn main() -> Result<(), Box> {
// HTTP REST API server
let http_addr = config.server.http_addr;
let rest_state = rest::RestApiState {
+ deployment_service: deployment_service.clone(),
pod_service: pod_service.clone(),
service_service: service_service.clone(),
node_service: node_service.clone(),
diff --git a/k8shost/crates/k8shost-server/src/rest.rs b/k8shost/crates/k8shost-server/src/rest.rs
index df997f7..4959100 100644
--- a/k8shost/crates/k8shost-server/src/rest.rs
+++ b/k8shost/crates/k8shost-server/src/rest.rs
@@ -6,6 +6,11 @@
//! - DELETE /api/v1/pods/{namespace}/{name} - Delete pod
//! - GET /api/v1/services - List services
//! - POST /api/v1/services - Create service
+//! - GET /api/v1/deployments - List deployments
+//! - POST /api/v1/deployments - Create deployment
+//! - GET /api/v1/deployments/{namespace}/{name} - Get deployment
+//! - PUT /api/v1/deployments/{namespace}/{name} - Update deployment
+//! - DELETE /api/v1/deployments/{namespace}/{name} - Delete deployment
//! - GET /api/v1/nodes - List nodes
//! - GET /health - Health check
@@ -18,21 +23,29 @@ use axum::{
};
use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext};
use k8shost_proto::{
- node_service_server::NodeService, pod_service_server::PodService,
- service_service_server::ServiceService, Container, CreatePodRequest, CreateServiceRequest,
- DeletePodRequest, DeleteServiceRequest, ListNodesRequest, ListPodsRequest, ListServicesRequest,
- Node as ProtoNode, ObjectMeta, Pod as ProtoPod, PodSpec, Service as ProtoService, ServicePort,
- ServiceSpec,
+ deployment_service_server::DeploymentService, node_service_server::NodeService,
+ pod_service_server::PodService, service_service_server::ServiceService, Container,
+ ContainerPort, CreateDeploymentRequest, CreatePodRequest, CreateServiceRequest,
+ DeleteDeploymentRequest, DeletePodRequest, DeleteServiceRequest, Deployment as ProtoDeployment,
+ DeploymentSpec, EnvVar, GetDeploymentRequest, LabelSelector, ListDeploymentsRequest,
+ ListNodesRequest, ListPodsRequest, ListServicesRequest, Node as ProtoNode, ObjectMeta,
+ Pod as ProtoPod, PodSpec, PodTemplateSpec, Service as ProtoService, ServicePort, ServiceSpec,
+ UpdateDeploymentRequest,
};
use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
use std::sync::Arc;
use tonic::{Code, Request};
-use crate::services::{node::NodeServiceImpl, pod::PodServiceImpl, service::ServiceServiceImpl};
+use crate::services::{
+ deployment::DeploymentServiceImpl, node::NodeServiceImpl, pod::PodServiceImpl,
+ service::ServiceServiceImpl,
+};
/// REST API state
#[derive(Clone)]
pub struct RestApiState {
+ pub deployment_service: Arc<DeploymentServiceImpl>,
pub pod_service: Arc<PodServiceImpl>,
pub service_service: Arc<ServiceServiceImpl>,
pub node_service: Arc<NodeServiceImpl>,
@@ -103,7 +116,50 @@ pub struct CreateServiceRequestRest {
pub service_type: Option<String>,
pub port: i32,
pub target_port: Option<i32>,
- pub selector: Option<std::collections::HashMap<String, String>>,
+ pub selector: Option<HashMap<String, String>>,
+}
+
+/// Deployment creation request
+#[derive(Debug, Deserialize)]
+pub struct CreateDeploymentRequestRest {
+ pub name: String,
+ pub namespace: Option<String>,
+ pub replicas: Option<i32>,
+ pub selector: HashMap<String, String>,
+ pub template_labels: Option<HashMap<String, String>>,
+ pub containers: Vec<DeploymentContainerRequestRest>,
+}
+
+/// Deployment update request
+#[derive(Debug, Deserialize)]
+pub struct UpdateDeploymentRequestRest {
+ pub replicas: Option<i32>,
+ pub selector: Option<HashMap<String, String>>,
+ pub template_labels: Option<HashMap<String, String>>,
+ pub containers: Option<Vec<DeploymentContainerRequestRest>>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct DeploymentContainerRequestRest {
+ pub name: String,
+ pub image: String,
+ pub command: Option<Vec<String>>,
+ pub args: Option<Vec<String>>,
+ pub ports: Option<Vec<DeploymentContainerPortRequestRest>>,
+ pub env: Option<Vec<DeploymentEnvVarRequestRest>>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct DeploymentContainerPortRequestRest {
+ pub name: Option<String>,
+ pub container_port: i32,
+ pub protocol: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct DeploymentEnvVarRequestRest {
+ pub name: String,
+ pub value: Option<String>,
}
/// Query params for list operations
@@ -273,12 +329,169 @@ pub struct NodesResponse {
pub nodes: Vec,
}
+/// Deployment response
+#[derive(Debug, Serialize)]
+pub struct DeploymentResponse {
+ pub name: String,
+ pub namespace: String,
+ pub replicas: i32,
+ pub ready_replicas: i32,
+ pub available_replicas: i32,
+ pub selector: HashMap<String, String>,
+ pub template_labels: HashMap<String, String>,
+ pub containers: Vec<DeploymentContainerResponse>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct DeploymentContainerResponse {
+ pub name: String,
+ pub image: String,
+ pub command: Vec<String>,
+ pub args: Vec<String>,
+ pub ports: Vec<DeploymentContainerPortResponse>,
+ pub env: Vec<DeploymentEnvVarResponse>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct DeploymentContainerPortResponse {
+ pub name: Option<String>,
+ pub container_port: i32,
+ pub protocol: Option<String>,
+}
+
+#[derive(Debug, Serialize)]
+pub struct DeploymentEnvVarResponse {
+ pub name: String,
+ pub value: Option<String>,
+}
+
+impl From<ProtoDeployment> for DeploymentResponse {
+ fn from(deployment: ProtoDeployment) -> Self {
+ let metadata = deployment.metadata.unwrap_or(ObjectMeta {
+ name: String::new(),
+ namespace: None,
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ });
+ let DeploymentSpec {
+ replicas,
+ selector,
+ template,
+ } = deployment.spec.unwrap_or(DeploymentSpec {
+ replicas: None,
+ selector: None,
+ template: None,
+ });
+ let template = template.unwrap_or(PodTemplateSpec {
+ metadata: Some(ObjectMeta {
+ name: String::new(),
+ namespace: None,
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ }),
+ spec: Some(PodSpec {
+ containers: Vec::new(),
+ restart_policy: None,
+ node_name: None,
+ }),
+ });
+ let template_metadata = template.metadata.unwrap_or(ObjectMeta {
+ name: String::new(),
+ namespace: None,
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ });
+ let template_spec = template.spec.unwrap_or(PodSpec {
+ containers: Vec::new(),
+ restart_policy: None,
+ node_name: None,
+ });
+ let status = deployment.status;
+
+ Self {
+ name: metadata.name,
+ namespace: metadata.namespace.unwrap_or_else(|| "default".to_string()),
+ replicas: replicas.unwrap_or(1),
+ ready_replicas: status
+ .as_ref()
+ .and_then(|status| status.ready_replicas)
+ .unwrap_or(0),
+ available_replicas: status
+ .as_ref()
+ .and_then(|status| status.available_replicas)
+ .unwrap_or(0),
+ selector: selector
+ .map(|selector| selector.match_labels)
+ .unwrap_or_default(),
+ template_labels: template_metadata.labels,
+ containers: template_spec
+ .containers
+ .into_iter()
+ .map(|container| DeploymentContainerResponse {
+ name: container.name,
+ image: container.image,
+ command: container.command,
+ args: container.args,
+ ports: container
+ .ports
+ .into_iter()
+ .map(|port| DeploymentContainerPortResponse {
+ name: port.name,
+ container_port: port.container_port,
+ protocol: port.protocol,
+ })
+ .collect(),
+ env: container
+ .env
+ .into_iter()
+ .map(|env| DeploymentEnvVarResponse {
+ name: env.name,
+ value: env.value,
+ })
+ .collect(),
+ })
+ .collect(),
+ }
+ }
+}
+
+/// Deployments list response
+#[derive(Debug, Serialize)]
+pub struct DeploymentsResponse {
+ pub deployments: Vec<DeploymentResponse>,
+}
+
/// Build the REST API router
pub fn build_router(state: RestApiState) -> Router {
Router::new()
.route("/api/v1/pods", get(list_pods).post(create_pod))
.route("/api/v1/pods/{namespace}/{name}", delete(delete_pod))
.route("/api/v1/services", get(list_services).post(create_service))
+ .route(
+ "/api/v1/deployments",
+ get(list_deployments).post(create_deployment),
+ )
+ .route(
+ "/api/v1/deployments/{namespace}/{name}",
+ get(get_deployment)
+ .put(update_deployment)
+ .delete(delete_deployment),
+ )
.route(
"/api/v1/services/{namespace}/{name}",
delete(delete_service),
@@ -311,13 +524,11 @@ async fn list_pods(
});
req.extensions_mut().insert(tenant);
- let response = state.pod_service.list_pods(req).await.map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "LIST_FAILED",
- &e.message(),
- )
- })?;
+ let response = state
+ .pod_service
+ .list_pods(req)
+ .await
+ .map_err(map_tonic_status)?;
let pods: Vec = response
.into_inner()
@@ -368,13 +579,11 @@ async fn create_pod(
});
grpc_req.extensions_mut().insert(tenant);
- let response = state.pod_service.create_pod(grpc_req).await.map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "CREATE_FAILED",
- &e.message(),
- )
- })?;
+ let response = state
+ .pod_service
+ .create_pod(grpc_req)
+ .await
+ .map_err(map_tonic_status)?;
let pod = response.into_inner().pod.ok_or_else(|| {
error_response(
@@ -404,13 +613,11 @@ async fn delete_pod(
});
req.extensions_mut().insert(tenant);
- state.pod_service.delete_pod(req).await.map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "DELETE_FAILED",
- &e.message(),
- )
- })?;
+ state
+ .pod_service
+ .delete_pod(req)
+ .await
+ .map_err(map_tonic_status)?;
Ok((
StatusCode::OK,
@@ -436,13 +643,7 @@ async fn list_services(
.service_service
.list_services(req)
.await
- .map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "LIST_FAILED",
- &e.message(),
- )
- })?;
+ .map_err(map_tonic_status)?;
let services: Vec = response
.into_inner()
@@ -498,13 +699,7 @@ async fn create_service(
.service_service
.create_service(grpc_req)
.await
- .map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "CREATE_FAILED",
- &e.message(),
- )
- })?;
+ .map_err(map_tonic_status)?;
let service = response.into_inner().service.ok_or_else(|| {
error_response(
@@ -538,13 +733,7 @@ async fn delete_service(
.service_service
.delete_service(req)
.await
- .map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "DELETE_FAILED",
- &e.message(),
- )
- })?;
+ .map_err(map_tonic_status)?;
Ok((
StatusCode::OK,
@@ -563,13 +752,11 @@ async fn list_nodes(
let mut req = Request::new(ListNodesRequest {});
req.extensions_mut().insert(tenant);
- let response = state.node_service.list_nodes(req).await.map_err(|e| {
- error_response(
- StatusCode::INTERNAL_SERVER_ERROR,
- "LIST_FAILED",
- &e.message(),
- )
- })?;
+ let response = state
+ .node_service
+ .list_nodes(req)
+ .await
+ .map_err(map_tonic_status)?;
let nodes: Vec = response
.into_inner()
@@ -581,6 +768,393 @@ async fn list_nodes(
Ok(Json(SuccessResponse::new(NodesResponse { nodes })))
}
+/// GET /api/v1/deployments - List deployments
+async fn list_deployments(
+ State(state): State<RestApiState>,
+ Query(params): Query<ListQueryParams>,
+ headers: HeaderMap,
+) -> Result<Json<SuccessResponse<DeploymentsResponse>>, (StatusCode, Json<ErrorResponse>)> {
+ let tenant = resolve_rest_tenant(&state, &headers).await?;
+ let mut req = Request::new(ListDeploymentsRequest {
+ namespace: params.namespace,
+ });
+ req.extensions_mut().insert(tenant);
+
+ let response = state
+ .deployment_service
+ .list_deployments(req)
+ .await
+ .map_err(map_tonic_status)?;
+
+ let deployments = response
+ .into_inner()
+ .items
+ .into_iter()
+ .map(DeploymentResponse::from)
+ .collect();
+
+ Ok(Json(SuccessResponse::new(DeploymentsResponse {
+ deployments,
+ })))
+}
+
+/// GET /api/v1/deployments/{namespace}/{name} - Get deployment
+async fn get_deployment(
+ State(state): State<RestApiState>,
+ Path((namespace, name)): Path<(String, String)>,
+ headers: HeaderMap,
+) -> Result<Json<SuccessResponse<DeploymentResponse>>, (StatusCode, Json<ErrorResponse>)> {
+ let tenant = resolve_rest_tenant(&state, &headers).await?;
+ let mut req = Request::new(GetDeploymentRequest { namespace, name });
+ req.extensions_mut().insert(tenant);
+
+ let response = state
+ .deployment_service
+ .get_deployment(req)
+ .await
+ .map_err(map_tonic_status)?;
+ let deployment = response.into_inner().deployment.ok_or_else(|| {
+ error_response(
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "INTERNAL",
+ "No deployment returned",
+ )
+ })?;
+
+ Ok(Json(SuccessResponse::new(DeploymentResponse::from(
+ deployment,
+ ))))
+}
+
+/// POST /api/v1/deployments - Create deployment
+async fn create_deployment(
+ State(state): State<RestApiState>,
+ headers: HeaderMap,
+ Json(req): Json<CreateDeploymentRequestRest>,
+) -> Result<
+ (StatusCode, Json<SuccessResponse<DeploymentResponse>>),
+ (StatusCode, Json<ErrorResponse>),
+> {
+ let tenant = resolve_rest_tenant(&state, &headers).await?;
+ let mut grpc_req = Request::new(CreateDeploymentRequest {
+ deployment: Some(build_proto_deployment(
+ req.name,
+ req.namespace,
+ req.replicas,
+ req.selector,
+ req.template_labels,
+ req.containers,
+ )),
+ });
+ grpc_req.extensions_mut().insert(tenant);
+
+ let response = state
+ .deployment_service
+ .create_deployment(grpc_req)
+ .await
+ .map_err(map_tonic_status)?;
+ let deployment = response.into_inner().deployment.ok_or_else(|| {
+ error_response(
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "INTERNAL",
+ "No deployment returned",
+ )
+ })?;
+
+ Ok((
+ StatusCode::CREATED,
+ Json(SuccessResponse::new(DeploymentResponse::from(deployment))),
+ ))
+}
+
+/// PUT /api/v1/deployments/{namespace}/{name} - Update deployment
+async fn update_deployment(
+ State(state): State<RestApiState>,
+ Path((namespace, name)): Path<(String, String)>,
+ headers: HeaderMap,
+ Json(req): Json<UpdateDeploymentRequestRest>,
+) -> Result<Json<SuccessResponse<DeploymentResponse>>, (StatusCode, Json<ErrorResponse>)> {
+ let tenant = resolve_rest_tenant(&state, &headers).await?;
+
+ let mut get_req = Request::new(GetDeploymentRequest {
+ namespace: namespace.clone(),
+ name: name.clone(),
+ });
+ get_req.extensions_mut().insert(tenant.clone());
+ let existing = state
+ .deployment_service
+ .get_deployment(get_req)
+ .await
+ .map_err(map_tonic_status)?
+ .into_inner()
+ .deployment
+ .ok_or_else(|| {
+ error_response(
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "INTERNAL",
+ "No deployment returned",
+ )
+ })?;
+
+ let mut deployment = existing;
+ let spec = deployment.spec.get_or_insert(DeploymentSpec {
+ replicas: None,
+ selector: None,
+ template: None,
+ });
+
+ if let Some(replicas) = req.replicas {
+ spec.replicas = Some(replicas);
+ }
+ if let Some(selector) = req.selector {
+ spec.selector = Some(LabelSelector {
+ match_labels: selector,
+ });
+ }
+
+ let template = spec.template.get_or_insert(PodTemplateSpec {
+ metadata: Some(ObjectMeta {
+ name: String::new(),
+ namespace: Some(namespace.clone()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ }),
+ spec: Some(PodSpec {
+ containers: Vec::new(),
+ restart_policy: None,
+ node_name: None,
+ }),
+ });
+
+ if let Some(template_labels) = req.template_labels {
+ template
+ .metadata
+ .get_or_insert_with(|| ObjectMeta {
+ name: String::new(),
+ namespace: Some(namespace.clone()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ })
+ .labels = template_labels;
+ }
+ if let Some(containers) = req.containers {
+ template
+ .spec
+ .get_or_insert(PodSpec {
+ containers: Vec::new(),
+ restart_policy: None,
+ node_name: None,
+ })
+ .containers = containers
+ .into_iter()
+ .map(proto_container_from_rest)
+ .collect();
+ }
+
+ ensure_selector_labels_on_template(spec);
+
+ let metadata = deployment.metadata.get_or_insert(ObjectMeta {
+ name: name.clone(),
+ namespace: Some(namespace.clone()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ });
+ metadata.name = name;
+ metadata.namespace = Some(namespace);
+
+ let mut update_req = Request::new(UpdateDeploymentRequest {
+ deployment: Some(deployment),
+ });
+ update_req.extensions_mut().insert(tenant);
+
+ let response = state
+ .deployment_service
+ .update_deployment(update_req)
+ .await
+ .map_err(map_tonic_status)?;
+ let deployment = response.into_inner().deployment.ok_or_else(|| {
+ error_response(
+ StatusCode::INTERNAL_SERVER_ERROR,
+ "INTERNAL",
+ "No deployment returned",
+ )
+ })?;
+
+ Ok(Json(SuccessResponse::new(DeploymentResponse::from(
+ deployment,
+ ))))
+}
+
+/// DELETE /api/v1/deployments/{namespace}/{name} - Delete deployment
+async fn delete_deployment(
+ State(state): State<RestApiState>,
+ Path((namespace, name)): Path<(String, String)>,
+ headers: HeaderMap,
+) -> Result<(StatusCode, Json<SuccessResponse<serde_json::Value>>), (StatusCode, Json<ErrorResponse>)>
+{
+ let tenant = resolve_rest_tenant(&state, &headers).await?;
+ let mut req = Request::new(DeleteDeploymentRequest {
+ namespace: namespace.clone(),
+ name: name.clone(),
+ });
+ req.extensions_mut().insert(tenant);
+
+ state
+ .deployment_service
+ .delete_deployment(req)
+ .await
+ .map_err(map_tonic_status)?;
+
+ Ok((
+ StatusCode::OK,
+ Json(SuccessResponse::new(serde_json::json!({
+ "name": name,
+ "namespace": namespace,
+ "deleted": true
+ }))),
+ ))
+}
+
+fn build_proto_deployment(
+ name: String,
+ namespace: Option<String>,
+ replicas: Option<i32>,
+ selector: HashMap<String, String>,
+ template_labels: Option<HashMap<String, String>>,
+ containers: Vec<DeploymentContainerRequestRest>,
+) -> ProtoDeployment {
+ let namespace = namespace.unwrap_or_else(|| "default".to_string());
+ let mut labels = template_labels.unwrap_or_else(|| selector.clone());
+ for (key, value) in &selector {
+ labels.entry(key.clone()).or_insert_with(|| value.clone());
+ }
+
+ ProtoDeployment {
+ metadata: Some(ObjectMeta {
+ name,
+ namespace: Some(namespace.clone()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ }),
+ spec: Some(DeploymentSpec {
+ replicas,
+ selector: Some(LabelSelector {
+ match_labels: selector,
+ }),
+ template: Some(PodTemplateSpec {
+ metadata: Some(ObjectMeta {
+ name: String::new(),
+ namespace: Some(namespace),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels,
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ }),
+ spec: Some(PodSpec {
+ containers: containers
+ .into_iter()
+ .map(proto_container_from_rest)
+ .collect(),
+ restart_policy: Some("Always".to_string()),
+ node_name: None,
+ }),
+ }),
+ }),
+ status: None,
+ }
+}
+
+fn proto_container_from_rest(container: DeploymentContainerRequestRest) -> Container {
+ Container {
+ name: container.name,
+ image: container.image,
+ command: container.command.unwrap_or_default(),
+ args: container.args.unwrap_or_default(),
+ ports: container
+ .ports
+ .unwrap_or_default()
+ .into_iter()
+ .map(|port| ContainerPort {
+ name: port.name,
+ container_port: port.container_port,
+ protocol: port.protocol,
+ })
+ .collect(),
+ env: container
+ .env
+ .unwrap_or_default()
+ .into_iter()
+ .map(|env| EnvVar {
+ name: env.name,
+ value: env.value,
+ })
+ .collect(),
+ }
+}
+
+fn ensure_selector_labels_on_template(spec: &mut DeploymentSpec) {
+ let selector = spec
+ .selector
+ .as_ref()
+ .map(|selector| selector.match_labels.clone())
+ .unwrap_or_default();
+ let template = spec.template.get_or_insert(PodTemplateSpec {
+ metadata: Some(ObjectMeta {
+ name: String::new(),
+ namespace: Some("default".to_string()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ }),
+ spec: Some(PodSpec {
+ containers: Vec::new(),
+ restart_policy: None,
+ node_name: None,
+ }),
+ });
+ let metadata = template.metadata.get_or_insert_with(|| ObjectMeta {
+ name: String::new(),
+ namespace: Some("default".to_string()),
+ uid: None,
+ resource_version: None,
+ creation_timestamp: None,
+ labels: HashMap::new(),
+ annotations: HashMap::new(),
+ org_id: None,
+ project_id: None,
+ });
+ for (key, value) in selector {
+ metadata.labels.entry(key).or_insert(value);
+ }
+}
+
/// Helper to create error response
fn error_response(
status: StatusCode,
@@ -608,17 +1182,22 @@ async fn resolve_rest_tenant(
.auth_service
.authenticate_headers(headers)
.await
- .map_err(map_auth_status)?;
- resolve_tenant_ids_from_context(&tenant, "", "").map_err(map_auth_status)?;
+ .map_err(map_tonic_status)?;
+ resolve_tenant_ids_from_context(&tenant, "", "").map_err(map_tonic_status)?;
Ok(tenant)
}
-fn map_auth_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
+fn map_tonic_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
let status_code = match status.code() {
Code::Unauthenticated => StatusCode::UNAUTHORIZED,
Code::PermissionDenied => StatusCode::FORBIDDEN,
Code::InvalidArgument => StatusCode::BAD_REQUEST,
Code::NotFound => StatusCode::NOT_FOUND,
+ Code::AlreadyExists => StatusCode::CONFLICT,
+ Code::FailedPrecondition => StatusCode::PRECONDITION_FAILED,
+ Code::ResourceExhausted => StatusCode::TOO_MANY_REQUESTS,
+ Code::DeadlineExceeded => StatusCode::GATEWAY_TIMEOUT,
+ Code::Unavailable => StatusCode::SERVICE_UNAVAILABLE,
_ => StatusCode::INTERNAL_SERVER_ERROR,
};
let code = match status.code() {
@@ -626,6 +1205,11 @@ fn map_auth_status(status: tonic::Status) -> (StatusCode, Json) {
Code::PermissionDenied => "FORBIDDEN",
Code::InvalidArgument => "INVALID_ARGUMENT",
Code::NotFound => "NOT_FOUND",
+ Code::AlreadyExists => "ALREADY_EXISTS",
+ Code::FailedPrecondition => "FAILED_PRECONDITION",
+ Code::ResourceExhausted => "RESOURCE_EXHAUSTED",
+ Code::DeadlineExceeded => "DEADLINE_EXCEEDED",
+ Code::Unavailable => "UNAVAILABLE",
_ => "INTERNAL",
};
diff --git a/nix/modules/k8shost.nix b/nix/modules/k8shost.nix
index 670fdd6..17cd18e 100644
--- a/nix/modules/k8shost.nix
+++ b/nix/modules/k8shost.nix
@@ -13,6 +13,12 @@ in
description = "Port for k8shost gRPC API server";
};
+ httpPort = lib.mkOption {
+ type = lib.types.port;
+ default = 8085;
+ description = "Port for k8shost HTTP REST API server";
+ };
+
iamAddr = lib.mkOption {
type = lib.types.nullOr lib.types.str;
default = null;
@@ -126,6 +132,7 @@ in
ExecStart = lib.concatStringsSep " " ([
"${cfg.package}/bin/k8shost-server"
"--addr 0.0.0.0:${toString cfg.port}"
+ "--http-addr 127.0.0.1:${toString cfg.httpPort}"
] ++ lib.optional (cfg.iamAddr != null) "--iam-server-addr ${cfg.iamAddr}"
++ lib.optional (cfg.chainfireAddr != null) "--chainfire-endpoint ${cfg.chainfireAddr}"
++ lib.optional (cfg.prismnetAddr != null) "--prismnet-server-addr ${cfg.prismnetAddr}"
diff --git a/nix/test-cluster/run-cluster.sh b/nix/test-cluster/run-cluster.sh
index 22b1832..23c900f 100755
--- a/nix/test-cluster/run-cluster.sh
+++ b/nix/test-cluster/run-cluster.sh
@@ -3463,13 +3463,14 @@ validate_fiberlb_flow() {
validate_k8shost_flow() {
log "Validating K8sHost node, pod, service, and controller integrations"
- local iam_tunnel="" prism_tunnel="" dns_tunnel="" lb_tunnel="" k8s_tunnel=""
+ local iam_tunnel="" prism_tunnel="" dns_tunnel="" lb_tunnel="" k8s_tunnel="" k8s_http_tunnel=""
iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
prism_tunnel="$(start_ssh_tunnel node01 15081 50081)"
dns_tunnel="$(start_ssh_tunnel node01 15084 50084)"
lb_tunnel="$(start_ssh_tunnel node01 15085 50085)"
k8s_tunnel="$(start_ssh_tunnel node01 15087 50087)"
- trap 'stop_ssh_tunnel node01 "${k8s_tunnel}"; stop_ssh_tunnel node01 "${lb_tunnel}"; stop_ssh_tunnel node01 "${dns_tunnel}"; stop_ssh_tunnel node01 "${prism_tunnel}"; stop_ssh_tunnel node01 "${iam_tunnel}"' RETURN
+ k8s_http_tunnel="$(start_ssh_tunnel node01 18087 8085)"
+ trap 'stop_ssh_tunnel node01 "${k8s_http_tunnel}"; stop_ssh_tunnel node01 "${k8s_tunnel}"; stop_ssh_tunnel node01 "${lb_tunnel}"; stop_ssh_tunnel node01 "${dns_tunnel}"; stop_ssh_tunnel node01 "${prism_tunnel}"; stop_ssh_tunnel node01 "${iam_tunnel}"' RETURN
local org_id="default-org"
local project_id="default-project"
@@ -3503,19 +3504,16 @@ validate_k8shost_flow() {
127.0.0.1:15087 k8shost.NodeService/ListNodes \
| jq -e --arg name "${node_name}" '.items | any(.metadata.name == $name)' >/dev/null
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${K8SHOST_PROTO_DIR}" \
- -proto "${K8SHOST_PROTO}" \
- -d "$(jq -cn --arg name "${deployment_name}" --arg org "${org_id}" --arg project "${project_id}" '{deployment:{metadata:{name:$name, namespace:"default", orgId:$org, projectId:$project}, spec:{replicas:2, selector:{matchLabels:{app:"k8shost-deployment-smoke", deployment:$name}}, template:{metadata:{name:"", namespace:"default", labels:{app:"k8shost-deployment-smoke", deployment:$name}}, spec:{containers:[{name:"backend", image:"smoke", ports:[{containerPort:8082, protocol:"TCP"}]}]}}}}}')" \
- 127.0.0.1:15087 k8shost.DeploymentService/CreateDeployment >/dev/null
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${K8SHOST_PROTO_DIR}" \
- -proto "${K8SHOST_PROTO}" \
- -d "$(jq -cn '{namespace:"default"}')" \
- 127.0.0.1:15087 k8shost.DeploymentService/ListDeployments \
- | jq -e --arg name "${deployment_name}" '.items | any(.metadata.name == $name)' >/dev/null
+ curl -fsS \
+ -H "Authorization: Bearer ${token}" \
+ -H "Content-Type: application/json" \
+ -d "$(jq -cn --arg name "${deployment_name}" '{name:$name, namespace:"default", replicas:2, selector:{app:"k8shost-deployment-smoke", deployment:$name}, containers:[{name:"backend", image:"smoke", ports:[{container_port:8082, protocol:"TCP"}]}]}')" \
+ http://127.0.0.1:18087/api/v1/deployments \
+ | jq -e --arg name "${deployment_name}" '.data.name == $name and .data.replicas == 2' >/dev/null
+ curl -fsS \
+ -H "Authorization: Bearer ${token}" \
+ http://127.0.0.1:18087/api/v1/deployments?namespace=default \
+ | jq -e --arg name "${deployment_name}" '.data.deployments | any(.name == $name)' >/dev/null
deadline=$((SECONDS + HTTP_WAIT_TIMEOUT))
while true; do
@@ -3537,19 +3535,17 @@ validate_k8shost_flow() {
sleep 2
done
- local deployment_json
- deployment_json="$(grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${K8SHOST_PROTO_DIR}" \
- -proto "${K8SHOST_PROTO}" \
- -d "$(jq -cn --arg ns "default" --arg name "${deployment_name}" '{namespace:$ns, name:$name}')" \
- 127.0.0.1:15087 k8shost.DeploymentService/GetDeployment)"
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${K8SHOST_PROTO_DIR}" \
- -proto "${K8SHOST_PROTO}" \
- -d "$(printf '%s' "${deployment_json}" | jq '.deployment.spec.replicas = 1 | {deployment:.deployment}')" \
- 127.0.0.1:15087 k8shost.DeploymentService/UpdateDeployment >/dev/null
+ curl -fsS \
+ -H "Authorization: Bearer ${token}" \
+ http://127.0.0.1:18087/api/v1/deployments/default/${deployment_name} \
+ | jq -e --arg name "${deployment_name}" '.data.name == $name and .data.ready_replicas >= 0' >/dev/null
+ curl -fsS \
+ -X PUT \
+ -H "Authorization: Bearer ${token}" \
+ -H "Content-Type: application/json" \
+ -d '{"replicas":1}' \
+ http://127.0.0.1:18087/api/v1/deployments/default/${deployment_name} \
+ | jq -e '.data.replicas == 1' >/dev/null
deadline=$((SECONDS + HTTP_WAIT_TIMEOUT))
while true; do
@@ -3569,12 +3565,11 @@ validate_k8shost_flow() {
sleep 2
done
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${K8SHOST_PROTO_DIR}" \
- -proto "${K8SHOST_PROTO}" \
- -d "$(jq -cn --arg ns "default" --arg name "${deployment_name}" '{namespace:$ns, name:$name}')" \
- 127.0.0.1:15087 k8shost.DeploymentService/DeleteDeployment >/dev/null
+ curl -fsS \
+ -X DELETE \
+ -H "Authorization: Bearer ${token}" \
+ http://127.0.0.1:18087/api/v1/deployments/default/${deployment_name} \
+ | jq -e '.data.deleted == true' >/dev/null
deadline=$((SECONDS + HTTP_WAIT_TIMEOUT))
while true; do