diff --git a/deployer/Cargo.lock b/deployer/Cargo.lock index ed8efc1..f1c60de 100644 --- a/deployer/Cargo.lock +++ b/deployer/Cargo.lock @@ -2039,6 +2039,9 @@ dependencies = [ "deployer-types", "fiberlb-api", "flashdns-api", + "iam-client", + "iam-types", + "prismnet-api", "serde", "serde_json", "tokio", @@ -2093,6 +2096,17 @@ dependencies = [ "syn", ] +[[package]] +name = "prismnet-api" +version = "0.1.0" +dependencies = [ + "prost", + "prost-types", + "protoc-bin-vendored", + "tonic", + "tonic-build", +] + [[package]] name = "proc-macro2" version = "1.0.106" diff --git a/deployer/Cargo.toml b/deployer/Cargo.toml index a9dac2f..c35537b 100644 --- a/deployer/Cargo.toml +++ b/deployer/Cargo.toml @@ -45,3 +45,4 @@ fiberlb-api = { path = "../fiberlb/crates/fiberlb-api" } flashdns-api = { path = "../flashdns/crates/flashdns-api" } iam-client = { path = "../iam/crates/iam-client" } iam-types = { path = "../iam/crates/iam-types" } +prismnet-api = { path = "../prismnet/crates/prismnet-api" } diff --git a/deployer/crates/plasmacloud-reconciler/Cargo.toml b/deployer/crates/plasmacloud-reconciler/Cargo.toml index ea1ee1d..f904416 100644 --- a/deployer/crates/plasmacloud-reconciler/Cargo.toml +++ b/deployer/crates/plasmacloud-reconciler/Cargo.toml @@ -19,5 +19,8 @@ tracing-subscriber.workspace = true fiberlb-api.workspace = true flashdns-api.workspace = true deployer-types.workspace = true +iam-client.workspace = true +iam-types.workspace = true +prismnet-api.workspace = true clap = { version = "4.5", features = ["derive"] } tonic = "0.12" diff --git a/deployer/crates/plasmacloud-reconciler/src/auth.rs b/deployer/crates/plasmacloud-reconciler/src/auth.rs new file mode 100644 index 0000000..cd13ad0 --- /dev/null +++ b/deployer/crates/plasmacloud-reconciler/src/auth.rs @@ -0,0 +1,79 @@ +use anyhow::Result; +use iam_client::client::IamClientConfig; +use iam_client::IamClient; +use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope}; +use tonic::metadata::MetadataValue; 
+use tonic::Request;
+
+pub fn authorized_request<T>(message: T, token: &str) -> Request<T> {
+    let mut req = Request::new(message);
+    let header = format!("Bearer {}", token);
+    let value = MetadataValue::try_from(header.as_str()).expect("valid bearer token metadata");
+    req.metadata_mut().insert("authorization", value);
+    req
+}
+
+pub async fn issue_controller_token(
+    iam_server_addr: &str,
+    principal_id: &str,
+    org_id: &str,
+    project_id: &str,
+) -> Result<String> {
+    let mut config = IamClientConfig::new(iam_server_addr).with_timeout(5000);
+    if iam_server_addr.starts_with("http://") || !iam_server_addr.starts_with("https://") {
+        config = config.without_tls();
+    }
+
+    let client = IamClient::connect(config).await?;
+    let principal_ref = PrincipalRef::service_account(principal_id);
+    let principal = match client.get_principal(&principal_ref).await? {
+        Some(existing) => existing,
+        None => {
+            client
+                .create_service_account(principal_id, principal_id, org_id, project_id)
+                .await?
+        }
+    };
+
+    ensure_project_admin_binding(&client, &principal, org_id, project_id).await?;
+
+    let scope = Scope::project(project_id, org_id);
+    client
+        .issue_token(
+            &principal,
+            vec!["roles/ProjectAdmin".to_string()],
+            scope,
+            3600,
+        )
+        .await
+        .map_err(Into::into)
+}
+
+async fn ensure_project_admin_binding(
+    client: &IamClient,
+    principal: &Principal,
+    org_id: &str,
+    project_id: &str,
+) -> Result<()> {
+    let scope = Scope::project(project_id, org_id);
+    let bindings = client
+        .list_bindings_for_principal(&principal.to_ref())
+        .await?;
+
+    let already_bound = bindings
+        .iter()
+        .any(|binding| binding.role_ref == "roles/ProjectAdmin" && binding.scope == scope);
+    if already_bound {
+        return Ok(());
+    }
+
+    let binding = PolicyBinding::new(
+        format!("{}-project-admin-{}-{}", principal.id, org_id, project_id),
+        principal.to_ref(),
+        "roles/ProjectAdmin",
+        scope,
+    )
+    .with_created_by("plasmacloud-reconciler");
+    client.create_binding(&binding).await?;
+    Ok(())
+}
diff
--git a/deployer/crates/plasmacloud-reconciler/src/main.rs b/deployer/crates/plasmacloud-reconciler/src/main.rs index cdbfde4..8e54d06 100644 --- a/deployer/crates/plasmacloud-reconciler/src/main.rs +++ b/deployer/crates/plasmacloud-reconciler/src/main.rs @@ -39,7 +39,9 @@ use flashdns_api::proto::{ ZoneInfo, }; +mod auth; mod hosts; +mod tenant_network; mod watcher; #[derive(Parser)] @@ -75,6 +77,9 @@ enum Command { prune: bool, }, + /// Apply tenant-scoped PrismNET declarations + TenantNetwork(tenant_network::TenantNetworkCommand), + /// Reconcile host deployments into per-node desired-system state Hosts(hosts::HostsCommand), } @@ -300,6 +305,9 @@ async fn main() -> Result<()> { let spec: DnsConfig = read_json(&config).await?; reconcile_dns(spec, endpoint, prune).await?; } + Command::TenantNetwork(command) => { + tenant_network::run(command).await?; + } Command::Hosts(command) => { hosts::run(command).await?; } diff --git a/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs b/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs new file mode 100644 index 0000000..5188066 --- /dev/null +++ b/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs @@ -0,0 +1,2053 @@ +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::time::{Duration, Instant}; + +use anyhow::{anyhow, bail, Context, Result}; +use clap::Args; +use serde::Deserialize; +use std::net::Ipv4Addr; +use tonic::transport::{Channel, Endpoint}; +use tracing::{info, warn}; + +use prismnet_api::ipam_service_client::IpamServiceClient; +use prismnet_api::port_service_client::PortServiceClient; +use prismnet_api::router_service_client::RouterServiceClient; +use prismnet_api::security_group_service_client::SecurityGroupServiceClient; +use prismnet_api::subnet_service_client::SubnetServiceClient; +use prismnet_api::vpc_service_client::VpcServiceClient; +use prismnet_api::{ + AddRuleRequest, CreatePortRequest, CreateSecurityGroupRequest, CreateServiceIpPoolRequest, + 
CreateRouterRequest, CreateSubnetRequest, CreateVpcRequest, DeletePortRequest,
+    DeleteRouterRequest, DeleteSecurityGroupRequest, DeleteServiceIpPoolRequest,
+    DeleteSubnetRequest, DeleteVpcRequest, IpProtocol, ListPortsRequest, ListRoutersRequest,
+    ListSecurityGroupsRequest, ListServiceIpPoolsRequest, ListSubnetsRequest, ListVpcsRequest,
+    Port, RemoveRuleRequest, Router, RuleDirection, SecurityGroup, SecurityGroupRule,
+    ServiceIpPool, ServiceIpPoolType, Subnet, UpdatePortRequest, UpdateRouterRequest,
+    UpdateSecurityGroupRequest, UpdateServiceIpPoolRequest, UpdateSubnetRequest,
+    UpdateVpcRequest, Vpc,
+};
+
+use crate::auth::{authorized_request, issue_controller_token};
+
+const SERVICE_READY_TIMEOUT: Duration = Duration::from_secs(180);
+const SERVICE_RETRY_INTERVAL: Duration = Duration::from_secs(2);
+const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
+
+#[derive(Args, Debug)]
+pub struct TenantNetworkCommand {
+    #[arg(long)]
+    config: PathBuf,
+
+    #[arg(long)]
+    endpoint: String,
+
+    #[arg(long)]
+    iam_endpoint: String,
+
+    #[arg(long)]
+    controller_principal_id: String,
+
+    #[arg(long, default_value_t = false)]
+    prune: bool,
+}
+
+#[derive(Debug, Deserialize)]
+struct TenantNetworkConfig {
+    #[serde(default)]
+    tenants: Vec<TenantSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct TenantSpec {
+    org_id: String,
+    project_id: String,
+    #[serde(default)]
+    security_groups: Vec<SecurityGroupSpec>,
+    #[serde(default)]
+    service_ip_pools: Vec<ServiceIpPoolSpec>,
+    #[serde(default)]
+    vpcs: Vec<VpcSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ServiceIpPoolSpec {
+    name: String,
+    cidr_block: String,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    pool_type: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SecurityGroupSpec {
+    name: String,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    rules: Vec<SecurityGroupRuleSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SecurityGroupRuleSpec {
+    direction: String,
+    #[serde(default)]
+    protocol: Option<String>,
+    #[serde(default)]
+    port_range_min: Option<u32>,
+    #[serde(default)]
+    port_range_max: Option<u32>,
+    #[serde(default)]
+    remote_cidr: Option<String>,
+    #[serde(default)]
+    remote_group: Option<String>,
+    #[serde(default)]
+    description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct VpcSpec {
+    name: String,
+    cidr_block: String,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    router: Option<RouterSpec>,
+    #[serde(default)]
+    subnets: Vec<SubnetSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct RouterSpec {
+    name: String,
+    gateway_cidr: String,
+    mac_address: String,
+    external_ip: String,
+    #[serde(default)]
+    description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SubnetSpec {
+    name: String,
+    cidr_block: String,
+    #[serde(default)]
+    gateway_ip: Option<String>,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    dhcp_enabled: Option<bool>,
+    #[serde(default)]
+    ports: Vec<PortSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct PortSpec {
+    name: String,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    ip_address: Option<String>,
+    #[serde(default)]
+    security_groups: Vec<String>,
+    #[serde(default)]
+    admin_state_up: Option<bool>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct RuleFingerprint {
+    direction: i32,
+    protocol: i32,
+    port_range_min: u32,
+    port_range_max: u32,
+    remote_cidr: String,
+    remote_group_id: String,
+    description: String,
+}
+
+pub async fn run(command: TenantNetworkCommand) -> Result<()> {
+    let config = read_json(&command.config).await?;
+    validate_config(&config)?;
+    reconcile(
+        config,
+        command.endpoint,
+        command.iam_endpoint,
+        command.controller_principal_id,
+        command.prune,
+    )
+    .await
+}
+
+async fn read_json<T: for<'a> Deserialize<'a>>(path: &PathBuf) -> Result<T> {
+    let contents = tokio::fs::read_to_string(path)
+        .await
+        .with_context(|| format!("failed to read {}", path.display()))?;
+    let config = serde_json::from_str(&contents)
+        .with_context(|| format!("failed to parse {}", path.display()))?;
+    Ok(config)
+}
+
+async fn reconcile(
+    config: TenantNetworkConfig,
+    endpoint: String,
+
iam_endpoint: String,
+    controller_principal_id: String,
+    prune: bool,
+) -> Result<()> {
+    let prismnet_channel = connect_with_retry("PrismNET", &endpoint).await?;
+    let mut vpc_client = VpcServiceClient::new(prismnet_channel.clone());
+    let mut subnet_client = SubnetServiceClient::new(prismnet_channel.clone());
+    let mut port_client = PortServiceClient::new(prismnet_channel.clone());
+    let mut router_client = RouterServiceClient::new(prismnet_channel.clone());
+    let mut sg_client = SecurityGroupServiceClient::new(prismnet_channel.clone());
+    let mut ipam_client = IpamServiceClient::new(prismnet_channel);
+
+    for tenant in &config.tenants {
+        let token = issue_controller_token_with_retry(
+            &iam_endpoint,
+            &controller_principal_id,
+            &tenant.org_id,
+            &tenant.project_id,
+        )
+        .await?;
+
+        info!(
+            org_id = %tenant.org_id,
+            project_id = %tenant.project_id,
+            prune,
+            "reconciling tenant network declarations"
+        );
+
+        reconcile_tenant(
+            &mut vpc_client,
+            &mut subnet_client,
+            &mut port_client,
+            &mut router_client,
+            &mut sg_client,
+            &mut ipam_client,
+            tenant,
+            &token,
+            prune,
+        )
+        .await?;
+    }
+
+    Ok(())
+}
+
+async fn connect_with_retry(service_name: &str, endpoint: &str) -> Result<Channel> {
+    let deadline = Instant::now() + SERVICE_READY_TIMEOUT;
+
+    loop {
+        match Endpoint::from_shared(endpoint.to_string())
+            .context("invalid gRPC endpoint")?
+            .connect_timeout(CONNECT_TIMEOUT)
+            .connect()
+            .await
+        {
+            Ok(channel) => return Ok(channel),
+            Err(error) => {
+                if Instant::now() >= deadline {
+                    return Err(anyhow!(error).context(format!(
+                        "{service_name} at {endpoint} did not become ready within {} seconds",
+                        SERVICE_READY_TIMEOUT.as_secs()
+                    )));
+                }
+                warn!(
+                    service = service_name,
+                    endpoint,
+                    error = %error,
+                    "service is not ready yet; retrying"
+                );
+                tokio::time::sleep(SERVICE_RETRY_INTERVAL).await;
+            }
+        }
+    }
+}
+
+async fn issue_controller_token_with_retry(
+    iam_endpoint: &str,
+    controller_principal_id: &str,
+    org_id: &str,
+    project_id: &str,
+) -> Result<String> {
+    let deadline = Instant::now() + SERVICE_READY_TIMEOUT;
+
+    loop {
+        match issue_controller_token(iam_endpoint, controller_principal_id, org_id, project_id).await
+        {
+            Ok(token) => return Ok(token),
+            Err(error) => {
+                if Instant::now() >= deadline {
+                    return Err(error.context(format!(
+                        "failed to issue controller token for tenant {org_id}/{project_id} within {} seconds",
+                        SERVICE_READY_TIMEOUT.as_secs()
+                    )));
+                }
+                warn!(
+                    iam_endpoint,
+                    org_id,
+                    project_id,
+                    error = %error,
+                    "IAM is not ready to issue controller tokens yet; retrying"
+                );
+                tokio::time::sleep(SERVICE_RETRY_INTERVAL).await;
+            }
+        }
+    }
+}
+
+async fn reconcile_tenant(
+    vpc_client: &mut VpcServiceClient<Channel>,
+    subnet_client: &mut SubnetServiceClient<Channel>,
+    port_client: &mut PortServiceClient<Channel>,
+    router_client: &mut RouterServiceClient<Channel>,
+    sg_client: &mut SecurityGroupServiceClient<Channel>,
+    ipam_client: &mut IpamServiceClient<Channel>,
+    tenant: &TenantSpec,
+    token: &str,
+    prune: bool,
+) -> Result<()> {
+    let mut security_groups = list_security_groups(sg_client, tenant, token).await?;
+    let mut security_group_ids = HashMap::new();
+    for spec in &tenant.security_groups {
+        let sg = ensure_security_group(sg_client, tenant, spec, &security_groups, token).await?;
+        security_group_ids.insert(spec.name.clone(), sg.id.clone());
+    }
+
+    security_groups = list_security_groups(sg_client,
tenant, token).await?; + for spec in &tenant.security_groups { + let actual = security_groups + .iter() + .find(|sg| sg.name == spec.name) + .with_context(|| format!("security group {} not found after reconciliation", spec.name))?; + sync_security_group_rules(sg_client, tenant, spec, actual, &security_group_ids, token, prune) + .await?; + } + + let service_ip_pools = list_service_ip_pools(ipam_client, tenant, token).await?; + let mut desired_service_ip_pool_names = HashSet::new(); + for pool_spec in &tenant.service_ip_pools { + desired_service_ip_pool_names.insert(pool_spec.name.clone()); + ensure_service_ip_pool(ipam_client, tenant, pool_spec, &service_ip_pools, token).await?; + } + + if prune { + let current_pools = list_service_ip_pools(ipam_client, tenant, token).await?; + for pool in current_pools { + if !desired_service_ip_pool_names.contains(&pool.name) { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + pool = %pool.name, + "deleting unmanaged service ip pool" + ); + ipam_client + .delete_service_ip_pool(authorized_request( + DeleteServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: pool.id, + }, + token, + )) + .await?; + } + } + } + + let vpcs = list_vpcs(vpc_client, tenant, token).await?; + let mut desired_vpc_names = HashSet::new(); + for vpc_spec in &tenant.vpcs { + desired_vpc_names.insert(vpc_spec.name.clone()); + let vpc = ensure_vpc(vpc_client, tenant, vpc_spec, &vpcs, token).await?; + reconcile_vpc( + subnet_client, + port_client, + router_client, + tenant, + vpc_spec, + &vpc, + &security_group_ids, + token, + prune, + ) + .await?; + } + + if prune { + let current_vpcs = list_vpcs(vpc_client, tenant, token).await?; + for vpc in current_vpcs { + if !desired_vpc_names.contains(&vpc.name) { + delete_vpc_tree( + vpc_client, + subnet_client, + port_client, + router_client, + tenant, + &vpc, + token, + ) + .await?; + } + } + + let current_security_groups = 
list_security_groups(sg_client, tenant, token).await?; + let desired_security_groups: HashSet<_> = tenant + .security_groups + .iter() + .map(|sg| sg.name.as_str()) + .collect(); + for sg in current_security_groups { + if !desired_security_groups.contains(sg.name.as_str()) { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + security_group = %sg.name, + "deleting unmanaged security group" + ); + sg_client + .delete_security_group(authorized_request( + DeleteSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: sg.id, + }, + token, + )) + .await?; + } + } + } + + Ok(()) +} + +async fn reconcile_vpc( + subnet_client: &mut SubnetServiceClient, + port_client: &mut PortServiceClient, + router_client: &mut RouterServiceClient, + tenant: &TenantSpec, + vpc_spec: &VpcSpec, + vpc: &Vpc, + security_group_ids: &HashMap, + token: &str, + prune: bool, +) -> Result<()> { + let routers = list_routers(router_client, tenant, Some(&vpc.id), token).await?; + if let Some(router_spec) = vpc_spec.router.as_ref() { + let router = ensure_router(router_client, tenant, vpc, router_spec, &routers, token).await?; + if prune { + for existing in routers { + if existing.id != router.id { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %existing.name, + "deleting unmanaged router" + ); + router_client + .delete_router(authorized_request( + DeleteRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: existing.id, + }, + token, + )) + .await?; + } + } + } + } else if prune { + for router in routers { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %router.name, + "deleting unmanaged router" + ); + router_client + .delete_router(authorized_request( + DeleteRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: router.id, + }, + token, + )) + 
.await?; + } + } + + let subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?; + let mut desired_subnet_names = HashSet::new(); + for subnet_spec in &vpc_spec.subnets { + desired_subnet_names.insert(subnet_spec.name.clone()); + let subnet = ensure_subnet(subnet_client, tenant, vpc, subnet_spec, &subnets, token).await?; + reconcile_subnet( + port_client, + tenant, + subnet_spec, + &subnet, + security_group_ids, + token, + prune, + ) + .await?; + } + + if prune { + let current_subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?; + for subnet in current_subnets { + if !desired_subnet_names.contains(&subnet.name) { + delete_subnet_tree(subnet_client, port_client, tenant, vpc, &subnet, token).await?; + } + } + } + + Ok(()) +} + +async fn reconcile_subnet( + port_client: &mut PortServiceClient, + tenant: &TenantSpec, + subnet_spec: &SubnetSpec, + subnet: &Subnet, + security_group_ids: &HashMap, + token: &str, + prune: bool, +) -> Result<()> { + let ports = list_ports(port_client, tenant, &subnet.id, token).await?; + let mut desired_port_names = HashSet::new(); + for port_spec in &subnet_spec.ports { + desired_port_names.insert(port_spec.name.clone()); + ensure_port( + port_client, + tenant, + subnet, + port_spec, + &ports, + security_group_ids, + token, + ) + .await?; + } + + if prune { + let current_ports = list_ports(port_client, tenant, &subnet.id, token).await?; + for port in current_ports { + if !desired_port_names.contains(&port.name) { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + subnet = %subnet.name, + port = %port.name, + "deleting unmanaged port" + ); + port_client + .delete_port(authorized_request( + DeletePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet.id.clone(), + id: port.id, + }, + token, + )) + .await?; + } + } + } + + Ok(()) +} + +async fn ensure_security_group( + sg_client: &mut SecurityGroupServiceClient, + tenant: &TenantSpec, + 
spec: &SecurityGroupSpec, + existing: &[SecurityGroup], + token: &str, +) -> Result { + if let Some(sg) = existing.iter().find(|sg| sg.name == spec.name) { + let desired_description = string_or_default(spec.description.as_deref()); + if sg.description != desired_description { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + security_group = %spec.name, + "updating security group" + ); + let response = sg_client + .update_security_group(authorized_request( + UpdateSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: sg.id.clone(), + name: spec.name.clone(), + description: desired_description, + }, + token, + )) + .await? + .into_inner(); + return response + .security_group + .context("missing security group in update response"); + } + + return Ok(sg.clone()); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + security_group = %spec.name, + "creating security group" + ); + let response = sg_client + .create_security_group(authorized_request( + CreateSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + }, + token, + )) + .await? 
+ .into_inner(); + + response + .security_group + .context("missing security group in create response") +} + +async fn sync_security_group_rules( + sg_client: &mut SecurityGroupServiceClient, + tenant: &TenantSpec, + spec: &SecurityGroupSpec, + actual: &SecurityGroup, + security_group_ids: &HashMap, + token: &str, + prune: bool, +) -> Result<()> { + let actual_rules: HashMap = actual + .rules + .iter() + .map(|rule| (fingerprint_actual_rule(rule), rule.id.clone())) + .collect(); + + let mut desired_rules = HashSet::new(); + for desired_rule in &spec.rules { + let fingerprint = fingerprint_desired_rule(desired_rule, security_group_ids)?; + let is_new = !actual_rules.contains_key(&fingerprint); + desired_rules.insert(fingerprint.clone()); + + if is_new { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + security_group = %spec.name, + "adding security group rule" + ); + sg_client + .add_rule(authorized_request( + AddRuleRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + security_group_id: actual.id.clone(), + direction: fingerprint.direction, + protocol: fingerprint.protocol, + port_range_min: fingerprint.port_range_min, + port_range_max: fingerprint.port_range_max, + remote_cidr: fingerprint.remote_cidr.clone(), + remote_group_id: fingerprint.remote_group_id.clone(), + description: fingerprint.description.clone(), + }, + token, + )) + .await?; + } + } + + if prune { + for (fingerprint, rule_id) in actual_rules { + if !desired_rules.contains(&fingerprint) { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + security_group = %spec.name, + "removing unmanaged security group rule" + ); + sg_client + .remove_rule(authorized_request( + RemoveRuleRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + security_group_id: actual.id.clone(), + rule_id, + }, + token, + )) + .await?; + } + } + } + + Ok(()) +} + +async fn ensure_vpc( + vpc_client: &mut VpcServiceClient, + 
tenant: &TenantSpec, + spec: &VpcSpec, + existing: &[Vpc], + token: &str, +) -> Result { + if let Some(vpc) = existing.iter().find(|vpc| vpc.name == spec.name) { + ensure_field_matches( + "vpc", + &spec.name, + "cidr_block", + &vpc.cidr_block, + &spec.cidr_block, + )?; + + let desired_description = string_or_default(spec.description.as_deref()); + if vpc.description != desired_description { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %spec.name, + "updating vpc" + ); + let response = vpc_client + .update_vpc(authorized_request( + UpdateVpcRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: vpc.id.clone(), + name: spec.name.clone(), + description: desired_description, + }, + token, + )) + .await? + .into_inner(); + return response.vpc.context("missing vpc in update response"); + } + + return Ok(vpc.clone()); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %spec.name, + "creating vpc" + ); + let response = vpc_client + .create_vpc(authorized_request( + CreateVpcRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + cidr_block: spec.cidr_block.clone(), + }, + token, + )) + .await? 
+ .into_inner(); + + response.vpc.context("missing vpc in create response") +} + +async fn ensure_router( + router_client: &mut RouterServiceClient, + tenant: &TenantSpec, + vpc: &Vpc, + spec: &RouterSpec, + existing: &[Router], + token: &str, +) -> Result { + if let Some(router) = existing.iter().find(|router| router.name == spec.name) { + ensure_field_matches( + "router", + &spec.name, + "gateway_cidr", + &router.gateway_cidr, + &spec.gateway_cidr, + )?; + ensure_field_matches( + "router", + &spec.name, + "mac_address", + &router.mac_address, + &spec.mac_address, + )?; + ensure_field_matches( + "router", + &spec.name, + "external_ip", + &router.external_ip, + &spec.external_ip, + )?; + + let desired_description = string_or_default(spec.description.as_deref()); + if router.description != desired_description { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %spec.name, + "updating router" + ); + let response = router_client + .update_router(authorized_request( + UpdateRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: router.id.clone(), + name: spec.name.clone(), + description: desired_description, + }, + token, + )) + .await? 
+ .into_inner(); + return response.router.context("missing router in update response"); + } + + return Ok(router.clone()); + } + + if existing.len() == 1 { + let router = &existing[0]; + ensure_field_matches( + "router", + &router.name, + "gateway_cidr", + &router.gateway_cidr, + &spec.gateway_cidr, + )?; + ensure_field_matches( + "router", + &router.name, + "mac_address", + &router.mac_address, + &spec.mac_address, + )?; + ensure_field_matches( + "router", + &router.name, + "external_ip", + &router.external_ip, + &spec.external_ip, + )?; + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %router.name, + "renaming router to match declaration" + ); + let response = router_client + .update_router(authorized_request( + UpdateRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: router.id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + }, + token, + )) + .await? + .into_inner(); + return response.router.context("missing router in update response"); + } + + if existing.len() > 1 { + bail!( + "vpc {} has multiple routers; reconcile cannot determine which one to keep", + vpc.name + ); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %spec.name, + "creating router" + ); + let response = router_client + .create_router(authorized_request( + CreateRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: vpc.id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + gateway_cidr: spec.gateway_cidr.clone(), + mac_address: spec.mac_address.clone(), + external_ip: spec.external_ip.clone(), + }, + token, + )) + .await? 
+ .into_inner(); + + response.router.context("missing router in create response") +} + +async fn ensure_subnet( + subnet_client: &mut SubnetServiceClient, + tenant: &TenantSpec, + vpc: &Vpc, + spec: &SubnetSpec, + existing: &[Subnet], + token: &str, +) -> Result { + if let Some(subnet) = existing.iter().find(|subnet| subnet.name == spec.name) { + ensure_field_matches( + "subnet", + &spec.name, + "cidr_block", + &subnet.cidr_block, + &spec.cidr_block, + )?; + + if let Some(gateway_ip) = spec.gateway_ip.as_deref() { + ensure_field_matches("subnet", &spec.name, "gateway_ip", &subnet.gateway_ip, gateway_ip)?; + } + + let desired_description = string_or_default(spec.description.as_deref()); + let desired_dhcp_enabled = spec.dhcp_enabled.unwrap_or(true); + if subnet.description != desired_description || subnet.dhcp_enabled != desired_dhcp_enabled { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + subnet = %spec.name, + "updating subnet" + ); + let response = subnet_client + .update_subnet(authorized_request( + UpdateSubnetRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: vpc.id.clone(), + id: subnet.id.clone(), + name: spec.name.clone(), + description: desired_description, + dhcp_enabled: desired_dhcp_enabled, + }, + token, + )) + .await? + .into_inner(); + return response.subnet.context("missing subnet in update response"); + } + + return Ok(subnet.clone()); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + subnet = %spec.name, + "creating subnet" + ); + let response = subnet_client + .create_subnet(authorized_request( + CreateSubnetRequest { + vpc_id: vpc.id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + cidr_block: spec.cidr_block.clone(), + gateway_ip: string_or_default(spec.gateway_ip.as_deref()), + dhcp_enabled: spec.dhcp_enabled.unwrap_or(true), + }, + token, + )) + .await? 
+ .into_inner(); + + response.subnet.context("missing subnet in create response") +} + +async fn ensure_port( + port_client: &mut PortServiceClient, + tenant: &TenantSpec, + subnet: &Subnet, + spec: &PortSpec, + existing: &[Port], + security_group_ids: &HashMap, + token: &str, +) -> Result { + let desired_security_group_ids = resolve_security_group_ids(&spec.security_groups, security_group_ids)?; + if let Some(port) = existing.iter().find(|port| port.name == spec.name) { + if let Some(ip_address) = spec.ip_address.as_deref() { + ensure_field_matches("port", &spec.name, "ip_address", &port.ip_address, ip_address)?; + } + + let desired_description = string_or_default(spec.description.as_deref()); + let desired_admin_state_up = spec.admin_state_up.unwrap_or(true); + let actual_security_group_ids: HashSet<_> = + port.security_group_ids.iter().cloned().collect(); + let desired_security_group_ids_set: HashSet<_> = + desired_security_group_ids.iter().cloned().collect(); + let needs_update = port.description != desired_description + || port.admin_state_up != desired_admin_state_up + || actual_security_group_ids != desired_security_group_ids_set; + + if needs_update { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + subnet = %subnet.name, + port = %spec.name, + "updating port" + ); + let response = port_client + .update_port(authorized_request( + UpdatePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet.id.clone(), + id: port.id.clone(), + name: spec.name.clone(), + description: desired_description, + security_group_ids: desired_security_group_ids, + admin_state_up: desired_admin_state_up, + }, + token, + )) + .await? 
+ .into_inner(); + return response.port.context("missing port in update response"); + } + + return Ok(port.clone()); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + subnet = %subnet.name, + port = %spec.name, + "creating port" + ); + let mut port = port_client + .create_port(authorized_request( + CreatePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet.id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + ip_address: string_or_default(spec.ip_address.as_deref()), + security_group_ids: desired_security_group_ids.clone(), + }, + token, + )) + .await? + .into_inner() + .port + .context("missing port in create response")?; + + if !spec.admin_state_up.unwrap_or(true) { + port = port_client + .update_port(authorized_request( + UpdatePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet.id.clone(), + id: port.id.clone(), + name: String::new(), + description: String::new(), + security_group_ids: Vec::new(), + admin_state_up: false, + }, + token, + )) + .await? 
+ .into_inner() + .port + .context("missing port in update response")?; + } + + Ok(port) +} + +async fn delete_vpc_tree( + vpc_client: &mut VpcServiceClient, + subnet_client: &mut SubnetServiceClient, + port_client: &mut PortServiceClient, + router_client: &mut RouterServiceClient, + tenant: &TenantSpec, + vpc: &Vpc, + token: &str, +) -> Result<()> { + let routers = list_routers(router_client, tenant, Some(&vpc.id), token).await?; + for router in routers { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + router = %router.name, + "deleting unmanaged router" + ); + router_client + .delete_router(authorized_request( + DeleteRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: router.id, + }, + token, + )) + .await?; + } + + let subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?; + for subnet in subnets { + delete_subnet_tree(subnet_client, port_client, tenant, vpc, &subnet, token).await?; + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + "deleting unmanaged vpc" + ); + vpc_client + .delete_vpc(authorized_request( + DeleteVpcRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: vpc.id.clone(), + }, + token, + )) + .await?; + + Ok(()) +} + +async fn delete_subnet_tree( + subnet_client: &mut SubnetServiceClient, + port_client: &mut PortServiceClient, + tenant: &TenantSpec, + vpc: &Vpc, + subnet: &Subnet, + token: &str, +) -> Result<()> { + let ports = list_ports(port_client, tenant, &subnet.id, token).await?; + for port in ports { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + subnet = %subnet.name, + port = %port.name, + "deleting unmanaged port" + ); + port_client + .delete_port(authorized_request( + DeletePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet.id.clone(), + id: port.id, + }, + token, + )) + 
.await?; + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + vpc = %vpc.name, + subnet = %subnet.name, + "deleting unmanaged subnet" + ); + subnet_client + .delete_subnet(authorized_request( + DeleteSubnetRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: vpc.id.clone(), + id: subnet.id.clone(), + }, + token, + )) + .await?; + + Ok(()) +} + +async fn ensure_service_ip_pool( + ipam_client: &mut IpamServiceClient, + tenant: &TenantSpec, + spec: &ServiceIpPoolSpec, + existing: &[ServiceIpPool], + token: &str, +) -> Result { + let desired_pool_type = parse_service_ip_pool_type(spec.pool_type.as_deref())?; + if let Some(pool) = existing.iter().find(|pool| pool.name == spec.name) { + ensure_field_matches( + "service ip pool", + &spec.name, + "cidr_block", + &pool.cidr_block, + &spec.cidr_block, + )?; + ensure_field_matches( + "service ip pool", + &spec.name, + "pool_type", + service_ip_pool_type_name(pool.pool_type), + service_ip_pool_type_name(desired_pool_type), + )?; + + let desired_description = string_or_default(spec.description.as_deref()); + if pool.description != desired_description { + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + pool = %spec.name, + "updating service ip pool" + ); + let response = ipam_client + .update_service_ip_pool(authorized_request( + UpdateServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: pool.id.clone(), + name: spec.name.clone(), + description: desired_description, + }, + token, + )) + .await? 
+ .into_inner(); + return response.pool.context("missing service ip pool in update response"); + } + + return Ok(pool.clone()); + } + + info!( + org_id = %tenant.org_id, + project_id = %tenant.project_id, + pool = %spec.name, + "creating service ip pool" + ); + let response = ipam_client + .create_service_ip_pool(authorized_request( + CreateServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + name: spec.name.clone(), + description: string_or_default(spec.description.as_deref()), + cidr_block: spec.cidr_block.clone(), + pool_type: desired_pool_type, + }, + token, + )) + .await? + .into_inner(); + + response + .pool + .context("missing service ip pool in create response") +} + +async fn list_vpcs( + vpc_client: &mut VpcServiceClient, + tenant: &TenantSpec, + token: &str, +) -> Result> { + let response = vpc_client + .list_vpcs(authorized_request( + ListVpcsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? + .into_inner(); + Ok(response.vpcs) +} + +async fn list_routers( + router_client: &mut RouterServiceClient, + tenant: &TenantSpec, + vpc_id: Option<&str>, + token: &str, +) -> Result> { + let response = router_client + .list_routers(authorized_request( + ListRoutersRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: vpc_id.unwrap_or_default().to_string(), + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? 
+ .into_inner(); + Ok(response.routers) +} + +async fn list_subnets( + subnet_client: &mut SubnetServiceClient, + tenant: &TenantSpec, + vpc_id: &str, + token: &str, +) -> Result> { + let response = subnet_client + .list_subnets(authorized_request( + ListSubnetsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: vpc_id.to_string(), + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? + .into_inner(); + Ok(response.subnets) +} + +async fn list_ports( + port_client: &mut PortServiceClient, + tenant: &TenantSpec, + subnet_id: &str, + token: &str, +) -> Result> { + let response = port_client + .list_ports(authorized_request( + ListPortsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: subnet_id.to_string(), + device_id: String::new(), + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? + .into_inner(); + Ok(response.ports) +} + +async fn list_security_groups( + sg_client: &mut SecurityGroupServiceClient, + tenant: &TenantSpec, + token: &str, +) -> Result> { + let response = sg_client + .list_security_groups(authorized_request( + ListSecurityGroupsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? + .into_inner(); + Ok(response.security_groups) +} + +async fn list_service_ip_pools( + ipam_client: &mut IpamServiceClient, + tenant: &TenantSpec, + token: &str, +) -> Result> { + let response = ipam_client + .list_service_ip_pools(authorized_request( + ListServiceIpPoolsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + pool_type: 0, + page_size: 500, + page_token: String::new(), + }, + token, + )) + .await? 
+ .into_inner(); + Ok(response.pools) +} + +fn resolve_security_group_ids( + names: &[String], + security_group_ids: &HashMap, +) -> Result> { + let mut resolved = Vec::with_capacity(names.len()); + for name in names { + let id = security_group_ids + .get(name) + .with_context(|| format!("unknown security group reference {}", name))?; + resolved.push(id.clone()); + } + resolved.sort(); + resolved.dedup(); + Ok(resolved) +} + +fn fingerprint_actual_rule(rule: &SecurityGroupRule) -> RuleFingerprint { + RuleFingerprint { + direction: rule.direction, + protocol: rule.protocol, + port_range_min: rule.port_range_min, + port_range_max: rule.port_range_max, + remote_cidr: rule.remote_cidr.clone(), + remote_group_id: rule.remote_group_id.clone(), + description: rule.description.clone(), + } +} + +fn fingerprint_desired_rule( + rule: &SecurityGroupRuleSpec, + security_group_ids: &HashMap, +) -> Result { + let remote_group_id = match rule.remote_group.as_deref() { + Some(name) => security_group_ids + .get(name) + .with_context(|| format!("unknown remote security group {}", name))? 
+ .clone(), + None => String::new(), + }; + + Ok(RuleFingerprint { + direction: parse_direction(&rule.direction)?, + protocol: parse_protocol(rule.protocol.as_deref())?, + port_range_min: rule.port_range_min.unwrap_or(0), + port_range_max: rule.port_range_max.unwrap_or(0), + remote_cidr: string_or_default(rule.remote_cidr.as_deref()), + remote_group_id, + description: string_or_default(rule.description.as_deref()), + }) +} + +fn parse_direction(direction: &str) -> Result { + match direction.to_ascii_lowercase().as_str() { + "ingress" => Ok(RuleDirection::Ingress as i32), + "egress" => Ok(RuleDirection::Egress as i32), + other => bail!("unsupported rule direction {}", other), + } +} + +fn parse_protocol(protocol: Option<&str>) -> Result { + match protocol + .unwrap_or("any") + .to_ascii_lowercase() + .as_str() + { + "any" => Ok(IpProtocol::Any as i32), + "tcp" => Ok(IpProtocol::Tcp as i32), + "udp" => Ok(IpProtocol::Udp as i32), + "icmp" => Ok(IpProtocol::Icmp as i32), + "icmpv6" => Ok(IpProtocol::Icmpv6 as i32), + other => bail!("unsupported ip protocol {}", other), + } +} + +fn parse_service_ip_pool_type(pool_type: Option<&str>) -> Result { + match pool_type.unwrap_or("cluster_ip").to_ascii_lowercase().as_str() { + "cluster_ip" => Ok(ServiceIpPoolType::ClusterIp as i32), + "load_balancer" => Ok(ServiceIpPoolType::LoadBalancer as i32), + "node_port" => Ok(ServiceIpPoolType::NodePort as i32), + other => bail!("unsupported service ip pool type {}", other), + } +} + +fn service_ip_pool_type_name(pool_type: i32) -> &'static str { + match pool_type { + x if x == ServiceIpPoolType::ClusterIp as i32 => "cluster_ip", + x if x == ServiceIpPoolType::LoadBalancer as i32 => "load_balancer", + x if x == ServiceIpPoolType::NodePort as i32 => "node_port", + _ => "unspecified", + } +} + +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8)> { + let (ip, prefix) = cidr + .split_once('/') + .with_context(|| format!("invalid CIDR {}", cidr))?; + let ip: Ipv4Addr = ip + .parse() + 
.with_context(|| format!("invalid IPv4 address in CIDR {}", cidr))?; + let prefix: u8 = prefix + .parse() + .with_context(|| format!("invalid prefix in CIDR {}", cidr))?; + if prefix > 32 { + bail!("invalid prefix in CIDR {}", cidr); + } + Ok((ip, prefix)) +} + +fn parse_ipv4(ip: &str) -> Result { + ip.parse() + .with_context(|| format!("invalid IPv4 address {}", ip)) +} + +fn validate_mac_address(mac_address: &str) -> Result<()> { + let octets: Vec<_> = mac_address.split(':').collect(); + if octets.len() != 6 + || octets + .iter() + .any(|octet| octet.len() != 2 || u8::from_str_radix(octet, 16).is_err()) + { + bail!("invalid mac_address {}", mac_address); + } + Ok(()) +} + +fn cidr_range(cidr: (Ipv4Addr, u8)) -> (u32, u32) { + let mask = if cidr.1 == 0 { + 0 + } else { + u32::MAX << (32 - cidr.1) + }; + let start = u32::from(cidr.0) & mask; + let size = if cidr.1 == 32 { 1 } else { 1u64 << (32 - cidr.1) }; + let end = start + (size as u32) - 1; + (start, end) +} + +fn cidr_contains(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool { + let mask = if cidr.1 == 0 { 0 } else { u32::MAX << (32 - cidr.1) }; + (u32::from(cidr.0) & mask) == (u32::from(ip) & mask) +} + +fn cidr_contains_cidr(parent: (Ipv4Addr, u8), child: (Ipv4Addr, u8)) -> bool { + let (parent_start, parent_end) = cidr_range(parent); + let (child_start, child_end) = cidr_range(child); + child_start >= parent_start && child_end <= parent_end +} + +fn cidr_overlaps(a: (Ipv4Addr, u8), b: (Ipv4Addr, u8)) -> bool { + let (a_start, a_end) = cidr_range(a); + let (b_start, b_end) = cidr_range(b); + a_start <= b_end && b_start <= a_end +} + +fn validate_router_spec(vpc: &VpcSpec, router: &RouterSpec) -> Result<()> { + let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?; + let (gateway_ip, _) = parse_ipv4_cidr(&router.gateway_cidr)?; + let _: Ipv4Addr = router + .external_ip + .parse() + .with_context(|| format!("invalid external_ip {}", router.external_ip))?; + validate_mac_address(&router.mac_address)?; + if 
!cidr_contains(vpc_cidr, gateway_ip) { + bail!( + "router {} gateway_cidr must fall inside vpc {} cidr_block", + router.name, + vpc.name + ); + } + Ok(()) +} + +fn ensure_field_matches( + resource_kind: &str, + resource_name: &str, + field_name: &str, + actual: &str, + desired: &str, +) -> Result<()> { + if actual != desired { + bail!( + "{} {} has immutable field drift: {} is {:?}, expected {:?}", + resource_kind, + resource_name, + field_name, + actual, + desired + ); + } + Ok(()) +} + +fn string_or_default(value: Option<&str>) -> String { + value.unwrap_or_default().to_string() +} + +fn validate_config(config: &TenantNetworkConfig) -> Result<()> { + let mut tenant_scopes = HashSet::new(); + for tenant in &config.tenants { + if tenant.org_id.trim().is_empty() || tenant.project_id.trim().is_empty() { + bail!("tenant org_id/project_id must be non-empty"); + } + + if !tenant_scopes.insert((tenant.org_id.clone(), tenant.project_id.clone())) { + bail!( + "duplicate tenant scope {}/{} in tenant networking config", + tenant.org_id, + tenant.project_id + ); + } + + validate_unique_names( + "security group", + tenant + .security_groups + .iter() + .map(|sg| sg.name.as_str()), + )?; + validate_unique_names( + "service ip pool", + tenant.service_ip_pools.iter().map(|pool| pool.name.as_str()), + )?; + validate_unique_names("vpc", tenant.vpcs.iter().map(|vpc| vpc.name.as_str()))?; + + let security_group_names: HashSet<_> = + tenant.security_groups.iter().map(|sg| sg.name.as_str()).collect(); + for pool in &tenant.service_ip_pools { + if pool.cidr_block.trim().is_empty() { + bail!("service ip pool {} must set cidr_block", pool.name); + } + parse_ipv4_cidr(&pool.cidr_block)?; + parse_service_ip_pool_type(pool.pool_type.as_deref())?; + } + for sg in &tenant.security_groups { + for rule in &sg.rules { + if rule.remote_cidr.is_some() && rule.remote_group.is_some() { + bail!( + "security group {} rule cannot set both remote_cidr and remote_group", + sg.name + ); + } + if let 
Some(remote_group) = rule.remote_group.as_deref() { + if !security_group_names.contains(remote_group) { + bail!( + "security group {} references unknown remote_group {}", + sg.name, + remote_group + ); + } + } + if let Some(remote_cidr) = rule.remote_cidr.as_deref() { + parse_ipv4_cidr(remote_cidr)?; + } + if let (Some(min), Some(max)) = (rule.port_range_min, rule.port_range_max) { + if min > max { + bail!( + "security group {} has invalid port range {}-{}", + sg.name, + min, + max + ); + } + } + parse_direction(&rule.direction)?; + parse_protocol(rule.protocol.as_deref())?; + } + } + + for vpc in &tenant.vpcs { + let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?; + if let Some(router) = vpc.router.as_ref() { + if router.name.trim().is_empty() { + bail!("router name must be non-empty"); + } + if router.gateway_cidr.trim().is_empty() + || router.mac_address.trim().is_empty() + || router.external_ip.trim().is_empty() + { + bail!("router {} must set gateway_cidr, mac_address, and external_ip", router.name); + } + validate_router_spec(vpc, router)?; + } + validate_unique_names("subnet", vpc.subnets.iter().map(|subnet| subnet.name.as_str()))?; + let mut seen_subnet_cidrs: Vec<(&str, (Ipv4Addr, u8))> = Vec::new(); + for subnet in &vpc.subnets { + let subnet_cidr = parse_ipv4_cidr(&subnet.cidr_block)?; + if !cidr_contains_cidr(vpc_cidr, subnet_cidr) { + bail!( + "subnet {} cidr_block must fall inside vpc {} cidr_block", + subnet.name, + vpc.name + ); + } + if let Some((other_name, _)) = seen_subnet_cidrs + .iter() + .find(|(_, other_cidr)| cidr_overlaps(*other_cidr, subnet_cidr)) + { + bail!( + "subnet {} overlaps subnet {} in vpc {}", + subnet.name, + other_name, + vpc.name + ); + } + seen_subnet_cidrs.push((subnet.name.as_str(), subnet_cidr)); + let gateway_ip = subnet + .gateway_ip + .as_deref() + .map(parse_ipv4) + .transpose()?; + if let Some(gateway_ip) = gateway_ip { + if !cidr_contains(subnet_cidr, gateway_ip) { + bail!( + "subnet {} gateway_ip must fall inside 
subnet cidr_block", + subnet.name + ); + } + } + validate_unique_names("port", subnet.ports.iter().map(|port| port.name.as_str()))?; + let mut fixed_ips = HashSet::new(); + for port in &subnet.ports { + for security_group in &port.security_groups { + if !security_group_names.contains(security_group.as_str()) { + bail!( + "port {} references unknown security group {}", + port.name, + security_group + ); + } + } + if let Some(ip_address) = port.ip_address.as_deref() { + let ip_address = parse_ipv4(ip_address)?; + if !cidr_contains(subnet_cidr, ip_address) { + bail!( + "port {} ip_address must fall inside subnet {} cidr_block", + port.name, + subnet.name + ); + } + if gateway_ip.map(|gateway_ip| gateway_ip == ip_address).unwrap_or(false) { + bail!( + "port {} ip_address cannot reuse subnet {} gateway_ip", + port.name, + subnet.name + ); + } + if !fixed_ips.insert(ip_address) { + bail!( + "subnet {} declares duplicate fixed ip_address {}", + subnet.name, + ip_address + ); + } + } + } + } + } + } + + Ok(()) +} + +fn validate_unique_names<'a>( + resource_kind: &str, + names: impl IntoIterator, +) -> Result<()> { + let mut seen = HashSet::new(); + for name in names { + if name.trim().is_empty() { + bail!("{} name must be non-empty", resource_kind); + } + if !seen.insert(name) { + bail!("duplicate {} name {}", resource_kind, name); + } + } + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rejects_duplicate_tenant_scope() { + let config = TenantNetworkConfig { + tenants: vec![ + TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: Vec::new(), + }, + TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: Vec::new(), + }, + ], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn resolves_rule_remote_group_by_name() { + let mut security_group_ids = 
HashMap::new(); + security_group_ids.insert("web".to_string(), "sg-1".to_string()); + + let fingerprint = fingerprint_desired_rule( + &SecurityGroupRuleSpec { + direction: "ingress".into(), + protocol: Some("tcp".into()), + port_range_min: Some(443), + port_range_max: Some(443), + remote_cidr: None, + remote_group: Some("web".into()), + description: Some("allow web".into()), + }, + &security_group_ids, + ) + .unwrap(); + + assert_eq!(fingerprint.direction, RuleDirection::Ingress as i32); + assert_eq!(fingerprint.protocol, IpProtocol::Tcp as i32); + assert_eq!(fingerprint.remote_group_id, "sg-1"); + } + + #[test] + fn rejects_port_reference_to_unknown_security_group() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: vec![SecurityGroupSpec { + name: "web".into(), + description: None, + rules: Vec::new(), + }], + service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: None, + subnets: vec![SubnetSpec { + name: "subnet-a".into(), + cidr_block: "10.0.1.0/24".into(), + gateway_ip: None, + description: None, + dhcp_enabled: None, + ports: vec![PortSpec { + name: "port-a".into(), + description: None, + ip_address: None, + security_groups: vec!["db".into()], + admin_state_up: None, + }], + }], + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_invalid_service_ip_pool_type() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: vec![ServiceIpPoolSpec { + name: "lb".into(), + cidr_block: "10.200.0.0/24".into(), + description: None, + pool_type: Some("bogus".into()), + }], + vpcs: Vec::new(), + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_router_gateway_outside_vpc() { + let config = 
TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: Some(RouterSpec { + name: "edge".into(), + gateway_cidr: "10.1.0.1/24".into(), + mac_address: "02:00:00:00:00:01".into(), + external_ip: "203.0.113.10".into(), + description: None, + }), + subnets: Vec::new(), + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_router_with_invalid_mac() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: Some(RouterSpec { + name: "edge".into(), + gateway_cidr: "10.0.0.1/24".into(), + mac_address: "bad-mac".into(), + external_ip: "203.0.113.10".into(), + description: None, + }), + subnets: Vec::new(), + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_subnet_outside_vpc() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: None, + subnets: vec![SubnetSpec { + name: "subnet-a".into(), + cidr_block: "10.1.0.0/24".into(), + gateway_ip: None, + description: None, + dhcp_enabled: None, + ports: Vec::new(), + }], + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_overlapping_subnets() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + 
service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: None, + subnets: vec![ + SubnetSpec { + name: "subnet-a".into(), + cidr_block: "10.0.1.0/24".into(), + gateway_ip: None, + description: None, + dhcp_enabled: None, + ports: Vec::new(), + }, + SubnetSpec { + name: "subnet-b".into(), + cidr_block: "10.0.1.128/25".into(), + gateway_ip: None, + description: None, + dhcp_enabled: None, + ports: Vec::new(), + }, + ], + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } + + #[test] + fn rejects_duplicate_fixed_port_ip() { + let config = TenantNetworkConfig { + tenants: vec![TenantSpec { + org_id: "org-1".into(), + project_id: "proj-1".into(), + security_groups: Vec::new(), + service_ip_pools: Vec::new(), + vpcs: vec![VpcSpec { + name: "vpc-a".into(), + cidr_block: "10.0.0.0/16".into(), + description: None, + router: None, + subnets: vec![SubnetSpec { + name: "subnet-a".into(), + cidr_block: "10.0.1.0/24".into(), + gateway_ip: Some("10.0.1.1".into()), + description: None, + dhcp_enabled: None, + ports: vec![ + PortSpec { + name: "port-a".into(), + description: None, + ip_address: Some("10.0.1.10".into()), + security_groups: Vec::new(), + admin_state_up: None, + }, + PortSpec { + name: "port-b".into(), + description: None, + ip_address: Some("10.0.1.10".into()), + security_groups: Vec::new(), + admin_state_up: None, + }, + ], + }], + }], + }], + }; + + assert!(validate_config(&config).is_err()); + } +} diff --git a/docs/testing.md b/docs/testing.md index 4d820ea..fb3d1d6 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -19,14 +19,16 @@ This flow: ```bash nix run ./nix/test-cluster#cluster -- fresh-smoke +nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp nix run ./nix/test-cluster#cluster -- fresh-matrix nix run ./nix/test-cluster#cluster -- fresh-bench-storage nix build .#checks.x86_64-linux.deployer-vm-smoke ``` -Use these three commands as the 
release-facing local proof set: +Use these commands as the release-facing local proof set: - `fresh-smoke`: whole-cluster readiness, core behavior, and fault injection +- `fresh-demo-vm-webapp`: focused VM demo showing a web app inside the guest with SQLite state persisted on the attached PhotonCloud volume across restart and migration - `fresh-matrix`: composed service scenarios such as `prismnet + flashdns + fiberlb` and PrismNet-backed VM hosting bundles with `plasmavmc + coronafs + lightningstor` - `fresh-bench-storage`: CoronaFS local-vs-shared-volume throughput, cross-worker volume visibility, and LightningStor large/small-object throughput capture - `deployer-vm-smoke`: prebuilt NixOS system closure handoff into `nix-agent`, proving host rollout can activate a host-built target without guest-side compilation @@ -37,6 +39,7 @@ Use these three commands as the release-facing local proof set: nix run ./nix/test-cluster#cluster -- status nix run ./nix/test-cluster#cluster -- logs node01 nix run ./nix/test-cluster#cluster -- ssh node04 +nix run ./nix/test-cluster#cluster -- demo-vm-webapp nix run ./nix/test-cluster#cluster -- matrix nix run ./nix/test-cluster#cluster -- bench-storage nix run ./nix/test-cluster#cluster -- fresh-matrix diff --git a/flake.nix b/flake.nix index 3a42ff4..81d5e47 100644 --- a/flake.nix +++ b/flake.nix @@ -203,6 +203,7 @@ "flaredb" "flashdns" "iam" + "prismnet" ]; }; @@ -1064,6 +1065,127 @@ }; checks = { + workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit" { + nativeBuildInputs = [ pkgs.python3 ]; + } '' + ${pkgs.python3}/bin/python - <<'PY' ${./.} + from __future__ import annotations + + import re + import sys + import tomllib + from pathlib import Path + from typing import Any + + + def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]: + source = flake_path.read_text() + match = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", source, re.S) + if match is None: + raise 
ValueError(f"Could not find workspaceSourceRoots in {flake_path}") + + roots: dict[str, list[str]] = {} + for name, body in re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", match.group(1), re.S): + roots[name] = re.findall(r'"([^"]+)"', body) + return roots + + + def collect_path_dependencies(value: Any) -> list[str]: + found: list[str] = [] + + if isinstance(value, dict): + path = value.get("path") + if isinstance(path, str): + found.append(path) + for nested in value.values(): + found.extend(collect_path_dependencies(nested)) + elif isinstance(value, list): + for nested in value: + found.extend(collect_path_dependencies(nested)) + + return found + + + def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]: + workspace_manifest = repo_root / workspace_name / "Cargo.toml" + manifests = [workspace_manifest] + workspace_data = tomllib.loads(workspace_manifest.read_text()) + members = workspace_data.get("workspace", {}).get("members", []) + + for member in members: + for candidate in workspace_manifest.parent.glob(member): + manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml" + if manifest.is_file(): + manifests.append(manifest) + + unique_manifests: list[Path] = [] + seen: set[Path] = set() + for manifest in manifests: + resolved = manifest.resolve() + if resolved in seen: + continue + seen.add(resolved) + unique_manifests.append(manifest) + return unique_manifests + + + def required_root(dep_rel: Path) -> str: + parts = dep_rel.parts + if not parts: + return "" + if parts[0] == "crates" and len(parts) >= 2: + return "/".join(parts[:2]) + return parts[0] + + + def is_covered(dep_rel: str, configured_roots: list[str]) -> bool: + return any(dep_rel == root or dep_rel.startswith(f"{root}/") for root in configured_roots) + + + def main() -> int: + repo_root = Path(sys.argv[1]).resolve() + workspace_roots = extract_workspace_source_roots(repo_root / "flake.nix") + failures: list[str] = [] + + for workspace_name, 
configured_roots in sorted(workspace_roots.items()): + workspace_manifest = repo_root / workspace_name / "Cargo.toml" + if not workspace_manifest.is_file(): + continue + + for manifest in workspace_manifests(repo_root, workspace_name): + manifest_data = tomllib.loads(manifest.read_text()) + for dep_path in collect_path_dependencies(manifest_data): + dependency_dir = (manifest.parent / dep_path).resolve() + try: + dep_rel = dependency_dir.relative_to(repo_root) + except ValueError: + continue + + dep_rel_str = dep_rel.as_posix() + if is_covered(dep_rel_str, configured_roots): + continue + + failures.append( + f"{workspace_name}: missing source root '{required_root(dep_rel)}' " + f"for dependency '{dep_rel_str}' referenced by " + f"{manifest.relative_to(repo_root).as_posix()}" + ) + + if failures: + print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr) + for failure in failures: + print(f" - {failure}", file=sys.stderr) + return 1 + + print("workspaceSourceRoots covers all workspace path dependencies.") + return 0 + + + raise SystemExit(main()) + PY + touch "$out" + ''; + first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest ( import ./nix/tests/first-boot-topology-vm-smoke.nix { inherit pkgs; diff --git a/iam/Cargo.lock b/iam/Cargo.lock index 6a6ab2c..51e309a 100644 --- a/iam/Cargo.lock +++ b/iam/Cargo.lock @@ -1294,6 +1294,7 @@ dependencies = [ "serde_json", "thiserror 1.0.69", "tokio", + "tokio-stream", "toml", "tonic", "tonic-health", diff --git a/iam/crates/iam-server/Cargo.toml b/iam/crates/iam-server/Cargo.toml index e674048..376d546 100644 --- a/iam/crates/iam-server/Cargo.toml +++ b/iam/crates/iam-server/Cargo.toml @@ -21,6 +21,7 @@ serde = { workspace = true } serde_json = { workspace = true } thiserror = { workspace = true } tokio = { workspace = true, features = ["full"] } +tokio-stream = { workspace = true, features = ["net"] } tracing = { workspace = true } tracing-subscriber = { workspace = true } tonic = { workspace = true } 
diff --git a/iam/crates/iam-server/src/main.rs b/iam/crates/iam-server/src/main.rs index 86d71ee..d2ee8e5 100644 --- a/iam/crates/iam-server/src/main.rs +++ b/iam/crates/iam-server/src/main.rs @@ -16,6 +16,7 @@ use tonic::service::Interceptor; use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig}; use tonic::{metadata::MetadataMap, Request, Status}; use tonic_health::server::health_reporter; +use tokio_stream::wrappers::TcpListenerStream; use tracing::{info, warn}; use iam_api::{ @@ -283,6 +284,20 @@ async fn main() -> Result<(), Box> { info!("Starting IAM server on {}", config.server.addr); + // Reserve the public listeners before opening outbound cluster/backend connections. + // Without this, a peer connection can claim the service port as an ephemeral source port + // and make the later gRPC bind fail with EADDRINUSE. + let grpc_addr = config.server.addr; + let http_addr = config.server.http_addr; + let grpc_listener = tokio::net::TcpListener::bind(grpc_addr).await?; + let http_listener = tokio::net::TcpListener::bind(http_addr).await?; + + info!( + grpc_addr = %grpc_addr, + http_addr = %http_addr, + "IAM listeners reserved" + ); + if let Some(endpoint) = &config.cluster.chainfire_endpoint { let normalized = normalize_chainfire_endpoint(endpoint); info!( @@ -514,17 +529,15 @@ async fn main() -> Result<(), Box> { .add_service(IamCredentialServer::new(credential_service)) .add_service(GatewayAuthServiceServer::new(gateway_auth_service)) .add_service(admin_server) - .serve(config.server.addr); + .serve_with_incoming(TcpListenerStream::new(grpc_listener)); // HTTP REST API server - let http_addr = config.server.http_addr; let rest_state = rest::RestApiState { - server_addr: config.server.addr.to_string(), + server_addr: grpc_addr.to_string(), tls_enabled: config.server.tls.is_some(), admin_token: admin_token.clone(), }; let rest_app = rest::build_router(rest_state); - let http_listener = tokio::net::TcpListener::bind(&http_addr).await?; 
info!(http_addr = %http_addr, "HTTP REST API server starting"); diff --git a/k8shost/Cargo.lock b/k8shost/Cargo.lock index ac2d6ef..84c69a2 100644 --- a/k8shost/Cargo.lock +++ b/k8shost/Cargo.lock @@ -2217,6 +2217,18 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" +[[package]] +name = "nix" +version = "0.29.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" +dependencies = [ + "bitflags 2.11.0", + "cfg-if", + "cfg_aliases", + "libc", +] + [[package]] name = "nom" version = "7.1.3" @@ -2539,6 +2551,7 @@ name = "plasmavmc-kvm" version = "0.1.0" dependencies = [ "async-trait", + "nix", "plasmavmc-hypervisor", "plasmavmc-types", "serde", @@ -2576,6 +2589,7 @@ dependencies = [ "reqwest 0.12.28", "serde", "serde_json", + "sha2", "thiserror 1.0.69", "tokio", "tokio-stream", diff --git a/nix/modules/default.nix b/nix/modules/default.nix index 547952b..b2a23aa 100644 --- a/nix/modules/default.nix +++ b/nix/modules/default.nix @@ -3,6 +3,7 @@ ./chainfire.nix ./plasmacloud-cluster.nix ./install-target.nix + ./service-port-reservations.nix ./creditservice.nix ./coronafs.nix ./flaredb.nix @@ -11,6 +12,9 @@ ./prismnet.nix ./flashdns.nix ./fiberlb.nix + ./plasmacloud-network.nix + ./plasmacloud-resources.nix + ./plasmacloud-tenant-networking.nix ./lightningstor.nix ./k8shost.nix ./nightlight.nix diff --git a/nix/modules/plasmacloud-tenant-networking.nix b/nix/modules/plasmacloud-tenant-networking.nix new file mode 100644 index 0000000..3c17134 --- /dev/null +++ b/nix/modules/plasmacloud-tenant-networking.nix @@ -0,0 +1,373 @@ +{ config, lib, pkgs, ... 
}: + +with lib; + +let + cfg = config.plasmacloud.tenantNetworking; + jsonFormat = pkgs.formats.json {}; + + serviceIpPoolType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Service IP pool name"; + }; + + cidr_block = mkOption { + type = types.str; + description = "Service IP pool CIDR"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Service IP pool description"; + }; + + pool_type = mkOption { + type = types.nullOr (types.enum [ "cluster_ip" "load_balancer" "node_port" ]); + default = null; + description = "Service IP pool type"; + }; + }; + }; + + portType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Port name"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Port description"; + }; + + ip_address = mkOption { + type = types.nullOr types.str; + default = null; + description = "Requested fixed IP address"; + }; + + security_groups = mkOption { + type = types.listOf types.str; + default = []; + description = "Security group names attached to the port"; + }; + + admin_state_up = mkOption { + type = types.nullOr types.bool; + default = null; + description = "Administrative state for the port"; + }; + }; + }; + + subnetType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Subnet name"; + }; + + cidr_block = mkOption { + type = types.str; + description = "Subnet CIDR"; + }; + + gateway_ip = mkOption { + type = types.nullOr types.str; + default = null; + description = "Gateway IP"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Subnet description"; + }; + + dhcp_enabled = mkOption { + type = types.nullOr types.bool; + default = null; + description = "Enable DHCP for the subnet"; + }; + + ports = mkOption { + type = types.listOf portType; + default = []; + description = "Ports 
within the subnet"; + }; + }; + }; + + routerType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Router name"; + }; + + gateway_cidr = mkOption { + type = types.str; + description = "Gateway interface CIDR attached to the VPC"; + }; + + mac_address = mkOption { + type = types.str; + description = "Router interface MAC address"; + }; + + external_ip = mkOption { + type = types.str; + description = "SNAT external IPv4 address"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Router description"; + }; + }; + }; + + vpcType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "VPC name"; + }; + + cidr_block = mkOption { + type = types.str; + description = "VPC CIDR"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "VPC description"; + }; + + router = mkOption { + type = types.nullOr routerType; + default = null; + description = "Optional tenant edge router for the VPC"; + }; + + subnets = mkOption { + type = types.listOf subnetType; + default = []; + description = "Subnets within the VPC"; + }; + }; + }; + + securityGroupRuleType = types.submodule { + options = { + direction = mkOption { + type = types.enum [ "ingress" "egress" ]; + description = "Rule direction"; + }; + + protocol = mkOption { + type = types.nullOr (types.enum [ "any" "tcp" "udp" "icmp" "icmpv6" ]); + default = null; + description = "IP protocol"; + }; + + port_range_min = mkOption { + type = types.nullOr types.int; + default = null; + description = "Minimum port in range"; + }; + + port_range_max = mkOption { + type = types.nullOr types.int; + default = null; + description = "Maximum port in range"; + }; + + remote_cidr = mkOption { + type = types.nullOr types.str; + default = null; + description = "Remote CIDR"; + }; + + remote_group = mkOption { + type = types.nullOr types.str; + default = null; + description = 
"Remote security group name"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Rule description"; + }; + }; + }; + + securityGroupType = types.submodule { + options = { + name = mkOption { + type = types.str; + description = "Security group name"; + }; + + description = mkOption { + type = types.nullOr types.str; + default = null; + description = "Security group description"; + }; + + rules = mkOption { + type = types.listOf securityGroupRuleType; + default = []; + description = "Security group rules"; + }; + }; + }; + + tenantType = types.submodule { + options = { + org_id = mkOption { + type = types.str; + description = "Tenant organization ID"; + }; + + project_id = mkOption { + type = types.str; + description = "Tenant project ID"; + }; + + security_groups = mkOption { + type = types.listOf securityGroupType; + default = []; + description = "Tenant-scoped security groups"; + }; + + service_ip_pools = mkOption { + type = types.listOf serviceIpPoolType; + default = []; + description = "Tenant-scoped Service IP pools"; + }; + + vpcs = mkOption { + type = types.listOf vpcType; + default = []; + description = "Tenant-scoped VPCs and their nested resources"; + }; + }; + }; + + configFile = jsonFormat.generate "plasmacloud-tenant-networking.json" { + inherit (cfg) tenants; + }; + configPath = cfg.configPath; + configRelative = removePrefix "/etc/" configPath; + +in { + options.plasmacloud.tenantNetworking = { + enable = mkEnableOption "tenant-scoped PrismNET declarations"; + + endpoint = mkOption { + type = types.str; + default = "http://127.0.0.1:50081"; + description = "PrismNET gRPC endpoint"; + }; + + iamEndpoint = mkOption { + type = types.str; + default = "http://127.0.0.1:50080"; + description = "IAM gRPC endpoint used to mint tenant-scoped controller tokens"; + }; + + controllerPrincipalId = mkOption { + type = types.str; + default = "plasmacloud-reconciler"; + description = "Service account used by the 
reconciler when applying tenant declarations"; + }; + + tenants = mkOption { + type = types.listOf tenantType; + default = []; + description = "Tenant-scoped network declarations. This is separate from platform networking under plasmacloud.network."; + }; + + configPath = mkOption { + type = types.str; + default = "/etc/plasmacloud/tenant-networking.json"; + description = "Path for rendered tenant networking config"; + }; + + applyOnBoot = mkOption { + type = types.bool; + default = true; + description = "Apply declarations at boot"; + }; + + applyOnChange = mkOption { + type = types.bool; + default = true; + description = "Apply declarations when the config file changes"; + }; + + prune = mkOption { + type = types.bool; + default = false; + description = "Delete tenant network resources not declared for managed tenants"; + }; + + package = mkOption { + type = types.package; + default = pkgs.plasmacloud-reconciler or (throw "plasmacloud-reconciler package not found"); + description = "Reconciler package for tenant networking declarations"; + }; + }; + + config = mkIf cfg.enable { + assertions = [ + { + assertion = hasPrefix "/etc/" configPath; + message = "plasmacloud.tenantNetworking.configPath must be under /etc"; + } + ]; + + environment.etc."${configRelative}".source = configFile; + + systemd.services.plasmacloud-tenant-networking-apply = { + description = "Apply PlasmaCloud tenant networking declarations"; + after = + [ "network-online.target" ] + ++ optional config.services.prismnet.enable "prismnet.service" + ++ optional config.services.iam.enable "iam.service"; + wants = + [ "network-online.target" ] + ++ optional config.services.prismnet.enable "prismnet.service" + ++ optional config.services.iam.enable "iam.service"; + wantedBy = optional cfg.applyOnBoot "multi-user.target"; + + serviceConfig = { + Type = "oneshot"; + RemainAfterExit = true; + ExecStart = + "${cfg.package}/bin/plasmacloud-reconciler tenant-network" + + " --config ${configPath}" + + " 
--endpoint ${cfg.endpoint}" + + " --iam-endpoint ${cfg.iamEndpoint}" + + " --controller-principal-id ${cfg.controllerPrincipalId}" + + optionalString cfg.prune " --prune"; + }; + }; + + systemd.paths.plasmacloud-tenant-networking-apply = mkIf cfg.applyOnChange { + wantedBy = [ "multi-user.target" ]; + pathConfig = { + PathChanged = configPath; + }; + }; + }; +} diff --git a/nix/modules/plasmavmc.nix b/nix/modules/plasmavmc.nix index 6ed8f1d..aa900e1 100644 --- a/nix/modules/plasmavmc.nix +++ b/nix/modules/plasmavmc.nix @@ -329,7 +329,7 @@ in wantedBy = [ "multi-user.target" ]; after = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps; wants = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps; - path = [ pkgs.qemu pkgs.coreutils pkgs.curl ]; + path = [ pkgs.qemu pkgs.coreutils pkgs.curl pkgs.iproute2 pkgs.dnsmasq ]; preStart = lib.optionalString (localIamHealthUrl != null) '' for _ in $(seq 1 90); do @@ -377,13 +377,14 @@ in # Security hardening - relaxed for KVM access NoNewPrivileges = false; # Needed for KVM + AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ]; PrivateTmp = true; ProtectSystem = "strict"; ProtectHome = true; ReadWritePaths = [ cfg.dataDir "/run/libvirt" cfg.managedVolumeRoot ] ++ lib.optionals (coronafsDataDir != null) [ coronafsDataDir ]; - DeviceAllow = [ "/dev/kvm rw" ]; + DeviceAllow = [ "/dev/kvm rw" "/dev/net/tun rw" ]; # Start command ExecStart = "${cfg.package}/bin/plasmavmc-server --config ${plasmavmcConfigFile}"; diff --git a/nix/modules/service-port-reservations.nix b/nix/modules/service-port-reservations.nix new file mode 100644 index 0000000..7165312 --- /dev/null +++ b/nix/modules/service-port-reservations.nix @@ -0,0 +1,10 @@ +{ lib, ... }: + +{ + boot.kernel.sysctl = { + # PhotonCloud control-plane services bind within this band. 
Reserve it from the + # ephemeral allocator so outbound peer/backend connections cannot steal a service + # port during boot and block the later listener bind. + "net.ipv4.ip_local_reserved_ports" = lib.mkDefault "50051-50090"; + }; +} diff --git a/nix/test-cluster/README.md b/nix/test-cluster/README.md index 11a2fec..e63ddc3 100644 --- a/nix/test-cluster/README.md +++ b/nix/test-cluster/README.md @@ -45,6 +45,8 @@ nix run ./nix/test-cluster#cluster -- build nix run ./nix/test-cluster#cluster -- start nix run ./nix/test-cluster#cluster -- smoke nix run ./nix/test-cluster#cluster -- fresh-smoke +nix run ./nix/test-cluster#cluster -- demo-vm-webapp +nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp nix run ./nix/test-cluster#cluster -- matrix nix run ./nix/test-cluster#cluster -- fresh-matrix nix run ./nix/test-cluster#cluster -- bench-storage @@ -61,6 +63,8 @@ Preferred entrypoint for publishable verification: `nix run ./nix/test-cluster#c `make cluster-smoke` is a convenience wrapper for the same clean host-build VM validation flow. +`nix run ./nix/test-cluster#cluster -- demo-vm-webapp` creates a PrismNet-attached VM, boots a tiny web app inside the guest, stores its state in SQLite on the attached data volume, and then proves that the counter survives guest restart plus cross-worker migration. + `nix run ./nix/test-cluster#cluster -- matrix` reuses the current running cluster to exercise composed service scenarios such as `prismnet + flashdns + fiberlb`, PrismNet-backed VM hosting with `plasmavmc + prismnet + coronafs + lightningstor`, the Kubernetes-style hosting bundle, and API-gateway-mediated `nightlight` / `creditservice` flows. 
Preferred entrypoint for publishable matrix verification: `nix run ./nix/test-cluster#cluster -- fresh-matrix` diff --git a/nix/test-cluster/node01.nix b/nix/test-cluster/node01.nix index 405ac65..c04cbc3 100644 --- a/nix/test-cluster/node01.nix +++ b/nix/test-cluster/node01.nix @@ -11,6 +11,7 @@ ../modules/flaredb.nix ../modules/iam.nix ../modules/prismnet.nix + ../modules/plasmacloud-tenant-networking.nix ../modules/flashdns.nix ../modules/fiberlb.nix ../modules/k8shost.nix @@ -166,4 +167,91 @@ services.lightningstor.s3AccessKeyId = "photoncloud-test"; services.lightningstor.s3SecretKey = "photoncloud-test-secret"; + + plasmacloud.tenantNetworking = { + enable = true; + endpoint = "http://127.0.0.1:50081"; + iamEndpoint = "http://127.0.0.1:50080"; + controllerPrincipalId = "plasmacloud-reconciler"; + prune = true; + tenants = [ + { + org_id = "matrix-tenant-org"; + project_id = "matrix-tenant-project"; + security_groups = [ + { + name = "vm-default"; + description = "Default tenant SG for matrix VMs"; + rules = [ + { + direction = "ingress"; + protocol = "tcp"; + port_range_min = 22; + port_range_max = 22; + remote_cidr = "10.100.0.0/24"; + description = "Allow SSH from the cluster network"; + } + { + direction = "egress"; + protocol = "any"; + remote_cidr = "0.0.0.0/0"; + description = "Allow outbound traffic"; + } + ]; + } + { + name = "web"; + description = "HTTP ingress from default tenant members"; + rules = [ + { + direction = "ingress"; + protocol = "tcp"; + port_range_min = 80; + port_range_max = 80; + remote_group = "vm-default"; + description = "Allow HTTP from vm-default members"; + } + ]; + } + ]; + service_ip_pools = [ + { + name = "cluster-services"; + cidr_block = "10.62.200.0/24"; + description = "ClusterIP allocations for matrix tenant services"; + pool_type = "cluster_ip"; + } + { + name = "public-services"; + cidr_block = "10.62.210.0/24"; + description = "Load balancer allocations for matrix tenant services"; + pool_type = "load_balancer"; + } 
+ ]; + vpcs = [ + { + name = "matrix-vpc"; + cidr_block = "10.62.0.0/16"; + description = "Declarative PrismNET tenant network for VM matrix validation"; + router = { + name = "matrix-router"; + gateway_cidr = "10.62.0.1/24"; + mac_address = "02:00:00:00:62:01"; + external_ip = "203.0.113.62"; + description = "Tenant edge router"; + }; + subnets = [ + { + name = "matrix-subnet"; + cidr_block = "10.62.10.0/24"; + gateway_ip = "10.62.10.1"; + description = "Primary VM subnet for matrix validation"; + dhcp_enabled = true; + } + ]; + } + ]; + } + ]; + }; } diff --git a/nix/test-cluster/node06.nix b/nix/test-cluster/node06.nix index f160b64..0127616 100644 --- a/nix/test-cluster/node06.nix +++ b/nix/test-cluster/node06.nix @@ -48,6 +48,26 @@ pathPrefix = "/api/v1/subnets"; upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; } + { + name = "prismnet-routers"; + pathPrefix = "/api/v1/routers"; + upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; + } + { + name = "prismnet-security-groups"; + pathPrefix = "/api/v1/security-groups"; + upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; + } + { + name = "prismnet-ports"; + pathPrefix = "/api/v1/ports"; + upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; + } + { + name = "prismnet-service-ip-pools"; + pathPrefix = "/api/v1/service-ip-pools"; + upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; + } { name = "plasmavmc-vms"; pathPrefix = "/api/v1/vms"; diff --git a/nix/test-cluster/run-cluster.sh b/nix/test-cluster/run-cluster.sh index 87dc0b1..6d3ad09 100755 --- a/nix/test-cluster/run-cluster.sh +++ b/nix/test-cluster/run-cluster.sh @@ -35,7 +35,9 @@ SSH_PASSWORD="${PHOTON_VM_ROOT_PASSWORD:-test}" SSH_CONNECT_TIMEOUT="${PHOTON_VM_SSH_CONNECT_TIMEOUT:-5}" SSH_WAIT_TIMEOUT="${PHOTON_VM_SSH_WAIT_TIMEOUT:-300}" UNIT_WAIT_TIMEOUT="${PHOTON_VM_UNIT_WAIT_TIMEOUT:-240}" +UNIT_CHECK_TIMEOUT="${PHOTON_VM_UNIT_CHECK_TIMEOUT:-15}" 
HTTP_WAIT_TIMEOUT="${PHOTON_VM_HTTP_WAIT_TIMEOUT:-180}" +VM_DEMO_HTTP_PORT="${PHOTON_VM_DEMO_HTTP_PORT:-8080}" KVM_WAIT_TIMEOUT="${PHOTON_VM_KVM_WAIT_TIMEOUT:-180}" FLAREDB_WAIT_TIMEOUT="${PHOTON_VM_FLAREDB_WAIT_TIMEOUT:-180}" GRPCURL_MAX_MSG_SIZE="${PHOTON_VM_GRPCURL_MAX_MSG_SIZE:-1073741824}" @@ -83,6 +85,15 @@ PLASMAVMC_PROTO="${PLASMAVMC_PROTO_DIR}/plasmavmc.proto" FLAREDB_PROTO_DIR="${REPO_ROOT}/flaredb/crates/flaredb-proto/src" FLAREDB_PROTO="${FLAREDB_PROTO_DIR}/kvrpc.proto" FLAREDB_SQL_PROTO="${FLAREDB_PROTO_DIR}/sqlrpc.proto" +MATRIX_TENANT_ORG_ID="matrix-tenant-org" +MATRIX_TENANT_PROJECT_ID="matrix-tenant-project" +MATRIX_TENANT_VPC_NAME="matrix-vpc" +MATRIX_TENANT_SUBNET_NAME="matrix-subnet" +MATRIX_TENANT_ROUTER_NAME="matrix-router" +MATRIX_TENANT_DEFAULT_SG_NAME="vm-default" +MATRIX_TENANT_WEB_SG_NAME="web" +MATRIX_TENANT_CLUSTER_POOL_NAME="cluster-services" +MATRIX_TENANT_LB_POOL_NAME="public-services" # shellcheck disable=SC2034 NODE_PHASES=( @@ -530,6 +541,26 @@ wait_for_prismnet_port_detachment() { done } +wait_for_prismnet_port_absent() { + local token="$1" + local org_id="$2" + local project_id="$3" + local subnet_id="$4" + local port_id="$5" + local timeout="${6:-${HTTP_WAIT_TIMEOUT}}" + local deadline=$((SECONDS + timeout)) + + while true; do + if ! 
prismnet_get_port_json "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null 2>&1; then + return 0 + fi + if (( SECONDS >= deadline )); then + die "timed out waiting for PrismNet port ${port_id} to be deleted" + fi + sleep 2 + done +} + wait_for_vm_network_spec() { local token="$1" local get_vm_json="$2" @@ -567,6 +598,28 @@ wait_for_vm_network_spec() { done } +api_gateway_request() { + local method="$1" + local token="$2" + local path="$3" + local body="${4:-}" + local url="http://127.0.0.1:18080${path}" + + if [[ -n "${body}" ]]; then + curl -fsS \ + -X "${method}" \ + -H "authorization: Bearer ${token}" \ + -H "content-type: application/json" \ + --data "${body}" \ + "${url}" + else + curl -fsS \ + -X "${method}" \ + -H "authorization: Bearer ${token}" \ + "${url}" + fi +} + build_link() { printf '%s/build-%s' "$(vm_dir)" "$1" } @@ -2149,10 +2202,15 @@ wait_for_unit() { local deadline=$((SECONDS + timeout)) local stable_checks=0 local required_stable_checks=3 + local ssh_port + ssh_port="$(ssh_port_for_node "${node}")" log "Waiting for ${unit}.service on ${node}" while (( stable_checks < required_stable_checks )); do - if ssh_node "${node}" "state=\$(systemctl show --property=ActiveState --value ${unit}.service); sub=\$(systemctl show --property=SubState --value ${unit}.service); [[ \"\${state}\" == active && (\"\${sub}\" == running || \"\${sub}\" == exited) ]]" >/dev/null 2>&1; then + if timeout "${UNIT_CHECK_TIMEOUT}" \ + sshpass -p "${SSH_PASSWORD}" \ + ssh "${SSH_OPTS[@]}" -p "${ssh_port}" root@127.0.0.1 \ + "systemctl is-active --quiet ${unit}.service" >/dev/null 2>&1; then stable_checks=$((stable_checks + 1)) else stable_checks=0 @@ -2243,6 +2301,55 @@ EOF done } +vm_demo_url() { + local ip="$1" + local path="${2:-/}" + printf 'http://%s:%s%s\n' "${ip}" "${VM_DEMO_HTTP_PORT}" "${path}" +} + +wait_for_vm_demo_http() { + local node="$1" + local ip="$2" + local timeout="${3:-${HTTP_WAIT_TIMEOUT}}" + + wait_for_http "${node}" 
"$(vm_demo_url "${ip}" "/health")" "${timeout}" +} + +vm_demo_request_json() { + local node="$1" + local method="$2" + local ip="$3" + local path="$4" + + ssh_node_script "${node}" "${method}" "$(vm_demo_url "${ip}" "${path}")" <<'EOF' +set -euo pipefail +method="$1" +url="$2" +curl -fsS -X "${method}" "${url}" +EOF +} + +assert_vm_demo_state() { + local state_json="$1" + local expected_visits="$2" + local expected_root_boots="$3" + local expected_data_boots="$4" + + printf '%s' "${state_json}" | jq -e \ + --argjson visits "${expected_visits}" \ + --argjson root_boots "${expected_root_boots}" \ + --argjson data_boots "${expected_data_boots}" \ + --argjson listen_port "${VM_DEMO_HTTP_PORT}" \ + --arg db_path "/mnt/photon-vm-data/demo.sqlite3" ' + .status == "ok" + and .visits == $visits + and .root_boot_count == $root_boots + and .data_boot_count == $data_boots + and .listen_port == $listen_port + and .db_path == $db_path + ' >/dev/null || die "unexpected VM demo payload: ${state_json}" +} + wait_for_host_http() { local url="$1" local timeout="${2:-${HTTP_WAIT_TIMEOUT}}" @@ -2577,10 +2684,10 @@ wait_for_qemu_volume_present() { while true; do qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)" - if [[ "${qemu_processes}" == *"${volume_ref}"* ]]; then + if qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}"; then return 0 fi - if [[ -n "${alternate_ref}" && "${qemu_processes}" == *"${alternate_ref}"* ]]; then + if qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then return 0 fi if (( SECONDS >= deadline )); then @@ -2601,7 +2708,8 @@ wait_for_qemu_volume_absent() { while true; do qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)" - if [[ "${qemu_processes}" != *"${volume_ref}"* ]] && [[ -z "${alternate_ref}" || "${qemu_processes}" != *"${alternate_ref}"* ]]; then + if ! qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}" \ + && ! 
qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then return 0 fi if (( SECONDS >= deadline )); then @@ -2612,6 +2720,39 @@ wait_for_qemu_volume_absent() { done } +qemu_processes_contain_ref() { + local qemu_processes="$1" + local ref="${2:-}" + + [[ -n "${ref}" ]] || return 1 + if [[ "${qemu_processes}" == *"${ref}"* ]]; then + return 0 + fi + + if [[ "${ref}" == nbd://* ]]; then + local authority host port + authority="${ref#nbd://}" + authority="${authority%%/*}" + if [[ "${authority}" == \[*\] ]]; then + host="${authority#\[}" + host="${host%\]}" + port="10809" + elif [[ "${authority}" == *:* ]]; then + host="${authority%:*}" + port="${authority##*:}" + else + host="${authority}" + port="10809" + fi + if [[ -n "${host}" && -n "${port}" ]] \ + && [[ "${qemu_processes}" == *"\"host\":\"${host}\",\"port\":\"${port}\""* ]]; then + return 0 + fi + fi + + return 1 +} + try_get_vm_json() { local token="$1" local get_vm_json="$2" @@ -2625,6 +2766,15 @@ try_get_vm_json() { 127.0.0.1:${vm_port} plasmavmc.v1.VmService/GetVm } +vm_disk_volume_id_from_json() { + local vm_json="$1" + local disk_id="$2" + + printf '%s' "${vm_json}" | jq -r --arg disk_id "${disk_id}" ' + (.spec.disks // [])[]? 
| select(.id == $disk_id) | .source.volumeId // empty + ' | head -n1 +} + try_get_volume_json() { local token="$1" local get_volume_json="$2" @@ -3300,6 +3450,151 @@ validate_prismnet_flow() { stop_ssh_tunnel node01 "${iam_tunnel}" } +validate_tenant_networking_flow() { + log "Validating declarative tenant networking via API gateway and PrismNet" + + local iam_tunnel="" prism_tunnel="" gateway_tunnel="" + iam_tunnel="$(start_ssh_tunnel node01 15080 50080)" + prism_tunnel="$(start_ssh_tunnel node01 15081 50081)" + gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)" + trap 'stop_ssh_tunnel node06 "${gateway_tunnel}"; stop_ssh_tunnel node01 "${prism_tunnel}"; stop_ssh_tunnel node01 "${iam_tunnel}"' RETURN + + wait_for_unit node01 plasmacloud-tenant-networking-apply 120 + wait_for_http node06 http://127.0.0.1:8080/health + + ssh_node node01 "systemctl start plasmacloud-tenant-networking-apply.service" + wait_for_unit node01 plasmacloud-tenant-networking-apply 120 + + local org_id="${MATRIX_TENANT_ORG_ID}" + local project_id="${MATRIX_TENANT_PROJECT_ID}" + local principal_id="tenant-networking-smoke-$(date +%s)" + local token + token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")" + + local vpcs_json subnets_json routers_json security_groups_json pools_json + local vpc_id subnet_id router_id default_sg_id web_sg_id cluster_pool_id lb_pool_id + local allocate_response allocated_ip service_uid + + vpcs_json="$(api_gateway_request GET "${token}" "/api/v1/vpcs")" + vpc_id="$(printf '%s' "${vpcs_json}" | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" ' + .data.vpcs[] | select(.name == $name) | .id + ')" + [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "declarative tenant VPC ${MATRIX_TENANT_VPC_NAME} was not exposed through the API gateway" + printf '%s' "${vpcs_json}" | jq -e --arg name "${MATRIX_TENANT_VPC_NAME}" ' + .data.vpcs | any(.name == $name and .cidr_block == "10.62.0.0/16" and .status == "active") + ' >/dev/null || die 
"unexpected VPC payload for declarative tenant network" + + subnets_json="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}")" + subnet_id="$(printf '%s' "${subnets_json}" | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" ' + .data.subnets[] | select(.name == $name) | .id + ')" + [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "declarative tenant subnet ${MATRIX_TENANT_SUBNET_NAME} was not exposed through the API gateway" + printf '%s' "${subnets_json}" | jq -e --arg name "${MATRIX_TENANT_SUBNET_NAME}" ' + .data.subnets | any( + .name == $name and + .cidr_block == "10.62.10.0/24" and + .gateway_ip == "10.62.10.1" and + .status == "active" + ) + ' >/dev/null || die "unexpected subnet payload for declarative tenant network" + + routers_json="$(api_gateway_request GET "${token}" "/api/v1/routers?vpc_id=${vpc_id}")" + router_id="$(printf '%s' "${routers_json}" | jq -r --arg name "${MATRIX_TENANT_ROUTER_NAME}" ' + .data.routers[] | select(.name == $name) | .id + ')" + [[ -n "${router_id}" && "${router_id}" != "null" ]] || die "declarative tenant router ${MATRIX_TENANT_ROUTER_NAME} was not exposed through the API gateway" + printf '%s' "${routers_json}" | jq -e --arg name "${MATRIX_TENANT_ROUTER_NAME}" ' + .data.routers | any( + .name == $name and + .gateway_cidr == "10.62.0.1/24" and + .external_ip == "203.0.113.62" and + .status == "active" + ) + ' >/dev/null || die "unexpected router payload for declarative tenant network" + + security_groups_json="$(api_gateway_request GET "${token}" "/api/v1/security-groups")" + default_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" ' + .data.security_groups[] | select(.name == $name) | .id + ')" + web_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" ' + .data.security_groups[] | select(.name == $name) | .id + ')" + [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "default security 
group ${MATRIX_TENANT_DEFAULT_SG_NAME} missing from declarative tenant networking" + [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "security group ${MATRIX_TENANT_WEB_SG_NAME} missing from declarative tenant networking" + printf '%s' "${security_groups_json}" | jq -e \ + --arg default_name "${MATRIX_TENANT_DEFAULT_SG_NAME}" \ + --arg web_name "${MATRIX_TENANT_WEB_SG_NAME}" \ + --arg default_id "${default_sg_id}" ' + (.data.security_groups | any(.name == $default_name and (.rules | any(.direction == "egress")))) + and + (.data.security_groups | any( + .name == $web_name and + (.rules | any( + .direction == "ingress" and + .protocol == "tcp" and + .port_range_min == 80 and + .port_range_max == 80 and + .remote_group_id == $default_id + )) + )) + ' >/dev/null || die "declarative security group rules did not match expected shape" + + pools_json="$(api_gateway_request GET "${token}" "/api/v1/service-ip-pools")" + cluster_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" ' + .data.pools[] | select(.name == $name) | .id + ')" + lb_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_LB_POOL_NAME}" ' + .data.pools[] | select(.name == $name) | .id + ')" + [[ -n "${cluster_pool_id}" && "${cluster_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_CLUSTER_POOL_NAME} missing from declarative tenant networking" + [[ -n "${lb_pool_id}" && "${lb_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_LB_POOL_NAME} missing from declarative tenant networking" + printf '%s' "${pools_json}" | jq -e \ + --arg cluster_name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" \ + --arg lb_name "${MATRIX_TENANT_LB_POOL_NAME}" ' + (.data.pools | any(.name == $cluster_name and .pool_type == "cluster_ip" and .cidr_block == "10.62.200.0/24")) + and + (.data.pools | any(.name == $lb_name and .pool_type == "load_balancer" and .cidr_block == "10.62.210.0/24")) + ' >/dev/null || die "unexpected service IP pool 
payload for declarative tenant network" + + service_uid="matrix-service-$(date +%s)" + allocate_response="$(grpcurl -plaintext \ + -H "authorization: Bearer ${token}" \ + -import-path "${PRISMNET_PROTO_DIR}" \ + -proto "${PRISMNET_PROTO}" \ + -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg pool "${cluster_pool_id}" --arg service_uid "${service_uid}" '{orgId:$org, projectId:$project, poolId:$pool, serviceUid:$service_uid, requestedIp:""}')" \ + 127.0.0.1:15081 prismnet.IpamService/AllocateServiceIP)" + allocated_ip="$(printf '%s' "${allocate_response}" | jq -r '.ipAddress')" + [[ -n "${allocated_ip}" && "${allocated_ip}" != "null" ]] || die "failed to allocate a service IP from ${MATRIX_TENANT_CLUSTER_POOL_NAME}" + + api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \ + | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) != null' >/dev/null \ + || die "allocated service IP ${allocated_ip} was not reflected in the REST pool view" + + grpcurl -plaintext \ + -H "authorization: Bearer ${token}" \ + -import-path "${PRISMNET_PROTO_DIR}" \ + -proto "${PRISMNET_PROTO}" \ + -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg ip "${allocated_ip}" '{orgId:$org, projectId:$project, ipAddress:$ip}')" \ + 127.0.0.1:15081 prismnet.IpamService/ReleaseServiceIP >/dev/null + + local release_deadline=$((SECONDS + HTTP_WAIT_TIMEOUT)) + while true; do + if api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \ + | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) == null' >/dev/null; then + break + fi + if (( SECONDS >= release_deadline )); then + die "timed out waiting for released service IP ${allocated_ip} to disappear from ${MATRIX_TENANT_CLUSTER_POOL_NAME}" + fi + sleep 2 + done + + trap - RETURN + stop_ssh_tunnel node06 "${gateway_tunnel}" + stop_ssh_tunnel node01 "${prism_tunnel}" + stop_ssh_tunnel node01 "${iam_tunnel}" +} + 
validate_flashdns_flow() { log "Validating FlashDNS zone, record, and authoritative query flow" @@ -4096,9 +4391,10 @@ validate_lightningstor_distributed_storage() { validate_vm_storage_flow() { log "Validating PlasmaVMC image import, shared-volume execution, and cross-node migration" - local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel="" + local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel="" gateway_tunnel="" local node04_coronafs_tunnel="" node05_coronafs_tunnel="" local current_worker_coronafs_port="" peer_worker_coronafs_port="" + local demo_http_sg_id="" local vm_port=15082 iam_tunnel="$(start_ssh_tunnel node01 15080 50080)" prism_tunnel="$(start_ssh_tunnel node01 15081 50081)" @@ -4107,10 +4403,11 @@ validate_vm_storage_flow() { coronafs_tunnel="$(start_ssh_tunnel node01 15088 "${CORONAFS_API_PORT}")" node04_coronafs_tunnel="$(start_ssh_tunnel node04 25088 "${CORONAFS_API_PORT}")" node05_coronafs_tunnel="$(start_ssh_tunnel node05 35088 "${CORONAFS_API_PORT}")" + gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)" local image_source_path="" local vm_watch_output="" local node01_proto_root="/var/lib/plasmavmc/test-protos" - local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac="" + local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac="" default_sg_id="" web_sg_id="" cleanup_vm_storage_flow() { if [[ -n "${token:-}" && -n "${port_id:-}" && -n "${subnet_id:-}" ]]; then grpcurl -plaintext \ @@ -4120,28 +4417,15 @@ validate_vm_storage_flow() { -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \ 127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null 2>&1 || true fi - if [[ -n "${token:-}" && -n "${subnet_id:-}" && -n "${vpc_id:-}" ]]; then - grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto 
"${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \ - 127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null 2>&1 || true - fi - if [[ -n "${token:-}" && -n "${vpc_id:-}" ]]; then - grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \ - 127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null 2>&1 || true - fi if [[ -n "${image_source_path}" && "${image_source_path}" != /nix/store/* ]]; then ssh_node node01 "rm -f ${image_source_path}" >/dev/null 2>&1 || true fi if [[ -n "${vm_watch_output}" ]]; then ssh_node node01 "rm -f ${vm_watch_output} ${vm_watch_output}.pid ${vm_watch_output}.stderr" >/dev/null 2>&1 || true fi + if [[ -n "${token:-}" && -n "${demo_http_sg_id:-}" ]]; then + api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null 2>&1 || true + fi stop_ssh_tunnel node05 "${node05_coronafs_tunnel}" stop_ssh_tunnel node04 "${node04_coronafs_tunnel}" stop_ssh_tunnel node01 "${coronafs_tunnel}" @@ -4149,48 +4433,64 @@ validate_vm_storage_flow() { stop_ssh_tunnel node01 "${ls_tunnel}" stop_ssh_tunnel node01 "${prism_tunnel}" stop_ssh_tunnel node01 "${iam_tunnel}" + stop_ssh_tunnel node06 "${gateway_tunnel}" } trap cleanup_vm_storage_flow RETURN wait_for_plasmavmc_workers_registered 15082 - local org_id="vm-smoke-org" - local project_id="vm-smoke-project" + local org_id="${MATRIX_TENANT_ORG_ID}" + local project_id="${MATRIX_TENANT_PROJECT_ID}" local principal_id="plasmavmc-smoke-$(date +%s)" local token + local demo_state_json="" + local demo_visit_json="" token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")" - log "Matrix case: 
PlasmaVMC + PrismNet" - vpc_id="$(create_prismnet_vpc_with_retry \ - "${token}" \ - "${org_id}" \ - "${project_id}" \ - "vm-network-vpc" \ - "vm storage matrix networking" \ - "10.62.0.0/16" | jq -r '.vpc.id')" - [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to create PrismNet VPC for PlasmaVMC matrix" - - subnet_id="$(grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg vpc "${vpc_id}" '{vpcId:$vpc, name:"vm-network-subnet", description:"vm storage matrix subnet", cidrBlock:"10.62.10.0/24", gatewayIp:"10.62.10.1", dhcpEnabled:true}')" \ - 127.0.0.1:15081 prismnet.SubnetService/CreateSubnet | jq -r '.subnet.id')" - [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to create PrismNet subnet for PlasmaVMC matrix" - - local prismnet_port_response - prismnet_port_response="$(grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, name:"vm-network-port", description:"vm storage matrix port", ipAddress:""}')" \ - 127.0.0.1:15081 prismnet.PortService/CreatePort)" - port_id="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.id')" - port_ip="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.ipAddress')" - port_mac="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.macAddress')" - [[ -n "${port_id}" && "${port_id}" != "null" ]] || die "failed to create PrismNet port for PlasmaVMC matrix" - [[ -n "${port_ip}" && "${port_ip}" != "null" ]] || die "PrismNet port ${port_id} did not return an IP address" - [[ -n "${port_mac}" && "${port_mac}" != "null" ]] || die "PrismNet port ${port_id} did not return a MAC address" + log "Matrix case: PlasmaVMC + declarative PrismNet tenant networking" + 
vpc_id="$(api_gateway_request GET "${token}" "/api/v1/vpcs" \ + | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" '.data.vpcs[] | select(.name == $name) | .id')" + [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to locate declarative PrismNet VPC ${MATRIX_TENANT_VPC_NAME} for PlasmaVMC matrix" + subnet_id="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}" \ + | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" '.data.subnets[] | select(.name == $name) | .id')" + [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to locate declarative PrismNet subnet ${MATRIX_TENANT_SUBNET_NAME} for PlasmaVMC matrix" + default_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \ + | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')" + web_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \ + | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')" + [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_DEFAULT_SG_NAME} for PlasmaVMC matrix" + [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_WEB_SG_NAME} for PlasmaVMC matrix" + demo_http_sg_id="$( + api_gateway_request POST "${token}" "/api/v1/security-groups" "$( + jq -cn \ + --arg name "vm-demo-web-$(date +%s)" \ + --arg org "${org_id}" \ + --arg project "${project_id}" \ + '{ + name:$name, + org_id:$org, + project_id:$project, + description:"temporary ingress for the VM web demo" + }' + )" | jq -r '.data.id' + )" + [[ -n "${demo_http_sg_id}" && "${demo_http_sg_id}" != "null" ]] || die "failed to create a temporary security group for the VM web demo" + api_gateway_request POST "${token}" "/api/v1/security-groups/${demo_http_sg_id}/rules" "$( + jq -cn \ + --arg org "${org_id}" \ + --arg project "${project_id}" \ + 
--argjson port "${VM_DEMO_HTTP_PORT}" \ + '{ + org_id:$org, + project_id:$project, + direction:"ingress", + protocol:"tcp", + port_range_min:$port, + port_range_max:$port, + remote_cidr:"0.0.0.0/0", + description:"allow worker-originated HTTP checks for the VM web demo" + }' + )" >/dev/null ensure_lightningstor_bucket 15086 "${token}" "plasmavmc-images" "${org_id}" "${project_id}" wait_for_lightningstor_write_quorum 15086 "${token}" "plasmavmc-images" "PlasmaVMC image import" @@ -4311,6 +4611,10 @@ EOS --arg org "${org_id}" \ --arg project "${project_id}" \ --arg image_id "${image_id}" \ + --arg subnet_id "${subnet_id}" \ + --arg default_sg_id "${default_sg_id}" \ + --arg web_sg_id "${web_sg_id}" \ + --arg demo_http_sg_id "${demo_http_sg_id}" \ '{ name:$name, org_id:$org, @@ -4330,74 +4634,28 @@ EOS source:{type:"blank"}, size_gib:2 } + ], + network:[ + { + id:"tenant0", + subnet_id:$subnet_id, + model:"virtio-net", + security_groups:[$default_sg_id, $web_sg_id, $demo_http_sg_id] + } ] }' )" - local create_vm_grpc_json - create_vm_grpc_json="$( - jq -cn \ - --arg name "$(printf '%s' "${create_vm_rest_json}" | jq -r '.name')" \ - --arg org "${org_id}" \ - --arg project "${project_id}" \ - --arg image_id "${image_id}" \ - --arg subnet_id "${subnet_id}" \ - --arg port_id "${port_id}" \ - '{ - name:$name, - orgId:$org, - projectId:$project, - hypervisor:"HYPERVISOR_TYPE_KVM", - spec:{ - cpu:{vcpus:1, coresPerSocket:1, sockets:1}, - memory:{sizeMib:1024}, - disks:[ - { - id:"root", - source:{imageId:$image_id}, - sizeGib:4, - bus:"DISK_BUS_VIRTIO", - cache:"DISK_CACHE_WRITEBACK", - bootIndex:1 - }, - { - id:"data", - source:{blank:true}, - sizeGib:2, - bus:"DISK_BUS_VIRTIO", - cache:"DISK_CACHE_WRITEBACK" - } - ], - network:[ - { - id:"tenant0", - subnetId:$subnet_id, - portId:$port_id, - model:"NIC_MODEL_VIRTIO_NET" - } - ] - } - }' - )" - local create_response vm_id - create_response="$( - ssh_node_script node01 "${node01_proto_root}" "${token}" "$(printf '%s' 
"${create_vm_grpc_json}" | base64 | tr -d '\n')" <<'EOS' -set -euo pipefail -proto_root="$1" -token="$2" -request_b64="$3" -request_json="$(printf '%s' "${request_b64}" | base64 -d)" -grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${proto_root}/plasmavmc" \ - -proto "${proto_root}/plasmavmc/plasmavmc.proto" \ - -d "${request_json}" \ - 127.0.0.1:50082 plasmavmc.v1.VmService/CreateVm -EOS - )" - vm_id="$(printf '%s' "${create_response}" | jq -r '.id')" + create_response="$(api_gateway_request POST "${token}" "/api/v1/vms" "${create_vm_rest_json}")" + vm_id="$(printf '%s' "${create_response}" | jq -r '.data.id')" [[ -n "${vm_id}" && "${vm_id}" != "null" ]] || die "failed to create VM through PlasmaVMC" + port_id="$(printf '%s' "${create_response}" | jq -r '.data.network[0].port_id // empty')" + port_ip="$(printf '%s' "${create_response}" | jq -r '.data.network[0].ip_address // empty')" + port_mac="$(printf '%s' "${create_response}" | jq -r '.data.network[0].mac_address // empty')" + [[ -n "${port_id}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet port_id" + [[ -n "${port_ip}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet IP address" + [[ -n "${port_mac}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet MAC address" vm_watch_output="/tmp/plasmavmc-watch-${vm_id}.json" start_plasmavmc_vm_watch node01 "${node01_proto_root}" "${token}" "${org_id}" "${project_id}" "${vm_id}" "${vm_watch_output}" sleep 2 @@ -4435,7 +4693,12 @@ EOS current_worker_coronafs_port=35088 peer_worker_coronafs_port=25088 fi - wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}" >/dev/null + local vm_spec_json volume_id data_volume_id + vm_spec_json="$(wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}")" + volume_id="$(vm_disk_volume_id_from_json 
"${vm_spec_json}" "root")" + data_volume_id="$(vm_disk_volume_id_from_json "${vm_spec_json}" "data")" + [[ -n "${volume_id}" ]] || die "failed to resolve root volume ID from VM spec" + [[ -n "${data_volume_id}" ]] || die "failed to resolve data volume ID from VM spec" wait_for_prismnet_port_binding "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" "${vm_id}" >/dev/null grpcurl -plaintext \ @@ -4465,8 +4728,6 @@ EOS done log "Matrix case: PlasmaVMC + PrismNet + CoronaFS + LightningStor" - local volume_id="${vm_id}-root" - local data_volume_id="${vm_id}-data" local volume_path="${CORONAFS_VOLUME_ROOT}/${volume_id}.raw" local data_volume_path="${CORONAFS_VOLUME_ROOT}/${data_volume_id}.raw" local volume_export_json data_volume_export_json volume_uri data_volume_uri @@ -4498,6 +4759,12 @@ EOS wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM startup" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=1" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=1" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=1" + wait_for_vm_demo_http "${node_id}" "${port_ip}" + demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")" + assert_vm_demo_state "${demo_state_json}" 0 1 1 + demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")" + assert_vm_demo_state "${demo_visit_json}" 1 1 1 local get_root_volume_json get_data_volume_json local root_volume_state_json data_volume_state_json local root_attachment_generation data_attachment_generation @@ -4604,6 +4871,12 @@ EOS fi wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=2" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=2" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=2" + 
wait_for_vm_demo_http "${node_id}" "${port_ip}" + demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")" + assert_vm_demo_state "${demo_state_json}" 1 2 2 + demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")" + assert_vm_demo_state "${demo_visit_json}" 2 2 2 wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM restart" root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" @@ -4686,7 +4959,12 @@ EOS wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}" wait_for_qemu_volume_absent "${source_node}" "${volume_path}" "${source_volume_qemu_ref}" wait_for_qemu_volume_absent "${source_node}" "${data_volume_path}" "${source_data_volume_qemu_ref}" - wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=2" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=3" + wait_for_vm_demo_http "${node_id}" "${port_ip}" + demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")" + assert_vm_demo_state "${demo_state_json}" 2 3 3 + demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")" + assert_vm_demo_state "${demo_visit_json}" 3 3 3 root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" [[ "$(printf '%s' "${root_volume_state_json}" | jq -r '.attachedToNode // empty')" == "${node_id}" ]] || die "root volume ${volume_id} is not owned by migrated node ${node_id}" @@ -4768,8 +5046,12 @@ EOS [[ -n "${current_data_volume_qemu_ref}" ]] || die "worker ${node_id} did not republish an attachable local ref for ${data_volume_id} after post-migration restart" wait_for_qemu_volume_present 
"${node_id}" "${volume_path}" "${current_volume_qemu_ref}" wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}" - wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=3" - wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=3" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=4" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=4" + wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=4" + wait_for_vm_demo_http "${node_id}" "${port_ip}" + demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")" + assert_vm_demo_state "${demo_state_json}" 3 4 4 wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM post-migration restart" root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" @@ -4830,7 +5112,10 @@ EOS done wait_for_plasmavmc_vm_watch_completion node01 "${vm_watch_output}" 60 assert_plasmavmc_vm_watch_events node01 "${vm_watch_output}" "${vm_id}" - wait_for_prismnet_port_detachment "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null + wait_for_prismnet_port_absent "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null + port_id="" + api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null + demo_http_sg_id="" ssh_node "${node_id}" "bash -lc '[[ ! -d $(printf '%q' "$(vm_runtime_dir_path "${vm_id}")") ]]'" ssh_node node01 "bash -lc '[[ ! 
-f ${volume_path} ]]'" @@ -4879,28 +5164,6 @@ EOS die "shared-fs VM data volume unexpectedly persisted to LightningStor object storage" fi - grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \ - 127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null - port_id="" - grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \ - 127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null - subnet_id="" - grpcurl -plaintext \ - -H "authorization: Bearer ${token}" \ - -import-path "${PRISMNET_PROTO_DIR}" \ - -proto "${PRISMNET_PROTO}" \ - -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \ - 127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null - vpc_id="" - grpcurl -plaintext \ -H "authorization: Bearer ${token}" \ -import-path "${PLASMAVMC_PROTO_DIR}" \ @@ -6890,8 +7153,11 @@ benchmark_plasmavmc_guest_runtime() { fi local start_ns attach_ns ready_ns attach_sec ready_sec - local root_volume_id="${vm_id}-root" - local data_volume_id="${vm_id}-data" + local root_volume_id data_volume_id + root_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "root")" + data_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "data")" + [[ -n "${root_volume_id}" ]] || die "runtime benchmark VM did not expose a root volume ID" + [[ -n "${data_volume_id}" ]] || die "runtime benchmark VM did not expose a data volume ID" local root_uri data_uri start_ns="$(date +%s%N)" @@ -7434,6 +7700,7 @@ 
validate_cluster() { validate_control_plane validate_iam_flow validate_prismnet_flow + validate_tenant_networking_flow validate_flashdns_flow validate_fiberlb_flow validate_workers @@ -7484,6 +7751,16 @@ fresh_storage_smoke_requested() { storage_smoke_requested } +demo_vm_webapp_requested() { + start_requested "$@" + validate_vm_storage_flow +} + +fresh_demo_vm_webapp_requested() { + clean_requested "$@" + demo_vm_webapp_requested "$@" +} + matrix_requested() { start_requested "$@" validate_component_matrix @@ -7771,6 +8048,8 @@ Commands: fresh-smoke clean local runtime state, rebuild on the host, start, and validate storage-smoke start the storage lab (node01-05) and validate CoronaFS/LightningStor/PlasmaVMC fresh-storage-smoke clean local runtime state, rebuild node01-05 on the host, start, and validate the storage lab + demo-vm-webapp start the cluster and run the VM web app demo with persistent volume state + fresh-demo-vm-webapp clean local runtime state, rebuild on the host, start, and run the VM web app demo matrix Start the cluster and validate composed service configurations against the current running VMs fresh-matrix clean local runtime state, rebuild on the host, start, and validate composed service configurations bench-storage start the cluster and benchmark CoronaFS plus LightningStor against the current running VMs @@ -7797,6 +8076,8 @@ Examples: $0 fresh-smoke $0 storage-smoke $0 fresh-storage-smoke + $0 demo-vm-webapp + $0 fresh-demo-vm-webapp $0 matrix $0 fresh-matrix $0 bench-storage @@ -7830,6 +8111,8 @@ main() { fresh-smoke) fresh_smoke_requested "$@" ;; storage-smoke) storage_smoke_requested ;; fresh-storage-smoke) fresh_storage_smoke_requested ;; + demo-vm-webapp) demo_vm_webapp_requested "$@" ;; + fresh-demo-vm-webapp) fresh_demo_vm_webapp_requested "$@" ;; matrix) matrix_requested "$@" ;; fresh-matrix) fresh_matrix_requested "$@" ;; bench-storage) bench_storage_requested "$@" ;; diff --git a/nix/test-cluster/vm-guest-image.nix 
b/nix/test-cluster/vm-guest-image.nix index b9e0b9c..e758fab 100644 --- a/nix/test-cluster/vm-guest-image.nix +++ b/nix/test-cluster/vm-guest-image.nix @@ -1,6 +1,132 @@ { modulesPath, lib, pkgs, ... }: -{ +let + photonVmDemoApi = pkgs.writeText "photon-vm-demo-api.py" '' + import json + import os + import socket + import sqlite3 + from http import HTTPStatus + from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer + + DATA_MOUNT = "/mnt/photon-vm-data" + DB_PATH = os.path.join(DATA_MOUNT, "demo.sqlite3") + ROOT_BOOT_COUNT_PATH = "/var/lib/photon-vm-smoke/boot-count" + DATA_BOOT_COUNT_PATH = os.path.join(DATA_MOUNT, "boot-count") + CONSOLE_PATH = "/dev/ttyS0" + LISTEN_HOST = "0.0.0.0" + LISTEN_PORT = 8080 + + + def log_console(message: str) -> None: + try: + with open(CONSOLE_PATH, "a", encoding="utf-8") as console: + console.write(message + "\n") + except OSError: + pass + + + def read_int(path: str) -> int: + try: + with open(path, "r", encoding="utf-8") as handle: + return int(handle.read().strip() or "0") + except (FileNotFoundError, ValueError, OSError): + return 0 + + + def init_db() -> None: + os.makedirs(DATA_MOUNT, exist_ok=True) + conn = sqlite3.connect(DB_PATH) + try: + conn.execute( + "CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)" + ) + conn.execute( + "INSERT INTO counters (name, value) VALUES ('visits', 0) " + "ON CONFLICT(name) DO NOTHING" + ) + conn.commit() + finally: + conn.close() + + + def current_state(increment: bool = False) -> dict: + conn = sqlite3.connect(DB_PATH, timeout=30) + try: + conn.execute( + "CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)" + ) + conn.execute( + "INSERT INTO counters (name, value) VALUES ('visits', 0) " + "ON CONFLICT(name) DO NOTHING" + ) + if increment: + conn.execute( + "UPDATE counters SET value = value + 1 WHERE name = 'visits'" + ) + visits = conn.execute( + "SELECT value FROM counters WHERE name = 'visits'" + 
).fetchone()[0] + conn.commit() + finally: + conn.close() + return { + "status": "ok", + "hostname": socket.gethostname(), + "listen_port": LISTEN_PORT, + "db_path": DB_PATH, + "visits": visits, + "root_boot_count": read_int(ROOT_BOOT_COUNT_PATH), + "data_boot_count": read_int(DATA_BOOT_COUNT_PATH), + } + + + class Handler(BaseHTTPRequestHandler): + server_version = "PhotonVMDemo/1.0" + + def log_message(self, format: str, *args) -> None: + return + + def _send_json(self, payload: dict, status: int = HTTPStatus.OK) -> None: + body = json.dumps(payload, sort_keys=True).encode("utf-8") + self.send_response(status) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(body))) + self.end_headers() + self.wfile.write(body) + + def do_GET(self) -> None: + if self.path == "/health": + self._send_json({"status": "ok"}) + return + if self.path == "/state": + self._send_json(current_state()) + return + self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) + + def do_POST(self) -> None: + if self.path == "/visit": + payload = current_state(increment=True) + log_console("PHOTON_VM_DEMO_VISIT visits=%s" % payload["visits"]) + self._send_json(payload) + return + self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND) + + + def main() -> None: + init_db() + server = ThreadingHTTPServer((LISTEN_HOST, LISTEN_PORT), Handler) + log_console( + "PHOTON_VM_DEMO_WEB_READY count=%s port=%s db=%s" + % (read_int(ROOT_BOOT_COUNT_PATH), LISTEN_PORT, DB_PATH) + ) + server.serve_forever() + + + if __name__ == "__main__": + main() + ''; +in { imports = [ (modulesPath + "/virtualisation/disk-image.nix") (modulesPath + "/profiles/qemu-guest.nix") @@ -18,6 +144,7 @@ networking.hostName = "photon-vm-smoke"; networking.useDHCP = lib.mkDefault true; + networking.firewall.enable = false; services.getty.autologinUser = "root"; users.mutableUsers = false; @@ -144,5 +271,35 @@ ''; }; + systemd.services.photon-vm-demo-api = { + description = 
"PhotonCloud VM demo web app"; + wantedBy = [ "multi-user.target" ]; + wants = [ "network-online.target" "photon-vm-smoke.service" ]; + after = [ "network-online.target" "photon-vm-smoke.service" ]; + path = with pkgs; [ + bash + coreutils + python3 + util-linux + ]; + serviceConfig = { + Type = "simple"; + Restart = "always"; + RestartSec = "1"; + }; + script = '' + deadline=$((SECONDS + 60)) + while ! mountpoint -q /mnt/photon-vm-data; do + if [ "$SECONDS" -ge "$deadline" ]; then + echo "PHOTON_VM_DEMO_WEB_ERROR step=mount-timeout" >/dev/ttyS0 + exit 1 + fi + sleep 1 + done + + exec python3 ${photonVmDemoApi} + ''; + }; + system.stateVersion = "24.05"; } diff --git a/plasmavmc/Cargo.lock b/plasmavmc/Cargo.lock index 1d4a95b..8de1f77 100644 --- a/plasmavmc/Cargo.lock +++ b/plasmavmc/Cargo.lock @@ -37,17 +37,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ahash" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" -dependencies = [ - "getrandom 0.2.17", - "once_cell", - "version_check", -] - [[package]] name = "ahash" version = "0.8.12" @@ -134,15 +123,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "anyerror" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71add24cc141a1e8326f249b74c41cfd217aeb2a67c9c6cf9134d175469afd49" -dependencies = [ - "serde", -] - [[package]] name = "anyhow" version = "1.0.102" @@ -172,12 +152,6 @@ dependencies = [ "password-hash", ] -[[package]] -name = "arrayvec" -version = "0.7.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" - [[package]] name = "async-stream" version = "0.3.6" @@ -197,7 +171,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] 
@@ -208,7 +182,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -264,11 +238,9 @@ dependencies = [ "axum-core 0.4.5", "bytes", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.8.1", - "hyper-util", "itoa", "matchit 0.7.3", "memchr", @@ -277,15 +249,10 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", - "serde_json", - "serde_path_to_error", - "serde_urlencoded", - "sync_wrapper 1.0.2", - "tokio", + "sync_wrapper", "tower 0.5.3", "tower-layer", "tower-service", - "tracing", ] [[package]] @@ -298,10 +265,10 @@ dependencies = [ "bytes", "form_urlencoded", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.8.1", + "hyper", "hyper-util", "itoa", "matchit 0.8.4", @@ -314,7 +281,7 @@ dependencies = [ "serde_json", "serde_path_to_error", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tower 0.5.3", "tower-layer", @@ -331,16 +298,15 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", - "tracing", ] [[package]] @@ -351,29 +317,17 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" dependencies = [ "bytes", "futures-core", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", "mime", "pin-project-lite", - "sync_wrapper 1.0.2", + "sync_wrapper", "tower-layer", "tower-service", "tracing", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - -[[package]] -name = "base64" -version = "0.21.7" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" - [[package]] name = "base64" version = "0.22.1" @@ -386,57 +340,12 @@ version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - -[[package]] -name = "bindgen" -version = "0.72.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" -dependencies = [ - "bitflags 2.11.0", - "cexpr", - "clang-sys", - "itertools 0.13.0", - "proc-macro2", - "quote", - "regex", - "rustc-hash", - "shlex", - "syn 2.0.117", -] - -[[package]] -name = "bitflags" -version = "1.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" - [[package]] name = "bitflags" version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" -[[package]] -name = "bitvec" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c" -dependencies = [ - "funty", - "radium", - "tap", - "wyz", -] - [[package]] name = "blake2" version = "0.10.6" @@ -455,70 +364,12 @@ dependencies = [ "generic-array", ] -[[package]] -name = "borsh" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a" -dependencies = [ - "borsh-derive", - "bytes", - "cfg_aliases", -] - -[[package]] 
-name = "borsh-derive" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59" -dependencies = [ - "once_cell", - "proc-macro-crate", - "proc-macro2", - "quote", - "syn 2.0.117", -] - [[package]] name = "bumpalo" version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" -[[package]] -name = "byte-unit" -version = "5.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c6d47a4e2961fb8721bcfc54feae6455f2f64e7054f9bc67e875f0e77f4c58d" -dependencies = [ - "rust_decimal", - "schemars", - "serde", - "utf8-width", -] - -[[package]] -name = "bytecheck" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" -dependencies = [ - "bytecheck_derive", - "ptr_meta", - "simdutf8", -] - -[[package]] -name = "bytecheck_derive" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "byteorder" version = "1.5.0" @@ -531,16 +382,6 @@ version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" -[[package]] -name = "bzip2-sys" -version = "0.1.13+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14" -dependencies = [ - "cc", - "pkg-config", -] - [[package]] name = "cc" version = "1.2.57" @@ -553,15 +394,6 @@ dependencies = [ "shlex", ] -[[package]] -name = "cexpr" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" -dependencies = [ - "nom", -] - [[package]] name = "cfg-if" version = "1.0.4" @@ -574,26 +406,6 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" -[[package]] -name = "chainfire-api" -version = "0.1.0" -dependencies = [ - "async-trait", - "bincode", - "chainfire-raft", - "chainfire-storage", - "chainfire-types", - "chainfire-watch", - "futures", - "prost", - "prost-types", - "tokio", - "tokio-stream", - "tonic", - "tonic-build", - "tracing", -] - [[package]] name = "chainfire-client" version = "0.1.0" @@ -610,24 +422,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "chainfire-gossip" -version = "0.1.0" -dependencies = [ - "bincode", - "bytes", - "chainfire-types", - "dashmap", - "foca", - "futures", - "parking_lot", - "rand 0.9.2", - "serde", - "thiserror 1.0.69", - "tokio", - "tracing", -] - [[package]] name = "chainfire-proto" version = "0.1.0" @@ -641,77 +435,6 @@ dependencies = [ "tonic-build", ] -[[package]] -name = "chainfire-raft" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "bincode", - "bytes", - "chainfire-storage", - "chainfire-types", - "dashmap", - "futures", - "parking_lot", - "rand 0.8.5", - "serde", - "thiserror 1.0.69", - "tokio", - "tracing", -] - -[[package]] -name = "chainfire-server" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "axum 0.7.9", - "chainfire-api", - "chainfire-gossip", - "chainfire-raft", - "chainfire-storage", - "chainfire-types", - "chainfire-watch", - "chrono", - "clap", - "config", - "futures", - "http 1.4.0", - "http-body-util", - "metrics", - "metrics-exporter-prometheus", - "reqwest 0.12.28", - "serde", - "serde_json", - "tokio", - "toml 0.8.23", - "tonic", - "tonic-health", - "tower 0.5.3", - "tower-http", - "tracing", - "tracing-subscriber", - "uuid", -] - -[[package]] -name = 
"chainfire-storage" -version = "0.1.0" -dependencies = [ - "async-trait", - "bincode", - "bytes", - "chainfire-types", - "dashmap", - "parking_lot", - "rocksdb", - "serde", - "tokio", - "tracing", -] - [[package]] name = "chainfire-types" version = "0.1.0" @@ -721,19 +444,6 @@ dependencies = [ "thiserror 1.0.69", ] -[[package]] -name = "chainfire-watch" -version = "0.1.0" -dependencies = [ - "chainfire-types", - "dashmap", - "futures", - "parking_lot", - "tokio", - "tokio-stream", - "tracing", -] - [[package]] name = "chrono" version = "0.4.44" @@ -758,16 +468,6 @@ dependencies = [ "inout", ] -[[package]] -name = "clang-sys" -version = "1.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" -dependencies = [ - "glob", - "libc", -] - [[package]] name = "clap" version = "4.6.0" @@ -799,7 +499,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -817,15 +517,6 @@ dependencies = [ "cc", ] -[[package]] -name = "cobs" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1" -dependencies = [ - "thiserror 2.0.18", -] - [[package]] name = "colorchoice" version = "1.0.5" @@ -841,35 +532,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "config" -version = "0.13.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca" -dependencies = [ - "async-trait", - "json5", - "lazy_static", - "nom", - "pathdiff", - "ron", - "rust-ini", - "serde", - "serde_json", - "toml 0.5.11", - "yaml-rust", -] - -[[package]] -name = "core-foundation" -version = "0.9.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f" -dependencies = [ - "core-foundation-sys", 
- "libc", -] - [[package]] name = "core-foundation" version = "0.10.1" @@ -910,32 +572,6 @@ version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" -[[package]] -name = "creditservice-api" -version = "0.1.0" -dependencies = [ - "apigateway-api", - "async-trait", - "chrono", - "creditservice-proto", - "creditservice-types", - "flaredb-client", - "iam-types", - "photon-auth-client", - "prost", - "prost-types", - "reqwest 0.11.27", - "serde", - "serde_json", - "sqlx", - "thiserror 1.0.69", - "tokio", - "tonic", - "tonic-health", - "tracing", - "uuid", -] - [[package]] name = "creditservice-client" version = "0.1.0" @@ -958,17 +594,6 @@ dependencies = [ "tonic-build", ] -[[package]] -name = "creditservice-types" -version = "0.1.0" -dependencies = [ - "chrono", - "rust_decimal", - "serde", - "thiserror 1.0.69", - "uuid", -] - [[package]] name = "crossbeam-epoch" version = "0.9.18" @@ -1036,27 +661,6 @@ dependencies = [ "powerfmt", ] -[[package]] -name = "derive_more" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" -dependencies = [ - "derive_more-impl", -] - -[[package]] -name = "derive_more-impl" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", - "unicode-xid", -] - [[package]] name = "digest" version = "0.10.7" @@ -1076,15 +680,9 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] -[[package]] -name = "dlv-list" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257" - 
[[package]] name = "dotenvy" version = "0.15.7" @@ -1097,12 +695,6 @@ version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" -[[package]] -name = "dyn-clone" -version = "1.0.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" - [[package]] name = "either" version = "1.15.0" @@ -1112,15 +704,6 @@ dependencies = [ "serde", ] -[[package]] -name = "encoding_rs" -version = "0.8.35" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3" -dependencies = [ - "cfg-if", -] - [[package]] name = "equivalent" version = "1.0.2" @@ -1200,93 +783,6 @@ dependencies = [ "tonic-build", ] -[[package]] -name = "flaredb-raft" -version = "0.1.0" -dependencies = [ - "bincode", - "flaredb-proto", - "flaredb-storage", - "flaredb-types", - "openraft", - "serde", - "serde_json", - "tokio", - "tonic", - "tracing", -] - -[[package]] -name = "flaredb-server" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "axum 0.8.4", - "chrono", - "clap", - "config", - "flaredb-client", - "flaredb-proto", - "flaredb-raft", - "flaredb-sql", - "flaredb-storage", - "flaredb-types", - "futures", - "metrics", - "metrics-exporter-prometheus", - "openraft", - "prost", - "rocksdb", - "serde", - "serde_json", - "sha2", - "tokio", - "tokio-stream", - "toml 0.8.23", - "tonic", - "tonic-health", - "tracing", - "tracing-subscriber", - "uuid", -] - -[[package]] -name = "flaredb-sql" -version = "0.1.0" -dependencies = [ - "anyhow", - "bincode", - "bytes", - "flaredb-client", - "flaredb-proto", - "serde", - "serde_json", - "sqlparser", - "thiserror 1.0.69", - "tokio", - "tonic", - "tracing", -] - -[[package]] -name = "flaredb-storage" -version = "0.1.0" -dependencies = [ - "async-trait", - "rocksdb", - "thiserror 
1.0.69", -] - -[[package]] -name = "flaredb-types" -version = "0.1.0" -dependencies = [ - "anyhow", - "serde", - "thiserror 1.0.69", -] - [[package]] name = "flume" version = "0.11.1" @@ -1304,19 +800,6 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" -[[package]] -name = "foca" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f59e967f3f675997e4a4a6b99d2a75148d59d64c46211b78b4f34ebb951b273" -dependencies = [ - "bytes", - "postcard", - "rand 0.9.2", - "serde", - "tracing", -] - [[package]] name = "foldhash" version = "0.1.5" @@ -1338,12 +821,6 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" -[[package]] -name = "funty" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" - [[package]] name = "futures" version = "0.3.32" @@ -1411,7 +888,7 @@ checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -1490,37 +967,12 @@ dependencies = [ "polyval", ] -[[package]] -name = "glob" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" - [[package]] name = "glob-match" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d" -[[package]] -name = "h2" -version = "0.3.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" -dependencies = [ - "bytes", - "fnv", - "futures-core", - 
"futures-sink", - "futures-util", - "http 0.2.12", - "indexmap 2.13.0", - "slab", - "tokio", - "tokio-util", - "tracing", -] - [[package]] name = "h2" version = "0.4.13" @@ -1532,7 +984,7 @@ dependencies = [ "fnv", "futures-core", "futures-sink", - "http 1.4.0", + "http", "indexmap 2.13.0", "slab", "tokio", @@ -1545,9 +997,6 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" -dependencies = [ - "ahash 0.7.8", -] [[package]] name = "hashbrown" @@ -1555,7 +1004,7 @@ version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.12", + "ahash", ] [[package]] @@ -1629,17 +1078,6 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "http" -version = "0.2.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" -dependencies = [ - "bytes", - "fnv", - "itoa", -] - [[package]] name = "http" version = "1.4.0" @@ -1650,17 +1088,6 @@ dependencies = [ "itoa", ] -[[package]] -name = "http-body" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" -dependencies = [ - "bytes", - "http 0.2.12", - "pin-project-lite", -] - [[package]] name = "http-body" version = "1.0.1" @@ -1668,7 +1095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", - "http 1.4.0", + "http", ] [[package]] @@ -1679,8 +1106,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", 
"pin-project-lite", ] @@ -1696,30 +1123,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "hyper" -version = "0.14.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7" -dependencies = [ - "bytes", - "futures-channel", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "httparse", - "httpdate", - "itoa", - "pin-project-lite", - "socket2 0.5.10", - "tokio", - "tower-service", - "tracing", - "want", -] - [[package]] name = "hyper" version = "1.8.1" @@ -1730,9 +1133,9 @@ dependencies = [ "bytes", "futures-channel", "futures-core", - "h2 0.4.13", - "http 1.4.0", - "http-body 1.0.1", + "h2", + "http", + "http-body", "httparse", "httpdate", "itoa", @@ -1743,35 +1146,21 @@ dependencies = [ "want", ] -[[package]] -name = "hyper-rustls" -version = "0.24.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" -dependencies = [ - "futures-util", - "http 0.2.12", - "hyper 0.14.32", - "rustls 0.21.12", - "tokio", - "tokio-rustls 0.24.1", -] - [[package]] name = "hyper-rustls" version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "http 1.4.0", - "hyper 1.8.1", + "http", + "hyper", "hyper-util", "log", - "rustls 0.23.37", + "rustls", "rustls-native-certs", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.4", + "tokio-rustls", "tower-service", "webpki-roots 1.0.6", ] @@ -1782,7 +1171,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 1.8.1", + "hyper", 
"hyper-util", "pin-project-lite", "tokio", @@ -1795,13 +1184,13 @@ version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "futures-channel", "futures-util", - "http 1.4.0", - "http-body 1.0.1", - "hyper 1.8.1", + "http", + "http-body", + "hyper", "ipnet", "libc", "percent-encoding", @@ -1820,7 +1209,7 @@ dependencies = [ "apigateway-api", "argon2", "async-trait", - "base64 0.22.1", + "base64", "iam-audit", "iam-authn", "iam-authz", @@ -1860,12 +1249,12 @@ name = "iam-authn" version = "0.1.0" dependencies = [ "async-trait", - "base64 0.22.1", + "base64", "hmac", "iam-types", "jsonwebtoken", "rand 0.8.5", - "reqwest 0.12.28", + "reqwest", "serde", "serde_json", "sha2", @@ -1910,7 +1299,7 @@ dependencies = [ name = "iam-service-auth" version = "0.1.0" dependencies = [ - "http 1.4.0", + "http", "iam-client", "iam-types", "serde_json", @@ -2134,15 +1523,6 @@ version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" -[[package]] -name = "itertools" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" -dependencies = [ - "either", -] - [[package]] name = "itertools" version = "0.14.0" @@ -2178,24 +1558,13 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json5" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1" -dependencies = [ - "pest", - "pest_derive", - "serde", -] - [[package]] name = "jsonwebtoken" version = "9.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde" dependencies = [ 
- "base64 0.22.1", + "base64", "js-sys", "pem", "ring", @@ -2222,27 +1591,12 @@ version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a" dependencies = [ - "bitflags 2.11.0", + "bitflags", "libc", "plain", "redox_syscall 0.7.3", ] -[[package]] -name = "librocksdb-sys" -version = "0.17.3+10.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9" -dependencies = [ - "bindgen", - "bzip2-sys", - "cc", - "libc", - "libz-sys", - "lz4-sys", - "zstd-sys", -] - [[package]] name = "libsqlite3-sys" version = "0.30.1" @@ -2254,17 +1608,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52f4c29e2a68ac30c9087e1b772dc9f44a2b66ed44edf2266cf2be9b03dafc1" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "lightningstor-api" version = "0.1.0" @@ -2291,12 +1634,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - [[package]] name = "linux-raw-sys" version = "0.12.1" @@ -2330,22 +1667,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" -[[package]] -name = "lz4-sys" -version = "1.11.1+lz4-1.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" -dependencies = [ - "cc", - "libc", -] - -[[package]] -name = "maplit" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d" - [[package]] name = "matchers" version = "0.2.0" @@ -2389,7 +1710,7 @@ version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3045b4193fbdc5b5681f32f11070da9be3609f189a79f3390706d42587f46bb5" dependencies = [ - "ahash 0.8.12", + "ahash", "portable-atomic", ] @@ -2399,10 +1720,10 @@ version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6" dependencies = [ - "base64 0.22.1", + "base64", "http-body-util", - "hyper 1.8.1", - "hyper-rustls 0.27.7", + "hyper", + "hyper-rustls", "hyper-util", "indexmap 2.13.0", "ipnet", @@ -2435,12 +1756,6 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" -[[package]] -name = "minimal-lexical" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" - [[package]] name = "mio" version = "1.1.1" @@ -2464,22 +1779,12 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ - "bitflags 2.11.0", + "bitflags", "cfg-if", "cfg_aliases", "libc", ] -[[package]] -name = "nom" -version = "7.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" -dependencies = [ - "memchr", - "minimal-lexical", -] - [[package]] name = "nu-ansi-term" version = "0.50.3" @@ -2551,58 +1856,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openraft" -version = "0.9.21" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc22bb6823c606299be05f3cc0d2ac30216412e05352eaf192a481c12ea055fc" -dependencies = [ - "anyerror", - "byte-unit", - "chrono", - "clap", - "derive_more", - "futures", - "maplit", - "openraft-macros", - "rand 0.8.5", - "serde", - "thiserror 1.0.69", - "tokio", - "tracing", - "tracing-futures", - "validit", -] - -[[package]] -name = "openraft-macros" -version = "0.9.21" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8e5c7db6c8f2137b45a63096e09ac5a89177799b4bb0073915a5f41ee156651" -dependencies = [ - "chrono", - "proc-macro2", - "quote", - "semver", - "syn 2.0.117", -] - [[package]] name = "openssl-probe" version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" -[[package]] -name = "ordered-multimap" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a" -dependencies = [ - "dlv-list", - "hashbrown 0.12.3", -] - [[package]] name = "parking" version = "2.2.1" @@ -2643,19 +1902,13 @@ dependencies = [ "subtle", ] -[[package]] -name = "pathdiff" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" - [[package]] name = "pem" version = "3.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3" dependencies = [ - "base64 0.22.1", + "base64", "serde", ] @@ -2665,49 +1918,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" -[[package]] -name = "pest" -version = "2.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662" -dependencies = [ - "memchr", - "ucd-trie", -] - -[[package]] -name = "pest_derive" -version = "2.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77" -dependencies = [ - "pest", - "pest_generator", -] - -[[package]] -name = "pest_generator" -version = "2.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f" -dependencies = [ - "pest", - "pest_meta", - "proc-macro2", - "quote", - "syn 2.0.117", -] - -[[package]] -name = "pest_meta" -version = "2.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220" -dependencies = [ - "pest", - "sha2", -] - [[package]] name = "petgraph" version = "0.7.1" @@ -2718,14 +1928,6 @@ dependencies = [ "indexmap 2.13.0", ] -[[package]] -name = "photon-auth-client" -version = "0.1.0" -dependencies = [ - "anyhow", - "iam-service-auth", -] - [[package]] name = "pin-project" version = "1.1.11" @@ -2743,7 +1945,7 @@ checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -2838,19 +2040,17 @@ dependencies = [ "axum 0.8.4", "bytes", "chainfire-client", - "chainfire-server", "chrono", "clap", - "creditservice-api", "creditservice-client", - "creditservice-proto", "dashmap", "flaredb-client", - "flaredb-proto", - "flaredb-server", "iam-api", + "iam-authn", + "iam-authz", "iam-client", "iam-service-auth", + "iam-store", "iam-types", "lightningstor-api", "metrics-exporter-prometheus", @@ -2863,7 +2063,7 @@ dependencies = [ "prismnet-server", "prismnet-types", "prost", - "reqwest 0.12.28", + "reqwest", "serde", "serde_json", "sha2", @@ -2871,7 +2071,7 @@ dependencies = [ "thiserror 1.0.69", "tokio", 
"tokio-stream", - "toml 0.8.23", + "toml", "tonic", "tonic-health", "tracing", @@ -2907,16 +2107,6 @@ version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" -[[package]] -name = "postcard" -version = "1.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24" -dependencies = [ - "cobs", - "serde", -] - [[package]] name = "potential_utf" version = "0.1.4" @@ -2948,7 +2138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.117", + "syn", ] [[package]] @@ -2984,7 +2174,7 @@ dependencies = [ "sqlx", "thiserror 1.0.69", "tokio", - "toml 0.8.23", + "toml", "tonic", "tonic-health", "tracing", @@ -3000,15 +2190,6 @@ dependencies = [ "uuid", ] -[[package]] -name = "proc-macro-crate" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" -dependencies = [ - "toml_edit 0.23.4", -] - [[package]] name = "proc-macro2" version = "1.0.106" @@ -3035,7 +2216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ "heck", - "itertools 0.14.0", + "itertools", "log", "multimap", "once_cell", @@ -3044,7 +2225,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.117", + "syn", "tempfile", ] @@ -3055,10 +2236,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools", "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -3134,26 +2315,6 @@ version = "3.2.0" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "95067976aca6421a523e491fce939a3e65249bac4b977adee0ee9771568e8aa3" -[[package]] -name = "ptr_meta" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1" -dependencies = [ - "ptr_meta_derive", -] - -[[package]] -name = "ptr_meta_derive" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "quanta" version = "0.12.6" @@ -3181,7 +2342,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.23.37", + "rustls", "socket2 0.6.3", "thiserror 2.0.18", "tokio", @@ -3201,7 +2362,7 @@ dependencies = [ "rand 0.9.2", "ring", "rustc-hash", - "rustls 0.23.37", + "rustls", "rustls-pki-types", "slab", "thiserror 2.0.18", @@ -3239,12 +2400,6 @@ version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" -[[package]] -name = "radium" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" - [[package]] name = "rand" version = "0.8.5" @@ -3310,7 +2465,7 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.11.0", + "bitflags", ] [[package]] @@ -3319,7 +2474,7 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.11.0", + "bitflags", ] [[package]] @@ -3328,27 +2483,7 @@ version = "0.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16" dependencies = [ - "bitflags 2.11.0", -] - -[[package]] -name = "ref-cast" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.117", + "bitflags", ] [[package]] @@ -3380,84 +2515,34 @@ version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" -[[package]] -name = "rend" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" -dependencies = [ - "bytecheck", -] - -[[package]] -name = "reqwest" -version = "0.11.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", - "hyper-rustls 0.24.2", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-rustls 0.24.1", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "webpki-roots 0.25.4", - "winreg", -] - [[package]] name = "reqwest" version = "0.12.28" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "futures-core", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "http-body-util", - "hyper 1.8.1", - "hyper-rustls 0.27.7", + "hyper", + "hyper-rustls", "hyper-util", "js-sys", "log", "percent-encoding", "pin-project-lite", "quinn", - "rustls 0.23.37", + "rustls", "rustls-pki-types", "serde", "serde_json", "serde_urlencoded", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", - "tokio-rustls 0.26.4", + "tokio-rustls", "tower 0.5.3", "tower-http", "tower-service", @@ -3482,82 +2567,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "rkyv" -version = "0.7.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1" -dependencies = [ - "bitvec", - "bytecheck", - "bytes", - "hashbrown 0.12.3", - "ptr_meta", - "rend", - "rkyv_derive", - "seahash", - "tinyvec", - "uuid", -] - -[[package]] -name = "rkyv_derive" -version = "0.7.46" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "rocksdb" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f" -dependencies = [ - "libc", - "librocksdb-sys", -] - -[[package]] -name = "ron" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a" -dependencies = [ - "base64 0.13.1", - "bitflags 1.3.2", - "serde", -] - -[[package]] -name = "rust-ini" -version = "0.18.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df" -dependencies = [ - "cfg-if", - "ordered-multimap", -] - -[[package]] -name = "rust_decimal" -version = "1.40.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" -dependencies = [ - "arrayvec", - "borsh", - "bytes", - "num-traits", - "rand 0.8.5", - "rkyv", - "serde", - "serde_json", -] - [[package]] name = "rustc-hash" version = "2.1.1" @@ -3570,25 +2579,13 @@ version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.11.0", + "bitflags", "errno", "libc", "linux-raw-sys", "windows-sys 0.61.2", ] -[[package]] -name = "rustls" -version = "0.21.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" -dependencies = [ - "log", - "ring", - "rustls-webpki 0.101.7", - "sct", -] - [[package]] name = "rustls" version = "0.23.37" @@ -3600,7 +2597,7 @@ dependencies = [ "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.103.10", + "rustls-webpki", "subtle", "zeroize", ] @@ -3617,15 +2614,6 @@ dependencies = [ "security-framework", ] -[[package]] -name = "rustls-pemfile" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" -dependencies = [ - "base64 0.21.7", -] - [[package]] name = "rustls-pemfile" version = "2.2.0" @@ -3645,16 +2633,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "rustls-webpki" -version = "0.101.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "rustls-webpki" version = "0.103.10" @@ 
-3688,48 +2666,20 @@ dependencies = [ "windows-sys 0.61.2", ] -[[package]] -name = "schemars" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc" -dependencies = [ - "dyn-clone", - "ref-cast", - "serde", - "serde_json", -] - [[package]] name = "scopeguard" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sct" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" -dependencies = [ - "ring", - "untrusted", -] - -[[package]] -name = "seahash" -version = "4.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b" - [[package]] name = "security-framework" version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.11.0", - "core-foundation 0.10.1", + "bitflags", + "core-foundation", "core-foundation-sys", "libc", "security-framework-sys", @@ -3745,12 +2695,6 @@ dependencies = [ "libc", ] -[[package]] -name = "semver" -version = "1.0.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" - [[package]] name = "serde" version = "1.0.219" @@ -3768,7 +2712,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -3850,12 +2794,6 @@ dependencies = [ "libc", ] -[[package]] -name = "simdutf8" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" - [[package]] name = "simple_asn1" version = "0.6.3" @@ -3918,15 +2856,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "sqlparser" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "743b4dc2cbde11890ccb254a8fc9d537fa41b36da00de2a1c5e9848c9bc42bd7" -dependencies = [ - "log", -] - [[package]] name = "sqlx" version = "0.8.6" @@ -3945,7 +2874,7 @@ version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "crc", "crossbeam-queue", @@ -3962,7 +2891,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rustls 0.23.37", + "rustls", "serde", "serde_json", "sha2", @@ -3985,7 +2914,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.117", + "syn", ] [[package]] @@ -4007,7 +2936,7 @@ dependencies = [ "sqlx-core", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.117", + "syn", "tokio", "url", ] @@ -4019,8 +2948,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46" dependencies = [ "atoi", - "base64 0.22.1", - "bitflags 2.11.0", + "base64", + "bitflags", "byteorder", "crc", "dotenvy", @@ -4102,17 +3031,6 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" -[[package]] -name = "syn" -version = "1.0.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" -dependencies = [ - "proc-macro2", - "quote", - "unicode-ident", -] - [[package]] name = "syn" version = "2.0.117" @@ -4124,12 +3042,6 @@ dependencies = [ "unicode-ident", ] -[[package]] -name = "sync_wrapper" -version = 
"0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" - [[package]] name = "sync_wrapper" version = "1.0.2" @@ -4147,36 +3059,9 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] -[[package]] -name = "system-configuration" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" -dependencies = [ - "bitflags 1.3.2", - "core-foundation 0.9.4", - "system-configuration-sys", -] - -[[package]] -name = "system-configuration-sys" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "tap" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" - [[package]] name = "tempfile" version = "3.27.0" @@ -4216,7 +3101,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -4227,7 +3112,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -4320,17 +3205,7 @@ checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", -] - -[[package]] -name = "tokio-rustls" -version = "0.24.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081" -dependencies = [ - "rustls 0.21.12", - "tokio", + "syn", ] [[package]] @@ 
-4339,7 +3214,7 @@ version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.37", + "rustls", "tokio", ] @@ -4367,15 +3242,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "toml" -version = "0.5.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" -dependencies = [ - "serde", -] - [[package]] name = "toml" version = "0.8.23" @@ -4384,8 +3250,8 @@ checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" dependencies = [ "serde", "serde_spanned", - "toml_datetime 0.6.11", - "toml_edit 0.22.27", + "toml_datetime", + "toml_edit", ] [[package]] @@ -4397,15 +3263,6 @@ dependencies = [ "serde", ] -[[package]] -name = "toml_datetime" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3" -dependencies = [ - "serde", -] - [[package]] name = "toml_edit" version = "0.22.27" @@ -4415,30 +3272,9 @@ dependencies = [ "indexmap 2.13.0", "serde", "serde_spanned", - "toml_datetime 0.6.11", + "toml_datetime", "toml_write", - "winnow 0.7.15", -] - -[[package]] -name = "toml_edit" -version = "0.23.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93" -dependencies = [ - "indexmap 2.13.0", - "toml_datetime 0.7.0", - "toml_parser", - "winnow 0.7.15", -] - -[[package]] -name = "toml_parser" -version = "1.0.10+spec-1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420" -dependencies = [ - "winnow 1.0.0", + "winnow", ] [[package]] @@ -4456,23 +3292,23 @@ dependencies = [ "async-stream", "async-trait", "axum 0.7.9", - "base64 0.22.1", + 
"base64", "bytes", - "h2 0.4.13", - "http 1.4.0", - "http-body 1.0.1", + "h2", + "http", + "http-body", "http-body-util", - "hyper 1.8.1", + "hyper", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", "prost", "rustls-native-certs", - "rustls-pemfile 2.2.0", + "rustls-pemfile", "socket2 0.5.10", "tokio", - "tokio-rustls 0.26.4", + "tokio-rustls", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -4491,7 +3327,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -4536,7 +3372,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project-lite", - "sync_wrapper 1.0.2", + "sync_wrapper", "tokio", "tower-layer", "tower-service", @@ -4549,17 +3385,16 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags 2.11.0", + "bitflags", "bytes", "futures-util", - "http 1.4.0", - "http-body 1.0.1", + "http", + "http-body", "iri-string", "pin-project-lite", "tower 0.5.3", "tower-layer", "tower-service", - "tracing", ] [[package]] @@ -4594,7 +3429,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -4607,16 +3442,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-futures" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2" -dependencies = [ - "pin-project", - "tracing", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -4658,12 +3483,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" -[[package]] -name = "ucd-trie" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971" - [[package]] name = "unicode-bidi" version = "0.3.18" @@ -4691,12 +3510,6 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d" -[[package]] -name = "unicode-xid" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" - [[package]] name = "universal-hash" version = "0.5.1" @@ -4725,12 +3538,6 @@ dependencies = [ "serde", ] -[[package]] -name = "utf8-width" -version = "0.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1292c0d970b54115d14f2492fe0170adf21d68a1de108eebc51c1df4f346a091" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -4755,15 +3562,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "validit" -version = "0.2.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4efba0434d5a0a62d4f22070b44ce055dc18cb64d4fa98276aa523dadfaba0e7" -dependencies = [ - "anyerror", -] - [[package]] name = "valuable" version = "0.1.1" @@ -4858,7 +3656,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.117", + "syn", "wasm-bindgen-shared", ] @@ -4891,12 +3689,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki-roots" -version = "0.25.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" - [[package]] name = "webpki-roots" version = "0.26.11" @@ -4968,7 +3760,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -4979,7 +3771,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] 
[[package]] @@ -5237,22 +4029,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winnow" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8" - -[[package]] -name = "winreg" -version = "0.50.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "wit-bindgen" version = "0.51.0" @@ -5265,24 +4041,6 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" -[[package]] -name = "wyz" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed" -dependencies = [ - "tap", -] - -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yoke" version = "0.8.1" @@ -5302,7 +4060,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", "synstructure", ] @@ -5323,7 +4081,7 @@ checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", ] [[package]] @@ -5343,7 +4101,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", + "syn", "synstructure", ] @@ -5383,15 +4141,5 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.117", -] - -[[package]] -name = 
"zstd-sys" -version = "2.0.16+zstd.1.5.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" -dependencies = [ - "cc", - "pkg-config", + "syn", ] diff --git a/plasmavmc/crates/plasmavmc-kvm/src/env.rs b/plasmavmc/crates/plasmavmc-kvm/src/env.rs index 16726d9..f30d6ca 100644 --- a/plasmavmc/crates/plasmavmc-kvm/src/env.rs +++ b/plasmavmc/crates/plasmavmc-kvm/src/env.rs @@ -10,7 +10,6 @@ pub const ENV_INITRD_PATH: &str = "PLASMAVMC_INITRD_PATH"; pub const ENV_RUNTIME_DIR: &str = "PLASMAVMC_RUNTIME_DIR"; pub const ENV_QMP_TIMEOUT_SECS: &str = "PLASMAVMC_QMP_TIMEOUT_SECS"; pub const ENV_NBD_MAX_QUEUES: &str = "PLASMAVMC_NBD_MAX_QUEUES"; -pub const ENV_NBD_AIO_MODE: &str = "PLASMAVMC_NBD_AIO_MODE"; /// Resolve QEMU binary path, falling back to a provided default. pub fn resolve_qemu_path(default: impl AsRef) -> PathBuf { @@ -55,15 +54,6 @@ pub fn resolve_nbd_max_queues() -> u16 { .unwrap_or(16) } -pub fn resolve_nbd_aio_mode() -> &'static str { - match std::env::var(ENV_NBD_AIO_MODE).ok().as_deref() { - Some("threads") => "threads", - Some("native") => "native", - Some("io_uring") => "io_uring", - _ => "io_uring", - } -} - #[cfg(test)] pub(crate) fn env_test_lock() -> &'static Mutex<()> { static LOCK: OnceLock> = OnceLock::new(); @@ -161,29 +151,4 @@ mod tests { assert_eq!(resolve_nbd_max_queues(), 12); std::env::remove_var(ENV_NBD_MAX_QUEUES); } - - #[test] - fn resolve_nbd_aio_mode_defaults_to_io_uring() { - let _guard = env_test_lock().lock().unwrap(); - std::env::remove_var(ENV_NBD_AIO_MODE); - assert_eq!(resolve_nbd_aio_mode(), "io_uring"); - } - - #[test] - fn resolve_nbd_aio_mode_accepts_supported_modes() { - let _guard = env_test_lock().lock().unwrap(); - for mode in ["threads", "native", "io_uring"] { - std::env::set_var(ENV_NBD_AIO_MODE, mode); - assert_eq!(resolve_nbd_aio_mode(), mode); - } - std::env::remove_var(ENV_NBD_AIO_MODE); - } - - #[test] - fn 
resolve_nbd_aio_mode_falls_back_for_invalid_values() { - let _guard = env_test_lock().lock().unwrap(); - std::env::set_var(ENV_NBD_AIO_MODE, "bogus"); - assert_eq!(resolve_nbd_aio_mode(), "io_uring"); - std::env::remove_var(ENV_NBD_AIO_MODE); - } } diff --git a/plasmavmc/crates/plasmavmc-kvm/src/lib.rs b/plasmavmc/crates/plasmavmc-kvm/src/lib.rs index 40d4d02..5618643 100644 --- a/plasmavmc/crates/plasmavmc-kvm/src/lib.rs +++ b/plasmavmc/crates/plasmavmc-kvm/src/lib.rs @@ -4,12 +4,17 @@ //! It uses QEMU with KVM acceleration to run virtual machines. mod env; +mod network; mod qmp; use async_trait::async_trait; use env::{ - resolve_kernel_initrd, resolve_nbd_aio_mode, resolve_nbd_max_queues, resolve_qcow2_path, - resolve_qemu_path, resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH, + resolve_kernel_initrd, resolve_nbd_max_queues, resolve_qcow2_path, resolve_qemu_path, + resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH, +}; +use network::{ + cleanup_vm_networks, decode_network_states, encode_network_states, ensure_vm_networks, + tap_name_for_nic, NETWORK_STATE_KEY, }; use nix::sys::signal::{kill as nix_kill, Signal}; use nix::unistd::Pid; @@ -76,7 +81,8 @@ fn disk_aio_mode(disk: &AttachedDisk) -> Option<&'static str> { match (&disk.attachment, disk.cache) { (DiskAttachment::File { .. }, DiskCache::None) => Some("native"), (DiskAttachment::File { .. }, _) => Some("threads"), - (DiskAttachment::Nbd { .. }, _) => Some(resolve_nbd_aio_mode()), + // QEMU's NBD blockdev backend does not accept an `aio` parameter. + (DiskAttachment::Nbd { .. }, _) => None, (DiskAttachment::CephRbd { .. 
}, _) => None, } } @@ -118,6 +124,23 @@ fn bootindex_suffix(boot_index: Option) -> String { .unwrap_or_default() } +fn nic_device_driver(model: NicModel) -> &'static str { + match model { + NicModel::VirtioNet => "virtio-net-pci", + NicModel::E1000 => "e1000", + } +} + +fn nic_device_component(nic: &NetworkSpec, fallback_index: usize) -> String { + sanitize_device_component( + nic.port_id + .as_deref() + .filter(|value| !value.is_empty()) + .unwrap_or(&nic.id), + fallback_index, + ) +} + fn qmp_timeout() -> Duration { Duration::from_secs(resolve_qmp_timeout_secs()) } @@ -434,6 +457,7 @@ fn build_qemu_args( "-S".into(), ]; args.extend(build_disk_args(vm, disks)?); + args.extend(build_network_args(vm)); if let Some(kernel) = kernel { args.push("-kernel".into()); @@ -449,6 +473,31 @@ fn build_qemu_args( Ok(args) } +fn build_network_args(vm: &VirtualMachine) -> Vec { + let mut args = Vec::new(); + for (index, nic) in vm.spec.network.iter().enumerate() { + let device_id = nic_device_component(nic, index); + let tap_name = tap_name_for_nic(nic); + args.push("-netdev".into()); + args.push(format!( + "tap,id=netdev-{id},ifname={tap},script=no,downscript=no", + id = device_id, + tap = tap_name + )); + args.push("-device".into()); + let mut device = format!( + "{driver},id=net-{id},netdev=netdev-{id}", + driver = nic_device_driver(nic.model), + id = device_id + ); + if let Some(mac) = nic.mac_address.as_deref() { + device.push_str(&format!(",mac={mac}")); + } + args.push(device); + } + args +} + /// Build QEMU args for an incoming migration listener. 
fn build_qemu_args_incoming( vm: &VirtualMachine, @@ -568,6 +617,11 @@ impl HypervisorBackend for KvmBackend { tokio::fs::create_dir_all(&runtime_dir) .await .map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?; + tokio::fs::create_dir_all(&self.runtime_dir) + .await + .map_err(|e| { + Error::HypervisorError(format!("Failed to create backend runtime root: {e}")) + })?; let qmp_socket = runtime_dir.join("qmp.sock"); let console_log = runtime_dir.join("console.log"); @@ -575,16 +629,23 @@ impl HypervisorBackend for KvmBackend { let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&console_log).await; let qemu_bin = resolve_qemu_path(&self.qemu_path); + let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?; let (kernel_path, initrd_path) = resolve_kernel_initrd(); - let args = build_qemu_args( + let args = match build_qemu_args( vm, disks, &qmp_socket, &console_log, kernel_path.as_deref(), initrd_path.as_deref(), - )?; + ) { + Ok(args) => args, + Err(error) => { + let _ = cleanup_vm_networks(&network_states).await; + return Err(error); + } + }; let mut cmd = Command::new(&qemu_bin); cmd.args(&args); @@ -597,9 +658,15 @@ impl HypervisorBackend for KvmBackend { "Spawning KVM QEMU" ); - let mut child = cmd - .spawn() - .map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?; + let mut child = match cmd.spawn() { + Ok(child) => child, + Err(error) => { + let _ = cleanup_vm_networks(&network_states).await; + return Err(Error::HypervisorError(format!( + "Failed to spawn QEMU: {error}" + ))); + } + }; let pid = child.id().map(|p| p); // Wait for QMP readiness before detaching so slow nested workers do not leave orphans. 
@@ -614,6 +681,7 @@ impl HypervisorBackend for KvmBackend { let _ = child.start_kill(); let _ = child.wait().await; let _ = tokio::fs::remove_file(&qmp_socket).await; + let _ = cleanup_vm_networks(&network_states).await; return Err(err); } @@ -629,6 +697,10 @@ impl HypervisorBackend for KvmBackend { handle .backend_state .insert("console_log".into(), console_log.display().to_string()); + handle.backend_state.insert( + NETWORK_STATE_KEY.into(), + encode_network_states(&network_states)?, + ); handle.pid = pid; handle.attached_disks = disks.to_vec(); @@ -789,15 +861,21 @@ impl HypervisorBackend for KvmBackend { tokio::fs::create_dir_all(&runtime_dir) .await .map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?; + tokio::fs::create_dir_all(&self.runtime_dir) + .await + .map_err(|e| { + Error::HypervisorError(format!("Failed to create backend runtime root: {e}")) + })?; let qmp_socket = runtime_dir.join("qmp.sock"); let console_log = runtime_dir.join("console.log"); let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&console_log).await; let qemu_bin = resolve_qemu_path(&self.qemu_path); + let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?; let (kernel_path, initrd_path) = resolve_kernel_initrd(); - let args = build_qemu_args_incoming( + let args = match build_qemu_args_incoming( vm, disks, &qmp_socket, @@ -805,7 +883,13 @@ impl HypervisorBackend for KvmBackend { kernel_path.as_deref(), initrd_path.as_deref(), listen_uri, - )?; + ) { + Ok(args) => args, + Err(error) => { + let _ = cleanup_vm_networks(&network_states).await; + return Err(error); + } + }; let mut cmd = Command::new(&qemu_bin); cmd.args(&args); @@ -818,9 +902,15 @@ impl HypervisorBackend for KvmBackend { "Spawning QEMU for incoming migration" ); - let mut child = cmd - .spawn() - .map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?; + let mut child = match cmd.spawn() { + Ok(child) => child, 
+ Err(error) => { + let _ = cleanup_vm_networks(&network_states).await; + return Err(Error::HypervisorError(format!( + "Failed to spawn QEMU: {error}" + ))); + } + }; let pid = child.id().map(|p| p); if let Err(err) = wait_for_qmp(&qmp_socket, qmp_timeout()).await { @@ -834,6 +924,7 @@ impl HypervisorBackend for KvmBackend { let _ = child.start_kill(); let _ = child.wait().await; let _ = tokio::fs::remove_file(&qmp_socket).await; + let _ = cleanup_vm_networks(&network_states).await; return Err(err); } @@ -848,6 +939,10 @@ impl HypervisorBackend for KvmBackend { handle .backend_state .insert("console_log".into(), console_log.display().to_string()); + handle.backend_state.insert( + NETWORK_STATE_KEY.into(), + encode_network_states(&network_states)?, + ); handle.pid = pid; handle.attached_disks = disks.to_vec(); @@ -913,6 +1008,7 @@ impl HypervisorBackend for KvmBackend { async fn delete(&self, handle: &VmHandle) -> Result<()> { tracing::info!(vm_id = %handle.vm_id, "Deleting VM resources"); + let network_states = decode_network_states(handle.backend_state.get(NETWORK_STATE_KEY))?; if handle.pid.is_some() || self.qmp_socket_path(handle).exists() { let _ = self.kill(handle).await; @@ -940,6 +1036,7 @@ impl HypervisorBackend for KvmBackend { Error::HypervisorError(format!("Failed to remove runtime dir: {e}")) })?; } + cleanup_vm_networks(&network_states).await?; tracing::info!(vm_id = %handle.vm_id, "Deleted VM resources"); @@ -1054,6 +1151,10 @@ impl HypervisorBackend for KvmBackend { let qmp_socket = self.qmp_socket_path(handle); wait_for_qmp(&qmp_socket, qmp_timeout()).await?; let mut client = QmpClient::connect(&qmp_socket).await?; + let network_states = + ensure_vm_networks(&self.runtime_dir, std::slice::from_ref(nic)).await?; + let tap_name = tap_name_for_nic(nic); + let device_id = nic_device_component(nic, 0); // Generate MAC address if not provided let mac_addr = nic @@ -1068,23 +1169,29 @@ impl HypervisorBackend for KvmBackend { // Step 1: Add network backend 
via netdev_add let netdev_args = serde_json::json!({ "type": "tap", - "id": format!("netdev-{}", nic.id), - "ifname": format!("tap-{}", nic.id), + "id": format!("netdev-{}", device_id), + "ifname": tap_name, "script": "no", "downscript": "no" }); - client.command("netdev_add", Some(netdev_args)).await?; + if let Err(error) = client.command("netdev_add", Some(netdev_args)).await { + let _ = cleanup_vm_networks(&network_states).await; + return Err(error); + } // Step 2: Add virtio-net-pci frontend device let device_args = serde_json::json!({ - "driver": "virtio-net-pci", - "id": format!("net-{}", nic.id), - "netdev": format!("netdev-{}", nic.id), + "driver": nic_device_driver(nic.model), + "id": format!("net-{}", device_id), + "netdev": format!("netdev-{}", device_id), "mac": mac_addr }); - client.command("device_add", Some(device_args)).await?; + if let Err(error) = client.command("device_add", Some(device_args)).await { + let _ = cleanup_vm_networks(&network_states).await; + return Err(error); + } tracing::info!( vm_id = %handle.vm_id, @@ -1106,6 +1213,7 @@ impl HypervisorBackend for KvmBackend { let qmp_socket = self.qmp_socket_path(handle); wait_for_qmp(&qmp_socket, qmp_timeout()).await?; let mut client = QmpClient::connect(&qmp_socket).await?; + let nic_id = sanitize_device_component(nic_id, 0); // Remove the virtio-net-pci device (netdev backend will be cleaned up automatically) let device_args = serde_json::json!({ @@ -1281,8 +1389,6 @@ mod tests { #[test] fn build_qemu_args_coerces_writeback_cache_to_none_for_nbd_disks() { - let _guard = crate::env::env_test_lock().lock().unwrap(); - std::env::remove_var(crate::env::ENV_NBD_AIO_MODE); let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default()); let disks = vec![AttachedDisk { id: "root".into(), @@ -1300,13 +1406,11 @@ mod tests { let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap(); let args_joined = args.join(" "); 
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}")); - assert!(args_joined.contains("\"aio\":\"io_uring\"")); + assert!(!args_joined.contains("\"aio\":")); } #[test] - fn build_qemu_args_uses_io_uring_for_nbd_none_cache_by_default() { - let _guard = crate::env::env_test_lock().lock().unwrap(); - std::env::remove_var(crate::env::ENV_NBD_AIO_MODE); + fn build_qemu_args_does_not_set_aio_for_nbd_disks() { let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default()); let disks = vec![AttachedDisk { id: "root".into(), @@ -1324,32 +1428,7 @@ mod tests { let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap(); let args_joined = args.join(" "); assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}")); - assert!(args_joined.contains("\"aio\":\"io_uring\"")); - } - - #[test] - fn build_qemu_args_honors_nbd_aio_override() { - let _guard = crate::env::env_test_lock().lock().unwrap(); - std::env::set_var(crate::env::ENV_NBD_AIO_MODE, "threads"); - let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default()); - let disks = vec![AttachedDisk { - id: "root".into(), - attachment: DiskAttachment::Nbd { - uri: "nbd://10.100.0.11:11000".into(), - format: VolumeFormat::Raw, - }, - bus: DiskBus::Virtio, - cache: DiskCache::None, - boot_index: Some(1), - read_only: false, - }]; - let qmp = PathBuf::from("/tmp/qmp.sock"); - let console = PathBuf::from("/tmp/console.log"); - let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap(); - let args_joined = args.join(" "); - assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}")); - assert!(args_joined.contains("\"aio\":\"threads\"")); - std::env::remove_var(crate::env::ENV_NBD_AIO_MODE); + assert!(!args_joined.contains("\"aio\":")); } #[test] diff --git a/plasmavmc/crates/plasmavmc-kvm/src/network.rs b/plasmavmc/crates/plasmavmc-kvm/src/network.rs new file mode 100644 index 0000000..f7f26b0 --- /dev/null +++ 
b/plasmavmc/crates/plasmavmc-kvm/src/network.rs @@ -0,0 +1,678 @@ +use nix::sys::signal::{kill as nix_kill, Signal}; +use nix::unistd::Pid; +use plasmavmc_types::{Error, NetworkSpec, Result}; +use serde::{Deserialize, Serialize}; +use std::collections::HashSet; +use std::net::Ipv4Addr; +use std::os::unix::fs::MetadataExt; +use std::path::{Path, PathBuf}; +use tokio::fs; +use tokio::process::Command; +use tokio::time::{sleep, Duration, Instant}; + +pub const NETWORK_STATE_KEY: &str = "network_state"; +const DNSMASQ_START_TIMEOUT: Duration = Duration::from_secs(5); + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +pub struct NetworkRuntimeState { + pub nic_id: String, + pub subnet_id: String, + pub port_id: String, + pub bridge_name: String, + pub tap_name: String, + pub mac_address: String, + pub ip_address: String, + pub cidr_block: String, + pub gateway_ip: String, + pub dhcp_enabled: bool, + pub network_dir: String, + pub dnsmasq_conf: String, + pub hosts_file: String, + pub lease_file: String, + pub pid_file: String, + pub host_alias: String, +} + +#[derive(Debug, Clone)] +struct NicDataplaneConfig { + state: NetworkRuntimeState, + gateway_prefix: String, + dhcp_range_start: String, + dhcp_range_end: String, +} + +pub fn tap_name_for_nic(nic: &NetworkSpec) -> String { + let seed = nic + .port_id + .as_deref() + .or(nic.subnet_id.as_deref()) + .unwrap_or(&nic.id); + interface_name("pct", seed) +} + +pub fn encode_network_states(states: &[NetworkRuntimeState]) -> Result { + serde_json::to_string(states) + .map_err(|error| Error::HypervisorError(format!("failed to encode network state: {error}"))) +} + +pub fn decode_network_states(serialized: Option<&String>) -> Result> { + match serialized { + Some(value) if !value.trim().is_empty() => serde_json::from_str(value).map_err(|error| { + Error::HypervisorError(format!("failed to decode network state: {error}")) + }), + _ => Ok(Vec::new()), + } +} + +pub async fn ensure_vm_networks( + runtime_root: 
&Path, + nics: &[NetworkSpec], +) -> Result> { + let mut states = Vec::with_capacity(nics.len()); + for nic in nics { + let config = dataplane_config(runtime_root, nic)?; + states.push(config.state.clone()); + if let Err(error) = ensure_bridge(&config).await { + let _ = cleanup_vm_networks(&states).await; + return Err(error); + } + if let Err(error) = ensure_dnsmasq(&config).await { + let _ = cleanup_vm_networks(&states).await; + return Err(error); + } + if let Err(error) = ensure_tap(runtime_root, &config).await { + let _ = cleanup_vm_networks(&states).await; + return Err(error); + } + } + Ok(states) +} + +pub async fn cleanup_vm_networks(states: &[NetworkRuntimeState]) -> Result<()> { + let mut errors = Vec::new(); + let mut seen_bridges = HashSet::new(); + + for state in states.iter().rev() { + if let Err(error) = delete_interface_if_present(&state.tap_name).await { + errors.push(error.to_string()); + } + + if let Err(error) = remove_host_entry(state).await { + errors.push(error.to_string()); + } + + if !seen_bridges.insert(state.bridge_name.clone()) { + continue; + } + + match bridge_has_hosts(state).await { + Ok(true) => { + if let Err(error) = reload_dnsmasq(state).await { + errors.push(error.to_string()); + } + } + Ok(false) => { + if let Err(error) = stop_dnsmasq(state).await { + errors.push(error.to_string()); + } + if let Err(error) = delete_interface_if_present(&state.bridge_name).await { + errors.push(error.to_string()); + } + let _ = fs::remove_dir_all(&state.network_dir).await; + } + Err(error) => errors.push(error.to_string()), + } + } + + if errors.is_empty() { + Ok(()) + } else { + Err(Error::HypervisorError(format!( + "network cleanup failed: {}", + errors.join("; ") + ))) + } +} + +fn dataplane_config(runtime_root: &Path, nic: &NetworkSpec) -> Result { + let subnet_id = nic + .subnet_id + .clone() + .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires subnet_id".into()))?; + let port_id = nic.port_id.clone().unwrap_or_else(|| 
nic.id.clone()); + let mac_address = nic + .mac_address + .clone() + .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires mac_address".into()))?; + let ip_address = nic + .ip_address + .clone() + .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires ip_address".into()))?; + let cidr_block = nic + .cidr_block + .clone() + .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires cidr_block".into()))?; + let gateway_ip = nic + .gateway_ip + .clone() + .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires gateway_ip".into()))?; + + let (cidr_ip, prefix) = parse_ipv4_cidr(&cidr_block)?; + let gateway = parse_ipv4(&gateway_ip, "gateway_ip")?; + if !cidr_contains_ip(cidr_ip, prefix, gateway) { + return Err(Error::HypervisorError(format!( + "gateway {gateway_ip} is outside subnet {cidr_block}" + ))); + } + let (dhcp_range_start, dhcp_range_end) = dhcp_range(cidr_ip, prefix, gateway)?; + let bridge_name = interface_name("pcbr", &subnet_id); + let tap_name = tap_name_for_nic(nic); + let network_dir = runtime_root.join("networks").join(&subnet_id); + let host_alias = format!("port-{}", compact_id(&port_id, 12)); + let state = NetworkRuntimeState { + nic_id: nic.id.clone(), + subnet_id, + port_id, + bridge_name, + tap_name, + mac_address, + ip_address, + cidr_block, + gateway_ip: gateway_ip.clone(), + dhcp_enabled: nic.dhcp_enabled, + network_dir: network_dir.display().to_string(), + dnsmasq_conf: network_dir.join("dnsmasq.conf").display().to_string(), + hosts_file: network_dir.join("hosts").display().to_string(), + lease_file: network_dir.join("leases").display().to_string(), + pid_file: network_dir.join("dnsmasq.pid").display().to_string(), + host_alias, + }; + + Ok(NicDataplaneConfig { + gateway_prefix: format!("{gateway_ip}/{prefix}"), + dhcp_range_start, + dhcp_range_end, + state, + }) +} + +async fn ensure_bridge(config: &NicDataplaneConfig) -> Result<()> { + if !link_exists(&config.state.bridge_name).await? 
{ + run_command( + "ip", + [ + "link", + "add", + "name", + config.state.bridge_name.as_str(), + "type", + "bridge", + ], + ) + .await?; + } + + run_command( + "ip", + [ + "addr", + "replace", + config.gateway_prefix.as_str(), + "dev", + config.state.bridge_name.as_str(), + ], + ) + .await?; + run_command( + "ip", + [ + "link", + "set", + "dev", + config.state.bridge_name.as_str(), + "up", + ], + ) + .await +} + +async fn ensure_dnsmasq(config: &NicDataplaneConfig) -> Result<()> { + fs::create_dir_all(&config.state.network_dir) + .await + .map_err(|error| { + Error::HypervisorError(format!( + "failed to create network runtime directory {}: {error}", + config.state.network_dir + )) + })?; + write_hosts_file(&config.state).await?; + + let dnsmasq_conf = format!( + "interface={bridge}\n\ +bind-interfaces\n\ +except-interface=lo\n\ +port=0\n\ +dhcp-authoritative\n\ +dhcp-option=option:router,{gateway}\n\ +dhcp-range={range_start},{range_end},{mask},1h\n\ +dhcp-hostsfile={hosts_file}\n\ +dhcp-leasefile={lease_file}\n\ +pid-file={pid_file}\n", + bridge = config.state.bridge_name, + gateway = config.state.gateway_ip, + range_start = config.dhcp_range_start, + range_end = config.dhcp_range_end, + mask = cidr_mask(&config.state.cidr_block)?, + hosts_file = config.state.hosts_file, + lease_file = config.state.lease_file, + pid_file = config.state.pid_file, + ); + fs::write(&config.state.dnsmasq_conf, dnsmasq_conf) + .await + .map_err(|error| { + Error::HypervisorError(format!( + "failed to write dnsmasq config {}: {error}", + config.state.dnsmasq_conf + )) + })?; + + if dnsmasq_running(&config.state).await? 
{ + reload_dnsmasq(&config.state).await?; + return Ok(()); + } + + let mut command = Command::new("dnsmasq"); + command.arg(format!("--conf-file={}", config.state.dnsmasq_conf)); + let output = command + .output() + .await + .map_err(|error| Error::HypervisorError(format!("failed to spawn dnsmasq: {error}")))?; + if !output.status.success() { + return Err(command_failed("dnsmasq", &[], &output)); + } + + let deadline = Instant::now() + DNSMASQ_START_TIMEOUT; + while Instant::now() < deadline { + if dnsmasq_running(&config.state).await? { + return Ok(()); + } + sleep(Duration::from_millis(100)).await; + } + + Err(Error::HypervisorError(format!( + "dnsmasq did not start for bridge {}", + config.state.bridge_name + ))) +} + +async fn ensure_tap(runtime_root: &Path, config: &NicDataplaneConfig) -> Result<()> { + let _ = delete_interface_if_present(&config.state.tap_name).await; + let metadata = fs::metadata(runtime_root).await.map_err(|error| { + Error::HypervisorError(format!( + "failed to inspect runtime root {}: {error}", + runtime_root.display() + )) + })?; + let uid = metadata.uid().to_string(); + let gid = metadata.gid().to_string(); + + run_command( + "ip", + [ + "tuntap", + "add", + "dev", + config.state.tap_name.as_str(), + "mode", + "tap", + "user", + uid.as_str(), + "group", + gid.as_str(), + ], + ) + .await?; + run_command( + "ip", + [ + "link", + "set", + "dev", + config.state.tap_name.as_str(), + "master", + config.state.bridge_name.as_str(), + ], + ) + .await?; + run_command( + "ip", + ["link", "set", "dev", config.state.tap_name.as_str(), "up"], + ) + .await +} + +async fn write_hosts_file(state: &NetworkRuntimeState) -> Result<()> { + let hosts_path = PathBuf::from(&state.hosts_file); + let existing = match fs::read_to_string(&hosts_path).await { + Ok(contents) => contents, + Err(error) if error.kind() == std::io::ErrorKind::NotFound => String::new(), + Err(error) => { + return Err(Error::HypervisorError(format!( + "failed to read dnsmasq hosts file 
{}: {error}", + hosts_path.display() + ))) + } + }; + + let mut lines: Vec = existing + .lines() + .filter(|line| !line.trim().is_empty() && !line.contains(&state.host_alias)) + .map(ToOwned::to_owned) + .collect(); + lines.push(format!( + "{mac},{ip},{alias}", + mac = state.mac_address, + ip = state.ip_address, + alias = state.host_alias + )); + let mut rendered = lines.join("\n"); + if !rendered.is_empty() { + rendered.push('\n'); + } + fs::write(&hosts_path, rendered).await.map_err(|error| { + Error::HypervisorError(format!( + "failed to write dnsmasq hosts file {}: {error}", + hosts_path.display() + )) + }) +} + +async fn remove_host_entry(state: &NetworkRuntimeState) -> Result<()> { + let hosts_path = PathBuf::from(&state.hosts_file); + let existing = match fs::read_to_string(&hosts_path).await { + Ok(contents) => contents, + Err(error) if error.kind() == std::io::ErrorKind::NotFound => return Ok(()), + Err(error) => { + return Err(Error::HypervisorError(format!( + "failed to read dnsmasq hosts file {}: {error}", + hosts_path.display() + ))) + } + }; + let filtered: Vec<&str> = existing + .lines() + .filter(|line| !line.contains(&state.host_alias)) + .collect(); + let mut rendered = filtered.join("\n"); + if !rendered.is_empty() { + rendered.push('\n'); + } + fs::write(&hosts_path, rendered).await.map_err(|error| { + Error::HypervisorError(format!( + "failed to update dnsmasq hosts file {}: {error}", + hosts_path.display() + )) + }) +} + +async fn bridge_has_hosts(state: &NetworkRuntimeState) -> Result { + match fs::read_to_string(&state.hosts_file).await { + Ok(contents) => Ok(contents.lines().any(|line| !line.trim().is_empty())), + Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false), + Err(error) => Err(Error::HypervisorError(format!( + "failed to inspect dnsmasq hosts file {}: {error}", + state.hosts_file + ))), + } +} + +async fn dnsmasq_running(state: &NetworkRuntimeState) -> Result { + let pid = read_pid_file(&state.pid_file).await?; 
+ if let Some(pid) = pid { + return Ok(pid_running(pid)); + } + Ok(false) +} + +async fn reload_dnsmasq(state: &NetworkRuntimeState) -> Result<()> { + if let Some(pid) = read_pid_file(&state.pid_file).await? { + signal_pid(pid, Signal::SIGHUP)?; + } + Ok(()) +} + +async fn stop_dnsmasq(state: &NetworkRuntimeState) -> Result<()> { + if let Some(pid) = read_pid_file(&state.pid_file).await? { + signal_pid(pid, Signal::SIGTERM)?; + let deadline = Instant::now() + Duration::from_secs(2); + while pid_running(pid) && Instant::now() < deadline { + sleep(Duration::from_millis(50)).await; + } + if pid_running(pid) { + signal_pid(pid, Signal::SIGKILL)?; + } + } + let _ = fs::remove_file(&state.pid_file).await; + Ok(()) +} + +async fn read_pid_file(path: &str) -> Result> { + match fs::read_to_string(path).await { + Ok(contents) => Ok(contents.trim().parse::().ok()), + Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(None), + Err(error) => Err(Error::HypervisorError(format!( + "failed to read pid file {path}: {error}" + ))), + } +} + +fn signal_pid(pid: u32, signal: Signal) -> Result<()> { + nix_kill(Pid::from_raw(pid as i32), signal).map_err(|error| { + Error::HypervisorError(format!( + "failed to signal pid {pid} with {signal:?}: {error}" + )) + }) +} + +fn pid_running(pid: u32) -> bool { + match nix_kill(Pid::from_raw(pid as i32), None::) { + Ok(()) => true, + Err(nix::errno::Errno::EPERM) => true, + Err(nix::errno::Errno::ESRCH) => false, + Err(_) => false, + } +} + +async fn link_exists(name: &str) -> Result { + let output = Command::new("ip") + .args(["link", "show", "dev", name]) + .output() + .await + .map_err(|error| Error::HypervisorError(format!("failed to query link {name}: {error}")))?; + Ok(output.status.success()) +} + +async fn delete_interface_if_present(name: &str) -> Result<()> { + if !link_exists(name).await? 
{ + return Ok(()); + } + run_command("ip", ["link", "set", "dev", name, "down"]).await?; + run_command("ip", ["link", "delete", "dev", name]).await +} + +async fn run_command(program: &str, args: [&str; N]) -> Result<()> { + let output = Command::new(program) + .args(args) + .output() + .await + .map_err(|error| Error::HypervisorError(format!("failed to spawn {program}: {error}")))?; + if output.status.success() { + Ok(()) + } else { + Err(command_failed(program, &args, &output)) + } +} + +fn command_failed(program: &str, args: &[&str], output: &std::process::Output) -> Error { + let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string(); + let stdout = String::from_utf8_lossy(&output.stdout).trim().to_string(); + let detail = if !stderr.is_empty() { + stderr + } else if !stdout.is_empty() { + stdout + } else { + format!("exit code {:?}", output.status.code()) + }; + Error::HypervisorError(format!("{program} {} failed: {detail}", args.join(" "))) +} + +fn parse_ipv4(value: &str, field: &str) -> Result { + value + .parse() + .map_err(|error| Error::HypervisorError(format!("invalid {field} {value}: {error}"))) +} + +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8)> { + let (ip, prefix) = cidr.split_once('/').ok_or_else(|| { + Error::HypervisorError(format!("invalid cidr_block {cidr}: missing prefix")) + })?; + let ip = parse_ipv4(ip, "cidr_block ip")?; + let prefix = prefix + .parse::() + .map_err(|error| Error::HypervisorError(format!("invalid cidr_block {cidr}: {error}")))?; + if prefix > 32 { + return Err(Error::HypervisorError(format!( + "invalid cidr_block {cidr}: prefix must be <= 32" + ))); + } + Ok((ip, prefix)) +} + +fn cidr_contains_ip(cidr_ip: Ipv4Addr, prefix: u8, ip: Ipv4Addr) -> bool { + let mask = if prefix == 0 { + 0 + } else { + u32::MAX << (32 - prefix) + }; + (u32::from(cidr_ip) & mask) == (u32::from(ip) & mask) +} + +fn cidr_mask(cidr: &str) -> Result { + let (_, prefix) = parse_ipv4_cidr(cidr)?; + let mask = if prefix == 0 { + 
0 + } else { + u32::MAX << (32 - prefix) + }; + Ok(Ipv4Addr::from(mask).to_string()) +} + +fn dhcp_range(cidr_ip: Ipv4Addr, prefix: u8, gateway: Ipv4Addr) -> Result<(String, String)> { + if prefix >= 31 { + return Err(Error::UnsupportedFeature( + "KVM local bridge dataplane requires an IPv4 subnet larger than /31".into(), + )); + } + let mask = if prefix == 0 { + 0 + } else { + u32::MAX << (32 - prefix) + }; + let network = u32::from(cidr_ip) & mask; + let broadcast = network | !mask; + let gateway = u32::from(gateway); + let mut start = network + 1; + let mut end = broadcast - 1; + if start == gateway { + start += 1; + } + if end == gateway { + end = end.saturating_sub(1); + } + if start > end { + return Err(Error::HypervisorError( + "subnet does not have enough usable DHCP addresses".into(), + )); + } + Ok(( + Ipv4Addr::from(start).to_string(), + Ipv4Addr::from(end).to_string(), + )) +} + +fn compact_id(value: &str, limit: usize) -> String { + let compact: String = value + .chars() + .filter(|ch| ch.is_ascii_alphanumeric()) + .map(|ch| ch.to_ascii_lowercase()) + .collect(); + if compact.is_empty() { + "0".repeat(limit.max(1)) + } else { + compact.chars().take(limit).collect() + } +} + +fn interface_name(prefix: &str, seed: &str) -> String { + let available = 15usize.saturating_sub(prefix.len()); + format!("{prefix}{}", compact_id(seed, available.max(1))) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn tap_name_prefers_port_id() { + let nic = NetworkSpec { + id: "tenant0".into(), + port_id: Some("12345678-1234-1234-1234-1234567890ab".into()), + ..NetworkSpec::default() + }; + assert_eq!(tap_name_for_nic(&nic), "pct123456781234"); + } + + #[test] + fn interface_names_fit_kernel_limit() { + let name = interface_name("pcbr", "12345678-1234-1234-1234-1234567890ab"); + assert!(name.len() <= 15); + assert_eq!(name, "pcbr12345678123"); + } + + #[test] + fn dhcp_range_skips_gateway() { + let (start, end) = dhcp_range( + "10.62.10.0".parse().unwrap(), + 
24, + "10.62.10.1".parse().unwrap(), + ) + .unwrap(); + assert_eq!(start, "10.62.10.2"); + assert_eq!(end, "10.62.10.254"); + } + + #[test] + fn encode_and_decode_network_state_round_trip() { + let state = NetworkRuntimeState { + nic_id: "tenant0".into(), + subnet_id: "subnet".into(), + port_id: "port".into(), + bridge_name: "pcbrsubnet".into(), + tap_name: "pctport".into(), + mac_address: "02:00:00:00:62:10".into(), + ip_address: "10.62.10.10".into(), + cidr_block: "10.62.10.0/24".into(), + gateway_ip: "10.62.10.1".into(), + dhcp_enabled: true, + network_dir: "/run/libvirt/plasmavmc/networks/subnet".into(), + dnsmasq_conf: "/run/libvirt/plasmavmc/networks/subnet/dnsmasq.conf".into(), + hosts_file: "/run/libvirt/plasmavmc/networks/subnet/hosts".into(), + lease_file: "/run/libvirt/plasmavmc/networks/subnet/leases".into(), + pid_file: "/run/libvirt/plasmavmc/networks/subnet/dnsmasq.pid".into(), + host_alias: "port-port".into(), + }; + let encoded = encode_network_states(&[state.clone()]).unwrap(); + let decoded = decode_network_states(Some(&encoded)).unwrap(); + assert_eq!(decoded, vec![state]); + } +} diff --git a/plasmavmc/crates/plasmavmc-server/Cargo.toml b/plasmavmc/crates/plasmavmc-server/Cargo.toml index d572009..31e8b53 100644 --- a/plasmavmc/crates/plasmavmc-server/Cargo.toml +++ b/plasmavmc/crates/plasmavmc-server/Cargo.toml @@ -51,14 +51,12 @@ chrono = { version = "0.4", features = ["serde"] } [dev-dependencies] tempfile = { workspace = true } chrono = "0.4" -chainfire-server = { path = "../../../chainfire/crates/chainfire-server" } -flaredb-server = { path = "../../../flaredb/crates/flaredb-server" } -flaredb-proto = { path = "../../../flaredb/crates/flaredb-proto" } iam-api = { path = "../../../iam/crates/iam-api" } +iam-authn = { path = "../../../iam/crates/iam-authn" } +iam-authz = { path = "../../../iam/crates/iam-authz" } +iam-store = { path = "../../../iam/crates/iam-store" } prismnet-server = { path = "../../../prismnet/crates/prismnet-server" } 
prismnet-types = { path = "../../../prismnet/crates/prismnet-types" } -creditservice-api = { path = "../../../creditservice/crates/creditservice-api" } -creditservice-proto = { path = "../../../creditservice/crates/creditservice-proto" } [lints] workspace = true diff --git a/plasmavmc/crates/plasmavmc-server/src/artifact_store.rs b/plasmavmc/crates/plasmavmc-server/src/artifact_store.rs index 4ac4839..ef4cefb 100644 --- a/plasmavmc/crates/plasmavmc-server/src/artifact_store.rs +++ b/plasmavmc/crates/plasmavmc-server/src/artifact_store.rs @@ -82,7 +82,7 @@ struct ValidatedImportUrl { struct ImportedImageSource { source_type: String, - host: String, + host: Option, } impl ArtifactStore { @@ -177,7 +177,7 @@ impl ArtifactStore { &staging_path, source_format, source_type, - Some(host), + host, ) .await } @@ -750,14 +750,70 @@ impl ArtifactStore { .map_err(|e| Status::internal(format!("failed to create {parent:?}: {e}")))?; } + if let Some(local_path) = local_source_path(source_url)? { + self.copy_local_source(&local_path, path).await?; + return Ok(ImportedImageSource { + source_type: "file".to_string(), + host: None, + }); + } + let validated = self.validate_import_url(source_url).await?; self.download_https_source(&validated, path).await?; Ok(ImportedImageSource { source_type: "https".to_string(), - host: validated.host, + host: Some(validated.host), }) } + async fn copy_local_source(&self, source_path: &Path, path: &Path) -> Result<(), Status> { + let source = tokio::fs::canonicalize(source_path).await.map_err(|e| { + Status::invalid_argument(format!( + "failed to access local source_url path {}: {e}", + source_path.display() + )) + })?; + let metadata = tokio::fs::metadata(&source).await.map_err(|e| { + Status::invalid_argument(format!( + "failed to stat local source_url path {}: {e}", + source.display() + )) + })?; + if !metadata.is_file() { + return Err(Status::invalid_argument(format!( + "local source_url path {} is not a regular file", + source.display() + ))); 
+ } + if metadata.len() > self.max_image_import_size_bytes { + return Err(Status::resource_exhausted(format!( + "local source_url exceeds the configured maximum size of {} bytes", + self.max_image_import_size_bytes + ))); + } + + let temp_path = path.with_extension("local"); + if tokio::fs::try_exists(&temp_path).await.unwrap_or(false) { + let _ = tokio::fs::remove_file(&temp_path).await; + } + + tokio::fs::copy(&source, &temp_path).await.map_err(|e| { + Status::internal(format!( + "failed to copy local source_url {} into {}: {e}", + source.display(), + temp_path.display() + )) + })?; + tokio::fs::rename(&temp_path, path).await.map_err(|e| { + Status::internal(format!( + "failed to finalize local source_url copy into {}: {e}", + path.display() + )) + })?; + ensure_cache_file_permissions(path).await?; + Ok(()) + } + async fn convert_to_qcow2(&self, source: &Path, destination: &Path) -> Result<(), Status> { if tokio::fs::try_exists(destination) .await @@ -917,12 +973,6 @@ impl ArtifactStore { } async fn validate_import_url(&self, source_url: &str) -> Result { - if source_url.starts_with("file://") || source_url.starts_with('/') { - return Err(Status::invalid_argument( - "source_url must use https:// and may not reference local files", - )); - } - let url = Url::parse(source_url) .map_err(|e| Status::invalid_argument(format!("invalid source_url: {e}")))?; if url.scheme() != "https" { @@ -1413,6 +1463,21 @@ fn resolve_binary_path( Ok(candidate) } +fn local_source_path(source_url: &str) -> Result, Status> { + if source_url.starts_with("file://") { + let url = Url::parse(source_url) + .map_err(|e| Status::invalid_argument(format!("invalid source_url: {e}")))?; + let path = url + .to_file_path() + .map_err(|_| Status::invalid_argument("source_url file:// path must be absolute"))?; + return Ok(Some(path)); + } + if source_url.starts_with('/') { + return Ok(Some(PathBuf::from(source_url))); + } + Ok(None) +} + async fn ensure_cache_dir_permissions(path: &Path) -> Result<(), 
Status> { #[cfg(unix)] { @@ -1513,4 +1578,20 @@ mod tests { "org/project/11111111-1111-1111-1111-111111111111.qcow2" ); } + + #[test] + fn local_source_path_accepts_local_files_and_ignores_https_urls() { + assert_eq!( + local_source_path("file:///tmp/source.qcow2").unwrap(), + Some(PathBuf::from("/tmp/source.qcow2")) + ); + assert_eq!( + local_source_path("/var/lib/source.qcow2").unwrap(), + Some(PathBuf::from("/var/lib/source.qcow2")) + ); + assert_eq!( + local_source_path("https://example.com/source.qcow2").unwrap(), + None + ); + } } diff --git a/plasmavmc/crates/plasmavmc-server/src/prismnet_client.rs b/plasmavmc/crates/plasmavmc-server/src/prismnet_client.rs index e8d62dd..508ae89 100644 --- a/plasmavmc/crates/plasmavmc-server/src/prismnet_client.rs +++ b/plasmavmc/crates/plasmavmc-server/src/prismnet_client.rs @@ -1,8 +1,9 @@ //! PrismNET client for port management use prismnet_api::proto::{ - port_service_client::PortServiceClient, GetPortRequest, AttachDeviceRequest, - DetachDeviceRequest, + port_service_client::PortServiceClient, subnet_service_client::SubnetServiceClient, + AttachDeviceRequest, CreatePortRequest, DeletePortRequest, DetachDeviceRequest, GetPortRequest, + GetSubnetRequest, }; use tonic::metadata::MetadataValue; use tonic::transport::Channel; @@ -11,6 +12,7 @@ use tonic::transport::Channel; pub struct PrismNETClient { auth_token: String, port_client: PortServiceClient, + subnet_client: SubnetServiceClient, } impl PrismNETClient { @@ -18,21 +20,21 @@ impl PrismNETClient { pub async fn new( endpoint: String, auth_token: String, - ) -> Result> { - let channel = Channel::from_shared(endpoint)? 
- .connect() - .await?; - let port_client = PortServiceClient::new(channel); + ) -> Result> { + let channel = Channel::from_shared(endpoint)?.connect().await?; + let port_client = PortServiceClient::new(channel.clone()); + let subnet_client = SubnetServiceClient::new(channel); Ok(Self { auth_token, port_client, + subnet_client, }) } fn request_with_auth( auth_token: &str, payload: T, - ) -> Result, Box> { + ) -> Result, Box> { let mut request = tonic::Request::new(payload); let token_value = MetadataValue::try_from(auth_token)?; request @@ -48,15 +50,74 @@ impl PrismNETClient { project_id: &str, subnet_id: &str, port_id: &str, - ) -> Result> { - let request = Self::request_with_auth(&self.auth_token, GetPortRequest { - org_id: org_id.to_string(), - project_id: project_id.to_string(), - subnet_id: subnet_id.to_string(), - id: port_id.to_string(), - })?; + ) -> Result> { + let request = Self::request_with_auth( + &self.auth_token, + GetPortRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + subnet_id: subnet_id.to_string(), + id: port_id.to_string(), + }, + )?; let response = self.port_client.get_port(request).await?; - Ok(response.into_inner().port.ok_or("Port not found in response")?) + Ok(response + .into_inner() + .port + .ok_or("Port not found in response")?) + } + + /// Get subnet details, resolving by subnet ID when VPC ID is not known locally. + pub async fn get_subnet( + &mut self, + org_id: &str, + project_id: &str, + subnet_id: &str, + ) -> Result> { + let request = Self::request_with_auth( + &self.auth_token, + GetSubnetRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + vpc_id: String::new(), + id: subnet_id.to_string(), + }, + )?; + let response = self.subnet_client.get_subnet(request).await?; + Ok(response + .into_inner() + .subnet + .ok_or("Subnet not found in response")?) 
+ } + + /// Create a port for a VM-managed NIC + pub async fn create_port( + &mut self, + org_id: &str, + project_id: &str, + subnet_id: &str, + name: &str, + description: Option<&str>, + ip_address: Option<&str>, + security_group_ids: Vec, + ) -> Result> { + let request = Self::request_with_auth( + &self.auth_token, + CreatePortRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + subnet_id: subnet_id.to_string(), + name: name.to_string(), + description: description.unwrap_or_default().to_string(), + ip_address: ip_address.unwrap_or_default().to_string(), + security_group_ids, + }, + )?; + let response = self.port_client.create_port(request).await?; + Ok(response + .into_inner() + .port + .ok_or("Port not found in response")?) } /// Attach a device to a port @@ -68,15 +129,18 @@ impl PrismNETClient { port_id: &str, device_id: &str, device_type: i32, - ) -> Result<(), Box> { - let request = Self::request_with_auth(&self.auth_token, AttachDeviceRequest { - org_id: org_id.to_string(), - project_id: project_id.to_string(), - subnet_id: subnet_id.to_string(), - port_id: port_id.to_string(), - device_id: device_id.to_string(), - device_type, - })?; + ) -> Result<(), Box> { + let request = Self::request_with_auth( + &self.auth_token, + AttachDeviceRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + subnet_id: subnet_id.to_string(), + port_id: port_id.to_string(), + device_id: device_id.to_string(), + device_type, + }, + )?; self.port_client.attach_device(request).await?; Ok(()) } @@ -88,16 +152,40 @@ impl PrismNETClient { project_id: &str, subnet_id: &str, port_id: &str, - ) -> Result<(), Box> { - let request = Self::request_with_auth(&self.auth_token, DetachDeviceRequest { - org_id: org_id.to_string(), - project_id: project_id.to_string(), - subnet_id: subnet_id.to_string(), - port_id: port_id.to_string(), - })?; + ) -> Result<(), Box> { + let request = Self::request_with_auth( + &self.auth_token, + 
DetachDeviceRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + subnet_id: subnet_id.to_string(), + port_id: port_id.to_string(), + }, + )?; self.port_client.detach_device(request).await?; Ok(()) } + + /// Delete a port + pub async fn delete_port( + &mut self, + org_id: &str, + project_id: &str, + subnet_id: &str, + port_id: &str, + ) -> Result<(), Box> { + let request = Self::request_with_auth( + &self.auth_token, + DeletePortRequest { + org_id: org_id.to_string(), + project_id: project_id.to_string(), + subnet_id: subnet_id.to_string(), + id: port_id.to_string(), + }, + )?; + self.port_client.delete_port(request).await?; + Ok(()) + } } #[cfg(test)] diff --git a/plasmavmc/crates/plasmavmc-server/src/rest.rs b/plasmavmc/crates/plasmavmc-server/src/rest.rs index 0c40c7a..167d408 100644 --- a/plasmavmc/crates/plasmavmc-server/src/rest.rs +++ b/plasmavmc/crates/plasmavmc-server/src/rest.rs @@ -11,23 +11,22 @@ use axum::{ extract::{Path, State}, - http::StatusCode, http::HeaderMap, + http::StatusCode, routing::{get, post}, Json, Router, }; use plasmavmc_api::proto::{ - CreateVmRequest, DeleteVmRequest, GetVmRequest, ListVmsRequest, - StartVmRequest, StopVmRequest, MigrateVmRequest, VirtualMachine as ProtoVm, - vm_service_server::VmService, + vm_service_server::VmService, CreateVmRequest, DeleteVmRequest, GetVmRequest, ListVmsRequest, + MigrateVmRequest, StartVmRequest, StopVmRequest, VirtualMachine as ProtoVm, }; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use tonic::Request; use tonic::Code; +use tonic::Request; -use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext}; use crate::VmServiceImpl; +use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext}; /// REST API state #[derive(Clone)] @@ -93,6 +92,8 @@ pub struct CreateVmRequestRest { pub hypervisor: Option, #[serde(default)] pub disks: Vec, + #[serde(default)] + pub network: Vec, } #[derive(Debug, Deserialize)] @@ 
-113,6 +114,22 @@ pub enum DiskSourceRest { Blank, } +#[derive(Debug, Deserialize)] +pub struct NetworkSpecRest { + pub id: Option, + pub network_id: Option, + pub subnet_id: Option, + pub port_id: Option, + pub mac_address: Option, + pub ip_address: Option, + pub cidr_block: Option, + pub gateway_ip: Option, + pub dhcp_enabled: Option, + pub model: Option, + #[serde(default)] + pub security_groups: Vec, +} + /// VM migration request #[derive(Debug, Deserialize)] pub struct MigrateVmRequestRest { @@ -126,23 +143,115 @@ pub struct MigrateVmRequestRest { pub struct VmResponse { pub id: String, pub name: String, + pub org_id: String, + pub project_id: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub node_id: Option, pub state: String, + pub hypervisor: String, pub cpus: u32, pub memory_mb: u64, + pub network: Vec, +} + +#[derive(Debug, Serialize)] +pub struct VmNetworkResponse { + pub id: String, + pub network_id: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub subnet_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub port_id: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub mac_address: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub ip_address: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub cidr_block: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub gateway_ip: Option, + pub dhcp_enabled: bool, + pub model: String, + pub security_groups: Vec, +} + +fn nic_model_to_string(model: i32) -> String { + match plasmavmc_api::proto::NicModel::try_from(model) + .unwrap_or(plasmavmc_api::proto::NicModel::Unspecified) + { + plasmavmc_api::proto::NicModel::VirtioNet => "virtio-net".to_string(), + plasmavmc_api::proto::NicModel::E1000 => "e1000".to_string(), + plasmavmc_api::proto::NicModel::Unspecified => "unspecified".to_string(), + } +} + +fn hypervisor_to_string(hypervisor: i32) -> String { + match 
plasmavmc_api::proto::HypervisorType::try_from(hypervisor) + .unwrap_or(plasmavmc_api::proto::HypervisorType::Unspecified) + { + plasmavmc_api::proto::HypervisorType::Kvm => "kvm".to_string(), + plasmavmc_api::proto::HypervisorType::Firecracker => "firecracker".to_string(), + plasmavmc_api::proto::HypervisorType::Mvisor => "mvisor".to_string(), + plasmavmc_api::proto::HypervisorType::Unspecified => "unspecified".to_string(), + } +} + +impl From for VmNetworkResponse { + fn from(network: plasmavmc_api::proto::NetworkSpec) -> Self { + Self { + id: network.id, + network_id: network.network_id, + subnet_id: (!network.subnet_id.is_empty()).then_some(network.subnet_id), + port_id: (!network.port_id.is_empty()).then_some(network.port_id), + mac_address: (!network.mac_address.is_empty()).then_some(network.mac_address), + ip_address: (!network.ip_address.is_empty()).then_some(network.ip_address), + cidr_block: (!network.cidr_block.is_empty()).then_some(network.cidr_block), + gateway_ip: (!network.gateway_ip.is_empty()).then_some(network.gateway_ip), + dhcp_enabled: network.dhcp_enabled, + model: nic_model_to_string(network.model), + security_groups: network.security_groups, + } + } } impl From for VmResponse { fn from(vm: ProtoVm) -> Self { - let cpus = vm.spec.as_ref().and_then(|s| s.cpu.as_ref()).map(|c| c.vcpus).unwrap_or(1); - let memory_mb = vm.spec.as_ref().and_then(|s| s.memory.as_ref()).map(|m| m.size_mib).unwrap_or(512); + let cpus = vm + .spec + .as_ref() + .and_then(|s| s.cpu.as_ref()) + .map(|c| c.vcpus) + .unwrap_or(1); + let memory_mb = vm + .spec + .as_ref() + .and_then(|s| s.memory.as_ref()) + .map(|m| m.size_mib) + .unwrap_or(512); let state = format!("{:?}", vm.state()); + let network = vm + .spec + .as_ref() + .map(|spec| { + spec.network + .clone() + .into_iter() + .map(VmNetworkResponse::from) + .collect() + }) + .unwrap_or_default(); Self { id: vm.id, name: vm.name, + org_id: vm.org_id, + project_id: vm.project_id, + node_id: 
(!vm.node_id.is_empty()).then_some(vm.node_id), state, + hypervisor: hypervisor_to_string(vm.hypervisor), cpus, memory_mb, + network, } } } @@ -169,7 +278,9 @@ pub fn build_router(state: RestApiState) -> Router { async fn health_check() -> (StatusCode, Json>) { ( StatusCode::OK, - Json(SuccessResponse::new(serde_json::json!({ "status": "healthy" }))), + Json(SuccessResponse::new( + serde_json::json!({ "status": "healthy" }), + )), ) } @@ -188,11 +299,18 @@ async fn list_vms( }); req.extensions_mut().insert(tenant); - let response = state.vm_service.list_vms(req) + let response = state + .vm_service + .list_vms(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "LIST_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - let vms: Vec = response.into_inner().vms.into_iter().map(VmResponse::from).collect(); + let vms: Vec = response + .into_inner() + .vms + .into_iter() + .map(VmResponse::from) + .collect(); Ok(Json(SuccessResponse::new(VmsResponse { vms }))) } @@ -204,19 +322,29 @@ async fn create_vm( Json(req): Json, ) -> Result<(StatusCode, Json>), (StatusCode, Json)> { use plasmavmc_api::proto::{ - disk_source, CpuSpec, DiskBus, DiskCache, DiskSource, DiskSpec, HypervisorType, - MemorySpec, + disk_source, CpuSpec, DiskBus, DiskCache, DiskSource, DiskSpec, HypervisorType, MemorySpec, + NicModel as ProtoNicModel, }; - let hypervisor_type = match req.hypervisor.as_deref() { + let CreateVmRequestRest { + name, + org_id, + project_id, + vcpus, + memory_mib, + hypervisor, + disks, + network, + } = req; + + let hypervisor_type = match hypervisor.as_deref() { Some("kvm") => HypervisorType::Kvm, Some("firecracker") => HypervisorType::Firecracker, Some("mvisor") => HypervisorType::Mvisor, _ => HypervisorType::Unspecified, }; - let disks = req - .disks + let disks = disks .into_iter() .map(|disk| DiskSpec { id: disk.id, @@ -245,26 +373,46 @@ async fn create_vm( }) .collect(); + let network = network + .into_iter() + .enumerate() + .map(|(index, 
nic)| plasmavmc_api::proto::NetworkSpec { + id: nic.id.unwrap_or_else(|| format!("nic{}", index)), + network_id: nic.network_id.unwrap_or_else(|| "default".to_string()), + subnet_id: nic.subnet_id.unwrap_or_default(), + port_id: nic.port_id.unwrap_or_default(), + mac_address: nic.mac_address.unwrap_or_default(), + ip_address: nic.ip_address.unwrap_or_default(), + cidr_block: nic.cidr_block.unwrap_or_default(), + gateway_ip: nic.gateway_ip.unwrap_or_default(), + dhcp_enabled: nic.dhcp_enabled.unwrap_or(false), + model: match nic.model.as_deref() { + Some("e1000") => ProtoNicModel::E1000 as i32, + _ => ProtoNicModel::VirtioNet as i32, + }, + security_groups: nic.security_groups, + }) + .collect(); + let tenant = - resolve_rest_tenant(&state, &headers, req.org_id.as_deref(), req.project_id.as_deref()) - .await?; + resolve_rest_tenant(&state, &headers, org_id.as_deref(), project_id.as_deref()).await?; let mut grpc_req = Request::new(CreateVmRequest { - name: req.name, + name, org_id: tenant.org_id.clone(), project_id: tenant.project_id.clone(), spec: Some(plasmavmc_api::proto::VmSpec { cpu: Some(CpuSpec { - vcpus: req.vcpus.unwrap_or(1), + vcpus: vcpus.unwrap_or(1), cores_per_socket: 1, sockets: 1, cpu_model: String::new(), }), memory: Some(MemorySpec { - size_mib: req.memory_mib.unwrap_or(512), + size_mib: memory_mib.unwrap_or(512), hugepages: false, }), disks, - network: vec![], + network, boot: None, security: None, }), @@ -274,13 +422,17 @@ async fn create_vm( }); grpc_req.extensions_mut().insert(tenant); - let response = state.vm_service.create_vm(grpc_req) + let response = state + .vm_service + .create_vm(grpc_req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; Ok(( StatusCode::CREATED, - Json(SuccessResponse::new(VmResponse::from(response.into_inner()))), + Json(SuccessResponse::new(VmResponse::from( + response.into_inner(), + ))), )) } @@ -298,17 +450,15 @@ async fn 
get_vm( }); req.extensions_mut().insert(tenant); - let response = state.vm_service.get_vm(req) + let response = state + .vm_service + .get_vm(req) .await - .map_err(|e| { - if e.code() == tonic::Code::NotFound { - error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "VM not found") - } else { - error_response(StatusCode::INTERNAL_SERVER_ERROR, "GET_FAILED", &e.message()) - } - })?; + .map_err(map_tonic_status)?; - Ok(Json(SuccessResponse::new(VmResponse::from(response.into_inner())))) + Ok(Json(SuccessResponse::new(VmResponse::from( + response.into_inner(), + )))) } /// DELETE /api/v1/vms/{id} - Delete VM @@ -316,7 +466,8 @@ async fn delete_vm( State(state): State, Path(id): Path, headers: HeaderMap, -) -> Result<(StatusCode, Json>), (StatusCode, Json)> { +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ let tenant = resolve_rest_tenant(&state, &headers, None, None).await?; let mut req = Request::new(DeleteVmRequest { org_id: tenant.org_id.clone(), @@ -326,13 +477,17 @@ async fn delete_vm( }); req.extensions_mut().insert(tenant); - state.vm_service.delete_vm(req) + state + .vm_service + .delete_vm(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "DELETE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; Ok(( StatusCode::OK, - Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), + Json(SuccessResponse::new( + serde_json::json!({ "id": id, "deleted": true }), + )), )) } @@ -350,11 +505,15 @@ async fn start_vm( }); req.extensions_mut().insert(tenant); - state.vm_service.start_vm(req) + state + .vm_service + .start_vm(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "START_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - Ok(Json(SuccessResponse::new(serde_json::json!({ "id": id, "action": "started" })))) + Ok(Json(SuccessResponse::new( + serde_json::json!({ "id": id, "action": "started" }), + ))) } /// POST /api/v1/vms/{id}/stop - Stop VM @@ -373,11 +532,15 @@ async fn 
stop_vm( }); req.extensions_mut().insert(tenant); - state.vm_service.stop_vm(req) + state + .vm_service + .stop_vm(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "STOP_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - Ok(Json(SuccessResponse::new(serde_json::json!({ "id": id, "action": "stopped" })))) + Ok(Json(SuccessResponse::new( + serde_json::json!({ "id": id, "action": "stopped" }), + ))) } /// POST /api/v1/vms/{id}/migrate - Migrate VM @@ -402,9 +565,11 @@ async fn migrate_vm( .vm_service .migrate_vm(grpc_req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "MIGRATE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - Ok(Json(SuccessResponse::new(VmResponse::from(response.into_inner())))) + Ok(Json(SuccessResponse::new(VmResponse::from( + response.into_inner(), + )))) } /// Helper to create error response @@ -448,11 +613,18 @@ async fn resolve_rest_tenant( } fn map_auth_status(status: tonic::Status) -> (StatusCode, Json) { + map_tonic_status(status) +} + +fn map_tonic_status(status: tonic::Status) -> (StatusCode, Json) { let status_code = match status.code() { Code::Unauthenticated => StatusCode::UNAUTHORIZED, Code::PermissionDenied => StatusCode::FORBIDDEN, Code::InvalidArgument => StatusCode::BAD_REQUEST, Code::NotFound => StatusCode::NOT_FOUND, + Code::AlreadyExists => StatusCode::CONFLICT, + Code::ResourceExhausted => StatusCode::TOO_MANY_REQUESTS, + Code::FailedPrecondition => StatusCode::UNPROCESSABLE_ENTITY, _ => StatusCode::INTERNAL_SERVER_ERROR, }; let code = match status.code() { @@ -460,8 +632,102 @@ fn map_auth_status(status: tonic::Status) -> (StatusCode, Json) { Code::PermissionDenied => "FORBIDDEN", Code::InvalidArgument => "INVALID_ARGUMENT", Code::NotFound => "NOT_FOUND", + Code::AlreadyExists => "ALREADY_EXISTS", + Code::ResourceExhausted => "RESOURCE_EXHAUSTED", + Code::FailedPrecondition => "FAILED_PRECONDITION", _ => "INTERNAL", }; error_response(status_code, code, 
status.message()) } + +#[cfg(test)] +mod tests { + use super::*; + use plasmavmc_api::proto::{ + CpuSpec, HypervisorType, MemorySpec, NetworkSpec, NicModel, VirtualMachine as ProtoVm, + VmSpec, + }; + + #[test] + fn map_tonic_status_preserves_client_error_categories() { + let (status, Json(body)) = map_tonic_status(tonic::Status::not_found("missing vm")); + assert_eq!(status, StatusCode::NOT_FOUND); + assert_eq!(body.error.code, "NOT_FOUND"); + + let (status, Json(body)) = map_tonic_status(tonic::Status::invalid_argument("bad nic")); + assert_eq!(status, StatusCode::BAD_REQUEST); + assert_eq!(body.error.code, "INVALID_ARGUMENT"); + + let (status, Json(body)) = + map_tonic_status(tonic::Status::failed_precondition("network attach failed")); + assert_eq!(status, StatusCode::UNPROCESSABLE_ENTITY); + assert_eq!(body.error.code, "FAILED_PRECONDITION"); + } + + #[test] + fn vm_response_exposes_network_details() { + let response = VmResponse::from(ProtoVm { + id: "vm-1".to_string(), + name: "vm-1".to_string(), + org_id: "org-1".to_string(), + project_id: "proj-1".to_string(), + state: plasmavmc_api::proto::VmState::Running as i32, + spec: Some(VmSpec { + cpu: Some(CpuSpec { + vcpus: 2, + cores_per_socket: 1, + sockets: 1, + cpu_model: String::new(), + }), + memory: Some(MemorySpec { + size_mib: 2048, + hugepages: false, + }), + disks: vec![], + network: vec![NetworkSpec { + id: "nic0".to_string(), + network_id: "default".to_string(), + mac_address: "02:00:00:00:00:01".to_string(), + ip_address: "10.62.10.15".to_string(), + cidr_block: "10.62.10.0/24".to_string(), + gateway_ip: "10.62.10.1".to_string(), + dhcp_enabled: true, + model: NicModel::VirtioNet as i32, + security_groups: vec!["sg-1".to_string()], + port_id: "port-1".to_string(), + subnet_id: "subnet-1".to_string(), + }], + boot: None, + security: None, + }), + status: None, + node_id: "node04".to_string(), + hypervisor: HypervisorType::Kvm as i32, + created_at: 0, + updated_at: 0, + created_by: String::new(), + 
metadata: Default::default(), + labels: Default::default(), + }); + + assert_eq!(response.hypervisor, "kvm"); + assert_eq!(response.node_id.as_deref(), Some("node04")); + assert_eq!(response.network.len(), 1); + assert_eq!(response.network[0].port_id.as_deref(), Some("port-1")); + assert_eq!(response.network[0].subnet_id.as_deref(), Some("subnet-1")); + assert_eq!( + response.network[0].ip_address.as_deref(), + Some("10.62.10.15") + ); + assert_eq!( + response.network[0].cidr_block.as_deref(), + Some("10.62.10.0/24") + ); + assert_eq!( + response.network[0].gateway_ip.as_deref(), + Some("10.62.10.1") + ); + assert!(response.network[0].dhcp_enabled); + } +} diff --git a/plasmavmc/crates/plasmavmc-server/src/vm_service.rs b/plasmavmc/crates/plasmavmc-server/src/vm_service.rs index 5daaff1..f50cce7 100644 --- a/plasmavmc/crates/plasmavmc-server/src/vm_service.rs +++ b/plasmavmc/crates/plasmavmc-server/src/vm_service.rs @@ -45,6 +45,7 @@ use plasmavmc_types::{ NetworkSpec, NicModel, Node, NodeCapacity, NodeId, NodeState, OsType, Visibility, VmId, VmState, Volume, VolumeBacking, VolumeDriverKind, VolumeFormat, VolumeStatus, }; +use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::hash::{Hash, Hasher}; use std::sync::Arc; @@ -75,6 +76,7 @@ const NODE_ENDPOINT_LABEL: &str = "plasmavmc_endpoint"; const FAILOVER_META_KEY: &str = "failover_at"; const FAILOVER_TARGET_KEY: &str = "failover_target"; const PRISMNET_VM_DEVICE_TYPE: i32 = prismnet_api::proto::DeviceType::Vm as i32; +const PRISMNET_AUTO_PORTS_METADATA_KEY: &str = "plasmavmc.prismnet.auto_ports"; const STORE_OP_TIMEOUT: Duration = Duration::from_secs(5); /// VM Service implementation @@ -113,6 +115,13 @@ struct TenantKey { vm_id: String, } +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +struct AutoPrismnetPort { + nic_id: String, + subnet_id: String, + port_id: String, +} + impl PartialEq for TenantKey { fn eq(&self, other: &Self) -> bool { self.org_id == other.org_id @@ 
-332,6 +341,50 @@ impl VmServiceImpl { Ok(()) } + fn validate_network_reference(network: &plasmavmc_types::NetworkSpec) -> Result<(), Status> { + if network.port_id.is_some() && network.subnet_id.is_none() { + return Err(Status::invalid_argument( + "subnet_id is required when port_id is specified", + )); + } + if !network.security_groups.is_empty() && network.subnet_id.is_none() { + return Err(Status::invalid_argument( + "subnet_id is required when security_groups are specified", + )); + } + if let Some(subnet_id) = network.subnet_id.as_deref() { + Self::require_uuid(subnet_id, "subnet_id")?; + } + if let Some(port_id) = network.port_id.as_deref() { + Self::require_uuid(port_id, "port_id")?; + } + for security_group in &network.security_groups { + Self::require_uuid(security_group, "security_group_id")?; + } + Ok(()) + } + + fn validate_vm_network_references(spec: &plasmavmc_types::VmSpec) -> Result<(), Status> { + for network in &spec.network { + Self::validate_network_reference(network)?; + } + Ok(()) + } + + fn map_prismnet_error( + error: &(dyn std::error::Error + Send + Sync + 'static), + action: &str, + ) -> Status { + if let Some(status) = error.downcast_ref::() { + return Status::new( + status.code(), + format!("failed to {action}: {}", status.message()), + ); + } + + Status::failed_precondition(format!("failed to {action}: {error}")) + } + fn ensure_internal_rpc(tenant: &TenantContext) -> Result<(), Status> { if tenant.principal_kind != PrincipalKind::ServiceAccount || !tenant.principal_id.starts_with("plasmavmc-") @@ -563,6 +616,28 @@ impl VmServiceImpl { .collect() } + fn load_auto_prismnet_ports(vm: &plasmavmc_types::VirtualMachine) -> Vec { + vm.metadata + .get(PRISMNET_AUTO_PORTS_METADATA_KEY) + .and_then(|value| serde_json::from_str(value).ok()) + .unwrap_or_default() + } + + fn record_auto_prismnet_port( + vm: &mut plasmavmc_types::VirtualMachine, + auto_port: AutoPrismnetPort, + ) -> Result<(), Box> { + let mut auto_ports = 
Self::load_auto_prismnet_ports(vm); + if auto_ports.iter().all(|entry| entry != &auto_port) { + auto_ports.push(auto_port); + vm.metadata.insert( + PRISMNET_AUTO_PORTS_METADATA_KEY.to_string(), + serde_json::to_string(&auto_ports)?, + ); + } + Ok(()) + } + async fn ensure_nodes_loaded(&self) { if !self.nodes.is_empty() { return; @@ -813,6 +888,17 @@ impl VmServiceImpl { } else { Some(n.ip_address) }, + cidr_block: if n.cidr_block.is_empty() { + None + } else { + Some(n.cidr_block) + }, + gateway_ip: if n.gateway_ip.is_empty() { + None + } else { + Some(n.gateway_ip) + }, + dhcp_enabled: n.dhcp_enabled, model: Self::map_nic_model(n.model), security_groups: n.security_groups, } @@ -889,6 +975,17 @@ impl VmServiceImpl { } else { Some(n.ip_address) }, + cidr_block: if n.cidr_block.is_empty() { + None + } else { + Some(n.cidr_block) + }, + gateway_ip: if n.gateway_ip.is_empty() { + None + } else { + Some(n.gateway_ip) + }, + dhcp_enabled: n.dhcp_enabled, model: Self::map_nic_model(n.model), security_groups: n.security_groups, }) @@ -1014,6 +1111,9 @@ impl VmServiceImpl { port_id: n.port_id.clone().unwrap_or_default(), mac_address: n.mac_address.clone().unwrap_or_default(), ip_address: n.ip_address.clone().unwrap_or_default(), + cidr_block: n.cidr_block.clone().unwrap_or_default(), + gateway_ip: n.gateway_ip.clone().unwrap_or_default(), + dhcp_enabled: n.dhcp_enabled, model: match n.model { NicModel::VirtioNet => ProtoNicModel::VirtioNet as i32, NicModel::E1000 => ProtoNicModel::E1000 as i32, @@ -1625,7 +1725,7 @@ impl VmServiceImpl { async fn attach_prismnet_ports( &self, vm: &mut plasmavmc_types::VirtualMachine, - ) -> Result<(), Box> { + ) -> Result<(), Box> { let Some(ref endpoint) = self.prismnet_endpoint else { return Ok(()); }; @@ -1635,42 +1735,74 @@ impl VmServiceImpl { .await?; let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?; - for net_spec in &mut vm.spec.network { - if let (Some(ref subnet_id), Some(ref port_id)) = - 
(&net_spec.subnet_id, &net_spec.port_id) - { - // Get port details from PrismNET - let port = client - .get_port(&vm.org_id, &vm.project_id, subnet_id, port_id) - .await?; + for nic_index in 0..vm.spec.network.len() { + let nic = vm.spec.network[nic_index].clone(); + let Some(subnet_id) = nic.subnet_id.clone() else { + continue; + }; - // Update network spec with port information + let port = if let Some(port_id) = nic.port_id.clone() { + client + .get_port(&vm.org_id, &vm.project_id, &subnet_id, &port_id) + .await? + } else { + let port = client + .create_port( + &vm.org_id, + &vm.project_id, + &subnet_id, + &format!("{}-{}", vm.name, nic.id), + Some(&format!("Auto-managed port for VM {}", vm.id)), + nic.ip_address.as_deref(), + nic.security_groups.clone(), + ) + .await?; + Self::record_auto_prismnet_port( + vm, + AutoPrismnetPort { + nic_id: nic.id.clone(), + subnet_id: subnet_id.clone(), + port_id: port.id.clone(), + }, + )?; + port + }; + let subnet = client + .get_subnet(&vm.org_id, &vm.project_id, &subnet_id) + .await?; + + { + let net_spec = &mut vm.spec.network[nic_index]; + net_spec.port_id = Some(port.id.clone()); net_spec.mac_address = Some(port.mac_address.clone()); net_spec.ip_address = if port.ip_address.is_empty() { None } else { Some(port.ip_address.clone()) }; - - // Attach VM to the PrismNET port using the generated enum value. 
- client - .attach_device( - &vm.org_id, - &vm.project_id, - subnet_id, - port_id, - &vm.id.to_string(), - PRISMNET_VM_DEVICE_TYPE, - ) - .await?; - - tracing::info!( - vm_id = %vm.id, - port_id = %port_id, - mac = %port.mac_address, - "Attached VM to PrismNET port" - ); + net_spec.cidr_block = (!subnet.cidr_block.is_empty()).then_some(subnet.cidr_block); + net_spec.gateway_ip = (!subnet.gateway_ip.is_empty()).then_some(subnet.gateway_ip); + net_spec.dhcp_enabled = subnet.dhcp_enabled; } + + client + .attach_device( + &vm.org_id, + &vm.project_id, + &subnet_id, + &port.id, + &vm.id.to_string(), + PRISMNET_VM_DEVICE_TYPE, + ) + .await?; + + tracing::info!( + vm_id = %vm.id, + nic_id = %nic.id, + port_id = %port.id, + mac = %port.mac_address, + "Attached VM to PrismNET port" + ); } Ok(()) } @@ -1679,7 +1811,7 @@ impl VmServiceImpl { async fn detach_prismnet_ports( &self, vm: &plasmavmc_types::VirtualMachine, - ) -> Result<(), Box> { + ) -> Result<(), Box> { let Some(ref endpoint) = self.prismnet_endpoint else { return Ok(()); }; @@ -1708,6 +1840,53 @@ impl VmServiceImpl { Ok(()) } + async fn delete_auto_prismnet_ports( + &self, + vm: &plasmavmc_types::VirtualMachine, + ) -> Result<(), Box> { + let Some(ref endpoint) = self.prismnet_endpoint else { + return Ok(()); + }; + let auto_ports = Self::load_auto_prismnet_ports(vm); + if auto_ports.is_empty() { + return Ok(()); + } + + let auth_token = self + .issue_internal_token(&vm.org_id, &vm.project_id) + .await?; + let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?; + + for auto_port in auto_ports { + let result = client + .delete_port( + &vm.org_id, + &vm.project_id, + &auto_port.subnet_id, + &auto_port.port_id, + ) + .await; + if let Err(error) = result { + if error + .downcast_ref::() + .is_some_and(|status| status.code() == tonic::Code::NotFound) + { + continue; + } + return Err(error); + } + + tracing::info!( + vm_id = %vm.id, + nic_id = %auto_port.nic_id, + port_id = %auto_port.port_id, + 
"Deleted auto-managed PrismNET port" + ); + } + + Ok(()) + } + async fn rollback_prepared_vm_resources( &self, vm: &plasmavmc_types::VirtualMachine, @@ -1720,6 +1899,13 @@ impl VmServiceImpl { "Failed to detach PrismNET ports during VM rollback" ); } + if let Err(error) = self.delete_auto_prismnet_ports(vm).await { + tracing::warn!( + vm_id = %vm.id, + error = %error, + "Failed to delete auto-managed PrismNET ports during VM rollback" + ); + } if let Err(error) = self .volume_manager .rollback_vm_volumes(vm, delete_auto_delete_volumes) @@ -2064,7 +2250,339 @@ impl VmServiceImpl { #[cfg(test)] mod tests { use super::*; - use plasmavmc_types::VmSpec; + use async_trait::async_trait; + use iam_api::{ + iam_admin_server::IamAdminServer, iam_authz_server::IamAuthzServer, + iam_token_server::IamTokenServer, IamAdminService, IamAuthzService, IamTokenService, + }; + use iam_authn::{InternalTokenConfig, InternalTokenService, SigningKey}; + use iam_authz::{PolicyCache, PolicyEvaluator}; + use iam_store::{ + Backend, BindingStore, GroupStore, OrgStore, PrincipalStore, ProjectStore, RoleStore, + TokenStore, + }; + use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope}; + use plasmavmc_api::proto::{ + CpuSpec as ProtoCpuSpec, HypervisorType as ProtoHypervisorType, + MemorySpec as ProtoMemorySpec, NetworkSpec as ProtoNetworkSpec, + }; + use plasmavmc_hypervisor::{BackendCapabilities, HypervisorBackend, UnsupportedReason}; + use plasmavmc_types::{AttachedDisk, VmHandle, VmSpec, VmStatus}; + use prismnet_api::{ + port_service_server::PortServiceServer, subnet_service_server::SubnetServiceServer, + }; + use prismnet_server::{NetworkMetadataStore, OvnClient, PortServiceImpl, SubnetServiceImpl}; + use prismnet_types::{ + DeviceType as PrismnetDeviceType, Port, PortId as PrismnetPortId, Subnet, Vpc, + }; + use std::net::SocketAddr; + use std::sync::Mutex; + use tempfile::{tempdir, TempDir}; + use tokio_stream::wrappers::TcpListenerStream; + use tonic::transport::Server; + + 
#[derive(Default)] + struct MockBackend { + created: Mutex>, + deleted: Mutex>, + } + + impl MockBackend { + fn last_created_vm(&self) -> Option { + self.created.lock().unwrap().last().cloned() + } + } + + #[async_trait] + impl HypervisorBackend for MockBackend { + fn backend_type(&self) -> HypervisorType { + HypervisorType::Kvm + } + + fn capabilities(&self) -> BackendCapabilities { + BackendCapabilities::default() + } + + fn supports(&self, _spec: &VmSpec) -> std::result::Result<(), UnsupportedReason> { + Ok(()) + } + + async fn create( + &self, + vm: &plasmavmc_types::VirtualMachine, + disks: &[AttachedDisk], + ) -> plasmavmc_types::Result { + self.created.lock().unwrap().push(vm.clone()); + let mut handle = VmHandle::new(vm.id, format!("/tmp/{}", vm.id)); + handle.attached_disks = disks.to_vec(); + Ok(handle) + } + + async fn start(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn stop( + &self, + _handle: &VmHandle, + _timeout: Duration, + ) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn kill(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn reboot(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn migrate( + &self, + _handle: &VmHandle, + _destination_uri: &str, + _timeout: Duration, + _wait: bool, + ) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn prepare_incoming( + &self, + vm: &plasmavmc_types::VirtualMachine, + _listen_uri: &str, + disks: &[AttachedDisk], + ) -> plasmavmc_types::Result { + self.create(vm, disks).await + } + + async fn delete(&self, handle: &VmHandle) -> plasmavmc_types::Result<()> { + self.deleted.lock().unwrap().push(handle.vm_id.to_string()); + Ok(()) + } + + async fn status(&self, _handle: &VmHandle) -> plasmavmc_types::Result { + Ok(VmStatus { + actual_state: VmState::Stopped, + ..VmStatus::default() + }) + } + + async fn attach_disk( + &self, + _handle: &VmHandle, + _disk: &AttachedDisk, + ) -> 
plasmavmc_types::Result<()> { + Ok(()) + } + + async fn detach_disk( + &self, + _handle: &VmHandle, + _disk_id: &str, + ) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn attach_nic( + &self, + _handle: &VmHandle, + _nic: &NetworkSpec, + ) -> plasmavmc_types::Result<()> { + Ok(()) + } + + async fn detach_nic( + &self, + _handle: &VmHandle, + _nic_id: &str, + ) -> plasmavmc_types::Result<()> { + Ok(()) + } + } + + async fn wait_for_test_tcp(addr: SocketAddr) { + let deadline = tokio::time::Instant::now() + Duration::from_secs(2); + loop { + if tokio::net::TcpStream::connect(addr).await.is_ok() { + return; + } + assert!( + tokio::time::Instant::now() < deadline, + "timed out waiting for test listener {}", + addr + ); + tokio::time::sleep(Duration::from_millis(25)).await; + } + } + + async fn start_test_iam_server() -> String { + let backend = Arc::new(Backend::memory()); + let principal_store = Arc::new(PrincipalStore::new(backend.clone())); + let role_store = Arc::new(RoleStore::new(backend.clone())); + let binding_store = Arc::new(BindingStore::new(backend.clone())); + let token_store = Arc::new(TokenStore::new(backend.clone())); + let group_store = Arc::new(GroupStore::new(backend.clone())); + let org_store = Arc::new(OrgStore::new(backend.clone())); + let project_store = Arc::new(ProjectStore::new(backend)); + + role_store.init_builtin_roles().await.unwrap(); + principal_store + .create(&Principal::new_user("user-1", "User One")) + .await + .unwrap(); + binding_store + .create(&PolicyBinding::new( + "binding-user-1-project-admin", + PrincipalRef::user("user-1"), + "roles/ProjectAdmin", + Scope::project("proj-1", "org-1"), + )) + .await + .unwrap(); + + let cache = Arc::new(PolicyCache::default_config()); + let evaluator = Arc::new(PolicyEvaluator::with_group_store( + binding_store.clone(), + role_store.clone(), + group_store.clone(), + cache, + )); + let token_service = Arc::new(InternalTokenService::new(InternalTokenConfig::new( + 
SigningKey::generate("vm-service-test-key"), + "vm-service-test", + ))); + let authz_service = IamAuthzService::new(evaluator.clone(), principal_store.clone()); + let token_grpc_service = + IamTokenService::new(token_service, principal_store.clone(), token_store, None); + let admin_service = IamAdminService::new( + principal_store, + role_store, + binding_store, + org_store, + project_store, + group_store, + ) + .with_evaluator(evaluator); + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + Server::builder() + .add_service(IamAuthzServer::new(authz_service)) + .add_service(IamTokenServer::new(token_grpc_service)) + .add_service(IamAdminServer::new(admin_service)) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .unwrap(); + }); + + wait_for_test_tcp(addr).await; + format!("http://{}", addr) + } + + async fn start_test_prismnet_server( + iam_endpoint: &str, + metadata: Arc, + ) -> String { + let auth_service = Arc::new(AuthService::new(iam_endpoint).await.unwrap()); + let ovn = Arc::new(OvnClient::new_mock()); + let port_service = PortServiceImpl::new(metadata.clone(), ovn, auth_service.clone()); + let subnet_service = SubnetServiceImpl::new(metadata, auth_service.clone()); + let auth_handle = tokio::runtime::Handle::current(); + let interceptor = move |mut req: Request<()>| -> Result, Status> { + let auth = auth_service.clone(); + tokio::task::block_in_place(|| { + auth_handle.block_on(async move { + let tenant_context = auth.authenticate_request(&req).await?; + req.extensions_mut().insert(tenant_context); + Ok(req) + }) + }) + }; + + let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap(); + let addr = listener.local_addr().unwrap(); + tokio::spawn(async move { + Server::builder() + .add_service(SubnetServiceServer::with_interceptor( + subnet_service, + interceptor.clone(), + )) + 
.add_service(PortServiceServer::with_interceptor( + port_service, + interceptor, + )) + .serve_with_incoming(TcpListenerStream::new(listener)) + .await + .unwrap(); + }); + + wait_for_test_tcp(addr).await; + format!("http://{}", addr) + } + + async fn new_test_vm_service( + iam_endpoint: &str, + prismnet_endpoint: Option, + ) -> (TempDir, VmServiceImpl, Arc) { + let tempdir = tempdir().unwrap(); + let registry = Arc::new(HypervisorRegistry::new()); + let backend = Arc::new(MockBackend::default()); + registry.register(backend.clone()); + + let mut config = ServerConfig::default(); + config.storage.backend = crate::config::StorageBackendKind::File; + config.storage.state_path = Some(tempdir.path().join("state")); + config.integrations.prismnet_endpoint = prismnet_endpoint; + config.volumes.managed_volume_root = tempdir.path().join("managed-volumes"); + config.volumes.qemu_img_path = Some(std::env::current_exe().unwrap()); + + let auth = Arc::new(AuthService::new(iam_endpoint).await.unwrap()); + let service = VmServiceImpl::new(registry, auth, iam_endpoint.to_string(), &config) + .await + .unwrap(); + (tempdir, service, backend) + } + + fn test_tenant() -> TenantContext { + TenantContext { + org_id: "org-1".to_string(), + project_id: "proj-1".to_string(), + principal_id: "user-1".to_string(), + principal_name: "User One".to_string(), + principal_kind: PrincipalKind::User, + node_id: None, + } + } + + fn test_vm_request(network: Vec) -> CreateVmRequest { + CreateVmRequest { + name: "test-vm".to_string(), + org_id: "org-1".to_string(), + project_id: "proj-1".to_string(), + spec: Some(ProtoVmSpec { + cpu: Some(ProtoCpuSpec { + vcpus: 1, + cores_per_socket: 1, + sockets: 1, + cpu_model: String::new(), + }), + memory: Some(ProtoMemorySpec { + size_mib: 512, + hugepages: false, + }), + disks: vec![], + network, + boot: None, + security: None, + }), + hypervisor: ProtoHypervisorType::Kvm as i32, + metadata: Default::default(), + labels: Default::default(), + } + } #[test] fn 
unspecified_disk_cache_defaults_to_writeback() { @@ -2163,6 +2681,216 @@ mod tests { assert!(VmServiceImpl::validate_vm_disk_references(&spec).is_err()); } + + #[tokio::test] + async fn vm_network_reference_validation_requires_subnet_for_port_id() { + let iam_endpoint = start_test_iam_server().await; + let (_tempdir, service, backend) = new_test_vm_service(&iam_endpoint, None).await; + let mut request = Request::new(test_vm_request(vec![ProtoNetworkSpec { + id: "nic0".to_string(), + network_id: "default".to_string(), + subnet_id: String::new(), + port_id: Uuid::new_v4().to_string(), + mac_address: String::new(), + ip_address: String::new(), + cidr_block: String::new(), + gateway_ip: String::new(), + dhcp_enabled: false, + model: ProtoNicModel::VirtioNet as i32, + security_groups: vec![], + }])); + request.extensions_mut().insert(test_tenant()); + + let error = service.create_vm(request).await.unwrap_err(); + assert_eq!(error.code(), tonic::Code::InvalidArgument); + assert!(backend.last_created_vm().is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn create_vm_rejects_unknown_security_group_reference() { + let iam_endpoint = start_test_iam_server().await; + let metadata = Arc::new(NetworkMetadataStore::new_in_memory()); + let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16"); + metadata.create_vpc(vpc.clone()).await.unwrap(); + let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24"); + metadata.create_subnet(subnet.clone()).await.unwrap(); + let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await; + let (_tempdir, service, backend) = + new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await; + + let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec { + id: "nic0".to_string(), + network_id: "default".to_string(), + subnet_id: subnet.id.to_string(), + port_id: String::new(), + mac_address: String::new(), + ip_address: String::new(), + cidr_block: String::new(), + 
gateway_ip: String::new(), + dhcp_enabled: false, + model: ProtoNicModel::VirtioNet as i32, + security_groups: vec![Uuid::new_v4().to_string()], + }])); + create_request.extensions_mut().insert(test_tenant()); + + let error = service.create_vm(create_request).await.unwrap_err(); + assert_eq!(error.code(), tonic::Code::NotFound); + assert!(backend.last_created_vm().is_none()); + assert!(metadata + .list_ports(Some(&subnet.id), None) + .await + .unwrap() + .is_empty()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn create_vm_auto_manages_prismnet_port_lifecycle() { + let iam_endpoint = start_test_iam_server().await; + let metadata = Arc::new(NetworkMetadataStore::new_in_memory()); + let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16"); + metadata.create_vpc(vpc.clone()).await.unwrap(); + let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24"); + metadata.create_subnet(subnet.clone()).await.unwrap(); + let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await; + let (_tempdir, service, backend) = + new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await; + + let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec { + id: "nic0".to_string(), + network_id: "default".to_string(), + subnet_id: subnet.id.to_string(), + port_id: String::new(), + mac_address: String::new(), + ip_address: String::new(), + cidr_block: String::new(), + gateway_ip: String::new(), + dhcp_enabled: false, + model: ProtoNicModel::VirtioNet as i32, + security_groups: vec![], + }])); + create_request.extensions_mut().insert(test_tenant()); + + let vm = service + .create_vm(create_request) + .await + .unwrap() + .into_inner(); + let nic = vm.spec.as_ref().unwrap().network.first().unwrap(); + assert!(!nic.port_id.is_empty()); + assert!(!nic.mac_address.is_empty()); + assert!(!nic.ip_address.is_empty()); + + let backend_vm = backend.last_created_vm().unwrap(); + assert!(backend_vm + .metadata + 
.contains_key(PRISMNET_AUTO_PORTS_METADATA_KEY)); + assert_eq!( + backend_vm + .spec + .network + .first() + .and_then(|entry| entry.port_id.as_deref()), + Some(nic.port_id.as_str()) + ); + + let port_id = PrismnetPortId::from_uuid(Uuid::parse_str(&nic.port_id).unwrap()); + let port = metadata + .get_port(&subnet.id, &port_id) + .await + .unwrap() + .unwrap(); + assert_eq!(port.device_id.as_deref(), Some(vm.id.as_str())); + assert_eq!(port.device_type, PrismnetDeviceType::Vm); + + let mut delete_request = Request::new(DeleteVmRequest { + org_id: "org-1".to_string(), + project_id: "proj-1".to_string(), + vm_id: vm.id.clone(), + force: true, + }); + delete_request.extensions_mut().insert(test_tenant()); + service.delete_vm(delete_request).await.unwrap(); + + assert!(metadata + .get_port(&subnet.id, &port_id) + .await + .unwrap() + .is_none()); + } + + #[tokio::test(flavor = "multi_thread")] + async fn delete_vm_preserves_tenant_managed_prismnet_port() { + let iam_endpoint = start_test_iam_server().await; + let metadata = Arc::new(NetworkMetadataStore::new_in_memory()); + let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16"); + metadata.create_vpc(vpc.clone()).await.unwrap(); + let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24"); + metadata.create_subnet(subnet.clone()).await.unwrap(); + + let mut existing_port = Port::new("tenant-managed-port", subnet.id); + existing_port.ip_address = Some("10.0.1.25".to_string()); + let existing_port_id = existing_port.id; + metadata.create_port(existing_port).await.unwrap(); + + let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await; + let (_tempdir, service, backend) = + new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await; + + let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec { + id: "nic0".to_string(), + network_id: "default".to_string(), + subnet_id: subnet.id.to_string(), + port_id: existing_port_id.to_string(), + mac_address: 
String::new(), + ip_address: String::new(), + cidr_block: String::new(), + gateway_ip: String::new(), + dhcp_enabled: false, + model: ProtoNicModel::VirtioNet as i32, + security_groups: vec![], + }])); + create_request.extensions_mut().insert(test_tenant()); + + let vm = service + .create_vm(create_request) + .await + .unwrap() + .into_inner(); + let nic = vm.spec.as_ref().unwrap().network.first().unwrap(); + assert_eq!(nic.port_id, existing_port_id.to_string()); + assert!(!nic.mac_address.is_empty()); + assert_eq!(nic.ip_address.as_str(), "10.0.1.25"); + + let backend_vm = backend.last_created_vm().unwrap(); + assert!(!backend_vm + .metadata + .contains_key(PRISMNET_AUTO_PORTS_METADATA_KEY)); + + let port = metadata + .get_port(&subnet.id, &existing_port_id) + .await + .unwrap() + .unwrap(); + assert_eq!(port.device_id.as_deref(), Some(vm.id.as_str())); + assert_eq!(port.device_type, PrismnetDeviceType::Vm); + + let mut delete_request = Request::new(DeleteVmRequest { + org_id: "org-1".to_string(), + project_id: "proj-1".to_string(), + vm_id: vm.id.clone(), + force: true, + }); + delete_request.extensions_mut().insert(test_tenant()); + service.delete_vm(delete_request).await.unwrap(); + + let port = metadata + .get_port(&subnet.id, &existing_port_id) + .await + .unwrap() + .unwrap(); + assert_eq!(port.device_id, None); + assert_eq!(port.device_type, PrismnetDeviceType::None); + } } impl StateSink for VmServiceImpl { @@ -2245,6 +2973,7 @@ impl VmService for VmServiceImpl { } let spec = Self::proto_spec_to_types(req.spec.clone()); Self::validate_vm_disk_references(&spec)?; + Self::validate_vm_network_references(&spec)?; if self.is_control_plane_scheduler() { if let Some(target) = self .select_target_node(hv, &req.org_id, &req.project_id, &spec) @@ -2369,10 +3098,23 @@ impl VmService for VmServiceImpl { } }; - // Attach to PrismNET ports if configured if let Err(e) = self.attach_prismnet_ports(&mut vm).await { - tracing::warn!("Failed to attach PrismNET ports: {}", 
e); - // Continue anyway - network attachment is optional + if let (Some(ref credit_svc), Some(ref res_id)) = + (&self.credit_service, &reservation_id) + { + let mut client = credit_svc.write().await; + if let Err(release_err) = client + .release_reservation(res_id, format!("VM network attachment failed: {}", e)) + .await + { + tracing::warn!("Failed to release reservation {}: {}", res_id, release_err); + } + } + self.rollback_prepared_vm_resources(&vm, true).await; + return Err(Self::map_prismnet_error( + e.as_ref(), + "attach PrismNET ports", + )); } // Create VM @@ -2700,6 +3442,9 @@ impl VmService for VmServiceImpl { tracing::warn!("Failed to detach PrismNET ports: {}", e); // Continue anyway - we still want to delete the VM } + if let Err(e) = self.delete_auto_prismnet_ports(&vm).await { + tracing::warn!("Failed to delete auto-managed PrismNET ports: {}", e); + } } if self.is_control_plane_scheduler() { @@ -3535,6 +4280,7 @@ impl VmService for VmServiceImpl { let spec = Self::proto_spec_to_types(req.spec); Self::validate_vm_disk_references(&spec)?; + Self::validate_vm_network_references(&spec)?; let name = if req.name.is_empty() { req.vm_id.clone() } else { @@ -3635,7 +4381,11 @@ impl VmService for VmServiceImpl { let attached_disks = self.volume_manager.prepare_vm_volumes(&mut vm).await?; if let Err(e) = self.attach_prismnet_ports(&mut vm).await { - tracing::warn!("Failed to attach PrismNET ports: {}", e); + self.rollback_prepared_vm_resources(&vm, false).await; + return Err(Self::map_prismnet_error( + e.as_ref(), + "attach PrismNET ports", + )); } let handle = match backend.create(&vm, &attached_disks).await { @@ -4352,9 +5102,6 @@ impl ImageService for VmServiceImpl { if req.source_url.trim().is_empty() { return Err(Status::invalid_argument("source_url is required")); } - if !req.source_url.starts_with("https://") { - return Err(Status::invalid_argument("source_url must use https://")); - } let Some(store) = self.artifact_store.as_ref() else { return 
Err(Status::failed_precondition( "LightningStor artifact backing is required for image imports", diff --git a/plasmavmc/crates/plasmavmc-server/src/volume_manager.rs b/plasmavmc/crates/plasmavmc-server/src/volume_manager.rs index bdbaf2c..83f8b30 100644 --- a/plasmavmc/crates/plasmavmc-server/src/volume_manager.rs +++ b/plasmavmc/crates/plasmavmc-server/src/volume_manager.rs @@ -6,6 +6,7 @@ use plasmavmc_types::{ VolumeDriverKind, VolumeFormat, VolumeStatus, }; use serde::{Deserialize, Serialize}; +use sha2::{Digest, Sha256}; use std::net::IpAddr; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -1757,7 +1758,12 @@ fn volume_has_pending_coronafs_image_seed(volume: &Volume) -> bool { } fn derived_volume_id(vm_id: &str, disk_id: &str) -> String { - format!("{vm_id}-{disk_id}") + let digest = Sha256::digest(format!("photoncloud-vm-disk:{vm_id}:{disk_id}").as_bytes()); + let mut bytes = [0u8; 16]; + bytes.copy_from_slice(&digest[..16]); + bytes[6] = (bytes[6] & 0x0f) | 0x50; + bytes[8] = (bytes[8] & 0x3f) | 0x80; + Uuid::from_bytes(bytes).to_string() } fn volume_is_auto_delete(volume: &Volume) -> bool { @@ -2443,6 +2449,20 @@ mod tests { ); } + #[test] + fn derived_volume_id_is_stable_uuid() { + let volume_id = derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "root"); + assert_eq!( + volume_id, + derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "root") + ); + assert_ne!( + volume_id, + derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "data") + ); + Uuid::parse_str(&volume_id).unwrap(); + } + #[test] fn normalize_coronafs_endpoint_supports_comma_separated_values() { assert_eq!( diff --git a/plasmavmc/crates/plasmavmc-types/src/vm.rs b/plasmavmc/crates/plasmavmc-types/src/vm.rs index 64f3037..526fb47 100644 --- a/plasmavmc/crates/plasmavmc-types/src/vm.rs +++ b/plasmavmc/crates/plasmavmc-types/src/vm.rs @@ -262,6 +262,12 @@ pub struct NetworkSpec { pub mac_address: Option, /// IP address (DHCP if None) pub ip_address: Option, + /// 
Attached subnet CIDR (required for the local KVM dataplane) + pub cidr_block: Option, + /// Attached subnet gateway IP (required for the local KVM dataplane) + pub gateway_ip: Option, + /// Whether DHCP should be enabled on the attached subnet + pub dhcp_enabled: bool, /// NIC model pub model: NicModel, /// Security groups @@ -277,6 +283,9 @@ impl Default for NetworkSpec { port_id: None, mac_address: None, ip_address: None, + cidr_block: None, + gateway_ip: None, + dhcp_enabled: false, model: NicModel::VirtioNet, security_groups: Vec::new(), } @@ -304,8 +313,7 @@ pub struct SecuritySpec { } /// Complete VM specification -#[derive(Debug, Clone, Serialize, Deserialize)] -#[derive(Default)] +#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct VmSpec { /// CPU configuration pub cpu: CpuSpec, @@ -321,7 +329,6 @@ pub struct VmSpec { pub security: SecuritySpec, } - /// Resource usage statistics #[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct ResourceUsage { diff --git a/plasmavmc/proto/plasmavmc.proto b/plasmavmc/proto/plasmavmc.proto index fb5ee8f..aa3c810 100644 --- a/plasmavmc/proto/plasmavmc.proto +++ b/plasmavmc/proto/plasmavmc.proto @@ -210,6 +210,9 @@ message NetworkSpec { repeated string security_groups = 6; string port_id = 7; // PrismNET port ID for OVN integration string subnet_id = 8; // PrismNET subnet ID for OVN integration + string cidr_block = 9; // Effective subnet CIDR for the realized dataplane + string gateway_ip = 10; // Effective subnet gateway for the realized dataplane + bool dhcp_enabled = 11; } enum NicModel { diff --git a/prismnet/crates/prismnet-api/proto/prismnet.proto b/prismnet/crates/prismnet-api/proto/prismnet.proto index b73af29..463bba4 100644 --- a/prismnet/crates/prismnet-api/proto/prismnet.proto +++ b/prismnet/crates/prismnet-api/proto/prismnet.proto @@ -184,6 +184,100 @@ message DeleteSubnetRequest { message DeleteSubnetResponse {} +// 
============================================================================= +// Router Service +// ============================================================================= + +service RouterService { + rpc CreateRouter(CreateRouterRequest) returns (CreateRouterResponse); + rpc GetRouter(GetRouterRequest) returns (GetRouterResponse); + rpc ListRouters(ListRoutersRequest) returns (ListRoutersResponse); + rpc UpdateRouter(UpdateRouterRequest) returns (UpdateRouterResponse); + rpc DeleteRouter(DeleteRouterRequest) returns (DeleteRouterResponse); +} + +message Router { + string id = 1; + string org_id = 2; + string project_id = 3; + string vpc_id = 4; + string name = 5; + string description = 6; + string gateway_cidr = 7; + string mac_address = 8; + string external_ip = 9; + RouterStatus status = 10; + uint64 created_at = 11; + uint64 updated_at = 12; +} + +enum RouterStatus { + ROUTER_STATUS_UNSPECIFIED = 0; + ROUTER_STATUS_PROVISIONING = 1; + ROUTER_STATUS_ACTIVE = 2; + ROUTER_STATUS_UPDATING = 3; + ROUTER_STATUS_DELETING = 4; + ROUTER_STATUS_ERROR = 5; +} + +message CreateRouterRequest { + string org_id = 1; + string project_id = 2; + string vpc_id = 3; + string name = 4; + string description = 5; + string gateway_cidr = 6; + string mac_address = 7; + string external_ip = 8; +} + +message CreateRouterResponse { + Router router = 1; +} + +message GetRouterRequest { + string org_id = 1; + string project_id = 2; + string id = 3; +} + +message GetRouterResponse { + Router router = 1; +} + +message ListRoutersRequest { + string org_id = 1; + string project_id = 2; + string vpc_id = 3; + int32 page_size = 4; + string page_token = 5; +} + +message ListRoutersResponse { + repeated Router routers = 1; + string next_page_token = 2; +} + +message UpdateRouterRequest { + string org_id = 1; + string project_id = 2; + string id = 3; + string name = 4; + string description = 5; +} + +message UpdateRouterResponse { + Router router = 1; +} + +message DeleteRouterRequest { + 
string org_id = 1; + string project_id = 2; + string id = 3; +} + +message DeleteRouterResponse {} + // ============================================================================= // Port Service // ============================================================================= @@ -463,6 +557,12 @@ service IpamService { // List Service IP Pools rpc ListServiceIPPools(ListServiceIPPoolsRequest) returns (ListServiceIPPoolsResponse); + + // Update Service IP Pool metadata + rpc UpdateServiceIPPool(UpdateServiceIPPoolRequest) returns (UpdateServiceIPPoolResponse); + + // Delete Service IP Pool + rpc DeleteServiceIPPool(DeleteServiceIPPoolRequest) returns (DeleteServiceIPPoolResponse); // Allocate IP from pool rpc AllocateServiceIP(AllocateServiceIPRequest) returns (AllocateServiceIPResponse); @@ -550,6 +650,26 @@ message ListServiceIPPoolsResponse { string next_page_token = 2; } +message UpdateServiceIPPoolRequest { + string org_id = 1; + string project_id = 2; + string id = 3; + string name = 4; + string description = 5; +} + +message UpdateServiceIPPoolResponse { + ServiceIPPool pool = 1; +} + +message DeleteServiceIPPoolRequest { + string org_id = 1; + string project_id = 2; + string id = 3; +} + +message DeleteServiceIPPoolResponse {} + message AllocateServiceIPRequest { string org_id = 1; string project_id = 2; diff --git a/prismnet/crates/prismnet-server/src/lib.rs b/prismnet/crates/prismnet-server/src/lib.rs index 68ab933..befcd84 100644 --- a/prismnet/crates/prismnet-server/src/lib.rs +++ b/prismnet/crates/prismnet-server/src/lib.rs @@ -10,5 +10,6 @@ pub use config::ServerConfig; pub use metadata::NetworkMetadataStore; pub use ovn::OvnClient; pub use services::{ - IpamServiceImpl, PortServiceImpl, SecurityGroupServiceImpl, SubnetServiceImpl, VpcServiceImpl, + IpamServiceImpl, PortServiceImpl, RouterServiceImpl, SecurityGroupServiceImpl, + SubnetServiceImpl, VpcServiceImpl, }; diff --git a/prismnet/crates/prismnet-server/src/main.rs 
b/prismnet/crates/prismnet-server/src/main.rs index 5e6cd3e..778de5b 100644 --- a/prismnet/crates/prismnet-server/src/main.rs +++ b/prismnet/crates/prismnet-server/src/main.rs @@ -7,12 +7,13 @@ use iam_service_auth::AuthService; use metrics_exporter_prometheus::PrometheusBuilder; use prismnet_api::{ ipam_service_server::IpamServiceServer, port_service_server::PortServiceServer, + router_service_server::RouterServiceServer, security_group_service_server::SecurityGroupServiceServer, subnet_service_server::SubnetServiceServer, vpc_service_server::VpcServiceServer, }; use prismnet_server::{ config::MetadataBackend, IpamServiceImpl, NetworkMetadataStore, OvnClient, PortServiceImpl, - SecurityGroupServiceImpl, ServerConfig, SubnetServiceImpl, VpcServiceImpl, + RouterServiceImpl, SecurityGroupServiceImpl, ServerConfig, SubnetServiceImpl, VpcServiceImpl, }; use std::net::SocketAddr; use std::path::PathBuf; @@ -186,6 +187,11 @@ async fn main() -> Result<(), Box> { ovn.clone(), auth_service.clone(), )); + let router_service = Arc::new(RouterServiceImpl::new( + metadata.clone(), + ovn.clone(), + auth_service.clone(), + )); let sg_service = Arc::new(SecurityGroupServiceImpl::new( metadata.clone(), ovn.clone(), @@ -204,6 +210,9 @@ async fn main() -> Result<(), Box> { health_reporter .set_serving::>() .await; + health_reporter + .set_serving::>() + .await; health_reporter .set_serving::>() .await; @@ -259,6 +268,10 @@ async fn main() -> Result<(), Box> { PortServiceServer::new(port_service.as_ref().clone()), make_interceptor(auth_service.clone()), )) + .add_service(tonic::codegen::InterceptedService::new( + RouterServiceServer::new(router_service.as_ref().clone()), + make_interceptor(auth_service.clone()), + )) .add_service(tonic::codegen::InterceptedService::new( SecurityGroupServiceServer::new(sg_service.as_ref().clone()), make_interceptor(auth_service.clone()), @@ -274,6 +287,10 @@ async fn main() -> Result<(), Box> { let rest_state = prismnet_server::rest::RestApiState { 
vpc_service: vpc_service.clone(), subnet_service: subnet_service.clone(), + port_service: port_service.clone(), + router_service: router_service.clone(), + security_group_service: sg_service.clone(), + ipam_service: ipam_service.clone(), auth_service: auth_service.clone(), }; let rest_app = prismnet_server::rest::build_router(rest_state); diff --git a/prismnet/crates/prismnet-server/src/metadata.rs b/prismnet/crates/prismnet-server/src/metadata.rs index b607c11..7195b1b 100644 --- a/prismnet/crates/prismnet-server/src/metadata.rs +++ b/prismnet/crates/prismnet-server/src/metadata.rs @@ -3,8 +3,9 @@ use dashmap::DashMap; use flaredb_client::RdbClient; use prismnet_types::{ - IPAllocation, Port, PortId, SecurityGroup, SecurityGroupId, SecurityGroupRule, - SecurityGroupRuleId, ServiceIPPool, ServiceIPPoolId, Subnet, SubnetId, Vpc, VpcId, + IPAllocation, Port, PortId, Router, RouterId, SecurityGroup, SecurityGroupId, + SecurityGroupRule, SecurityGroupRuleId, ServiceIPPool, ServiceIPPoolId, Subnet, SubnetId, + Vpc, VpcId, }; use sqlx::pool::PoolOptions; use sqlx::{Pool, Postgres, Sqlite}; @@ -406,6 +407,14 @@ impl NetworkMetadataStore { format!("/prismnet/subnets/{}/", vpc_id) } + fn router_key(org_id: &str, project_id: &str, router_id: &RouterId) -> String { + format!("/prismnet/routers/{}/{}/{}", org_id, project_id, router_id) + } + + fn router_prefix(org_id: &str, project_id: &str) -> String { + format!("/prismnet/routers/{}/{}/", org_id, project_id) + } + fn port_key(subnet_id: &SubnetId, port_id: &PortId) -> String { format!("/prismnet/ports/{}/{}", subnet_id, port_id) } @@ -645,6 +654,94 @@ impl NetworkMetadataStore { // Port Operations // ========================================================================= + pub async fn create_router(&self, router: Router) -> Result { + let id = router.id; + let key = Self::router_key(&router.org_id, &router.project_id, &id); + let value = serde_json::to_string(&router) + .map_err(|e| 
MetadataError::Serialization(e.to_string()))?; + self.put(&key, &value).await?; + Ok(id) + } + + pub async fn get_router( + &self, + org_id: &str, + project_id: &str, + id: &RouterId, + ) -> Result> { + let key = Self::router_key(org_id, project_id, id); + if let Some(value) = self.get(&key).await? { + let router: Router = serde_json::from_str(&value) + .map_err(|e| MetadataError::Serialization(e.to_string()))?; + Ok(Some(router)) + } else { + Ok(None) + } + } + + pub async fn list_routers(&self, org_id: &str, project_id: &str) -> Result> { + let prefix = Self::router_prefix(org_id, project_id); + let entries = self.get_prefix(&prefix).await?; + let mut routers = Vec::new(); + for (_, value) in entries { + if let Ok(router) = serde_json::from_str::(&value) { + routers.push(router); + } + } + Ok(routers) + } + + pub async fn update_router( + &self, + org_id: &str, + project_id: &str, + id: &RouterId, + name: Option, + description: Option, + ) -> Result> { + let router_opt = self.get_router(org_id, project_id, id).await?; + if let Some(mut router) = router_opt { + if let Some(name) = name { + router.name = name; + } + if let Some(description) = description { + router.description = Some(description); + } + router.updated_at = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let key = Self::router_key(org_id, project_id, id); + let value = serde_json::to_string(&router) + .map_err(|e| MetadataError::Serialization(e.to_string()))?; + self.put(&key, &value).await?; + Ok(Some(router)) + } else { + Ok(None) + } + } + + pub async fn delete_router( + &self, + org_id: &str, + project_id: &str, + id: &RouterId, + ) -> Result> { + let router_opt = self.get_router(org_id, project_id, id).await?; + if let Some(router) = router_opt { + let key = Self::router_key(org_id, project_id, id); + self.delete_key(&key).await?; + Ok(Some(router)) + } else { + Ok(None) + } + } + + // 
========================================================================= + // Port Operations + // ========================================================================= + pub async fn create_port(&self, port: Port) -> Result { let id = port.id; let key = Self::port_key(&port.subnet_id, &id); @@ -963,6 +1060,53 @@ impl NetworkMetadataStore { Ok(pools) } + pub async fn update_service_ip_pool( + &self, + org_id: &str, + project_id: &str, + pool_id: &ServiceIPPoolId, + name: Option, + description: Option, + ) -> Result> { + let pool_opt = self.get_service_ip_pool(org_id, project_id, pool_id).await?; + if let Some(mut pool) = pool_opt { + if let Some(name) = name { + pool.name = name; + } + if let Some(description) = description { + pool.description = Some(description); + } + pool.updated_at = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + let key = Self::service_ip_pool_key(org_id, project_id, pool_id); + let value = serde_json::to_string(&pool) + .map_err(|e| MetadataError::Serialization(e.to_string()))?; + self.put(&key, &value).await?; + Ok(Some(pool)) + } else { + Ok(None) + } + } + + pub async fn delete_service_ip_pool( + &self, + org_id: &str, + project_id: &str, + pool_id: &ServiceIPPoolId, + ) -> Result> { + let pool_opt = self.get_service_ip_pool(org_id, project_id, pool_id).await?; + if let Some(pool) = pool_opt { + let key = Self::service_ip_pool_key(org_id, project_id, pool_id); + self.delete_key(&key).await?; + Ok(Some(pool)) + } else { + Ok(None) + } + } + pub async fn allocate_service_ip( &self, pool_id: &ServiceIPPoolId, @@ -1212,7 +1356,10 @@ fn normalize_transport_addr(endpoint: &str) -> String { #[cfg(test)] mod tests { use super::*; - use prismnet_types::{IpProtocol, RuleDirection, SecurityGroup, SecurityGroupRule, Vpc}; + use prismnet_types::{ + IpProtocol, Router, RuleDirection, SecurityGroup, SecurityGroupRule, ServiceIPPool, + ServiceIPPoolType, Vpc, + }; #[tokio::test] async 
fn test_vpc_crud() { @@ -1311,6 +1458,68 @@ mod tests { assert_eq!(subnets.len(), 1); } + #[tokio::test] + async fn test_router_crud() { + let store = NetworkMetadataStore::new_in_memory(); + + let vpc = Vpc::new("test-vpc", "org-1", "proj-1", "10.0.0.0/16"); + let vpc_id = store.create_vpc(vpc).await.unwrap(); + + let mut router = Router::new( + "edge", + "org-1", + "proj-1", + vpc_id, + "10.0.0.1/24", + "02:00:00:00:00:01", + "203.0.113.10", + ); + router.ovn_router_id = "lr-1".to_string(); + router.ovn_router_port_id = "lrp-1".to_string(); + + let router_id = store.create_router(router).await.unwrap(); + + let retrieved = store + .get_router("org-1", "proj-1", &router_id) + .await + .unwrap() + .unwrap(); + assert_eq!(retrieved.name, "edge"); + assert_eq!(retrieved.external_ip, "203.0.113.10"); + + let routers = store.list_routers("org-1", "proj-1").await.unwrap(); + assert_eq!(routers.len(), 1); + + store + .update_router( + "org-1", + "proj-1", + &router_id, + Some("edge-renamed".to_string()), + Some("tenant edge".to_string()), + ) + .await + .unwrap(); + let updated = store + .get_router("org-1", "proj-1", &router_id) + .await + .unwrap() + .unwrap(); + assert_eq!(updated.name, "edge-renamed"); + assert_eq!(updated.description.as_deref(), Some("tenant edge")); + + let deleted = store + .delete_router("org-1", "proj-1", &router_id) + .await + .unwrap(); + assert!(deleted.is_some()); + assert!(store + .get_router("org-1", "proj-1", &router_id) + .await + .unwrap() + .is_none()); + } + #[tokio::test] async fn test_port_crud() { let store = NetworkMetadataStore::new_in_memory(); @@ -1399,4 +1608,54 @@ mod tests { // Gateway should be skipped assert_ne!(ip1, "10.0.1.1"); } + + #[tokio::test] + async fn test_service_ip_pool_crud() { + let store = NetworkMetadataStore::new_in_memory(); + + let pool = ServiceIPPool::new( + "services", + "org-1", + "proj-1", + "10.96.0.0/24", + ServiceIPPoolType::LoadBalancer, + ); + let pool_id = 
store.create_service_ip_pool(pool).await.unwrap(); + + let retrieved = store + .get_service_ip_pool("org-1", "proj-1", &pool_id) + .await + .unwrap() + .unwrap(); + assert_eq!(retrieved.name, "services"); + + store + .update_service_ip_pool( + "org-1", + "proj-1", + &pool_id, + Some("services-updated".to_string()), + Some("vip pool".to_string()), + ) + .await + .unwrap(); + let updated = store + .get_service_ip_pool("org-1", "proj-1", &pool_id) + .await + .unwrap() + .unwrap(); + assert_eq!(updated.name, "services-updated"); + assert_eq!(updated.description.as_deref(), Some("vip pool")); + + let deleted = store + .delete_service_ip_pool("org-1", "proj-1", &pool_id) + .await + .unwrap(); + assert!(deleted.is_some()); + assert!(store + .get_service_ip_pool("org-1", "proj-1", &pool_id) + .await + .unwrap() + .is_none()); + } } diff --git a/prismnet/crates/prismnet-server/src/rest.rs b/prismnet/crates/prismnet-server/src/rest.rs index 6a3be6f..5b9345c 100644 --- a/prismnet/crates/prismnet-server/src/rest.rs +++ b/prismnet/crates/prismnet-server/src/rest.rs @@ -1,46 +1,71 @@ //! REST HTTP API handlers for PrismNET //! -//! Implements REST endpoints as specified in T050.S8: -//! - GET /api/v1/vpcs - List VPCs -//! - POST /api/v1/vpcs - Create VPC -//! - GET /api/v1/vpcs/{id} - Get VPC -//! - DELETE /api/v1/vpcs/{id} - Delete VPC -//! - GET /api/v1/subnets - List Subnets -//! - POST /api/v1/subnets - Create Subnet -//! - DELETE /api/v1/subnets/{id} - Delete Subnet -//! - GET /health - Health check +//! Implements REST endpoints for tenant-scoped network resources: +//! - GET/POST /api/v1/vpcs +//! - GET/DELETE /api/v1/vpcs/{id} +//! - GET/POST /api/v1/subnets +//! - DELETE /api/v1/subnets/{id} +//! - GET/POST /api/v1/routers +//! - GET/PATCH/DELETE /api/v1/routers/{id} +//! - GET/POST /api/v1/security-groups +//! - GET/DELETE /api/v1/security-groups/{id} +//! - POST /api/v1/security-groups/{id}/rules +//! - DELETE /api/v1/security-groups/{id}/rules/{rule_id} +//! 
- GET/POST /api/v1/ports +//! - GET/DELETE /api/v1/ports/{id} +//! - GET/POST /api/v1/service-ip-pools +//! - GET/PATCH/DELETE /api/v1/service-ip-pools/{id} +//! - GET /health use axum::{ extract::{Path, Query, State}, - http::StatusCode, http::HeaderMap, - routing::{delete, get}, + http::StatusCode, + routing::{delete, get, post}, Json, Router, }; +use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext}; use prismnet_api::{ - vpc_service_server::VpcService, + ipam_service_server::IpamService, + port_service_server::PortService, + router_service_server::RouterService, + security_group_service_server::SecurityGroupService, subnet_service_server::SubnetService, - CreateVpcRequest, GetVpcRequest, ListVpcsRequest, DeleteVpcRequest, - CreateSubnetRequest, ListSubnetsRequest, DeleteSubnetRequest, - Vpc as ProtoVpc, Subnet as ProtoSubnet, + vpc_service_server::VpcService, + AddRuleRequest, CreatePortRequest, CreateSecurityGroupRequest, CreateServiceIpPoolRequest, + CreateRouterRequest, CreateSubnetRequest, CreateVpcRequest, DeletePortRequest, + DeleteRouterRequest, DeleteSecurityGroupRequest, DeleteServiceIpPoolRequest, + DeleteSubnetRequest, DeleteVpcRequest, GetPortRequest, GetRouterRequest, + GetSecurityGroupRequest, GetServiceIpPoolRequest, GetVpcRequest, IpProtocol, + ListPortsRequest, ListRoutersRequest, ListSecurityGroupsRequest, ListServiceIpPoolsRequest, + ListSubnetsRequest, ListVpcsRequest, Port as ProtoPort, RemoveRuleRequest, + Router as ProtoRouter, RuleDirection, + SecurityGroup as ProtoSecurityGroup, SecurityGroupRule as ProtoSecurityGroupRule, + ServiceIpPool as ProtoServiceIPPool, ServiceIpPoolType as ProtoServiceIPPoolType, + Subnet as ProtoSubnet, UpdatePortRequest, UpdateRouterRequest, + UpdateServiceIpPoolRequest, Vpc as ProtoVpc, }; use serde::{Deserialize, Serialize}; use std::sync::Arc; -use tonic::Request; use tonic::Code; +use tonic::Request; -use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, 
TenantContext}; -use crate::{VpcServiceImpl, SubnetServiceImpl}; +use crate::{ + IpamServiceImpl, PortServiceImpl, RouterServiceImpl, SecurityGroupServiceImpl, + SubnetServiceImpl, VpcServiceImpl, +}; -/// REST API state #[derive(Clone)] pub struct RestApiState { pub vpc_service: Arc, pub subnet_service: Arc, + pub port_service: Arc, + pub router_service: Arc, + pub security_group_service: Arc, + pub ipam_service: Arc, pub auth_service: Arc, } -/// Standard REST error response #[derive(Debug, Serialize)] pub struct ErrorResponse { pub error: ErrorDetail, @@ -70,7 +95,6 @@ impl ResponseMeta { } } -/// Standard REST success response #[derive(Debug, Serialize)] pub struct SuccessResponse { pub data: T, @@ -86,7 +110,15 @@ impl SuccessResponse { } } -/// Create VPC request +#[derive(Debug, Deserialize)] +pub struct ListParams { + pub org_id: Option, + pub project_id: Option, + pub vpc_id: Option, + pub subnet_id: Option, + pub pool_type: Option, +} + #[derive(Debug, Deserialize)] pub struct CreateVpcRequestRest { pub name: String, @@ -96,7 +128,6 @@ pub struct CreateVpcRequestRest { pub description: Option, } -/// Create Subnet request #[derive(Debug, Deserialize)] pub struct CreateSubnetRequestRest { pub name: String, @@ -106,15 +137,78 @@ pub struct CreateSubnetRequestRest { pub description: Option, } -/// Query params for list operations #[derive(Debug, Deserialize)] -pub struct ListParams { +pub struct CreateRouterRequestRest { + pub name: String, pub org_id: Option, pub project_id: Option, - pub vpc_id: Option, + pub vpc_id: String, + pub gateway_cidr: String, + pub mac_address: String, + pub external_ip: String, + pub description: Option, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateRouterRequestRest { + pub org_id: Option, + pub project_id: Option, + pub name: Option, + pub description: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreateSecurityGroupRequestRest { + pub name: String, + pub org_id: Option, + pub project_id: Option, + pub 
description: Option, +} + +#[derive(Debug, Deserialize)] +pub struct AddSecurityGroupRuleRequestRest { + pub org_id: Option, + pub project_id: Option, + pub direction: String, + pub protocol: Option, + pub port_range_min: Option, + pub port_range_max: Option, + pub remote_cidr: Option, + pub remote_group_id: Option, + pub description: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreatePortRequestRest { + pub name: String, + pub org_id: Option, + pub project_id: Option, + pub subnet_id: String, + pub description: Option, + pub ip_address: Option, + #[serde(default)] + pub security_group_ids: Vec, + pub admin_state_up: Option, +} + +#[derive(Debug, Deserialize)] +pub struct CreateServiceIpPoolRequestRest { + pub name: String, + pub org_id: Option, + pub project_id: Option, + pub cidr_block: String, + pub description: Option, + pub pool_type: Option, +} + +#[derive(Debug, Deserialize)] +pub struct UpdateServiceIpPoolRequestRest { + pub org_id: Option, + pub project_id: Option, + pub name: Option, + pub description: Option, } -/// VPC response #[derive(Debug, Serialize)] pub struct VpcResponse { pub id: String, @@ -128,14 +222,6 @@ pub struct VpcResponse { impl From for VpcResponse { fn from(vpc: ProtoVpc) -> Self { - let status = match vpc.status { - 1 => "provisioning", - 2 => "active", - 3 => "updating", - 4 => "deleting", - 5 => "error", - _ => "unknown", - }; Self { id: vpc.id, name: vpc.name, @@ -143,12 +229,11 @@ impl From for VpcResponse { project_id: vpc.project_id, cidr_block: vpc.cidr_block, description: vpc.description, - status: status.to_string(), + status: vpc_status_to_string(vpc.status), } } } -/// Subnet response #[derive(Debug, Serialize)] pub struct SubnetResponse { pub id: String, @@ -161,51 +246,239 @@ pub struct SubnetResponse { } impl From for SubnetResponse { - fn from(s: ProtoSubnet) -> Self { - let status = match s.status { - 1 => "provisioning", - 2 => "active", - 3 => "updating", - 4 => "deleting", - 5 => "error", - _ => 
"unknown", - }; + fn from(subnet: ProtoSubnet) -> Self { Self { - id: s.id, - name: s.name, - vpc_id: s.vpc_id, - cidr_block: s.cidr_block, - gateway_ip: s.gateway_ip, - description: s.description, - status: status.to_string(), + id: subnet.id, + name: subnet.name, + vpc_id: subnet.vpc_id, + cidr_block: subnet.cidr_block, + gateway_ip: subnet.gateway_ip, + description: subnet.description, + status: subnet_status_to_string(subnet.status), + } + } +} + +#[derive(Debug, Serialize)] +pub struct RouterResponse { + pub id: String, + pub org_id: String, + pub project_id: String, + pub vpc_id: String, + pub name: String, + pub description: String, + pub gateway_cidr: String, + pub mac_address: String, + pub external_ip: String, + pub status: String, +} + +impl From for RouterResponse { + fn from(router: ProtoRouter) -> Self { + Self { + id: router.id, + org_id: router.org_id, + project_id: router.project_id, + vpc_id: router.vpc_id, + name: router.name, + description: router.description, + gateway_cidr: router.gateway_cidr, + mac_address: router.mac_address, + external_ip: router.external_ip, + status: router_status_to_string(router.status), + } + } +} + +#[derive(Debug, Serialize)] +pub struct SecurityGroupRuleResponse { + pub id: String, + pub security_group_id: String, + pub direction: String, + pub protocol: String, + pub port_range_min: Option, + pub port_range_max: Option, + pub remote_cidr: Option, + pub remote_group_id: Option, + pub description: String, +} + +impl From for SecurityGroupRuleResponse { + fn from(rule: ProtoSecurityGroupRule) -> Self { + Self { + id: rule.id, + security_group_id: rule.security_group_id, + direction: rule_direction_to_string(rule.direction), + protocol: ip_protocol_to_string(rule.protocol), + port_range_min: (rule.port_range_min > 0).then_some(rule.port_range_min), + port_range_max: (rule.port_range_max > 0).then_some(rule.port_range_max), + remote_cidr: (!rule.remote_cidr.is_empty()).then_some(rule.remote_cidr), + remote_group_id: 
(!rule.remote_group_id.is_empty()).then_some(rule.remote_group_id), + description: rule.description, + } + } +} + +#[derive(Debug, Serialize)] +pub struct SecurityGroupResponse { + pub id: String, + pub project_id: String, + pub name: String, + pub description: String, + pub rules: Vec, +} + +impl From for SecurityGroupResponse { + fn from(security_group: ProtoSecurityGroup) -> Self { + Self { + id: security_group.id, + project_id: security_group.project_id, + name: security_group.name, + description: security_group.description, + rules: security_group + .rules + .into_iter() + .map(SecurityGroupRuleResponse::from) + .collect(), + } + } +} + +#[derive(Debug, Serialize)] +pub struct PortResponse { + pub id: String, + pub subnet_id: String, + pub name: String, + pub description: String, + pub mac_address: String, + pub ip_address: String, + pub device_id: String, + pub device_type: String, + pub security_group_ids: Vec, + pub admin_state_up: bool, + pub status: String, +} + +impl From for PortResponse { + fn from(port: ProtoPort) -> Self { + Self { + id: port.id, + subnet_id: port.subnet_id, + name: port.name, + description: port.description, + mac_address: port.mac_address, + ip_address: port.ip_address, + device_id: port.device_id, + device_type: device_type_to_string(port.device_type), + security_group_ids: port.security_group_ids, + admin_state_up: port.admin_state_up, + status: port_status_to_string(port.status), } } } -/// VPCs list response #[derive(Debug, Serialize)] pub struct VpcsResponse { pub vpcs: Vec, } -/// Subnets list response #[derive(Debug, Serialize)] pub struct SubnetsResponse { pub subnets: Vec, } -/// Build the REST API router +#[derive(Debug, Serialize)] +pub struct RoutersResponse { + pub routers: Vec, +} + +#[derive(Debug, Serialize)] +pub struct SecurityGroupsResponse { + pub security_groups: Vec, +} + +#[derive(Debug, Serialize)] +pub struct PortsResponse { + pub ports: Vec, +} + +#[derive(Debug, Serialize)] +pub struct 
ServiceIpPoolResponse { + pub id: String, + pub org_id: String, + pub project_id: String, + pub name: String, + pub description: String, + pub cidr_block: String, + pub pool_type: String, + pub allocated_ips: Vec, + pub status: String, +} + +impl From for ServiceIpPoolResponse { + fn from(pool: ProtoServiceIPPool) -> Self { + Self { + id: pool.id, + org_id: pool.org_id, + project_id: pool.project_id, + name: pool.name, + description: pool.description, + cidr_block: pool.cidr_block, + pool_type: service_ip_pool_type_to_string(pool.pool_type), + allocated_ips: pool.allocated_ips, + status: service_ip_pool_status_to_string(pool.status), + } + } +} + +#[derive(Debug, Serialize)] +pub struct ServiceIpPoolsResponse { + pub pools: Vec, +} + pub fn build_router(state: RestApiState) -> Router { Router::new() .route("/api/v1/vpcs", get(list_vpcs).post(create_vpc)) .route("/api/v1/vpcs/{id}", get(get_vpc).delete(delete_vpc)) .route("/api/v1/subnets", get(list_subnets).post(create_subnet)) .route("/api/v1/subnets/{id}", delete(delete_subnet)) + .route("/api/v1/routers", get(list_routers).post(create_router)) + .route( + "/api/v1/routers/{id}", + get(get_router).patch(update_router).delete(delete_router), + ) + .route( + "/api/v1/security-groups", + get(list_security_groups).post(create_security_group), + ) + .route( + "/api/v1/security-groups/{id}", + get(get_security_group).delete(delete_security_group), + ) + .route( + "/api/v1/security-groups/{id}/rules", + post(add_security_group_rule), + ) + .route( + "/api/v1/security-groups/{id}/rules/{rule_id}", + delete(remove_security_group_rule), + ) + .route("/api/v1/ports", get(list_ports).post(create_port)) + .route("/api/v1/ports/{id}", get(get_port).delete(delete_port)) + .route( + "/api/v1/service-ip-pools", + get(list_service_ip_pools).post(create_service_ip_pool), + ) + .route( + "/api/v1/service-ip-pools/{id}", + get(get_service_ip_pool) + .patch(update_service_ip_pool) + .delete(delete_service_ip_pool), + ) 
.route("/health", get(health_check)) .with_state(state) } -/// Health check endpoint async fn health_check() -> (StatusCode, Json>) { ( StatusCode::OK, @@ -213,15 +486,18 @@ async fn health_check() -> (StatusCode, Json> ) } -/// GET /api/v1/vpcs - List VPCs async fn list_vpcs( State(state): State, Query(params): Query, headers: HeaderMap, ) -> Result>, (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, params.org_id.as_deref(), params.project_id.as_deref()) - .await?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; let mut req = Request::new(ListVpcsRequest { org_id: tenant.org_id.clone(), project_id: tenant.project_id.clone(), @@ -230,26 +506,34 @@ async fn list_vpcs( }); req.extensions_mut().insert(tenant); - let response = state.vpc_service.list_vpcs(req) + let response = state + .vpc_service + .list_vpcs(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "LIST_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - let vpcs: Vec = response.into_inner().vpcs.into_iter() + let vpcs = response + .into_inner() + .vpcs + .into_iter() .map(VpcResponse::from) .collect(); Ok(Json(SuccessResponse::new(VpcsResponse { vpcs }))) } -/// POST /api/v1/vpcs - Create VPC async fn create_vpc( State(state): State, headers: HeaderMap, Json(req): Json, ) -> Result<(StatusCode, Json>), (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, req.org_id.as_deref(), req.project_id.as_deref()) - .await?; + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; let mut grpc_req = Request::new(CreateVpcRequest { name: req.name, org_id: tenant.org_id.clone(), @@ -259,11 +543,15 @@ async fn create_vpc( }); grpc_req.extensions_mut().insert(tenant); - let response = state.vpc_service.create_vpc(grpc_req) + let response = state + .vpc_service + .create_vpc(grpc_req) 
.await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - let vpc = response.into_inner().vpc + let vpc = response + .into_inner() + .vpc .ok_or_else(|| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", "No VPC returned"))?; Ok(( @@ -272,16 +560,19 @@ async fn create_vpc( )) } -/// GET /api/v1/vpcs/{id} - Get VPC async fn get_vpc( State(state): State, Path(id): Path, Query(params): Query, headers: HeaderMap, ) -> Result>, (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, params.org_id.as_deref(), params.project_id.as_deref()) - .await?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; let mut req = Request::new(GetVpcRequest { id, org_id: tenant.org_id.clone(), @@ -289,32 +580,34 @@ async fn get_vpc( }); req.extensions_mut().insert(tenant); - let response = state.vpc_service.get_vpc(req) + let response = state + .vpc_service + .get_vpc(req) .await - .map_err(|e| { - if e.code() == tonic::Code::NotFound { - error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "VPC not found") - } else { - error_response(StatusCode::INTERNAL_SERVER_ERROR, "GET_FAILED", &e.message()) - } - })?; + .map_err(map_not_found("VPC not found", "GET_FAILED"))?; - let vpc = response.into_inner().vpc + let vpc = response + .into_inner() + .vpc .ok_or_else(|| error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "VPC not found"))?; Ok(Json(SuccessResponse::new(VpcResponse::from(vpc)))) } -/// DELETE /api/v1/vpcs/{id} - Delete VPC async fn delete_vpc( State(state): State, Path(id): Path, Query(params): Query, headers: HeaderMap, -) -> Result<(StatusCode, Json>), (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, params.org_id.as_deref(), params.project_id.as_deref()) - .await?; +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + 
&state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; let mut req = Request::new(DeleteVpcRequest { id: id.clone(), org_id: tenant.org_id.clone(), @@ -322,9 +615,11 @@ async fn delete_vpc( }); req.extensions_mut().insert(tenant); - state.vpc_service.delete_vpc(req) + state + .vpc_service + .delete_vpc(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "DELETE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; Ok(( StatusCode::OK, @@ -332,15 +627,18 @@ async fn delete_vpc( )) } -/// GET /api/v1/subnets - List Subnets async fn list_subnets( State(state): State, Query(params): Query, headers: HeaderMap, ) -> Result>, (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, params.org_id.as_deref(), params.project_id.as_deref()) - .await?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; let mut req = Request::new(ListSubnetsRequest { org_id: tenant.org_id.clone(), project_id: tenant.project_id.clone(), @@ -350,18 +648,22 @@ async fn list_subnets( }); req.extensions_mut().insert(tenant); - let response = state.subnet_service.list_subnets(req) + let response = state + .subnet_service + .list_subnets(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "LIST_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - let subnets: Vec = response.into_inner().subnets.into_iter() + let subnets = response + .into_inner() + .subnets + .into_iter() .map(SubnetResponse::from) .collect(); Ok(Json(SuccessResponse::new(SubnetsResponse { subnets }))) } -/// POST /api/v1/subnets - Create Subnet async fn create_subnet( State(state): State, headers: HeaderMap, @@ -378,11 +680,15 @@ async fn create_subnet( }); grpc_req.extensions_mut().insert(tenant); - let response = state.subnet_service.create_subnet(grpc_req) + let response = state + .subnet_service + .create_subnet(grpc_req) .await - 
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; - let subnet = response.into_inner().subnet + let subnet = response + .into_inner() + .subnet .ok_or_else(|| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", "No subnet returned"))?; Ok(( @@ -391,16 +697,20 @@ async fn create_subnet( )) } -/// DELETE /api/v1/subnets/{id} - Delete Subnet async fn delete_subnet( State(state): State, Path(id): Path, Query(params): Query, headers: HeaderMap, -) -> Result<(StatusCode, Json>), (StatusCode, Json)> { - let tenant = - resolve_rest_tenant(&state, &headers, params.org_id.as_deref(), params.project_id.as_deref()) - .await?; +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; let mut req = Request::new(DeleteSubnetRequest { id: id.clone(), org_id: tenant.org_id.clone(), @@ -409,9 +719,796 @@ async fn delete_subnet( }); req.extensions_mut().insert(tenant); - state.subnet_service.delete_subnet(req) + state + .subnet_service + .delete_subnet(req) .await - .map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "DELETE_FAILED", &e.message()))?; + .map_err(map_tonic_status)?; + + Ok(( + StatusCode::OK, + Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), + )) +} + +async fn list_routers( + State(state): State, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(ListRoutersRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: params.vpc_id.unwrap_or_default(), + page_size: 100, + page_token: String::new(), + }); + req.extensions_mut().insert(tenant); + + let response = state + .router_service 
+ .list_routers(req) + .await + .map_err(map_tonic_status)?; + + let routers = response + .into_inner() + .routers + .into_iter() + .map(RouterResponse::from) + .collect(); + + Ok(Json(SuccessResponse::new(RoutersResponse { routers }))) +} + +async fn create_router( + State(state): State, + headers: HeaderMap, + Json(req): Json, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(CreateRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + vpc_id: req.vpc_id, + name: req.name, + description: req.description.unwrap_or_default(), + gateway_cidr: req.gateway_cidr, + mac_address: req.mac_address, + external_ip: req.external_ip, + }); + grpc_req.extensions_mut().insert(tenant); + + let response = state + .router_service + .create_router(grpc_req) + .await + .map_err(map_tonic_status)?; + + let router = response.into_inner().router.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "CREATE_FAILED", + "No router returned", + ) + })?; + + Ok(( + StatusCode::CREATED, + Json(SuccessResponse::new(RouterResponse::from(router))), + )) +} + +async fn get_router( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(GetRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id, + }); + req.extensions_mut().insert(tenant); + + let response = state + .router_service + .get_router(req) + .await + .map_err(map_not_found("Router not found", "GET_FAILED"))?; + + let router = response + .into_inner() + .router + .ok_or_else(|| error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "Router not found"))?; + + 
Ok(Json(SuccessResponse::new(RouterResponse::from(router)))) +} + +async fn update_router( + State(state): State, + Path(id): Path, + headers: HeaderMap, + Json(req): Json, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(UpdateRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id, + name: req.name.unwrap_or_default(), + description: req.description.unwrap_or_default(), + }); + grpc_req.extensions_mut().insert(tenant); + + let response = state + .router_service + .update_router(grpc_req) + .await + .map_err(map_tonic_status)?; + + let router = response.into_inner().router.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "UPDATE_FAILED", + "No router returned", + ) + })?; + + Ok(Json(SuccessResponse::new(RouterResponse::from(router)))) +} + +async fn delete_router( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(DeleteRouterRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: id.clone(), + }); + req.extensions_mut().insert(tenant); + + state + .router_service + .delete_router(req) + .await + .map_err(map_tonic_status)?; + + Ok(( + StatusCode::OK, + Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), + )) +} + +async fn list_security_groups( + State(state): State, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(ListSecurityGroupsRequest { + org_id: 
tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + page_size: 100, + page_token: String::new(), + }); + req.extensions_mut().insert(tenant); + + let response = state + .security_group_service + .list_security_groups(req) + .await + .map_err(map_tonic_status)?; + + let security_groups = response + .into_inner() + .security_groups + .into_iter() + .map(SecurityGroupResponse::from) + .collect(); + + Ok(Json(SuccessResponse::new(SecurityGroupsResponse { + security_groups, + }))) +} + +async fn create_security_group( + State(state): State, + headers: HeaderMap, + Json(req): Json, +) -> Result< + (StatusCode, Json>), + (StatusCode, Json), +> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(CreateSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + name: req.name, + description: req.description.unwrap_or_default(), + }); + grpc_req.extensions_mut().insert(tenant); + + let response = state + .security_group_service + .create_security_group(grpc_req) + .await + .map_err(map_tonic_status)?; + + let security_group = response.into_inner().security_group.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "CREATE_FAILED", + "No security group returned", + ) + })?; + + Ok(( + StatusCode::CREATED, + Json(SuccessResponse::new(SecurityGroupResponse::from( + security_group, + ))), + )) +} + +async fn get_security_group( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(GetSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id, + }); + req.extensions_mut().insert(tenant); + + let response = state + 
.security_group_service + .get_security_group(req) + .await + .map_err(map_not_found("Security group not found", "GET_FAILED"))?; + + let security_group = response.into_inner().security_group.ok_or_else(|| { + error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "Security group not found") + })?; + + Ok(Json(SuccessResponse::new(SecurityGroupResponse::from( + security_group, + )))) +} + +async fn delete_security_group( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(DeleteSecurityGroupRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: id.clone(), + }); + req.extensions_mut().insert(tenant); + + state + .security_group_service + .delete_security_group(req) + .await + .map_err(map_tonic_status)?; + + Ok(( + StatusCode::OK, + Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), + )) +} + +async fn add_security_group_rule( + State(state): State, + Path(id): Path, + headers: HeaderMap, + Json(req): Json, +) -> Result< + (StatusCode, Json>), + (StatusCode, Json), +> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(AddRuleRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + security_group_id: id, + direction: parse_rule_direction(&req.direction)?, + protocol: parse_ip_protocol(req.protocol.as_deref())?, + port_range_min: req.port_range_min.unwrap_or(0), + port_range_max: req.port_range_max.unwrap_or(0), + remote_cidr: req.remote_cidr.unwrap_or_default(), + remote_group_id: req.remote_group_id.unwrap_or_default(), + description: req.description.unwrap_or_default(), + }); + grpc_req.extensions_mut().insert(tenant); 
+ + let response = state + .security_group_service + .add_rule(grpc_req) + .await + .map_err(map_tonic_status)?; + + let rule = response.into_inner().rule.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "CREATE_FAILED", + "No security group rule returned", + ) + })?; + + Ok(( + StatusCode::CREATED, + Json(SuccessResponse::new(SecurityGroupRuleResponse::from(rule))), + )) +} + +async fn remove_security_group_rule( + State(state): State, + Path((id, rule_id)): Path<(String, String)>, + Query(params): Query, + headers: HeaderMap, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(RemoveRuleRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + security_group_id: id.clone(), + rule_id: rule_id.clone(), + }); + req.extensions_mut().insert(tenant); + + state + .security_group_service + .remove_rule(req) + .await + .map_err(map_tonic_status)?; + + Ok(( + StatusCode::OK, + Json(SuccessResponse::new(serde_json::json!({ + "security_group_id": id, + "rule_id": rule_id, + "deleted": true + }))), + )) +} + +async fn list_ports( + State(state): State, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let subnet_id = required_query_field(params.subnet_id.clone(), "subnet_id")?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(ListPortsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id, + device_id: String::new(), + page_size: 100, + page_token: String::new(), + }); + req.extensions_mut().insert(tenant); + + let response = state + .port_service + .list_ports(req) + .await + .map_err(map_tonic_status)?; + + let ports = response + .into_inner() + .ports + .into_iter() + 
.map(PortResponse::from) + .collect(); + + Ok(Json(SuccessResponse::new(PortsResponse { ports }))) +} + +async fn create_port( + State(state): State, + headers: HeaderMap, + Json(req): Json, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let admin_state_up = req.admin_state_up.unwrap_or(true); + + let mut grpc_req = Request::new(CreatePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: req.subnet_id.clone(), + name: req.name, + description: req.description.unwrap_or_default(), + ip_address: req.ip_address.unwrap_or_default(), + security_group_ids: req.security_group_ids, + }); + grpc_req.extensions_mut().insert(tenant.clone()); + + let mut port = state + .port_service + .create_port(grpc_req) + .await + .map_err(map_tonic_status)? + .into_inner() + .port + .ok_or_else(|| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", "No port returned"))?; + + if !admin_state_up { + let mut update_req = Request::new(UpdatePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id: req.subnet_id, + id: port.id.clone(), + name: String::new(), + description: String::new(), + security_group_ids: Vec::new(), + admin_state_up: false, + }); + update_req.extensions_mut().insert(tenant); + port = state + .port_service + .update_port(update_req) + .await + .map_err(map_tonic_status)? 
+ .into_inner() + .port + .ok_or_else(|| error_response(StatusCode::INTERNAL_SERVER_ERROR, "UPDATE_FAILED", "No port returned"))?; + } + + Ok(( + StatusCode::CREATED, + Json(SuccessResponse::new(PortResponse::from(port))), + )) +} + +async fn get_port( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let subnet_id = required_query_field(params.subnet_id.clone(), "subnet_id")?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(GetPortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id, + id, + }); + req.extensions_mut().insert(tenant); + + let response = state + .port_service + .get_port(req) + .await + .map_err(map_not_found("Port not found", "GET_FAILED"))?; + + let port = response + .into_inner() + .port + .ok_or_else(|| error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "Port not found"))?; + + Ok(Json(SuccessResponse::new(PortResponse::from(port)))) +} + +async fn delete_port( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let subnet_id = required_query_field(params.subnet_id.clone(), "subnet_id")?; + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(DeletePortRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + subnet_id, + id: id.clone(), + }); + req.extensions_mut().insert(tenant); + + state + .port_service + .delete_port(req) + .await + .map_err(map_tonic_status)?; + + Ok(( + StatusCode::OK, + Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), + )) +} + +async fn list_service_ip_pools( + State(state): State, + Query(params): Query, + headers: HeaderMap, +) -> 
Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(ListServiceIpPoolsRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + pool_type: parse_optional_service_ip_pool_type(params.pool_type.as_deref())?, + page_size: 100, + page_token: String::new(), + }); + req.extensions_mut().insert(tenant); + + let response = state + .ipam_service + .list_service_ip_pools(req) + .await + .map_err(map_tonic_status)?; + + let pools = response + .into_inner() + .pools + .into_iter() + .map(ServiceIpPoolResponse::from) + .collect(); + + Ok(Json(SuccessResponse::new(ServiceIpPoolsResponse { pools }))) +} + +async fn create_service_ip_pool( + State(state): State, + headers: HeaderMap, + Json(req): Json, +) -> Result< + (StatusCode, Json>), + (StatusCode, Json), +> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(CreateServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + name: req.name, + description: req.description.unwrap_or_default(), + cidr_block: req.cidr_block, + pool_type: parse_service_ip_pool_type(req.pool_type.as_deref())?, + }); + grpc_req.extensions_mut().insert(tenant); + + let response = state + .ipam_service + .create_service_ip_pool(grpc_req) + .await + .map_err(map_tonic_status)?; + + let pool = response.into_inner().pool.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "CREATE_FAILED", + "No Service IP Pool returned", + ) + })?; + + Ok(( + StatusCode::CREATED, + Json(SuccessResponse::new(ServiceIpPoolResponse::from(pool))), + )) +} + +async fn get_service_ip_pool( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result>, (StatusCode, Json)> { + let tenant = 
resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(GetServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id, + }); + req.extensions_mut().insert(tenant); + + let response = state + .ipam_service + .get_service_ip_pool(req) + .await + .map_err(map_not_found("Service IP Pool not found", "GET_FAILED"))?; + + let pool = response.into_inner().pool.ok_or_else(|| { + error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "Service IP Pool not found") + })?; + + Ok(Json(SuccessResponse::new(ServiceIpPoolResponse::from(pool)))) +} + +async fn update_service_ip_pool( + State(state): State, + Path(id): Path, + headers: HeaderMap, + Json(req): Json, +) -> Result>, (StatusCode, Json)> { + let tenant = resolve_rest_tenant( + &state, + &headers, + req.org_id.as_deref(), + req.project_id.as_deref(), + ) + .await?; + let mut grpc_req = Request::new(UpdateServiceIpPoolRequest { + org_id: tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id, + name: req.name.unwrap_or_default(), + description: req.description.unwrap_or_default(), + }); + grpc_req.extensions_mut().insert(tenant); + + let response = state + .ipam_service + .update_service_ip_pool(grpc_req) + .await + .map_err(map_tonic_status)?; + + let pool = response.into_inner().pool.ok_or_else(|| { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + "UPDATE_FAILED", + "No Service IP Pool returned", + ) + })?; + + Ok(Json(SuccessResponse::new(ServiceIpPoolResponse::from(pool)))) +} + +async fn delete_service_ip_pool( + State(state): State, + Path(id): Path, + Query(params): Query, + headers: HeaderMap, +) -> Result<(StatusCode, Json>), (StatusCode, Json)> +{ + let tenant = resolve_rest_tenant( + &state, + &headers, + params.org_id.as_deref(), + params.project_id.as_deref(), + ) + .await?; + let mut req = Request::new(DeleteServiceIpPoolRequest { + org_id: 
tenant.org_id.clone(), + project_id: tenant.project_id.clone(), + id: id.clone(), + }); + req.extensions_mut().insert(tenant); + + state + .ipam_service + .delete_service_ip_pool(req) + .await + .map_err(map_tonic_status)?; Ok(( StatusCode::OK, @@ -419,7 +1516,6 @@ async fn delete_subnet( )) } -/// Helper to create error response fn error_response( status: StatusCode, code: &str, @@ -460,11 +1556,18 @@ async fn resolve_rest_tenant( } fn map_auth_status(status: tonic::Status) -> (StatusCode, Json) { + map_tonic_status(status) +} + +fn map_tonic_status(status: tonic::Status) -> (StatusCode, Json) { let status_code = match status.code() { Code::Unauthenticated => StatusCode::UNAUTHORIZED, Code::PermissionDenied => StatusCode::FORBIDDEN, Code::InvalidArgument => StatusCode::BAD_REQUEST, Code::NotFound => StatusCode::NOT_FOUND, + Code::AlreadyExists => StatusCode::CONFLICT, + Code::ResourceExhausted => StatusCode::TOO_MANY_REQUESTS, + Code::FailedPrecondition => StatusCode::UNPROCESSABLE_ENTITY, _ => StatusCode::INTERNAL_SERVER_ERROR, }; let code = match status.code() { @@ -472,8 +1575,217 @@ fn map_auth_status(status: tonic::Status) -> (StatusCode, Json) { Code::PermissionDenied => "FORBIDDEN", Code::InvalidArgument => "INVALID_ARGUMENT", Code::NotFound => "NOT_FOUND", + Code::AlreadyExists => "ALREADY_EXISTS", + Code::ResourceExhausted => "RESOURCE_EXHAUSTED", + Code::FailedPrecondition => "FAILED_PRECONDITION", _ => "INTERNAL", }; error_response(status_code, code, status.message()) } + +fn map_not_found( + not_found_message: &'static str, + default_code: &'static str, +) -> impl Fn(tonic::Status) -> (StatusCode, Json) { + move |status| { + if status.code() == Code::NotFound { + error_response(StatusCode::NOT_FOUND, "NOT_FOUND", not_found_message) + } else { + error_response( + StatusCode::INTERNAL_SERVER_ERROR, + default_code, + status.message(), + ) + } + } +} + +fn required_query_field( + value: Option, + field_name: &str, +) -> Result)> { + value.ok_or_else(|| { + 
error_response( + StatusCode::BAD_REQUEST, + "INVALID_ARGUMENT", + &format!("{} is required", field_name), + ) + }) +} + +fn parse_rule_direction(direction: &str) -> Result)> { + match direction.to_ascii_lowercase().as_str() { + "ingress" => Ok(RuleDirection::Ingress as i32), + "egress" => Ok(RuleDirection::Egress as i32), + _ => Err(error_response( + StatusCode::BAD_REQUEST, + "INVALID_ARGUMENT", + "direction must be ingress or egress", + )), + } +} + +fn parse_ip_protocol(protocol: Option<&str>) -> Result)> { + match protocol.unwrap_or("any").to_ascii_lowercase().as_str() { + "any" => Ok(IpProtocol::Any as i32), + "tcp" => Ok(IpProtocol::Tcp as i32), + "udp" => Ok(IpProtocol::Udp as i32), + "icmp" => Ok(IpProtocol::Icmp as i32), + "icmpv6" => Ok(IpProtocol::Icmpv6 as i32), + _ => Err(error_response( + StatusCode::BAD_REQUEST, + "INVALID_ARGUMENT", + "protocol must be any, tcp, udp, icmp, or icmpv6", + )), + } +} + +fn parse_service_ip_pool_type( + pool_type: Option<&str>, +) -> Result)> { + match pool_type.unwrap_or("cluster_ip").to_ascii_lowercase().as_str() { + "cluster_ip" => Ok(ProtoServiceIPPoolType::ClusterIp as i32), + "load_balancer" => Ok(ProtoServiceIPPoolType::LoadBalancer as i32), + "node_port" => Ok(ProtoServiceIPPoolType::NodePort as i32), + _ => Err(error_response( + StatusCode::BAD_REQUEST, + "INVALID_ARGUMENT", + "pool_type must be cluster_ip, load_balancer, or node_port", + )), + } +} + +fn parse_optional_service_ip_pool_type( + pool_type: Option<&str>, +) -> Result)> { + match pool_type { + None => Ok(0), + Some(value) => parse_service_ip_pool_type(Some(value)), + } +} + +fn vpc_status_to_string(status: i32) -> String { + match status { + 1 => "provisioning", + 2 => "active", + 3 => "updating", + 4 => "deleting", + 5 => "error", + _ => "unknown", + } + .to_string() +} + +fn subnet_status_to_string(status: i32) -> String { + match status { + 1 => "provisioning", + 2 => "active", + 3 => "updating", + 4 => "deleting", + 5 => "error", + _ => 
"unknown", + } + .to_string() +} + +fn router_status_to_string(status: i32) -> String { + match status { + 1 => "provisioning", + 2 => "active", + 3 => "updating", + 4 => "deleting", + 5 => "error", + _ => "unknown", + } + .to_string() +} + +fn port_status_to_string(status: i32) -> String { + match status { + 1 => "build", + 2 => "active", + 3 => "down", + 4 => "error", + _ => "unknown", + } + .to_string() +} + +fn device_type_to_string(device_type: i32) -> String { + match device_type { + 1 => "none", + 2 => "vm", + 3 => "router", + 4 => "load_balancer", + 5 => "dhcp_server", + 6 => "other", + _ => "unspecified", + } + .to_string() +} + +fn rule_direction_to_string(direction: i32) -> String { + match direction { + 1 => "ingress", + 2 => "egress", + _ => "unspecified", + } + .to_string() +} + +fn ip_protocol_to_string(protocol: i32) -> String { + match protocol { + 1 => "any", + 2 => "tcp", + 3 => "udp", + 4 => "icmp", + 5 => "icmpv6", + _ => "unspecified", + } + .to_string() +} + +fn service_ip_pool_type_to_string(pool_type: i32) -> String { + match pool_type { + 1 => "cluster_ip", + 2 => "load_balancer", + 3 => "node_port", + _ => "unspecified", + } + .to_string() +} + +fn service_ip_pool_status_to_string(status: i32) -> String { + match status { + 1 => "provisioning", + 2 => "active", + 3 => "updating", + 4 => "deleting", + 5 => "error", + _ => "unknown", + } + .to_string() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn parse_optional_service_ip_pool_type_defaults_to_unspecified() { + assert_eq!(parse_optional_service_ip_pool_type(None).unwrap(), 0); + assert_eq!( + parse_optional_service_ip_pool_type(Some("load_balancer")).unwrap(), + ProtoServiceIPPoolType::LoadBalancer as i32 + ); + } + + #[test] + fn map_tonic_status_preserves_failed_precondition() { + let (status, Json(body)) = + map_tonic_status(tonic::Status::failed_precondition("pool still allocated")); + assert_eq!(status, StatusCode::UNPROCESSABLE_ENTITY); + 
assert_eq!(body.error.code, "FAILED_PRECONDITION"); + } +} diff --git a/prismnet/crates/prismnet-server/src/services/ipam.rs b/prismnet/crates/prismnet-server/src/services/ipam.rs index e002dc4..c528aad 100644 --- a/prismnet/crates/prismnet-server/src/services/ipam.rs +++ b/prismnet/crates/prismnet-server/src/services/ipam.rs @@ -1,6 +1,7 @@ //! IPAM gRPC service implementation for k8shost Service IP allocation use std::net::IpAddr; +use std::net::Ipv4Addr; use std::sync::Arc; use tonic::{Request, Response, Status}; @@ -8,6 +9,7 @@ use prismnet_api::{ ipam_service_server::IpamService, AllocateServiceIpRequest, AllocateServiceIpResponse, CreateServiceIpPoolRequest, CreateServiceIpPoolResponse, + DeleteServiceIpPoolRequest, DeleteServiceIpPoolResponse, GetIpAllocationRequest, GetIpAllocationResponse, GetServiceIpPoolRequest, GetServiceIpPoolResponse, IpAllocation as ProtoIPAllocation, @@ -16,6 +18,7 @@ use prismnet_api::{ ServiceIpPool as ProtoServiceIPPool, ServiceIpPoolStatus as ProtoServiceIPPoolStatus, ServiceIpPoolType as ProtoServiceIPPoolType, + UpdateServiceIpPoolRequest, UpdateServiceIpPoolResponse, }; use iam_service_auth::{ get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, @@ -29,6 +32,8 @@ use crate::NetworkMetadataStore; const ACTION_POOL_CREATE: &str = "network:ip-pools:create"; const ACTION_POOL_READ: &str = "network:ip-pools:read"; const ACTION_POOL_LIST: &str = "network:ip-pools:list"; +const ACTION_POOL_UPDATE: &str = "network:ip-pools:update"; +const ACTION_POOL_DELETE: &str = "network:ip-pools:delete"; const ACTION_ALLOCATE_IP: &str = "network:ip-allocations:create"; const ACTION_RELEASE_IP: &str = "network:ip-allocations:delete"; const ACTION_ALLOC_READ: &str = "network:ip-allocations:read"; @@ -45,6 +50,43 @@ impl IpamServiceImpl { } } +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> { + let (ip, prefix) = cidr + .split_once('/') + .ok_or_else(|| Status::invalid_argument("cidr_block must be 
in a.b.c.d/prefix form"))?; + let ip: Ipv4Addr = ip + .parse() + .map_err(|_| Status::invalid_argument("cidr_block must contain a valid IPv4 address"))?; + let prefix: u8 = prefix + .parse() + .map_err(|_| Status::invalid_argument("cidr_block prefix must be an integer"))?; + if prefix > 32 { + return Err(Status::invalid_argument( + "cidr_block prefix must be between 0 and 32", + )); + } + Ok((ip, prefix)) +} + +fn ensure_unique_pool_name( + existing: &[ServiceIPPool], + desired_name: &str, + exclude: Option, +) -> Result<(), Status> { + if desired_name.trim().is_empty() { + return Err(Status::invalid_argument("Service IP Pool name is required")); + } + if existing + .iter() + .any(|pool| pool.name == desired_name && Some(pool.id) != exclude) + { + return Err(Status::already_exists( + "Service IP Pool name already exists in the tenant", + )); + } + Ok(()) +} + // Proto conversion functions fn pool_to_proto(pool: &ServiceIPPool) -> ProtoServiceIPPool { @@ -127,6 +169,13 @@ impl IpamService for IpamServiceImpl { if req.cidr_block.is_empty() { return Err(Status::invalid_argument("cidr_block is required")); } + parse_ipv4_cidr(&req.cidr_block)?; + let existing = self + .metadata + .list_service_ip_pools(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_pool_name(&existing, &req.name, None)?; let pool_type = pool_type_from_proto(req.pool_type); let mut pool = ServiceIPPool::new( @@ -230,6 +279,115 @@ impl IpamService for IpamServiceImpl { })) } + async fn update_service_ip_pool( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let id = uuid::Uuid::parse_str(&req.id) + .map_err(|_| Status::invalid_argument("Invalid pool ID"))?; + let pool_id = ServiceIPPoolId::from_uuid(id); + self.auth + .authorize( 
+ &tenant, + ACTION_POOL_UPDATE, + &resource_for_tenant( + "service-ip-pool", + pool_id.to_string(), + &org_id, + &project_id, + ), + ) + .await?; + + let name = if !req.name.is_empty() { + Some(req.name) + } else { + None + }; + let description = if !req.description.is_empty() { + Some(req.description) + } else { + None + }; + if let Some(name) = name.as_deref() { + let existing = self + .metadata + .list_service_ip_pools(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_pool_name(&existing, name, Some(pool_id))?; + } + + let pool = self + .metadata + .update_service_ip_pool(&org_id, &project_id, &pool_id, name, description) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Service IP Pool not found"))?; + + Ok(Response::new(UpdateServiceIpPoolResponse { + pool: Some(pool_to_proto(&pool)), + })) + } + + async fn delete_service_ip_pool( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let id = uuid::Uuid::parse_str(&req.id) + .map_err(|_| Status::invalid_argument("Invalid pool ID"))?; + let pool_id = ServiceIPPoolId::from_uuid(id); + self.auth + .authorize( + &tenant, + ACTION_POOL_DELETE, + &resource_for_tenant( + "service-ip-pool", + pool_id.to_string(), + &org_id, + &project_id, + ), + ) + .await?; + + let pool = self + .metadata + .get_service_ip_pool(&org_id, &project_id, &pool_id) + .await + .map_err(|e| Status::internal(e.to_string()))? 
+ .ok_or_else(|| Status::not_found("Service IP Pool not found"))?; + + if !pool.allocated_ips.is_empty() { + return Err(Status::failed_precondition( + "cannot delete Service IP Pool with allocated IPs", + )); + } + + self.metadata + .delete_service_ip_pool(&org_id, &project_id, &pool_id) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Service IP Pool not found"))?; + + Ok(Response::new(DeleteServiceIpPoolResponse {})) + } + async fn allocate_service_ip( &self, request: Request, @@ -383,6 +541,30 @@ impl IpamService for IpamServiceImpl { } } +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rejects_invalid_pool_cidr() { + let err = parse_ipv4_cidr("10.96.0.0/99").unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_duplicate_pool_name() { + let existing = vec![ServiceIPPool::new( + "svc", + "org", + "proj", + "10.96.0.0/24", + ServiceIPPoolType::ClusterIp, + )]; + let err = ensure_unique_pool_name(&existing, "svc", None).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } +} + impl IpamServiceImpl { /// Allocate next available IP from pool's CIDR async fn allocate_next_available_ip(&self, pool: &ServiceIPPool) -> Option { diff --git a/prismnet/crates/prismnet-server/src/services/mod.rs b/prismnet/crates/prismnet-server/src/services/mod.rs index 09510e6..37f2a86 100644 --- a/prismnet/crates/prismnet-server/src/services/mod.rs +++ b/prismnet/crates/prismnet-server/src/services/mod.rs @@ -2,12 +2,14 @@ pub mod ipam; pub mod port; +pub mod router; pub mod security_group; pub mod subnet; pub mod vpc; pub use ipam::IpamServiceImpl; pub use port::PortServiceImpl; +pub use router::RouterServiceImpl; pub use security_group::SecurityGroupServiceImpl; pub use subnet::SubnetServiceImpl; pub use vpc::VpcServiceImpl; diff --git a/prismnet/crates/prismnet-server/src/services/port.rs b/prismnet/crates/prismnet-server/src/services/port.rs index 236b314..49cdca8 
100644 --- a/prismnet/crates/prismnet-server/src/services/port.rs +++ b/prismnet/crates/prismnet-server/src/services/port.rs @@ -1,5 +1,6 @@ //! Port gRPC service implementation +use std::net::Ipv4Addr; use std::sync::Arc; use tonic::{Request, Response, Status}; @@ -70,6 +71,34 @@ impl PortServiceImpl { Ok(subnet) } + + async fn resolve_security_groups_in_tenant( + &self, + org_id: &str, + project_id: &str, + security_group_ids: &[String], + ) -> Result, Status> { + let mut resolved = Vec::with_capacity(security_group_ids.len()); + + for security_group_id in security_group_ids { + let uuid = uuid::Uuid::parse_str(security_group_id) + .map_err(|_| Status::invalid_argument("Invalid SecurityGroup ID"))?; + let security_group_id = SecurityGroupId::from_uuid(uuid); + self.metadata + .get_security_group(org_id, project_id, &security_group_id) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| { + Status::not_found(format!( + "SecurityGroup {} not found", + security_group_id + )) + })?; + resolved.push(security_group_id); + } + + Ok(resolved) + } } fn port_to_proto(port: &Port) -> ProtoPort { @@ -126,6 +155,91 @@ fn proto_to_device_type(device_type: i32) -> DeviceType { } } +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> { + let (ip, prefix) = cidr + .split_once('/') + .ok_or_else(|| Status::invalid_argument("subnet cidr_block must be in a.b.c.d/prefix form"))?; + let ip: Ipv4Addr = ip + .parse() + .map_err(|_| Status::invalid_argument("subnet cidr_block must contain a valid IPv4 address"))?; + let prefix: u8 = prefix + .parse() + .map_err(|_| Status::invalid_argument("subnet cidr_block prefix must be an integer"))?; + if prefix > 32 { + return Err(Status::invalid_argument( + "subnet cidr_block prefix must be between 0 and 32", + )); + } + Ok((ip, prefix)) +} + +fn parse_ipv4(ip: &str, field_name: &str) -> Result { + ip.parse() + .map_err(|_| Status::invalid_argument(format!("{field_name} must be a valid IPv4 address"))) +} + +fn 
network_mask(prefix: u8) -> u32 { + if prefix == 0 { + 0 + } else { + u32::MAX << (32 - prefix) + } +} + +fn cidr_contains_ip(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool { + let mask = network_mask(cidr.1); + (u32::from(cidr.0) & mask) == (u32::from(ip) & mask) +} + +fn validate_requested_ip(subnet: &Subnet, requested_ip: &str, existing_ports: &[Port]) -> Result<(), Status> { + let subnet_cidr = parse_ipv4_cidr(&subnet.cidr_block)?; + let requested_ip = parse_ipv4(requested_ip, "ip_address")?; + let requested_ip_string = requested_ip.to_string(); + + if !cidr_contains_ip(subnet_cidr, requested_ip) { + return Err(Status::invalid_argument( + "ip_address must fall within the subnet cidr_block", + )); + } + + if subnet + .gateway_ip + .as_deref() + .map(|gateway_ip| gateway_ip == requested_ip_string.as_str()) + .unwrap_or(false) + { + return Err(Status::invalid_argument( + "ip_address cannot reuse the subnet gateway_ip", + )); + } + + if existing_ports.iter().any(|port| { + port.ip_address + .as_deref() + .map(|ip_address| ip_address == requested_ip_string.as_str()) + .unwrap_or(false) + }) { + return Err(Status::already_exists( + "ip_address is already allocated in the subnet", + )); + } + + Ok(()) +} + +fn ensure_unique_port_name(existing_ports: &[Port], desired_name: &str, exclude: Option) -> Result<(), Status> { + if desired_name.trim().is_empty() { + return Err(Status::invalid_argument("Port name is required")); + } + if existing_ports + .iter() + .any(|port| port.name == desired_name && Some(port.id) != exclude) + { + return Err(Status::already_exists("Port name already exists in the subnet")); + } + Ok(()) +} + #[tonic::async_trait] impl PortService for PortServiceImpl { async fn create_port( @@ -155,6 +269,13 @@ impl PortService for PortServiceImpl { ) .await?; + let existing_ports = self + .metadata + .list_ports(Some(&subnet_id), None) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_port_name(&existing_ports, &req.name, None)?; + 
let port = Port::new(&req.name, subnet_id); let mut port = port; if !req.description.is_empty() { @@ -163,6 +284,7 @@ impl PortService for PortServiceImpl { // IP allocation: use provided IP or auto-allocate if !req.ip_address.is_empty() { + validate_requested_ip(&subnet, &req.ip_address, &existing_ports)?; port.ip_address = Some(req.ip_address); } else { // Auto-allocate IP from subnet CIDR @@ -171,15 +293,21 @@ impl PortService for PortServiceImpl { .allocate_ip(&org_id, &project_id, &subnet_id) .await .map_err(|e| Status::internal(e.to_string()))?; + if port.ip_address.is_none() { + return Err(Status::resource_exhausted( + "no available IP addresses remain in the subnet", + )); + } } if !req.security_group_ids.is_empty() { - port.security_groups = req - .security_group_ids - .iter() - .filter_map(|id| uuid::Uuid::parse_str(id).ok()) - .map(SecurityGroupId::from_uuid) - .collect(); + port.security_groups = self + .resolve_security_groups_in_tenant( + &org_id, + &project_id, + &req.security_group_ids, + ) + .await?; } self.metadata @@ -192,10 +320,17 @@ impl PortService for PortServiceImpl { .as_ref() .ok_or_else(|| Status::internal("IP allocation failed"))?; - self.ovn + if let Err(error) = self + .ovn .create_logical_switch_port(&port, &subnet.vpc_id, ip_address) .await - .map_err(|e| Status::internal(e.to_string()))?; + { + let _ = self + .metadata + .delete_port(&org_id, &project_id, &subnet_id, &port.id) + .await; + return Err(Status::internal(error.to_string())); + } Ok(Response::new(CreatePortResponse { port: Some(port_to_proto(&port)), @@ -332,15 +467,24 @@ impl PortService for PortServiceImpl { }; let security_group_ids = if !req.security_group_ids.is_empty() { Some( - req.security_group_ids - .iter() - .filter_map(|id| uuid::Uuid::parse_str(id).ok()) - .map(SecurityGroupId::from_uuid) - .collect(), + self.resolve_security_groups_in_tenant( + &org_id, + &project_id, + &req.security_group_ids, + ) + .await?, ) } else { None }; + if let Some(name) = 
name.as_deref() { + let existing_ports = self + .metadata + .list_ports(Some(&subnet_id), None) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_port_name(&existing_ports, name, Some(port_id))?; + } let port = self .metadata @@ -490,3 +634,44 @@ impl PortService for PortServiceImpl { })) } } + +#[cfg(test)] +mod tests { + use super::*; + use prismnet_types::VpcId; + + #[test] + fn rejects_requested_ip_outside_subnet() { + let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24"); + subnet.gateway_ip = Some("10.0.1.1".to_string()); + let err = validate_requested_ip(&subnet, "10.0.2.10", &[]).unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_requested_ip_matching_gateway() { + let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24"); + subnet.gateway_ip = Some("10.0.1.1".to_string()); + let err = validate_requested_ip(&subnet, "10.0.1.1", &[]).unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_duplicate_requested_ip() { + let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24"); + subnet.gateway_ip = Some("10.0.1.1".to_string()); + + let mut existing_port = Port::new("existing", subnet.id); + existing_port.ip_address = Some("10.0.1.10".to_string()); + + let err = validate_requested_ip(&subnet, "10.0.1.10", &[existing_port]).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } + + #[test] + fn rejects_duplicate_port_name() { + let existing = vec![Port::new("frontend", SubnetId::new())]; + let err = ensure_unique_port_name(&existing, "frontend", None).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } +} diff --git a/prismnet/crates/prismnet-server/src/services/router.rs b/prismnet/crates/prismnet-server/src/services/router.rs new file mode 100644 index 0000000..e60a24b --- /dev/null +++ b/prismnet/crates/prismnet-server/src/services/router.rs @@ -0,0 +1,455 @@ +//! 
Router gRPC service implementation + +use std::net::Ipv4Addr; +use std::sync::Arc; + +use tonic::{Request, Response, Status}; + +use iam_service_auth::{ + get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, +}; +use prismnet_api::{ + router_service_server::RouterService, CreateRouterRequest, CreateRouterResponse, + DeleteRouterRequest, DeleteRouterResponse, GetRouterRequest, GetRouterResponse, + ListRoutersRequest, ListRoutersResponse, Router as ProtoRouter, + RouterStatus as ProtoRouterStatus, UpdateRouterRequest, UpdateRouterResponse, +}; +use prismnet_types::{Router, RouterId, RouterStatus, Vpc, VpcId}; + +use crate::{NetworkMetadataStore, OvnClient}; + +const ACTION_ROUTER_CREATE: &str = "network:routers:create"; +const ACTION_ROUTER_READ: &str = "network:routers:read"; +const ACTION_ROUTER_LIST: &str = "network:routers:list"; +const ACTION_ROUTER_UPDATE: &str = "network:routers:update"; +const ACTION_ROUTER_DELETE: &str = "network:routers:delete"; + +#[derive(Clone)] +pub struct RouterServiceImpl { + metadata: Arc, + ovn: Arc, + auth: Arc, +} + +impl RouterServiceImpl { + pub fn new( + metadata: Arc, + ovn: Arc, + auth: Arc, + ) -> Self { + Self { + metadata, + ovn, + auth, + } + } + + async fn validate_vpc_in_tenant( + &self, + org_id: &str, + project_id: &str, + vpc_id: &VpcId, + ) -> Result { + self.metadata + .get_vpc(org_id, project_id, vpc_id) + .await + .map_err(|e| Status::internal(e.to_string()))? 
+ .ok_or_else(|| Status::permission_denied("VPC not in tenant scope")) + } +} + +fn router_to_proto(router: &Router) -> ProtoRouter { + ProtoRouter { + id: router.id.to_string(), + org_id: router.org_id.clone(), + project_id: router.project_id.clone(), + vpc_id: router.vpc_id.to_string(), + name: router.name.clone(), + description: router.description.clone().unwrap_or_default(), + gateway_cidr: router.gateway_cidr.clone(), + mac_address: router.mac_address.clone(), + external_ip: router.external_ip.clone(), + status: status_to_proto(&router.status) as i32, + created_at: router.created_at, + updated_at: router.updated_at, + } +} + +fn status_to_proto(status: &RouterStatus) -> ProtoRouterStatus { + match status { + RouterStatus::Provisioning => ProtoRouterStatus::Provisioning, + RouterStatus::Active => ProtoRouterStatus::Active, + RouterStatus::Updating => ProtoRouterStatus::Updating, + RouterStatus::Deleting => ProtoRouterStatus::Deleting, + RouterStatus::Error => ProtoRouterStatus::Error, + } +} + +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> { + let (ip, prefix) = cidr + .split_once('/') + .ok_or_else(|| Status::invalid_argument("CIDR must be in a.b.c.d/prefix form"))?; + let ip: Ipv4Addr = ip + .parse() + .map_err(|_| Status::invalid_argument("CIDR must contain a valid IPv4 address"))?; + let prefix: u8 = prefix + .parse() + .map_err(|_| Status::invalid_argument("CIDR prefix must be an integer"))?; + if prefix > 32 { + return Err(Status::invalid_argument( + "CIDR prefix must be between 0 and 32", + )); + } + Ok((ip, prefix)) +} + +fn parse_ipv4(ip: &str, field_name: &str) -> Result { + ip.parse() + .map_err(|_| Status::invalid_argument(format!("{field_name} must be a valid IPv4 address"))) +} + +fn validate_mac_address(mac_address: &str) -> Result<(), Status> { + let octets: Vec<_> = mac_address.split(':').collect(); + if octets.len() != 6 + || octets + .iter() + .any(|octet| octet.len() != 2 || u8::from_str_radix(octet, 16).is_err()) + { + 
return Err(Status::invalid_argument( + "mac_address must be a valid MAC address", + )); + } + Ok(()) +} + +fn ipv4_to_u32(ip: Ipv4Addr) -> u32 { + u32::from(ip) +} + +fn network_mask(prefix: u8) -> u32 { + if prefix == 0 { + 0 + } else { + u32::MAX << (32 - prefix) + } +} + +fn cidr_contains(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool { + let mask = network_mask(cidr.1); + (ipv4_to_u32(cidr.0) & mask) == (ipv4_to_u32(ip) & mask) +} + +fn validate_router_inputs(vpc: &Vpc, gateway_cidr: &str, mac_address: &str, external_ip: &str) -> Result<(), Status> { + let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?; + let (gateway_ip, _) = parse_ipv4_cidr(gateway_cidr)?; + let _ = parse_ipv4(external_ip, "external_ip")?; + validate_mac_address(mac_address)?; + + if !cidr_contains(vpc_cidr, gateway_ip) { + return Err(Status::invalid_argument( + "gateway_cidr must use an address within the VPC cidr_block", + )); + } + + Ok(()) +} + +#[tonic::async_trait] +impl RouterService for RouterServiceImpl { + async fn create_router( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let vpc_uuid = + uuid::Uuid::parse_str(&req.vpc_id).map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; + let vpc_id = VpcId::from_uuid(vpc_uuid); + let vpc = self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id).await?; + self.auth + .authorize( + &tenant, + ACTION_ROUTER_CREATE, + &resource_for_tenant("router", "*", &org_id, &project_id), + ) + .await?; + + if req.name.trim().is_empty() { + return Err(Status::invalid_argument("router name is required")); + } + validate_router_inputs(&vpc, &req.gateway_cidr, &req.mac_address, &req.external_ip)?; + + let existing = self + .metadata + .list_routers(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; 
+ if existing.iter().any(|router| router.vpc_id == vpc_id) { + return Err(Status::already_exists("VPC already has a router")); + } + + let mut router = Router::new( + &req.name, + &org_id, + &project_id, + vpc_id, + &req.gateway_cidr, + &req.mac_address, + &req.external_ip, + ); + if !req.description.is_empty() { + router.description = Some(req.description); + } + + let ovn_router_id = self + .ovn + .create_logical_router(&format!("router-{}", router.id)) + .await + .map_err(|e| Status::internal(e.to_string()))?; + let ovn_router_port_id = match self + .ovn + .add_router_port(&ovn_router_id, &vpc_id, &router.gateway_cidr, &router.mac_address) + .await + { + Ok(port_id) => port_id, + Err(error) => { + let _ = self.ovn.delete_logical_router(&ovn_router_id).await; + return Err(Status::internal(error.to_string())); + } + }; + + if let Err(error) = self + .ovn + .configure_snat(&ovn_router_id, &router.external_ip, &vpc.cidr_block) + .await + { + let _ = self.ovn.delete_logical_router(&ovn_router_id).await; + return Err(Status::internal(error.to_string())); + } + + router.ovn_router_id = ovn_router_id; + router.ovn_router_port_id = ovn_router_port_id; + + if let Err(error) = self.metadata.create_router(router.clone()).await { + let _ = self.ovn.delete_logical_router(&router.ovn_router_id).await; + return Err(Status::internal(error.to_string())); + } + + Ok(Response::new(CreateRouterResponse { + router: Some(router_to_proto(&router)), + })) + } + + async fn get_router( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let id = uuid::Uuid::parse_str(&req.id) + .map_err(|_| Status::invalid_argument("Invalid router ID"))?; + let router_id = RouterId::from_uuid(id); + self.auth + .authorize( + &tenant, + ACTION_ROUTER_READ, + 
&resource_for_tenant("router", router_id.to_string(), &org_id, &project_id), + ) + .await?; + + let router = self + .metadata + .get_router(&org_id, &project_id, &router_id) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Router not found"))?; + + Ok(Response::new(GetRouterResponse { + router: Some(router_to_proto(&router)), + })) + } + + async fn list_routers( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + self.auth + .authorize( + &tenant, + ACTION_ROUTER_LIST, + &resource_for_tenant("router", "*", &org_id, &project_id), + ) + .await?; + let req = request.into_inner(); + + let vpc_id = if req.vpc_id.is_empty() { + None + } else { + let vpc_uuid = uuid::Uuid::parse_str(&req.vpc_id) + .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; + Some(VpcId::from_uuid(vpc_uuid)) + }; + + if let Some(vpc_id) = vpc_id.as_ref() { + self.validate_vpc_in_tenant(&org_id, &project_id, vpc_id).await?; + } + + let mut routers = self + .metadata + .list_routers(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + if let Some(vpc_id) = vpc_id { + routers.retain(|router| router.vpc_id == vpc_id); + } + + Ok(Response::new(ListRoutersResponse { + routers: routers.iter().map(router_to_proto).collect(), + next_page_token: String::new(), + })) + } + + async fn update_router( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let id = uuid::Uuid::parse_str(&req.id) + .map_err(|_| Status::invalid_argument("Invalid router ID"))?; + let router_id = RouterId::from_uuid(id); + self.auth + 
.authorize( + &tenant, + ACTION_ROUTER_UPDATE, + &resource_for_tenant("router", router_id.to_string(), &org_id, &project_id), + ) + .await?; + + let name = if req.name.is_empty() { + None + } else { + Some(req.name) + }; + let description = if req.description.is_empty() { + None + } else { + Some(req.description) + }; + + let router = self + .metadata + .update_router(&org_id, &project_id, &router_id, name, description) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Router not found"))?; + + Ok(Response::new(UpdateRouterResponse { + router: Some(router_to_proto(&router)), + })) + } + + async fn delete_router( + &self, + request: Request, + ) -> Result, Status> { + let tenant = get_tenant_context(&request)?; + let (org_id, project_id) = resolve_tenant_ids_from_context( + &tenant, + &request.get_ref().org_id, + &request.get_ref().project_id, + )?; + let req = request.into_inner(); + + let id = uuid::Uuid::parse_str(&req.id) + .map_err(|_| Status::invalid_argument("Invalid router ID"))?; + let router_id = RouterId::from_uuid(id); + self.auth + .authorize( + &tenant, + ACTION_ROUTER_DELETE, + &resource_for_tenant("router", router_id.to_string(), &org_id, &project_id), + ) + .await?; + + let router = self + .metadata + .get_router(&org_id, &project_id, &router_id) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Router not found"))?; + + self.ovn + .delete_logical_router(&router.ovn_router_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + + self.metadata + .delete_router(&org_id, &project_id, &router_id) + .await + .map_err(|e| Status::internal(e.to_string()))? 
+ .ok_or_else(|| Status::not_found("Router not found"))?; + + Ok(Response::new(DeleteRouterResponse {})) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rejects_invalid_mac_address() { + let err = validate_mac_address("02:00:00:00:00").unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_gateway_outside_vpc_cidr() { + let vpc = Vpc::new("test", "org", "proj", "10.0.0.0/16"); + let err = validate_router_inputs( + &vpc, + "10.1.0.1/24", + "02:00:00:00:00:01", + "203.0.113.10", + ) + .unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn accepts_router_inputs_inside_vpc_cidr() { + let vpc = Vpc::new("test", "org", "proj", "10.0.0.0/16"); + validate_router_inputs( + &vpc, + "10.0.0.1/24", + "02:00:00:00:00:01", + "203.0.113.10", + ) + .unwrap(); + } +} diff --git a/prismnet/crates/prismnet-server/src/services/security_group.rs b/prismnet/crates/prismnet-server/src/services/security_group.rs index 8b64649..7c2a033 100644 --- a/prismnet/crates/prismnet-server/src/services/security_group.rs +++ b/prismnet/crates/prismnet-server/src/services/security_group.rs @@ -15,7 +15,9 @@ use prismnet_api::{ use iam_service_auth::{ get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, }; -use prismnet_types::{IpProtocol, RuleDirection, SecurityGroup, SecurityGroupId, SecurityGroupRule}; +use prismnet_types::{ + IpProtocol, Port, RuleDirection, SecurityGroup, SecurityGroupId, SecurityGroupRule, +}; use crate::ovn::{build_acl_match, calculate_priority}; use crate::{NetworkMetadataStore, OvnClient}; @@ -49,6 +51,41 @@ impl SecurityGroupServiceImpl { } } +fn ensure_unique_security_group_name( + existing: &[SecurityGroup], + desired_name: &str, + exclude: Option, +) -> Result<(), Status> { + if desired_name.trim().is_empty() { + return Err(Status::invalid_argument("SecurityGroup name is required")); + } + if existing + .iter() + .any(|sg| sg.name == 
desired_name && Some(sg.id) != exclude) + { + return Err(Status::already_exists( + "SecurityGroup name already exists in the tenant", + )); + } + Ok(()) +} + +fn security_group_is_referenced( + security_group_id: SecurityGroupId, + security_groups: &[SecurityGroup], + ports: &[Port], +) -> bool { + ports + .iter() + .any(|port| port.security_groups.contains(&security_group_id)) + || security_groups.iter().any(|sg| { + sg.id != security_group_id + && sg.rules + .iter() + .any(|rule| rule.remote_group_id == Some(security_group_id)) + }) +} + fn security_group_to_proto(sg: &SecurityGroup) -> ProtoSecurityGroup { ProtoSecurityGroup { id: sg.id.to_string(), @@ -137,6 +174,12 @@ impl SecurityGroupService for SecurityGroupServiceImpl { ) .await?; let req = request.into_inner(); + let existing = self + .metadata + .list_security_groups(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_security_group_name(&existing, &req.name, None)?; let sg = SecurityGroup::new(&req.name, &org_id, &project_id); let mut sg = sg; @@ -256,6 +299,14 @@ impl SecurityGroupService for SecurityGroupServiceImpl { } else { None }; + if let Some(name) = name.as_deref() { + let existing = self + .metadata + .list_security_groups(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_security_group_name(&existing, name, Some(sg_id))?; + } let sg = self .metadata @@ -292,6 +343,38 @@ impl SecurityGroupService for SecurityGroupServiceImpl { ) .await?; + let security_groups = self + .metadata + .list_security_groups(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + let vpcs = self + .metadata + .list_vpcs(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + let mut ports = Vec::new(); + for vpc in vpcs { + let subnets = self + .metadata + .list_subnets(&org_id, &project_id, &vpc.id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + for subnet in 
subnets { + ports.extend( + self.metadata + .list_ports(Some(&subnet.id), None) + .await + .map_err(|e| Status::internal(e.to_string()))?, + ); + } + } + if security_group_is_referenced(sg_id, &security_groups, &ports) { + return Err(Status::failed_precondition( + "cannot delete SecurityGroup while it is referenced by ports or rules", + )); + } + self.metadata .delete_security_group(&org_id, &project_id, &sg_id) .await @@ -440,3 +523,25 @@ impl SecurityGroupService for SecurityGroupServiceImpl { Ok(Response::new(RemoveRuleResponse {})) } } + +#[cfg(test)] +mod tests { + use super::*; + use prismnet_types::SubnetId; + + #[test] + fn rejects_duplicate_security_group_name() { + let existing = vec![SecurityGroup::new("web", "org", "proj")]; + let err = ensure_unique_security_group_name(&existing, "web", None).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } + + #[test] + fn detects_security_group_references() { + let sg = SecurityGroup::new("web", "org", "proj"); + let sg_id = sg.id; + let mut port = Port::new("port", SubnetId::new()); + port.security_groups = vec![sg_id]; + assert!(security_group_is_referenced(sg_id, &[sg], &[port])); + } +} diff --git a/prismnet/crates/prismnet-server/src/services/subnet.rs b/prismnet/crates/prismnet-server/src/services/subnet.rs index b158cfa..fdd3eaf 100644 --- a/prismnet/crates/prismnet-server/src/services/subnet.rs +++ b/prismnet/crates/prismnet-server/src/services/subnet.rs @@ -1,18 +1,19 @@ //! 
Subnet gRPC service implementation +use std::net::Ipv4Addr; use std::sync::Arc; use tonic::{Request, Response, Status}; +use iam_service_auth::{ + get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, +}; use prismnet_api::{ subnet_service_server::SubnetService, CreateSubnetRequest, CreateSubnetResponse, DeleteSubnetRequest, DeleteSubnetResponse, GetSubnetRequest, GetSubnetResponse, ListSubnetsRequest, ListSubnetsResponse, Subnet as ProtoSubnet, SubnetStatus as ProtoSubnetStatus, UpdateSubnetRequest, UpdateSubnetResponse, }; -use iam_service_auth::{ - get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, -}; -use prismnet_types::{Subnet, SubnetId, SubnetStatus, VpcId}; +use prismnet_types::{Subnet, SubnetId, SubnetStatus, Vpc, VpcId}; use crate::NetworkMetadataStore; @@ -38,21 +39,129 @@ impl SubnetServiceImpl { org_id: &str, project_id: &str, vpc_id: &VpcId, - ) -> Result<(), Status> { - if self - .metadata + ) -> Result { + self.metadata .get_vpc(org_id, project_id, vpc_id) .await .map_err(|e| Status::internal(e.to_string()))? 
- .is_none() - { - return Err(Status::permission_denied("VPC not in tenant scope")); - } - - Ok(()) + .ok_or_else(|| Status::permission_denied("VPC not in tenant scope")) } } +fn parse_ipv4_cidr(cidr: &str, field_name: &str) -> Result<(Ipv4Addr, u8), Status> { + let (ip, prefix) = cidr.split_once('/').ok_or_else(|| { + Status::invalid_argument(format!("{field_name} must be in a.b.c.d/prefix form")) + })?; + let ip: Ipv4Addr = ip.parse().map_err(|_| { + Status::invalid_argument(format!("{field_name} must contain a valid IPv4 address")) + })?; + let prefix: u8 = prefix + .parse() + .map_err(|_| Status::invalid_argument(format!("{field_name} prefix must be an integer")))?; + if prefix > 32 { + return Err(Status::invalid_argument(format!( + "{field_name} prefix must be between 0 and 32" + ))); + } + Ok((ip, prefix)) +} + +fn parse_ipv4(ip: &str, field_name: &str) -> Result { + ip.parse() + .map_err(|_| Status::invalid_argument(format!("{field_name} must be a valid IPv4 address"))) +} + +fn network_mask(prefix: u8) -> u32 { + if prefix == 0 { + 0 + } else { + u32::MAX << (32 - prefix) + } +} + +fn cidr_range(cidr: (Ipv4Addr, u8)) -> (u32, u32) { + let mask = network_mask(cidr.1); + let start = u32::from(cidr.0) & mask; + let size = if cidr.1 == 32 { + 1 + } else { + 1u64 << (32 - cidr.1) + }; + let end = start + (size as u32) - 1; + (start, end) +} + +fn cidr_contains_ip(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool { + let mask = network_mask(cidr.1); + (u32::from(cidr.0) & mask) == (u32::from(ip) & mask) +} + +fn cidr_contains_cidr(parent: (Ipv4Addr, u8), child: (Ipv4Addr, u8)) -> bool { + let (parent_start, parent_end) = cidr_range(parent); + let (child_start, child_end) = cidr_range(child); + child_start >= parent_start && child_end <= parent_end +} + +fn cidr_overlaps(a: (Ipv4Addr, u8), b: (Ipv4Addr, u8)) -> bool { + let (a_start, a_end) = cidr_range(a); + let (b_start, b_end) = cidr_range(b); + a_start <= b_end && b_start <= a_end +} + +fn validate_subnet_inputs( + 
vpc: &Vpc, + cidr_block: &str, + gateway_ip: Option<&str>, + sibling_cidrs: impl IntoIterator, +) -> Result<(), Status> { + let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block, "vpc cidr_block")?; + let subnet_cidr = parse_ipv4_cidr(cidr_block, "cidr_block")?; + if !cidr_contains_cidr(vpc_cidr, subnet_cidr) { + return Err(Status::invalid_argument( + "subnet cidr_block must be contained within the VPC cidr_block", + )); + } + + if let Some(gateway_ip) = gateway_ip { + let gateway_ip = parse_ipv4(gateway_ip, "gateway_ip")?; + if !cidr_contains_ip(subnet_cidr, gateway_ip) { + return Err(Status::invalid_argument( + "gateway_ip must fall within the subnet cidr_block", + )); + } + } + + for sibling in sibling_cidrs { + let sibling_cidr = parse_ipv4_cidr(&sibling, "existing subnet cidr_block")?; + if cidr_overlaps(subnet_cidr, sibling_cidr) { + return Err(Status::already_exists( + "subnet cidr_block overlaps an existing subnet in the VPC", + )); + } + } + + Ok(()) +} + +fn ensure_unique_subnet_name( + existing: &[Subnet], + desired_name: &str, + exclude: Option, +) -> Result<(), Status> { + if desired_name.trim().is_empty() { + return Err(Status::invalid_argument("Subnet name is required")); + } + if existing + .iter() + .any(|subnet| subnet.name == desired_name && Some(subnet.id) != exclude) + { + return Err(Status::already_exists( + "Subnet name already exists in the VPC", + )); + } + Ok(()) +} + fn subnet_to_proto(subnet: &Subnet) -> ProtoSubnet { ProtoSubnet { id: subnet.id.to_string(), @@ -93,7 +202,8 @@ impl SubnetService for SubnetServiceImpl { .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; let vpc_id = VpcId::from_uuid(vpc_id); - self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) + let vpc = self + .validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) .await?; self.auth .authorize( @@ -103,6 +213,22 @@ impl SubnetService for SubnetServiceImpl { ) .await?; + let existing_subnets = self + .metadata + .list_subnets(&org_id, &project_id, &vpc_id) + 
.await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_subnet_name(&existing_subnets, &req.name, None)?; + validate_subnet_inputs( + &vpc, + &req.cidr_block, + (!req.gateway_ip.is_empty()).then_some(req.gateway_ip.as_str()), + existing_subnets + .into_iter() + .map(|subnet| subnet.cidr_block) + .collect::>(), + )?; + let subnet = Subnet::new(&req.name, vpc_id, &req.cidr_block); let mut subnet = subnet; if !req.description.is_empty() { @@ -112,6 +238,7 @@ impl SubnetService for SubnetServiceImpl { subnet.gateway_ip = Some(req.gateway_ip); } subnet.dhcp_enabled = req.dhcp_enabled; + subnet.status = SubnetStatus::Active; self.metadata .create_subnet(subnet.clone()) @@ -138,12 +265,6 @@ impl SubnetService for SubnetServiceImpl { let id = uuid::Uuid::parse_str(&req.id) .map_err(|_| Status::invalid_argument("Invalid Subnet ID"))?; let subnet_id = SubnetId::from_uuid(id); - let vpc_uuid = uuid::Uuid::parse_str(&req.vpc_id) - .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; - let vpc_id = VpcId::from_uuid(vpc_uuid); - - self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) - .await?; self.auth .authorize( &tenant, @@ -152,12 +273,28 @@ impl SubnetService for SubnetServiceImpl { ) .await?; - let subnet = self - .metadata - .get_subnet(&vpc_id, &subnet_id) - .await - .map_err(|e| Status::internal(e.to_string()))? - .ok_or_else(|| Status::not_found("Subnet not found"))?; + let subnet = if req.vpc_id.trim().is_empty() { + let subnet = self + .metadata + .find_subnet_by_id(&subnet_id) + .await + .map_err(|e| Status::internal(e.to_string()))? 
+ .ok_or_else(|| Status::not_found("Subnet not found"))?; + self.validate_vpc_in_tenant(&org_id, &project_id, &subnet.vpc_id) + .await?; + subnet + } else { + let vpc_uuid = uuid::Uuid::parse_str(&req.vpc_id) + .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; + let vpc_id = VpcId::from_uuid(vpc_uuid); + self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) + .await?; + self.metadata + .get_subnet(&vpc_id, &subnet_id) + .await + .map_err(|e| Status::internal(e.to_string()))? + .ok_or_else(|| Status::not_found("Subnet not found"))? + }; Ok(Response::new(GetSubnetResponse { subnet: Some(subnet_to_proto(&subnet)), @@ -191,6 +328,9 @@ impl SubnetService for SubnetServiceImpl { return Err(Status::invalid_argument("vpc_id is required")); }; + self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) + .await?; + let subnets = self .metadata .list_subnets(&org_id, &project_id, &vpc_id) @@ -242,6 +382,14 @@ impl SubnetService for SubnetServiceImpl { } else { None }; + if let Some(name) = name.as_deref() { + let existing_subnets = self + .metadata + .list_subnets(&org_id, &project_id, &vpc_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_subnet_name(&existing_subnets, name, Some(subnet_id))?; + } let subnet = self .metadata @@ -292,6 +440,17 @@ impl SubnetService for SubnetServiceImpl { ) .await?; + let ports = self + .metadata + .list_ports(Some(&subnet_id), None) + .await + .map_err(|e| Status::internal(e.to_string()))?; + if !ports.is_empty() { + return Err(Status::failed_precondition( + "cannot delete subnet while ports still exist", + )); + } + self.metadata .delete_subnet(&org_id, &project_id, &vpc_id, &subnet_id) .await @@ -300,3 +459,45 @@ impl SubnetService for SubnetServiceImpl { Ok(Response::new(DeleteSubnetResponse {})) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rejects_subnet_outside_vpc() { + let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16"); + let err = + validate_subnet_inputs(&vpc, 
"10.1.0.0/24", None, Vec::::new()).unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_gateway_outside_subnet() { + let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16"); + let err = + validate_subnet_inputs(&vpc, "10.0.1.0/24", Some("10.0.2.1"), Vec::::new()) + .unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_overlapping_subnet() { + let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16"); + let err = validate_subnet_inputs( + &vpc, + "10.0.1.0/24", + Some("10.0.1.1"), + vec!["10.0.1.128/25".to_string()], + ) + .unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } + + #[test] + fn rejects_duplicate_subnet_name() { + let existing = vec![Subnet::new("app", VpcId::new(), "10.0.1.0/24")]; + let err = ensure_unique_subnet_name(&existing, "app", None).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } +} diff --git a/prismnet/crates/prismnet-server/src/services/vpc.rs b/prismnet/crates/prismnet-server/src/services/vpc.rs index fcc3700..1e31181 100644 --- a/prismnet/crates/prismnet-server/src/services/vpc.rs +++ b/prismnet/crates/prismnet-server/src/services/vpc.rs @@ -1,5 +1,6 @@ //! 
VPC gRPC service implementation +use std::net::Ipv4Addr; use std::sync::Arc; use tonic::{Request, Response, Status}; @@ -42,6 +43,37 @@ impl VpcServiceImpl { } } +fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> { + let (ip, prefix) = cidr + .split_once('/') + .ok_or_else(|| Status::invalid_argument("cidr_block must be in a.b.c.d/prefix form"))?; + let ip: Ipv4Addr = ip + .parse() + .map_err(|_| Status::invalid_argument("cidr_block must contain a valid IPv4 address"))?; + let prefix: u8 = prefix + .parse() + .map_err(|_| Status::invalid_argument("cidr_block prefix must be an integer"))?; + if prefix > 32 { + return Err(Status::invalid_argument( + "cidr_block prefix must be between 0 and 32", + )); + } + Ok((ip, prefix)) +} + +fn ensure_unique_vpc_name(existing: &[Vpc], desired_name: &str, exclude: Option) -> Result<(), Status> { + if desired_name.trim().is_empty() { + return Err(Status::invalid_argument("VPC name is required")); + } + if existing + .iter() + .any(|vpc| vpc.name == desired_name && Some(vpc.id) != exclude) + { + return Err(Status::already_exists("VPC name already exists")); + } + Ok(()) +} + fn vpc_to_proto(vpc: &Vpc) -> ProtoVpc { ProtoVpc { id: vpc.id.to_string(), @@ -86,6 +118,13 @@ impl VpcService for VpcServiceImpl { ) .await?; let req = request.into_inner(); + let existing = self + .metadata + .list_vpcs(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_vpc_name(&existing, &req.name, None)?; + parse_ipv4_cidr(&req.cidr_block)?; let vpc = Vpc::new(&req.name, &org_id, &project_id, &req.cidr_block); let mut vpc = vpc; @@ -93,15 +132,19 @@ impl VpcService for VpcServiceImpl { vpc.description = Some(req.description); } - self.metadata - .create_vpc(vpc.clone()) - .await - .map_err(|e| Status::internal(e.to_string()))?; - - self.ovn + if let Err(error) = self + .ovn .create_logical_switch(&vpc.id, &vpc.cidr_block) .await - .map_err(|e| Status::internal(e.to_string()))?; + { + return 
Err(Status::internal(error.to_string())); + } + + vpc.status = VpcStatus::Active; + if let Err(error) = self.metadata.create_vpc(vpc.clone()).await { + let _ = self.ovn.delete_logical_switch(&vpc.id).await; + return Err(Status::internal(error.to_string())); + } Ok(Response::new(CreateVpcResponse { vpc: Some(vpc_to_proto(&vpc)), @@ -208,6 +251,14 @@ impl VpcService for VpcServiceImpl { } else { Some(req.description) }; + if let Some(name) = name.as_deref() { + let existing = self + .metadata + .list_vpcs(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + ensure_unique_vpc_name(&existing, name, Some(vpc_id))?; + } let vpc = self .metadata @@ -244,6 +295,28 @@ impl VpcService for VpcServiceImpl { ) .await?; + let subnets = self + .metadata + .list_subnets(&org_id, &project_id, &vpc_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + if !subnets.is_empty() { + return Err(Status::failed_precondition( + "cannot delete VPC while subnets still exist", + )); + } + + let routers = self + .metadata + .list_routers(&org_id, &project_id) + .await + .map_err(|e| Status::internal(e.to_string()))?; + if routers.iter().any(|router| router.vpc_id == vpc_id) { + return Err(Status::failed_precondition( + "cannot delete VPC while routers still exist", + )); + } + self.metadata .delete_vpc(&org_id, &project_id, &vpc_id) .await @@ -258,3 +331,21 @@ impl VpcService for VpcServiceImpl { Ok(Response::new(DeleteVpcResponse {})) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn rejects_invalid_vpc_cidr() { + let err = parse_ipv4_cidr("10.0.0.0/99").unwrap_err(); + assert_eq!(err.code(), tonic::Code::InvalidArgument); + } + + #[test] + fn rejects_duplicate_vpc_name() { + let existing = vec![Vpc::new("prod", "org", "proj", "10.0.0.0/16")]; + let err = ensure_unique_vpc_name(&existing, "prod", None).unwrap_err(); + assert_eq!(err.code(), tonic::Code::AlreadyExists); + } +} diff --git a/prismnet/crates/prismnet-types/src/lib.rs 
b/prismnet/crates/prismnet-types/src/lib.rs index 0b94286..bd82899 100644 --- a/prismnet/crates/prismnet-types/src/lib.rs +++ b/prismnet/crates/prismnet-types/src/lib.rs @@ -4,6 +4,7 @@ mod dhcp; mod port; +mod router; mod security_group; mod service_ip_pool; mod subnet; @@ -11,6 +12,7 @@ mod vpc; pub use dhcp::*; pub use port::*; +pub use router::*; pub use security_group::*; pub use service_ip_pool::*; pub use subnet::*; diff --git a/prismnet/crates/prismnet-types/src/router.rs b/prismnet/crates/prismnet-types/src/router.rs new file mode 100644 index 0000000..1083963 --- /dev/null +++ b/prismnet/crates/prismnet-types/src/router.rs @@ -0,0 +1,129 @@ +//! Router types + +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +use crate::VpcId; + +/// Unique identifier for a Router +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)] +pub struct RouterId(Uuid); + +impl RouterId { + pub fn new() -> Self { + Self(Uuid::new_v4()) + } + + pub fn from_uuid(uuid: Uuid) -> Self { + Self(uuid) + } + + pub fn as_uuid(&self) -> &Uuid { + &self.0 + } +} + +impl Default for RouterId { + fn default() -> Self { + Self::new() + } +} + +impl std::fmt::Display for RouterId { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +/// Router lifecycle status +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +pub enum RouterStatus { + Provisioning, + Active, + Updating, + Deleting, + Error, +} + +impl Default for RouterStatus { + fn default() -> Self { + Self::Provisioning + } +} + +/// Tenant-scoped logical router with SNAT configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Router { + pub id: RouterId, + pub org_id: String, + pub project_id: String, + pub vpc_id: VpcId, + pub name: String, + pub description: Option, + pub gateway_cidr: String, + pub mac_address: String, + pub external_ip: String, + pub ovn_router_id: String, + pub 
ovn_router_port_id: String, + pub status: RouterStatus, + pub created_at: u64, + pub updated_at: u64, +} + +impl Router { + pub fn new( + name: impl Into, + org_id: impl Into, + project_id: impl Into, + vpc_id: VpcId, + gateway_cidr: impl Into, + mac_address: impl Into, + external_ip: impl Into, + ) -> Self { + let now = std::time::SystemTime::now() + .duration_since(std::time::UNIX_EPOCH) + .unwrap_or_default() + .as_secs(); + + Self { + id: RouterId::new(), + org_id: org_id.into(), + project_id: project_id.into(), + vpc_id, + name: name.into(), + description: None, + gateway_cidr: gateway_cidr.into(), + mac_address: mac_address.into(), + external_ip: external_ip.into(), + ovn_router_id: String::new(), + ovn_router_port_id: String::new(), + status: RouterStatus::Active, + created_at: now, + updated_at: now, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_router_creation() { + let router = Router::new( + "edge", + "org-1", + "proj-1", + VpcId::new(), + "10.0.0.1/24", + "02:00:00:00:00:01", + "203.0.113.10", + ); + + assert_eq!(router.name, "edge"); + assert_eq!(router.external_ip, "203.0.113.10"); + assert!(router.ovn_router_id.is_empty()); + assert_eq!(router.status, RouterStatus::Active); + } +} diff --git a/scripts/check_workspace_source_roots.py b/scripts/check_workspace_source_roots.py new file mode 100644 index 0000000..b1f02e7 --- /dev/null +++ b/scripts/check_workspace_source_roots.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 + +from __future__ import annotations + +import re +import sys +import tomllib +from pathlib import Path +from typing import Any + + +def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]: + source = flake_path.read_text() + match = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", source, re.S) + if match is None: + raise ValueError(f"Could not find workspaceSourceRoots in {flake_path}") + + roots: dict[str, list[str]] = {} + for name, body in 
re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", match.group(1), re.S): + roots[name] = re.findall(r'"([^"]+)"', body) + return roots + + +def collect_path_dependencies(value: Any) -> list[str]: + found: list[str] = [] + + if isinstance(value, dict): + path = value.get("path") + if isinstance(path, str): + found.append(path) + for nested in value.values(): + found.extend(collect_path_dependencies(nested)) + elif isinstance(value, list): + for nested in value: + found.extend(collect_path_dependencies(nested)) + + return found + + +def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]: + workspace_manifest = repo_root / workspace_name / "Cargo.toml" + manifests = [workspace_manifest] + workspace_data = tomllib.loads(workspace_manifest.read_text()) + members = workspace_data.get("workspace", {}).get("members", []) + + for member in members: + for candidate in (workspace_manifest.parent).glob(member): + manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml" + if manifest.is_file(): + manifests.append(manifest) + + unique_manifests: list[Path] = [] + seen: set[Path] = set() + for manifest in manifests: + resolved = manifest.resolve() + if resolved in seen: + continue + seen.add(resolved) + unique_manifests.append(manifest) + return unique_manifests + + +def required_root(dep_rel: Path) -> str: + parts = dep_rel.parts + if not parts: + return "" + if parts[0] == "crates" and len(parts) >= 2: + return "/".join(parts[:2]) + return parts[0] + + +def is_covered(dep_rel: str, configured_roots: list[str]) -> bool: + return any(dep_rel == root or dep_rel.startswith(f"{root}/") for root in configured_roots) + + +def main() -> int: + repo_root = Path(sys.argv[1]).resolve() if len(sys.argv) > 1 else Path.cwd().resolve() + flake_path = repo_root / "flake.nix" + workspace_roots = extract_workspace_source_roots(flake_path) + + failures: list[str] = [] + + for workspace_name, configured_roots in sorted(workspace_roots.items()): + 
workspace_manifest = repo_root / workspace_name / "Cargo.toml" + if not workspace_manifest.is_file(): + continue + + for manifest in workspace_manifests(repo_root, workspace_name): + manifest_data = tomllib.loads(manifest.read_text()) + for dep_path in collect_path_dependencies(manifest_data): + dependency_dir = (manifest.parent / dep_path).resolve() + try: + dep_rel = dependency_dir.relative_to(repo_root) + except ValueError: + continue + + dep_rel_str = dep_rel.as_posix() + if is_covered(dep_rel_str, configured_roots): + continue + + needed = required_root(dep_rel) + manifest_rel = manifest.relative_to(repo_root).as_posix() + failures.append( + f"{workspace_name}: missing source root '{needed}' for dependency " + f"'{dep_rel_str}' referenced by {manifest_rel}" + ) + + if failures: + print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr) + for failure in failures: + print(f" - {failure}", file=sys.stderr) + return 1 + + print("workspaceSourceRoots covers all workspace path dependencies.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main())