From 4ab47b1726ff87ddb8f224334e441bbe4864677d Mon Sep 17 00:00:00 2001
From: centra
Date: Sat, 4 Apr 2026 00:07:43 +0900
Subject: [PATCH] Implement declarative tenant networking and local VM
dataplane
Add tenant-scoped PrismNET routing, security-group, port, and service-IP APIs plus a deployer reconciler and Nix module that apply declarative tenant network state.
Teach PlasmaVMC to realize PrismNET NICs as a concrete local worker dataplane with Linux bridges, dnsmasq-backed DHCP, tap devices, richer network metadata, stable managed-volume IDs, and file:// image imports.
Expand the VM cluster validation around the new path, including the guest webapp demo, restart and cross-node migration checks, IAM listener reservation hardening, and a flake workspace-source-root audit so Nix builds keep path dependencies complete.
---
deployer/Cargo.lock | 14 +
deployer/Cargo.toml | 1 +
.../crates/plasmacloud-reconciler/Cargo.toml | 3 +
.../crates/plasmacloud-reconciler/src/auth.rs | 79 +
.../crates/plasmacloud-reconciler/src/main.rs | 8 +
.../src/tenant_network.rs | 2053 +++++++++++++++++
docs/testing.md | 5 +-
flake.nix | 122 +
iam/Cargo.lock | 1 +
iam/crates/iam-server/Cargo.toml | 1 +
iam/crates/iam-server/src/main.rs | 21 +-
k8shost/Cargo.lock | 14 +
nix/modules/default.nix | 4 +
nix/modules/plasmacloud-tenant-networking.nix | 373 +++
nix/modules/plasmavmc.nix | 5 +-
nix/modules/service-port-reservations.nix | 10 +
nix/test-cluster/README.md | 4 +
nix/test-cluster/node01.nix | 88 +
nix/test-cluster/node06.nix | 20 +
nix/test-cluster/run-cluster.sh | 579 +++--
nix/test-cluster/vm-guest-image.nix | 159 +-
plasmavmc/Cargo.lock | 1476 +-----------
plasmavmc/crates/plasmavmc-kvm/src/env.rs | 35 -
plasmavmc/crates/plasmavmc-kvm/src/lib.rs | 183 +-
plasmavmc/crates/plasmavmc-kvm/src/network.rs | 678 ++++++
plasmavmc/crates/plasmavmc-server/Cargo.toml | 8 +-
.../plasmavmc-server/src/artifact_store.rs | 99 +-
.../plasmavmc-server/src/prismnet_client.rs | 152 +-
plasmavmc/crates/plasmavmc-server/src/rest.rs | 360 ++-
.../crates/plasmavmc-server/src/vm_service.rs | 823 ++++++-
.../plasmavmc-server/src/volume_manager.rs | 22 +-
plasmavmc/crates/plasmavmc-types/src/vm.rs | 13 +-
plasmavmc/proto/plasmavmc.proto | 3 +
.../crates/prismnet-api/proto/prismnet.proto | 120 +
prismnet/crates/prismnet-server/src/lib.rs | 3 +-
prismnet/crates/prismnet-server/src/main.rs | 19 +-
.../crates/prismnet-server/src/metadata.rs | 265 ++-
prismnet/crates/prismnet-server/src/rest.rs | 1532 +++++++++++-
.../prismnet-server/src/services/ipam.rs | 182 ++
.../prismnet-server/src/services/mod.rs | 2 +
.../prismnet-server/src/services/port.rs | 211 +-
.../prismnet-server/src/services/router.rs | 455 ++++
.../src/services/security_group.rs | 107 +-
.../prismnet-server/src/services/subnet.rs | 253 +-
.../prismnet-server/src/services/vpc.rs | 105 +-
prismnet/crates/prismnet-types/src/lib.rs | 2 +
prismnet/crates/prismnet-types/src/router.rs | 129 ++
scripts/check_workspace_source_roots.py | 119 +
48 files changed, 9016 insertions(+), 1904 deletions(-)
create mode 100644 deployer/crates/plasmacloud-reconciler/src/auth.rs
create mode 100644 deployer/crates/plasmacloud-reconciler/src/tenant_network.rs
create mode 100644 nix/modules/plasmacloud-tenant-networking.nix
create mode 100644 nix/modules/service-port-reservations.nix
create mode 100644 plasmavmc/crates/plasmavmc-kvm/src/network.rs
create mode 100644 prismnet/crates/prismnet-server/src/services/router.rs
create mode 100644 prismnet/crates/prismnet-types/src/router.rs
create mode 100644 scripts/check_workspace_source_roots.py
diff --git a/deployer/Cargo.lock b/deployer/Cargo.lock
index ed8efc1..f1c60de 100644
--- a/deployer/Cargo.lock
+++ b/deployer/Cargo.lock
@@ -2039,6 +2039,9 @@ dependencies = [
"deployer-types",
"fiberlb-api",
"flashdns-api",
+ "iam-client",
+ "iam-types",
+ "prismnet-api",
"serde",
"serde_json",
"tokio",
@@ -2093,6 +2096,17 @@ dependencies = [
"syn",
]
+[[package]]
+name = "prismnet-api"
+version = "0.1.0"
+dependencies = [
+ "prost",
+ "prost-types",
+ "protoc-bin-vendored",
+ "tonic",
+ "tonic-build",
+]
+
[[package]]
name = "proc-macro2"
version = "1.0.106"
diff --git a/deployer/Cargo.toml b/deployer/Cargo.toml
index a9dac2f..c35537b 100644
--- a/deployer/Cargo.toml
+++ b/deployer/Cargo.toml
@@ -45,3 +45,4 @@ fiberlb-api = { path = "../fiberlb/crates/fiberlb-api" }
flashdns-api = { path = "../flashdns/crates/flashdns-api" }
iam-client = { path = "../iam/crates/iam-client" }
iam-types = { path = "../iam/crates/iam-types" }
+prismnet-api = { path = "../prismnet/crates/prismnet-api" }
diff --git a/deployer/crates/plasmacloud-reconciler/Cargo.toml b/deployer/crates/plasmacloud-reconciler/Cargo.toml
index ea1ee1d..f904416 100644
--- a/deployer/crates/plasmacloud-reconciler/Cargo.toml
+++ b/deployer/crates/plasmacloud-reconciler/Cargo.toml
@@ -19,5 +19,8 @@ tracing-subscriber.workspace = true
fiberlb-api.workspace = true
flashdns-api.workspace = true
deployer-types.workspace = true
+iam-client.workspace = true
+iam-types.workspace = true
+prismnet-api.workspace = true
clap = { version = "4.5", features = ["derive"] }
tonic = "0.12"
diff --git a/deployer/crates/plasmacloud-reconciler/src/auth.rs b/deployer/crates/plasmacloud-reconciler/src/auth.rs
new file mode 100644
index 0000000..cd13ad0
--- /dev/null
+++ b/deployer/crates/plasmacloud-reconciler/src/auth.rs
@@ -0,0 +1,79 @@
+use anyhow::Result;
+use iam_client::client::IamClientConfig;
+use iam_client::IamClient;
+use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope};
+use tonic::metadata::MetadataValue;
+use tonic::Request;
+
+pub fn authorized_request<T>(message: T, token: &str) -> Request<T> {
+ let mut req = Request::new(message);
+ let header = format!("Bearer {}", token);
+ let value = MetadataValue::try_from(header.as_str()).expect("valid bearer token metadata");
+ req.metadata_mut().insert("authorization", value);
+ req
+}
+
+pub async fn issue_controller_token(
+ iam_server_addr: &str,
+ principal_id: &str,
+ org_id: &str,
+ project_id: &str,
+) -> Result<String> {
+ let mut config = IamClientConfig::new(iam_server_addr).with_timeout(5000);
+ if iam_server_addr.starts_with("http://") || !iam_server_addr.starts_with("https://") {
+ config = config.without_tls();
+ }
+
+ let client = IamClient::connect(config).await?;
+ let principal_ref = PrincipalRef::service_account(principal_id);
+ let principal = match client.get_principal(&principal_ref).await? {
+ Some(existing) => existing,
+ None => {
+ client
+ .create_service_account(principal_id, principal_id, org_id, project_id)
+ .await?
+ }
+ };
+
+ ensure_project_admin_binding(&client, &principal, org_id, project_id).await?;
+
+ let scope = Scope::project(project_id, org_id);
+ client
+ .issue_token(
+ &principal,
+ vec!["roles/ProjectAdmin".to_string()],
+ scope,
+ 3600,
+ )
+ .await
+ .map_err(Into::into)
+}
+
+async fn ensure_project_admin_binding(
+ client: &IamClient,
+ principal: &Principal,
+ org_id: &str,
+ project_id: &str,
+) -> Result<()> {
+ let scope = Scope::project(project_id, org_id);
+ let bindings = client
+ .list_bindings_for_principal(&principal.to_ref())
+ .await?;
+
+ let already_bound = bindings
+ .iter()
+ .any(|binding| binding.role_ref == "roles/ProjectAdmin" && binding.scope == scope);
+ if already_bound {
+ return Ok(());
+ }
+
+ let binding = PolicyBinding::new(
+ format!("{}-project-admin-{}-{}", principal.id, org_id, project_id),
+ principal.to_ref(),
+ "roles/ProjectAdmin",
+ scope,
+ )
+ .with_created_by("plasmacloud-reconciler");
+ client.create_binding(&binding).await?;
+ Ok(())
+}
diff --git a/deployer/crates/plasmacloud-reconciler/src/main.rs b/deployer/crates/plasmacloud-reconciler/src/main.rs
index cdbfde4..8e54d06 100644
--- a/deployer/crates/plasmacloud-reconciler/src/main.rs
+++ b/deployer/crates/plasmacloud-reconciler/src/main.rs
@@ -39,7 +39,9 @@ use flashdns_api::proto::{
ZoneInfo,
};
+mod auth;
mod hosts;
+mod tenant_network;
mod watcher;
#[derive(Parser)]
@@ -75,6 +77,9 @@ enum Command {
prune: bool,
},
+ /// Apply tenant-scoped PrismNET declarations
+ TenantNetwork(tenant_network::TenantNetworkCommand),
+
/// Reconcile host deployments into per-node desired-system state
Hosts(hosts::HostsCommand),
}
@@ -300,6 +305,9 @@ async fn main() -> Result<()> {
let spec: DnsConfig = read_json(&config).await?;
reconcile_dns(spec, endpoint, prune).await?;
}
+ Command::TenantNetwork(command) => {
+ tenant_network::run(command).await?;
+ }
Command::Hosts(command) => {
hosts::run(command).await?;
}
diff --git a/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs b/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs
new file mode 100644
index 0000000..5188066
--- /dev/null
+++ b/deployer/crates/plasmacloud-reconciler/src/tenant_network.rs
@@ -0,0 +1,2053 @@
+use std::collections::{HashMap, HashSet};
+use std::path::PathBuf;
+use std::time::{Duration, Instant};
+
+use anyhow::{anyhow, bail, Context, Result};
+use clap::Args;
+use serde::Deserialize;
+use std::net::Ipv4Addr;
+use tonic::transport::{Channel, Endpoint};
+use tracing::{info, warn};
+
+use prismnet_api::ipam_service_client::IpamServiceClient;
+use prismnet_api::port_service_client::PortServiceClient;
+use prismnet_api::router_service_client::RouterServiceClient;
+use prismnet_api::security_group_service_client::SecurityGroupServiceClient;
+use prismnet_api::subnet_service_client::SubnetServiceClient;
+use prismnet_api::vpc_service_client::VpcServiceClient;
+use prismnet_api::{
+ AddRuleRequest, CreatePortRequest, CreateSecurityGroupRequest, CreateServiceIpPoolRequest,
+ CreateRouterRequest, CreateSubnetRequest, CreateVpcRequest, DeletePortRequest,
+ DeleteRouterRequest, DeleteSecurityGroupRequest, DeleteServiceIpPoolRequest,
+ DeleteSubnetRequest, DeleteVpcRequest, IpProtocol, ListPortsRequest, ListRoutersRequest,
+ ListSecurityGroupsRequest, ListServiceIpPoolsRequest, ListSubnetsRequest, ListVpcsRequest,
+ Port, RemoveRuleRequest, Router, RuleDirection, SecurityGroup, SecurityGroupRule,
+ ServiceIpPool, ServiceIpPoolType, Subnet, UpdatePortRequest, UpdateRouterRequest,
+ UpdateSecurityGroupRequest, UpdateServiceIpPoolRequest, UpdateSubnetRequest,
+ UpdateVpcRequest, Vpc,
+};
+
+use crate::auth::{authorized_request, issue_controller_token};
+
+const SERVICE_READY_TIMEOUT: Duration = Duration::from_secs(180);
+const SERVICE_RETRY_INTERVAL: Duration = Duration::from_secs(2);
+const CONNECT_TIMEOUT: Duration = Duration::from_secs(5);
+
+#[derive(Args, Debug)]
+pub struct TenantNetworkCommand {
+ #[arg(long)]
+ config: PathBuf,
+
+ #[arg(long)]
+ endpoint: String,
+
+ #[arg(long)]
+ iam_endpoint: String,
+
+ #[arg(long)]
+ controller_principal_id: String,
+
+ #[arg(long, default_value_t = false)]
+ prune: bool,
+}
+
+#[derive(Debug, Deserialize)]
+struct TenantNetworkConfig {
+ #[serde(default)]
+    tenants: Vec<TenantSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct TenantSpec {
+ org_id: String,
+ project_id: String,
+ #[serde(default)]
+    security_groups: Vec<SecurityGroupSpec>,
+    #[serde(default)]
+    service_ip_pools: Vec<ServiceIpPoolSpec>,
+    #[serde(default)]
+    vpcs: Vec<VpcSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct ServiceIpPoolSpec {
+ name: String,
+ cidr_block: String,
+ #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    pool_type: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SecurityGroupSpec {
+ name: String,
+ #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    rules: Vec<SecurityGroupRuleSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SecurityGroupRuleSpec {
+ direction: String,
+ #[serde(default)]
+    protocol: Option<String>,
+    #[serde(default)]
+    port_range_min: Option<u32>,
+    #[serde(default)]
+    port_range_max: Option<u32>,
+    #[serde(default)]
+    remote_cidr: Option<String>,
+    #[serde(default)]
+    remote_group: Option<String>,
+    #[serde(default)]
+    description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct VpcSpec {
+ name: String,
+ cidr_block: String,
+ #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    router: Option<RouterSpec>,
+    #[serde(default)]
+    subnets: Vec<SubnetSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct RouterSpec {
+ name: String,
+ gateway_cidr: String,
+ mac_address: String,
+ external_ip: String,
+ #[serde(default)]
+    description: Option<String>,
+}
+
+#[derive(Debug, Deserialize)]
+struct SubnetSpec {
+ name: String,
+ cidr_block: String,
+ #[serde(default)]
+    gateway_ip: Option<String>,
+    #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    dhcp_enabled: Option<bool>,
+    #[serde(default)]
+    ports: Vec<PortSpec>,
+}
+
+#[derive(Debug, Deserialize)]
+struct PortSpec {
+ name: String,
+ #[serde(default)]
+    description: Option<String>,
+    #[serde(default)]
+    ip_address: Option<String>,
+    #[serde(default)]
+    security_groups: Vec<String>,
+    #[serde(default)]
+    admin_state_up: Option<bool>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+struct RuleFingerprint {
+ direction: i32,
+ protocol: i32,
+ port_range_min: u32,
+ port_range_max: u32,
+ remote_cidr: String,
+ remote_group_id: String,
+ description: String,
+}
+
+pub async fn run(command: TenantNetworkCommand) -> Result<()> {
+ let config = read_json(&command.config).await?;
+ validate_config(&config)?;
+ reconcile(
+ config,
+ command.endpoint,
+ command.iam_endpoint,
+ command.controller_principal_id,
+ command.prune,
+ )
+ .await
+}
+
+async fn read_json<T: for<'a> Deserialize<'a>>(path: &PathBuf) -> Result<T> {
+ let contents = tokio::fs::read_to_string(path)
+ .await
+ .with_context(|| format!("failed to read {}", path.display()))?;
+ let config = serde_json::from_str(&contents)
+ .with_context(|| format!("failed to parse {}", path.display()))?;
+ Ok(config)
+}
+
+async fn reconcile(
+ config: TenantNetworkConfig,
+ endpoint: String,
+ iam_endpoint: String,
+ controller_principal_id: String,
+ prune: bool,
+) -> Result<()> {
+ let prismnet_channel = connect_with_retry("PrismNET", &endpoint).await?;
+ let mut vpc_client = VpcServiceClient::new(prismnet_channel.clone());
+ let mut subnet_client = SubnetServiceClient::new(prismnet_channel.clone());
+ let mut port_client = PortServiceClient::new(prismnet_channel.clone());
+ let mut router_client = RouterServiceClient::new(prismnet_channel.clone());
+ let mut sg_client = SecurityGroupServiceClient::new(prismnet_channel.clone());
+ let mut ipam_client = IpamServiceClient::new(prismnet_channel);
+
+ for tenant in &config.tenants {
+ let token = issue_controller_token_with_retry(
+ &iam_endpoint,
+ &controller_principal_id,
+ &tenant.org_id,
+ &tenant.project_id,
+ )
+ .await?;
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ prune,
+ "reconciling tenant network declarations"
+ );
+
+ reconcile_tenant(
+ &mut vpc_client,
+ &mut subnet_client,
+ &mut port_client,
+ &mut router_client,
+ &mut sg_client,
+ &mut ipam_client,
+ tenant,
+ &token,
+ prune,
+ )
+ .await?;
+ }
+
+ Ok(())
+}
+
+async fn connect_with_retry(service_name: &str, endpoint: &str) -> Result<Channel> {
+ let deadline = Instant::now() + SERVICE_READY_TIMEOUT;
+
+ loop {
+ match Endpoint::from_shared(endpoint.to_string())
+ .context("invalid gRPC endpoint")?
+ .connect_timeout(CONNECT_TIMEOUT)
+ .connect()
+ .await
+ {
+ Ok(channel) => return Ok(channel),
+ Err(error) => {
+ if Instant::now() >= deadline {
+ return Err(anyhow!(error).context(format!(
+ "{service_name} at {endpoint} did not become ready within {} seconds",
+ SERVICE_READY_TIMEOUT.as_secs()
+ )));
+ }
+ warn!(
+ service = service_name,
+ endpoint,
+ error = %error,
+ "service is not ready yet; retrying"
+ );
+ tokio::time::sleep(SERVICE_RETRY_INTERVAL).await;
+ }
+ }
+ }
+}
+
+async fn issue_controller_token_with_retry(
+ iam_endpoint: &str,
+ controller_principal_id: &str,
+ org_id: &str,
+ project_id: &str,
+) -> Result<String> {
+ let deadline = Instant::now() + SERVICE_READY_TIMEOUT;
+
+ loop {
+ match issue_controller_token(iam_endpoint, controller_principal_id, org_id, project_id).await
+ {
+ Ok(token) => return Ok(token),
+ Err(error) => {
+ if Instant::now() >= deadline {
+ return Err(error.context(format!(
+ "failed to issue controller token for tenant {org_id}/{project_id} within {} seconds",
+ SERVICE_READY_TIMEOUT.as_secs()
+ )));
+ }
+ warn!(
+ iam_endpoint,
+ org_id,
+ project_id,
+ error = %error,
+ "IAM is not ready to issue controller tokens yet; retrying"
+ );
+ tokio::time::sleep(SERVICE_RETRY_INTERVAL).await;
+ }
+ }
+ }
+}
+
+async fn reconcile_tenant(
+    vpc_client: &mut VpcServiceClient<Channel>,
+    subnet_client: &mut SubnetServiceClient<Channel>,
+    port_client: &mut PortServiceClient<Channel>,
+    router_client: &mut RouterServiceClient<Channel>,
+    sg_client: &mut SecurityGroupServiceClient<Channel>,
+    ipam_client: &mut IpamServiceClient<Channel>,
+ tenant: &TenantSpec,
+ token: &str,
+ prune: bool,
+) -> Result<()> {
+ let mut security_groups = list_security_groups(sg_client, tenant, token).await?;
+ let mut security_group_ids = HashMap::new();
+ for spec in &tenant.security_groups {
+ let sg = ensure_security_group(sg_client, tenant, spec, &security_groups, token).await?;
+ security_group_ids.insert(spec.name.clone(), sg.id.clone());
+ }
+
+ security_groups = list_security_groups(sg_client, tenant, token).await?;
+ for spec in &tenant.security_groups {
+ let actual = security_groups
+ .iter()
+ .find(|sg| sg.name == spec.name)
+ .with_context(|| format!("security group {} not found after reconciliation", spec.name))?;
+ sync_security_group_rules(sg_client, tenant, spec, actual, &security_group_ids, token, prune)
+ .await?;
+ }
+
+ let service_ip_pools = list_service_ip_pools(ipam_client, tenant, token).await?;
+ let mut desired_service_ip_pool_names = HashSet::new();
+ for pool_spec in &tenant.service_ip_pools {
+ desired_service_ip_pool_names.insert(pool_spec.name.clone());
+ ensure_service_ip_pool(ipam_client, tenant, pool_spec, &service_ip_pools, token).await?;
+ }
+
+ if prune {
+ let current_pools = list_service_ip_pools(ipam_client, tenant, token).await?;
+ for pool in current_pools {
+ if !desired_service_ip_pool_names.contains(&pool.name) {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ pool = %pool.name,
+ "deleting unmanaged service ip pool"
+ );
+ ipam_client
+ .delete_service_ip_pool(authorized_request(
+ DeleteServiceIpPoolRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: pool.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+ }
+
+ let vpcs = list_vpcs(vpc_client, tenant, token).await?;
+ let mut desired_vpc_names = HashSet::new();
+ for vpc_spec in &tenant.vpcs {
+ desired_vpc_names.insert(vpc_spec.name.clone());
+ let vpc = ensure_vpc(vpc_client, tenant, vpc_spec, &vpcs, token).await?;
+ reconcile_vpc(
+ subnet_client,
+ port_client,
+ router_client,
+ tenant,
+ vpc_spec,
+ &vpc,
+ &security_group_ids,
+ token,
+ prune,
+ )
+ .await?;
+ }
+
+ if prune {
+ let current_vpcs = list_vpcs(vpc_client, tenant, token).await?;
+ for vpc in current_vpcs {
+ if !desired_vpc_names.contains(&vpc.name) {
+ delete_vpc_tree(
+ vpc_client,
+ subnet_client,
+ port_client,
+ router_client,
+ tenant,
+ &vpc,
+ token,
+ )
+ .await?;
+ }
+ }
+
+ let current_security_groups = list_security_groups(sg_client, tenant, token).await?;
+ let desired_security_groups: HashSet<_> = tenant
+ .security_groups
+ .iter()
+ .map(|sg| sg.name.as_str())
+ .collect();
+ for sg in current_security_groups {
+ if !desired_security_groups.contains(sg.name.as_str()) {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ security_group = %sg.name,
+ "deleting unmanaged security group"
+ );
+ sg_client
+ .delete_security_group(authorized_request(
+ DeleteSecurityGroupRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: sg.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+async fn reconcile_vpc(
+    subnet_client: &mut SubnetServiceClient<Channel>,
+    port_client: &mut PortServiceClient<Channel>,
+    router_client: &mut RouterServiceClient<Channel>,
+ tenant: &TenantSpec,
+ vpc_spec: &VpcSpec,
+ vpc: &Vpc,
+    security_group_ids: &HashMap<String, String>,
+ token: &str,
+ prune: bool,
+) -> Result<()> {
+ let routers = list_routers(router_client, tenant, Some(&vpc.id), token).await?;
+ if let Some(router_spec) = vpc_spec.router.as_ref() {
+ let router = ensure_router(router_client, tenant, vpc, router_spec, &routers, token).await?;
+ if prune {
+ for existing in routers {
+ if existing.id != router.id {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %existing.name,
+ "deleting unmanaged router"
+ );
+ router_client
+ .delete_router(authorized_request(
+ DeleteRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: existing.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+ }
+ } else if prune {
+ for router in routers {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %router.name,
+ "deleting unmanaged router"
+ );
+ router_client
+ .delete_router(authorized_request(
+ DeleteRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: router.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+
+ let subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?;
+ let mut desired_subnet_names = HashSet::new();
+ for subnet_spec in &vpc_spec.subnets {
+ desired_subnet_names.insert(subnet_spec.name.clone());
+ let subnet = ensure_subnet(subnet_client, tenant, vpc, subnet_spec, &subnets, token).await?;
+ reconcile_subnet(
+ port_client,
+ tenant,
+ subnet_spec,
+ &subnet,
+ security_group_ids,
+ token,
+ prune,
+ )
+ .await?;
+ }
+
+ if prune {
+ let current_subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?;
+ for subnet in current_subnets {
+ if !desired_subnet_names.contains(&subnet.name) {
+ delete_subnet_tree(subnet_client, port_client, tenant, vpc, &subnet, token).await?;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+async fn reconcile_subnet(
+    port_client: &mut PortServiceClient<Channel>,
+ tenant: &TenantSpec,
+ subnet_spec: &SubnetSpec,
+ subnet: &Subnet,
+    security_group_ids: &HashMap<String, String>,
+ token: &str,
+ prune: bool,
+) -> Result<()> {
+ let ports = list_ports(port_client, tenant, &subnet.id, token).await?;
+ let mut desired_port_names = HashSet::new();
+ for port_spec in &subnet_spec.ports {
+ desired_port_names.insert(port_spec.name.clone());
+ ensure_port(
+ port_client,
+ tenant,
+ subnet,
+ port_spec,
+ &ports,
+ security_group_ids,
+ token,
+ )
+ .await?;
+ }
+
+ if prune {
+ let current_ports = list_ports(port_client, tenant, &subnet.id, token).await?;
+ for port in current_ports {
+ if !desired_port_names.contains(&port.name) {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ subnet = %subnet.name,
+ port = %port.name,
+ "deleting unmanaged port"
+ );
+ port_client
+ .delete_port(authorized_request(
+ DeletePortRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet.id.clone(),
+ id: port.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+async fn ensure_security_group(
+    sg_client: &mut SecurityGroupServiceClient<Channel>,
+ tenant: &TenantSpec,
+ spec: &SecurityGroupSpec,
+ existing: &[SecurityGroup],
+ token: &str,
+) -> Result<SecurityGroup> {
+ if let Some(sg) = existing.iter().find(|sg| sg.name == spec.name) {
+ let desired_description = string_or_default(spec.description.as_deref());
+ if sg.description != desired_description {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ security_group = %spec.name,
+ "updating security group"
+ );
+ let response = sg_client
+ .update_security_group(authorized_request(
+ UpdateSecurityGroupRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: sg.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response
+ .security_group
+ .context("missing security group in update response");
+ }
+
+ return Ok(sg.clone());
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ security_group = %spec.name,
+ "creating security group"
+ );
+ let response = sg_client
+ .create_security_group(authorized_request(
+ CreateSecurityGroupRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+
+ response
+ .security_group
+ .context("missing security group in create response")
+}
+
+async fn sync_security_group_rules(
+    sg_client: &mut SecurityGroupServiceClient<Channel>,
+ tenant: &TenantSpec,
+ spec: &SecurityGroupSpec,
+ actual: &SecurityGroup,
+    security_group_ids: &HashMap<String, String>,
+ token: &str,
+ prune: bool,
+) -> Result<()> {
+    let actual_rules: HashMap<RuleFingerprint, String> = actual
+ .rules
+ .iter()
+ .map(|rule| (fingerprint_actual_rule(rule), rule.id.clone()))
+ .collect();
+
+ let mut desired_rules = HashSet::new();
+ for desired_rule in &spec.rules {
+ let fingerprint = fingerprint_desired_rule(desired_rule, security_group_ids)?;
+ let is_new = !actual_rules.contains_key(&fingerprint);
+ desired_rules.insert(fingerprint.clone());
+
+ if is_new {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ security_group = %spec.name,
+ "adding security group rule"
+ );
+ sg_client
+ .add_rule(authorized_request(
+ AddRuleRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ security_group_id: actual.id.clone(),
+ direction: fingerprint.direction,
+ protocol: fingerprint.protocol,
+ port_range_min: fingerprint.port_range_min,
+ port_range_max: fingerprint.port_range_max,
+ remote_cidr: fingerprint.remote_cidr.clone(),
+ remote_group_id: fingerprint.remote_group_id.clone(),
+ description: fingerprint.description.clone(),
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+
+ if prune {
+ for (fingerprint, rule_id) in actual_rules {
+ if !desired_rules.contains(&fingerprint) {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ security_group = %spec.name,
+ "removing unmanaged security group rule"
+ );
+ sg_client
+ .remove_rule(authorized_request(
+ RemoveRuleRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ security_group_id: actual.id.clone(),
+ rule_id,
+ },
+ token,
+ ))
+ .await?;
+ }
+ }
+ }
+
+ Ok(())
+}
+
+async fn ensure_vpc(
+    vpc_client: &mut VpcServiceClient<Channel>,
+ tenant: &TenantSpec,
+ spec: &VpcSpec,
+ existing: &[Vpc],
+ token: &str,
+) -> Result<Vpc> {
+ if let Some(vpc) = existing.iter().find(|vpc| vpc.name == spec.name) {
+ ensure_field_matches(
+ "vpc",
+ &spec.name,
+ "cidr_block",
+ &vpc.cidr_block,
+ &spec.cidr_block,
+ )?;
+
+ let desired_description = string_or_default(spec.description.as_deref());
+ if vpc.description != desired_description {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %spec.name,
+ "updating vpc"
+ );
+ let response = vpc_client
+ .update_vpc(authorized_request(
+ UpdateVpcRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: vpc.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.vpc.context("missing vpc in update response");
+ }
+
+ return Ok(vpc.clone());
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %spec.name,
+ "creating vpc"
+ );
+ let response = vpc_client
+ .create_vpc(authorized_request(
+ CreateVpcRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ cidr_block: spec.cidr_block.clone(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+
+ response.vpc.context("missing vpc in create response")
+}
+
+async fn ensure_router(
+    router_client: &mut RouterServiceClient<Channel>,
+ tenant: &TenantSpec,
+ vpc: &Vpc,
+ spec: &RouterSpec,
+ existing: &[Router],
+ token: &str,
+) -> Result<Router> {
+ if let Some(router) = existing.iter().find(|router| router.name == spec.name) {
+ ensure_field_matches(
+ "router",
+ &spec.name,
+ "gateway_cidr",
+ &router.gateway_cidr,
+ &spec.gateway_cidr,
+ )?;
+ ensure_field_matches(
+ "router",
+ &spec.name,
+ "mac_address",
+ &router.mac_address,
+ &spec.mac_address,
+ )?;
+ ensure_field_matches(
+ "router",
+ &spec.name,
+ "external_ip",
+ &router.external_ip,
+ &spec.external_ip,
+ )?;
+
+ let desired_description = string_or_default(spec.description.as_deref());
+ if router.description != desired_description {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %spec.name,
+ "updating router"
+ );
+ let response = router_client
+ .update_router(authorized_request(
+ UpdateRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: router.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.router.context("missing router in update response");
+ }
+
+ return Ok(router.clone());
+ }
+
+ if existing.len() == 1 {
+ let router = &existing[0];
+ ensure_field_matches(
+ "router",
+ &router.name,
+ "gateway_cidr",
+ &router.gateway_cidr,
+ &spec.gateway_cidr,
+ )?;
+ ensure_field_matches(
+ "router",
+ &router.name,
+ "mac_address",
+ &router.mac_address,
+ &spec.mac_address,
+ )?;
+ ensure_field_matches(
+ "router",
+ &router.name,
+ "external_ip",
+ &router.external_ip,
+ &spec.external_ip,
+ )?;
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %router.name,
+ "renaming router to match declaration"
+ );
+ let response = router_client
+ .update_router(authorized_request(
+ UpdateRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: router.id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.router.context("missing router in update response");
+ }
+
+ if existing.len() > 1 {
+ bail!(
+ "vpc {} has multiple routers; reconcile cannot determine which one to keep",
+ vpc.name
+ );
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %spec.name,
+ "creating router"
+ );
+ let response = router_client
+ .create_router(authorized_request(
+ CreateRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ vpc_id: vpc.id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ gateway_cidr: spec.gateway_cidr.clone(),
+ mac_address: spec.mac_address.clone(),
+ external_ip: spec.external_ip.clone(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+
+ response.router.context("missing router in create response")
+}
+
+async fn ensure_subnet(
+    subnet_client: &mut SubnetServiceClient<Channel>,
+ tenant: &TenantSpec,
+ vpc: &Vpc,
+ spec: &SubnetSpec,
+ existing: &[Subnet],
+ token: &str,
+) -> Result<Subnet> {
+ if let Some(subnet) = existing.iter().find(|subnet| subnet.name == spec.name) {
+ ensure_field_matches(
+ "subnet",
+ &spec.name,
+ "cidr_block",
+ &subnet.cidr_block,
+ &spec.cidr_block,
+ )?;
+
+ if let Some(gateway_ip) = spec.gateway_ip.as_deref() {
+ ensure_field_matches("subnet", &spec.name, "gateway_ip", &subnet.gateway_ip, gateway_ip)?;
+ }
+
+ let desired_description = string_or_default(spec.description.as_deref());
+ let desired_dhcp_enabled = spec.dhcp_enabled.unwrap_or(true);
+ if subnet.description != desired_description || subnet.dhcp_enabled != desired_dhcp_enabled {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ subnet = %spec.name,
+ "updating subnet"
+ );
+ let response = subnet_client
+ .update_subnet(authorized_request(
+ UpdateSubnetRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ vpc_id: vpc.id.clone(),
+ id: subnet.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ dhcp_enabled: desired_dhcp_enabled,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.subnet.context("missing subnet in update response");
+ }
+
+ return Ok(subnet.clone());
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ subnet = %spec.name,
+ "creating subnet"
+ );
+ let response = subnet_client
+ .create_subnet(authorized_request(
+ CreateSubnetRequest {
+ vpc_id: vpc.id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ cidr_block: spec.cidr_block.clone(),
+ gateway_ip: string_or_default(spec.gateway_ip.as_deref()),
+ dhcp_enabled: spec.dhcp_enabled.unwrap_or(true),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+
+ response.subnet.context("missing subnet in create response")
+}
+
+async fn ensure_port(
+ port_client: &mut PortServiceClient,
+ tenant: &TenantSpec,
+ subnet: &Subnet,
+ spec: &PortSpec,
+ existing: &[Port],
+ security_group_ids: &HashMap,
+ token: &str,
+) -> Result {
+ let desired_security_group_ids = resolve_security_group_ids(&spec.security_groups, security_group_ids)?;
+ if let Some(port) = existing.iter().find(|port| port.name == spec.name) {
+ if let Some(ip_address) = spec.ip_address.as_deref() {
+ ensure_field_matches("port", &spec.name, "ip_address", &port.ip_address, ip_address)?;
+ }
+
+ let desired_description = string_or_default(spec.description.as_deref());
+ let desired_admin_state_up = spec.admin_state_up.unwrap_or(true);
+ let actual_security_group_ids: HashSet<_> =
+ port.security_group_ids.iter().cloned().collect();
+ let desired_security_group_ids_set: HashSet<_> =
+ desired_security_group_ids.iter().cloned().collect();
+ let needs_update = port.description != desired_description
+ || port.admin_state_up != desired_admin_state_up
+ || actual_security_group_ids != desired_security_group_ids_set;
+
+ if needs_update {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ subnet = %subnet.name,
+ port = %spec.name,
+ "updating port"
+ );
+ let response = port_client
+ .update_port(authorized_request(
+ UpdatePortRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet.id.clone(),
+ id: port.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ security_group_ids: desired_security_group_ids,
+ admin_state_up: desired_admin_state_up,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.port.context("missing port in update response");
+ }
+
+ return Ok(port.clone());
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ subnet = %subnet.name,
+ port = %spec.name,
+ "creating port"
+ );
+ let mut port = port_client
+ .create_port(authorized_request(
+ CreatePortRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet.id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ ip_address: string_or_default(spec.ip_address.as_deref()),
+ security_group_ids: desired_security_group_ids.clone(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner()
+ .port
+ .context("missing port in create response")?;
+
+ if !spec.admin_state_up.unwrap_or(true) {
+ port = port_client
+ .update_port(authorized_request(
+ UpdatePortRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet.id.clone(),
+ id: port.id.clone(),
+ name: String::new(),
+ description: String::new(),
+ security_group_ids: Vec::new(),
+ admin_state_up: false,
+ },
+ token,
+ ))
+ .await?
+ .into_inner()
+ .port
+ .context("missing port in update response")?;
+ }
+
+ Ok(port)
+}
+
+/// Tear down a VPC that exists server-side but is absent from the declared
+/// config, together with everything still attached to it.
+///
+/// Deletion order mirrors the dependency graph: routers first, then each
+/// subnet (which removes its ports via `delete_subnet_tree`), then the VPC.
+async fn delete_vpc_tree(
+ vpc_client: &mut VpcServiceClient,
+ subnet_client: &mut SubnetServiceClient,
+ port_client: &mut PortServiceClient,
+ router_client: &mut RouterServiceClient,
+ tenant: &TenantSpec,
+ vpc: &Vpc,
+ token: &str,
+) -> Result<()> {
+ // Routers must be detached before the VPC can be removed.
+ let routers = list_routers(router_client, tenant, Some(&vpc.id), token).await?;
+ for router in routers {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ router = %router.name,
+ "deleting unmanaged router"
+ );
+ router_client
+ .delete_router(authorized_request(
+ DeleteRouterRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: router.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+
+ let subnets = list_subnets(subnet_client, tenant, &vpc.id, token).await?;
+ for subnet in subnets {
+ delete_subnet_tree(subnet_client, port_client, tenant, vpc, &subnet, token).await?;
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ "deleting unmanaged vpc"
+ );
+ vpc_client
+ .delete_vpc(authorized_request(
+ DeleteVpcRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: vpc.id.clone(),
+ },
+ token,
+ ))
+ .await?;
+
+ Ok(())
+}
+
+/// Delete a subnet that is no longer declared, removing its ports first so
+/// the subnet delete cannot fail on lingering attachments.
+async fn delete_subnet_tree(
+ subnet_client: &mut SubnetServiceClient,
+ port_client: &mut PortServiceClient,
+ tenant: &TenantSpec,
+ vpc: &Vpc,
+ subnet: &Subnet,
+ token: &str,
+) -> Result<()> {
+ let ports = list_ports(port_client, tenant, &subnet.id, token).await?;
+ for port in ports {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ subnet = %subnet.name,
+ port = %port.name,
+ "deleting unmanaged port"
+ );
+ port_client
+ .delete_port(authorized_request(
+ DeletePortRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet.id.clone(),
+ id: port.id,
+ },
+ token,
+ ))
+ .await?;
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ vpc = %vpc.name,
+ subnet = %subnet.name,
+ "deleting unmanaged subnet"
+ );
+ subnet_client
+ .delete_subnet(authorized_request(
+ DeleteSubnetRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ vpc_id: vpc.id.clone(),
+ id: subnet.id.clone(),
+ },
+ token,
+ ))
+ .await?;
+
+ Ok(())
+}
+
+async fn ensure_service_ip_pool(
+ ipam_client: &mut IpamServiceClient,
+ tenant: &TenantSpec,
+ spec: &ServiceIpPoolSpec,
+ existing: &[ServiceIpPool],
+ token: &str,
+) -> Result {
+ let desired_pool_type = parse_service_ip_pool_type(spec.pool_type.as_deref())?;
+ if let Some(pool) = existing.iter().find(|pool| pool.name == spec.name) {
+ ensure_field_matches(
+ "service ip pool",
+ &spec.name,
+ "cidr_block",
+ &pool.cidr_block,
+ &spec.cidr_block,
+ )?;
+ ensure_field_matches(
+ "service ip pool",
+ &spec.name,
+ "pool_type",
+ service_ip_pool_type_name(pool.pool_type),
+ service_ip_pool_type_name(desired_pool_type),
+ )?;
+
+ let desired_description = string_or_default(spec.description.as_deref());
+ if pool.description != desired_description {
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ pool = %spec.name,
+ "updating service ip pool"
+ );
+ let response = ipam_client
+ .update_service_ip_pool(authorized_request(
+ UpdateServiceIpPoolRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ id: pool.id.clone(),
+ name: spec.name.clone(),
+ description: desired_description,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ return response.pool.context("missing service ip pool in update response");
+ }
+
+ return Ok(pool.clone());
+ }
+
+ info!(
+ org_id = %tenant.org_id,
+ project_id = %tenant.project_id,
+ pool = %spec.name,
+ "creating service ip pool"
+ );
+ let response = ipam_client
+ .create_service_ip_pool(authorized_request(
+ CreateServiceIpPoolRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ name: spec.name.clone(),
+ description: string_or_default(spec.description.as_deref()),
+ cidr_block: spec.cidr_block.clone(),
+ pool_type: desired_pool_type,
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+
+ response
+ .pool
+ .context("missing service ip pool in create response")
+}
+
+async fn list_vpcs(
+ vpc_client: &mut VpcServiceClient,
+ tenant: &TenantSpec,
+ token: &str,
+) -> Result> {
+ let response = vpc_client
+ .list_vpcs(authorized_request(
+ ListVpcsRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.vpcs)
+}
+
+async fn list_routers(
+ router_client: &mut RouterServiceClient,
+ tenant: &TenantSpec,
+ vpc_id: Option<&str>,
+ token: &str,
+) -> Result> {
+ let response = router_client
+ .list_routers(authorized_request(
+ ListRoutersRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ vpc_id: vpc_id.unwrap_or_default().to_string(),
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.routers)
+}
+
+async fn list_subnets(
+ subnet_client: &mut SubnetServiceClient,
+ tenant: &TenantSpec,
+ vpc_id: &str,
+ token: &str,
+) -> Result> {
+ let response = subnet_client
+ .list_subnets(authorized_request(
+ ListSubnetsRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ vpc_id: vpc_id.to_string(),
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.subnets)
+}
+
+async fn list_ports(
+ port_client: &mut PortServiceClient,
+ tenant: &TenantSpec,
+ subnet_id: &str,
+ token: &str,
+) -> Result> {
+ let response = port_client
+ .list_ports(authorized_request(
+ ListPortsRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ subnet_id: subnet_id.to_string(),
+ device_id: String::new(),
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.ports)
+}
+
+async fn list_security_groups(
+ sg_client: &mut SecurityGroupServiceClient,
+ tenant: &TenantSpec,
+ token: &str,
+) -> Result> {
+ let response = sg_client
+ .list_security_groups(authorized_request(
+ ListSecurityGroupsRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.security_groups)
+}
+
+async fn list_service_ip_pools(
+ ipam_client: &mut IpamServiceClient,
+ tenant: &TenantSpec,
+ token: &str,
+) -> Result> {
+ let response = ipam_client
+ .list_service_ip_pools(authorized_request(
+ ListServiceIpPoolsRequest {
+ org_id: tenant.org_id.clone(),
+ project_id: tenant.project_id.clone(),
+ pool_type: 0,
+ page_size: 500,
+ page_token: String::new(),
+ },
+ token,
+ ))
+ .await?
+ .into_inner();
+ Ok(response.pools)
+}
+
+fn resolve_security_group_ids(
+ names: &[String],
+ security_group_ids: &HashMap,
+) -> Result> {
+ let mut resolved = Vec::with_capacity(names.len());
+ for name in names {
+ let id = security_group_ids
+ .get(name)
+ .with_context(|| format!("unknown security group reference {}", name))?;
+ resolved.push(id.clone());
+ }
+ resolved.sort();
+ resolved.dedup();
+ Ok(resolved)
+}
+
+fn fingerprint_actual_rule(rule: &SecurityGroupRule) -> RuleFingerprint {
+ RuleFingerprint {
+ direction: rule.direction,
+ protocol: rule.protocol,
+ port_range_min: rule.port_range_min,
+ port_range_max: rule.port_range_max,
+ remote_cidr: rule.remote_cidr.clone(),
+ remote_group_id: rule.remote_group_id.clone(),
+ description: rule.description.clone(),
+ }
+}
+
+fn fingerprint_desired_rule(
+ rule: &SecurityGroupRuleSpec,
+ security_group_ids: &HashMap,
+) -> Result {
+ let remote_group_id = match rule.remote_group.as_deref() {
+ Some(name) => security_group_ids
+ .get(name)
+ .with_context(|| format!("unknown remote security group {}", name))?
+ .clone(),
+ None => String::new(),
+ };
+
+ Ok(RuleFingerprint {
+ direction: parse_direction(&rule.direction)?,
+ protocol: parse_protocol(rule.protocol.as_deref())?,
+ port_range_min: rule.port_range_min.unwrap_or(0),
+ port_range_max: rule.port_range_max.unwrap_or(0),
+ remote_cidr: string_or_default(rule.remote_cidr.as_deref()),
+ remote_group_id,
+ description: string_or_default(rule.description.as_deref()),
+ })
+}
+
+fn parse_direction(direction: &str) -> Result {
+ match direction.to_ascii_lowercase().as_str() {
+ "ingress" => Ok(RuleDirection::Ingress as i32),
+ "egress" => Ok(RuleDirection::Egress as i32),
+ other => bail!("unsupported rule direction {}", other),
+ }
+}
+
+fn parse_protocol(protocol: Option<&str>) -> Result {
+ match protocol
+ .unwrap_or("any")
+ .to_ascii_lowercase()
+ .as_str()
+ {
+ "any" => Ok(IpProtocol::Any as i32),
+ "tcp" => Ok(IpProtocol::Tcp as i32),
+ "udp" => Ok(IpProtocol::Udp as i32),
+ "icmp" => Ok(IpProtocol::Icmp as i32),
+ "icmpv6" => Ok(IpProtocol::Icmpv6 as i32),
+ other => bail!("unsupported ip protocol {}", other),
+ }
+}
+
+fn parse_service_ip_pool_type(pool_type: Option<&str>) -> Result {
+ match pool_type.unwrap_or("cluster_ip").to_ascii_lowercase().as_str() {
+ "cluster_ip" => Ok(ServiceIpPoolType::ClusterIp as i32),
+ "load_balancer" => Ok(ServiceIpPoolType::LoadBalancer as i32),
+ "node_port" => Ok(ServiceIpPoolType::NodePort as i32),
+ other => bail!("unsupported service ip pool type {}", other),
+ }
+}
+
+fn service_ip_pool_type_name(pool_type: i32) -> &'static str {
+ match pool_type {
+ x if x == ServiceIpPoolType::ClusterIp as i32 => "cluster_ip",
+ x if x == ServiceIpPoolType::LoadBalancer as i32 => "load_balancer",
+ x if x == ServiceIpPoolType::NodePort as i32 => "node_port",
+ _ => "unspecified",
+ }
+}
+
+fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8)> {
+ let (ip, prefix) = cidr
+ .split_once('/')
+ .with_context(|| format!("invalid CIDR {}", cidr))?;
+ let ip: Ipv4Addr = ip
+ .parse()
+ .with_context(|| format!("invalid IPv4 address in CIDR {}", cidr))?;
+ let prefix: u8 = prefix
+ .parse()
+ .with_context(|| format!("invalid prefix in CIDR {}", cidr))?;
+ if prefix > 32 {
+ bail!("invalid prefix in CIDR {}", cidr);
+ }
+ Ok((ip, prefix))
+}
+
+fn parse_ipv4(ip: &str) -> Result {
+ ip.parse()
+ .with_context(|| format!("invalid IPv4 address {}", ip))
+}
+
+fn validate_mac_address(mac_address: &str) -> Result<()> {
+ let octets: Vec<_> = mac_address.split(':').collect();
+ if octets.len() != 6
+ || octets
+ .iter()
+ .any(|octet| octet.len() != 2 || u8::from_str_radix(octet, 16).is_err())
+ {
+ bail!("invalid mac_address {}", mac_address);
+ }
+ Ok(())
+}
+
/// Return the inclusive `(start, end)` address range of an IPv4 CIDR as u32s.
///
/// Fix: the previous size-based formula (`start + (1u64 << (32 - prefix)) as
/// u32 - 1`) computed `0u32 - 1` for a /0 prefix, which panics with an
/// arithmetic-overflow in debug builds. `start | !mask` yields the broadcast
/// address directly and is total for every prefix 0..=32.
fn cidr_range(cidr: (Ipv4Addr, u8)) -> (u32, u32) {
    // A /0 prefix needs an explicit zero mask: shifting u32 by 32 is UB-adjacent
    // (a panic in debug, unspecified otherwise), so it is special-cased.
    let mask = if cidr.1 == 0 {
        0
    } else {
        u32::MAX << (32 - cidr.1)
    };
    let start = u32::from(cidr.0) & mask;
    let end = start | !mask;
    (start, end)
}
+
/// True when `ip` falls inside the CIDR block (network-masked comparison).
fn cidr_contains(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool {
    let (base, prefix) = cidr;
    // A /0 mask must be built explicitly; shifting a u32 by 32 is invalid.
    let mask = match prefix {
        0 => 0,
        p => u32::MAX << (32 - p),
    };
    u32::from(base) & mask == u32::from(ip) & mask
}
+
+fn cidr_contains_cidr(parent: (Ipv4Addr, u8), child: (Ipv4Addr, u8)) -> bool {
+ let (parent_start, parent_end) = cidr_range(parent);
+ let (child_start, child_end) = cidr_range(child);
+ child_start >= parent_start && child_end <= parent_end
+}
+
+fn cidr_overlaps(a: (Ipv4Addr, u8), b: (Ipv4Addr, u8)) -> bool {
+ let (a_start, a_end) = cidr_range(a);
+ let (b_start, b_end) = cidr_range(b);
+ a_start <= b_end && b_start <= a_end
+}
+
+fn validate_router_spec(vpc: &VpcSpec, router: &RouterSpec) -> Result<()> {
+ let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?;
+ let (gateway_ip, _) = parse_ipv4_cidr(&router.gateway_cidr)?;
+ let _: Ipv4Addr = router
+ .external_ip
+ .parse()
+ .with_context(|| format!("invalid external_ip {}", router.external_ip))?;
+ validate_mac_address(&router.mac_address)?;
+ if !cidr_contains(vpc_cidr, gateway_ip) {
+ bail!(
+ "router {} gateway_cidr must fall inside vpc {} cidr_block",
+ router.name,
+ vpc.name
+ );
+ }
+ Ok(())
+}
+
+fn ensure_field_matches(
+ resource_kind: &str,
+ resource_name: &str,
+ field_name: &str,
+ actual: &str,
+ desired: &str,
+) -> Result<()> {
+ if actual != desired {
+ bail!(
+ "{} {} has immutable field drift: {} is {:?}, expected {:?}",
+ resource_kind,
+ resource_name,
+ field_name,
+ actual,
+ desired
+ );
+ }
+ Ok(())
+}
+
/// Owned string from an optional &str, with `None` becoming "" (the proto
/// default for unset string fields).
fn string_or_default(value: Option<&str>) -> String {
    match value {
        Some(text) => text.to_string(),
        None => String::new(),
    }
}
+
+/// Validate the whole declarative tenant-networking config before any RPC is
+/// issued: tenant scopes are unique, all names are unique per resource kind,
+/// every CIDR/IP/MAC parses, subnets nest inside their VPC without
+/// overlapping, and ports reference known security groups with distinct
+/// fixed IPs that avoid the subnet gateway.
+fn validate_config(config: &TenantNetworkConfig) -> Result<()> {
+ let mut tenant_scopes = HashSet::new();
+ for tenant in &config.tenants {
+ if tenant.org_id.trim().is_empty() || tenant.project_id.trim().is_empty() {
+ bail!("tenant org_id/project_id must be non-empty");
+ }
+
+ // (org_id, project_id) pairs must be unique across the config.
+ if !tenant_scopes.insert((tenant.org_id.clone(), tenant.project_id.clone())) {
+ bail!(
+ "duplicate tenant scope {}/{} in tenant networking config",
+ tenant.org_id,
+ tenant.project_id
+ );
+ }
+
+ validate_unique_names(
+ "security group",
+ tenant
+ .security_groups
+ .iter()
+ .map(|sg| sg.name.as_str()),
+ )?;
+ validate_unique_names(
+ "service ip pool",
+ tenant.service_ip_pools.iter().map(|pool| pool.name.as_str()),
+ )?;
+ validate_unique_names("vpc", tenant.vpcs.iter().map(|vpc| vpc.name.as_str()))?;
+
+ let security_group_names: HashSet<_> =
+ tenant.security_groups.iter().map(|sg| sg.name.as_str()).collect();
+ for pool in &tenant.service_ip_pools {
+ if pool.cidr_block.trim().is_empty() {
+ bail!("service ip pool {} must set cidr_block", pool.name);
+ }
+ parse_ipv4_cidr(&pool.cidr_block)?;
+ parse_service_ip_pool_type(pool.pool_type.as_deref())?;
+ }
+ // Security-group rules: remote_cidr and remote_group are mutually
+ // exclusive, referenced groups must exist, and port ranges must be ordered.
+ for sg in &tenant.security_groups {
+ for rule in &sg.rules {
+ if rule.remote_cidr.is_some() && rule.remote_group.is_some() {
+ bail!(
+ "security group {} rule cannot set both remote_cidr and remote_group",
+ sg.name
+ );
+ }
+ if let Some(remote_group) = rule.remote_group.as_deref() {
+ if !security_group_names.contains(remote_group) {
+ bail!(
+ "security group {} references unknown remote_group {}",
+ sg.name,
+ remote_group
+ );
+ }
+ }
+ if let Some(remote_cidr) = rule.remote_cidr.as_deref() {
+ parse_ipv4_cidr(remote_cidr)?;
+ }
+ if let (Some(min), Some(max)) = (rule.port_range_min, rule.port_range_max) {
+ if min > max {
+ bail!(
+ "security group {} has invalid port range {}-{}",
+ sg.name,
+ min,
+ max
+ );
+ }
+ }
+ parse_direction(&rule.direction)?;
+ parse_protocol(rule.protocol.as_deref())?;
+ }
+ }
+
+ for vpc in &tenant.vpcs {
+ let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?;
+ if let Some(router) = vpc.router.as_ref() {
+ if router.name.trim().is_empty() {
+ bail!("router name must be non-empty");
+ }
+ // Blank-field check runs first so the error names the missing
+ // fields before validate_router_spec reports a parse failure.
+ if router.gateway_cidr.trim().is_empty()
+ || router.mac_address.trim().is_empty()
+ || router.external_ip.trim().is_empty()
+ {
+ bail!("router {} must set gateway_cidr, mac_address, and external_ip", router.name);
+ }
+ validate_router_spec(vpc, router)?;
+ }
+ validate_unique_names("subnet", vpc.subnets.iter().map(|subnet| subnet.name.as_str()))?;
+ // Pairwise overlap check against every previously seen subnet CIDR.
+ let mut seen_subnet_cidrs: Vec<(&str, (Ipv4Addr, u8))> = Vec::new();
+ for subnet in &vpc.subnets {
+ let subnet_cidr = parse_ipv4_cidr(&subnet.cidr_block)?;
+ if !cidr_contains_cidr(vpc_cidr, subnet_cidr) {
+ bail!(
+ "subnet {} cidr_block must fall inside vpc {} cidr_block",
+ subnet.name,
+ vpc.name
+ );
+ }
+ if let Some((other_name, _)) = seen_subnet_cidrs
+ .iter()
+ .find(|(_, other_cidr)| cidr_overlaps(*other_cidr, subnet_cidr))
+ {
+ bail!(
+ "subnet {} overlaps subnet {} in vpc {}",
+ subnet.name,
+ other_name,
+ vpc.name
+ );
+ }
+ seen_subnet_cidrs.push((subnet.name.as_str(), subnet_cidr));
+ let gateway_ip = subnet
+ .gateway_ip
+ .as_deref()
+ .map(parse_ipv4)
+ .transpose()?;
+ if let Some(gateway_ip) = gateway_ip {
+ if !cidr_contains(subnet_cidr, gateway_ip) {
+ bail!(
+ "subnet {} gateway_ip must fall inside subnet cidr_block",
+ subnet.name
+ );
+ }
+ }
+ validate_unique_names("port", subnet.ports.iter().map(|port| port.name.as_str()))?;
+ // Fixed port IPs must be in-subnet, distinct, and not the gateway.
+ let mut fixed_ips = HashSet::new();
+ for port in &subnet.ports {
+ for security_group in &port.security_groups {
+ if !security_group_names.contains(security_group.as_str()) {
+ bail!(
+ "port {} references unknown security group {}",
+ port.name,
+ security_group
+ );
+ }
+ }
+ if let Some(ip_address) = port.ip_address.as_deref() {
+ let ip_address = parse_ipv4(ip_address)?;
+ if !cidr_contains(subnet_cidr, ip_address) {
+ bail!(
+ "port {} ip_address must fall inside subnet {} cidr_block",
+ port.name,
+ subnet.name
+ );
+ }
+ if gateway_ip.map(|gateway_ip| gateway_ip == ip_address).unwrap_or(false) {
+ bail!(
+ "port {} ip_address cannot reuse subnet {} gateway_ip",
+ port.name,
+ subnet.name
+ );
+ }
+ if !fixed_ips.insert(ip_address) {
+ bail!(
+ "subnet {} declares duplicate fixed ip_address {}",
+ subnet.name,
+ ip_address
+ );
+ }
+ }
+ }
+ }
+ }
+ }
+
+ Ok(())
+}
+
+fn validate_unique_names<'a>(
+ resource_kind: &str,
+ names: impl IntoIterator- ,
+) -> Result<()> {
+ let mut seen = HashSet::new();
+ for name in names {
+ if name.trim().is_empty() {
+ bail!("{} name must be non-empty", resource_kind);
+ }
+ if !seen.insert(name) {
+ bail!("duplicate {} name {}", resource_kind, name);
+ }
+ }
+ Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+ //! Pure, offline tests for the config-validation and fingerprinting
+ //! helpers; no gRPC clients are exercised here.
+ use super::*;
+
+ #[test]
+ fn rejects_duplicate_tenant_scope() {
+ let config = TenantNetworkConfig {
+ tenants: vec![
+ TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: Vec::new(),
+ },
+ // Identical (org_id, project_id) pair — must be rejected.
+ TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: Vec::new(),
+ },
+ ],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn resolves_rule_remote_group_by_name() {
+ let mut security_group_ids = HashMap::new();
+ security_group_ids.insert("web".to_string(), "sg-1".to_string());
+
+ let fingerprint = fingerprint_desired_rule(
+ &SecurityGroupRuleSpec {
+ direction: "ingress".into(),
+ protocol: Some("tcp".into()),
+ port_range_min: Some(443),
+ port_range_max: Some(443),
+ remote_cidr: None,
+ remote_group: Some("web".into()),
+ description: Some("allow web".into()),
+ },
+ &security_group_ids,
+ )
+ .unwrap();
+
+ assert_eq!(fingerprint.direction, RuleDirection::Ingress as i32);
+ assert_eq!(fingerprint.protocol, IpProtocol::Tcp as i32);
+ assert_eq!(fingerprint.remote_group_id, "sg-1");
+ }
+
+ #[test]
+ fn rejects_port_reference_to_unknown_security_group() {
+ // Port names "db" but only "web" is declared.
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: vec![SecurityGroupSpec {
+ name: "web".into(),
+ description: None,
+ rules: Vec::new(),
+ }],
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: None,
+ subnets: vec![SubnetSpec {
+ name: "subnet-a".into(),
+ cidr_block: "10.0.1.0/24".into(),
+ gateway_ip: None,
+ description: None,
+ dhcp_enabled: None,
+ ports: vec![PortSpec {
+ name: "port-a".into(),
+ description: None,
+ ip_address: None,
+ security_groups: vec!["db".into()],
+ admin_state_up: None,
+ }],
+ }],
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_invalid_service_ip_pool_type() {
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: vec![ServiceIpPoolSpec {
+ name: "lb".into(),
+ cidr_block: "10.200.0.0/24".into(),
+ description: None,
+ pool_type: Some("bogus".into()),
+ }],
+ vpcs: Vec::new(),
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_router_gateway_outside_vpc() {
+ // Gateway 10.1.0.1 is outside the 10.0.0.0/16 VPC block.
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: Some(RouterSpec {
+ name: "edge".into(),
+ gateway_cidr: "10.1.0.1/24".into(),
+ mac_address: "02:00:00:00:00:01".into(),
+ external_ip: "203.0.113.10".into(),
+ description: None,
+ }),
+ subnets: Vec::new(),
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_router_with_invalid_mac() {
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: Some(RouterSpec {
+ name: "edge".into(),
+ gateway_cidr: "10.0.0.1/24".into(),
+ mac_address: "bad-mac".into(),
+ external_ip: "203.0.113.10".into(),
+ description: None,
+ }),
+ subnets: Vec::new(),
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_subnet_outside_vpc() {
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: None,
+ subnets: vec![SubnetSpec {
+ name: "subnet-a".into(),
+ cidr_block: "10.1.0.0/24".into(),
+ gateway_ip: None,
+ description: None,
+ dhcp_enabled: None,
+ ports: Vec::new(),
+ }],
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_overlapping_subnets() {
+ // 10.0.1.128/25 is a strict subset of 10.0.1.0/24.
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: None,
+ subnets: vec![
+ SubnetSpec {
+ name: "subnet-a".into(),
+ cidr_block: "10.0.1.0/24".into(),
+ gateway_ip: None,
+ description: None,
+ dhcp_enabled: None,
+ ports: Vec::new(),
+ },
+ SubnetSpec {
+ name: "subnet-b".into(),
+ cidr_block: "10.0.1.128/25".into(),
+ gateway_ip: None,
+ description: None,
+ dhcp_enabled: None,
+ ports: Vec::new(),
+ },
+ ],
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+
+ #[test]
+ fn rejects_duplicate_fixed_port_ip() {
+ let config = TenantNetworkConfig {
+ tenants: vec![TenantSpec {
+ org_id: "org-1".into(),
+ project_id: "proj-1".into(),
+ security_groups: Vec::new(),
+ service_ip_pools: Vec::new(),
+ vpcs: vec![VpcSpec {
+ name: "vpc-a".into(),
+ cidr_block: "10.0.0.0/16".into(),
+ description: None,
+ router: None,
+ subnets: vec![SubnetSpec {
+ name: "subnet-a".into(),
+ cidr_block: "10.0.1.0/24".into(),
+ gateway_ip: Some("10.0.1.1".into()),
+ description: None,
+ dhcp_enabled: None,
+ ports: vec![
+ PortSpec {
+ name: "port-a".into(),
+ description: None,
+ ip_address: Some("10.0.1.10".into()),
+ security_groups: Vec::new(),
+ admin_state_up: None,
+ },
+ // Same fixed IP as port-a — must be rejected.
+ PortSpec {
+ name: "port-b".into(),
+ description: None,
+ ip_address: Some("10.0.1.10".into()),
+ security_groups: Vec::new(),
+ admin_state_up: None,
+ },
+ ],
+ }],
+ }],
+ }],
+ };
+
+ assert!(validate_config(&config).is_err());
+ }
+}
diff --git a/docs/testing.md b/docs/testing.md
index 4d820ea..fb3d1d6 100644
--- a/docs/testing.md
+++ b/docs/testing.md
@@ -19,14 +19,16 @@ This flow:
```bash
nix run ./nix/test-cluster#cluster -- fresh-smoke
+nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp
nix run ./nix/test-cluster#cluster -- fresh-matrix
nix run ./nix/test-cluster#cluster -- fresh-bench-storage
nix build .#checks.x86_64-linux.deployer-vm-smoke
```
-Use these three commands as the release-facing local proof set:
+Use these commands as the release-facing local proof set:
- `fresh-smoke`: whole-cluster readiness, core behavior, and fault injection
+- `fresh-demo-vm-webapp`: focused VM demo showing a web app inside the guest with SQLite state persisted on the attached PhotonCloud volume across restart and migration
- `fresh-matrix`: composed service scenarios such as `prismnet + flashdns + fiberlb` and PrismNet-backed VM hosting bundles with `plasmavmc + coronafs + lightningstor`
- `fresh-bench-storage`: CoronaFS local-vs-shared-volume throughput, cross-worker volume visibility, and LightningStor large/small-object throughput capture
- `deployer-vm-smoke`: prebuilt NixOS system closure handoff into `nix-agent`, proving host rollout can activate a host-built target without guest-side compilation
@@ -37,6 +39,7 @@ Use these three commands as the release-facing local proof set:
nix run ./nix/test-cluster#cluster -- status
nix run ./nix/test-cluster#cluster -- logs node01
nix run ./nix/test-cluster#cluster -- ssh node04
+nix run ./nix/test-cluster#cluster -- demo-vm-webapp
nix run ./nix/test-cluster#cluster -- matrix
nix run ./nix/test-cluster#cluster -- bench-storage
nix run ./nix/test-cluster#cluster -- fresh-matrix
diff --git a/flake.nix b/flake.nix
index 3a42ff4..81d5e47 100644
--- a/flake.nix
+++ b/flake.nix
@@ -203,6 +203,7 @@
"flaredb"
"flashdns"
"iam"
+ "prismnet"
];
};
@@ -1064,6 +1065,127 @@
};
checks = {
+ workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit" {
+ nativeBuildInputs = [ pkgs.python3 ];
+ } ''
+ ${pkgs.python3}/bin/python - <<'PY' ${./.}
+ from __future__ import annotations
+
+ import re
+ import sys
+ import tomllib
+ from pathlib import Path
+ from typing import Any
+
+
+ def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]:
+ source = flake_path.read_text()
+ match = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", source, re.S)
+ if match is None:
+ raise ValueError(f"Could not find workspaceSourceRoots in {flake_path}")
+
+ roots: dict[str, list[str]] = {}
+ for name, body in re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", match.group(1), re.S):
+ roots[name] = re.findall(r'"([^"]+)"', body)
+ return roots
+
+
+ def collect_path_dependencies(value: Any) -> list[str]:
+ found: list[str] = []
+
+ if isinstance(value, dict):
+ path = value.get("path")
+ if isinstance(path, str):
+ found.append(path)
+ for nested in value.values():
+ found.extend(collect_path_dependencies(nested))
+ elif isinstance(value, list):
+ for nested in value:
+ found.extend(collect_path_dependencies(nested))
+
+ return found
+
+
+ def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]:
+ workspace_manifest = repo_root / workspace_name / "Cargo.toml"
+ manifests = [workspace_manifest]
+ workspace_data = tomllib.loads(workspace_manifest.read_text())
+ members = workspace_data.get("workspace", {}).get("members", [])
+
+ for member in members:
+ for candidate in workspace_manifest.parent.glob(member):
+ manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml"
+ if manifest.is_file():
+ manifests.append(manifest)
+
+ unique_manifests: list[Path] = []
+ seen: set[Path] = set()
+ for manifest in manifests:
+ resolved = manifest.resolve()
+ if resolved in seen:
+ continue
+ seen.add(resolved)
+ unique_manifests.append(manifest)
+ return unique_manifests
+
+
+ def required_root(dep_rel: Path) -> str:
+ parts = dep_rel.parts
+ if not parts:
+ return ""
+ if parts[0] == "crates" and len(parts) >= 2:
+ return "/".join(parts[:2])
+ return parts[0]
+
+
+ def is_covered(dep_rel: str, configured_roots: list[str]) -> bool:
+ return any(dep_rel == root or dep_rel.startswith(f"{root}/") for root in configured_roots)
+
+
+ def main() -> int:
+ repo_root = Path(sys.argv[1]).resolve()
+ workspace_roots = extract_workspace_source_roots(repo_root / "flake.nix")
+ failures: list[str] = []
+
+ for workspace_name, configured_roots in sorted(workspace_roots.items()):
+ workspace_manifest = repo_root / workspace_name / "Cargo.toml"
+ if not workspace_manifest.is_file():
+ continue
+
+ for manifest in workspace_manifests(repo_root, workspace_name):
+ manifest_data = tomllib.loads(manifest.read_text())
+ for dep_path in collect_path_dependencies(manifest_data):
+ dependency_dir = (manifest.parent / dep_path).resolve()
+ try:
+ dep_rel = dependency_dir.relative_to(repo_root)
+ except ValueError:
+ continue
+
+ dep_rel_str = dep_rel.as_posix()
+ if is_covered(dep_rel_str, configured_roots):
+ continue
+
+ failures.append(
+ f"{workspace_name}: missing source root '{required_root(dep_rel)}' "
+ f"for dependency '{dep_rel_str}' referenced by "
+ f"{manifest.relative_to(repo_root).as_posix()}"
+ )
+
+ if failures:
+ print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr)
+ for failure in failures:
+ print(f" - {failure}", file=sys.stderr)
+ return 1
+
+ print("workspaceSourceRoots covers all workspace path dependencies.")
+ return 0
+
+
+ raise SystemExit(main())
+ PY
+ touch "$out"
+ '';
+
first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/first-boot-topology-vm-smoke.nix {
inherit pkgs;
diff --git a/iam/Cargo.lock b/iam/Cargo.lock
index 6a6ab2c..51e309a 100644
--- a/iam/Cargo.lock
+++ b/iam/Cargo.lock
@@ -1294,6 +1294,7 @@ dependencies = [
"serde_json",
"thiserror 1.0.69",
"tokio",
+ "tokio-stream",
"toml",
"tonic",
"tonic-health",
diff --git a/iam/crates/iam-server/Cargo.toml b/iam/crates/iam-server/Cargo.toml
index e674048..376d546 100644
--- a/iam/crates/iam-server/Cargo.toml
+++ b/iam/crates/iam-server/Cargo.toml
@@ -21,6 +21,7 @@ serde = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] }
+tokio-stream = { workspace = true, features = ["net"] }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
tonic = { workspace = true }
diff --git a/iam/crates/iam-server/src/main.rs b/iam/crates/iam-server/src/main.rs
index 86d71ee..d2ee8e5 100644
--- a/iam/crates/iam-server/src/main.rs
+++ b/iam/crates/iam-server/src/main.rs
@@ -16,6 +16,7 @@ use tonic::service::Interceptor;
use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig};
use tonic::{metadata::MetadataMap, Request, Status};
use tonic_health::server::health_reporter;
+use tokio_stream::wrappers::TcpListenerStream;
use tracing::{info, warn};
use iam_api::{
@@ -283,6 +284,20 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
info!("Starting IAM server on {}", config.server.addr);
+ // Reserve the public listeners before opening outbound cluster/backend connections.
+ // Without this, a peer connection can claim the service port as an ephemeral source port
+ // and make the later gRPC bind fail with EADDRINUSE.
+ let grpc_addr = config.server.addr;
+ let http_addr = config.server.http_addr;
+ let grpc_listener = tokio::net::TcpListener::bind(grpc_addr).await?;
+ let http_listener = tokio::net::TcpListener::bind(http_addr).await?;
+
+ info!(
+ grpc_addr = %grpc_addr,
+ http_addr = %http_addr,
+ "IAM listeners reserved"
+ );
+
if let Some(endpoint) = &config.cluster.chainfire_endpoint {
let normalized = normalize_chainfire_endpoint(endpoint);
info!(
@@ -514,17 +529,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.add_service(IamCredentialServer::new(credential_service))
.add_service(GatewayAuthServiceServer::new(gateway_auth_service))
.add_service(admin_server)
- .serve(config.server.addr);
+ .serve_with_incoming(TcpListenerStream::new(grpc_listener));
// HTTP REST API server
- let http_addr = config.server.http_addr;
let rest_state = rest::RestApiState {
- server_addr: config.server.addr.to_string(),
+ server_addr: grpc_addr.to_string(),
tls_enabled: config.server.tls.is_some(),
admin_token: admin_token.clone(),
};
let rest_app = rest::build_router(rest_state);
- let http_listener = tokio::net::TcpListener::bind(&http_addr).await?;
info!(http_addr = %http_addr, "HTTP REST API server starting");
diff --git a/k8shost/Cargo.lock b/k8shost/Cargo.lock
index ac2d6ef..84c69a2 100644
--- a/k8shost/Cargo.lock
+++ b/k8shost/Cargo.lock
@@ -2217,6 +2217,18 @@ version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084"
+[[package]]
+name = "nix"
+version = "0.29.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
+dependencies = [
+ "bitflags 2.11.0",
+ "cfg-if",
+ "cfg_aliases",
+ "libc",
+]
+
[[package]]
name = "nom"
version = "7.1.3"
@@ -2539,6 +2551,7 @@ name = "plasmavmc-kvm"
version = "0.1.0"
dependencies = [
"async-trait",
+ "nix",
"plasmavmc-hypervisor",
"plasmavmc-types",
"serde",
@@ -2576,6 +2589,7 @@ dependencies = [
"reqwest 0.12.28",
"serde",
"serde_json",
+ "sha2",
"thiserror 1.0.69",
"tokio",
"tokio-stream",
diff --git a/nix/modules/default.nix b/nix/modules/default.nix
index 547952b..b2a23aa 100644
--- a/nix/modules/default.nix
+++ b/nix/modules/default.nix
@@ -3,6 +3,7 @@
./chainfire.nix
./plasmacloud-cluster.nix
./install-target.nix
+ ./service-port-reservations.nix
./creditservice.nix
./coronafs.nix
./flaredb.nix
@@ -11,6 +12,9 @@
./prismnet.nix
./flashdns.nix
./fiberlb.nix
+ ./plasmacloud-network.nix
+ ./plasmacloud-resources.nix
+ ./plasmacloud-tenant-networking.nix
./lightningstor.nix
./k8shost.nix
./nightlight.nix
diff --git a/nix/modules/plasmacloud-tenant-networking.nix b/nix/modules/plasmacloud-tenant-networking.nix
new file mode 100644
index 0000000..3c17134
--- /dev/null
+++ b/nix/modules/plasmacloud-tenant-networking.nix
@@ -0,0 +1,373 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+ cfg = config.plasmacloud.tenantNetworking;
+ jsonFormat = pkgs.formats.json {};
+
+ serviceIpPoolType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "Service IP pool name";
+ };
+
+ cidr_block = mkOption {
+ type = types.str;
+ description = "Service IP pool CIDR";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Service IP pool description";
+ };
+
+ pool_type = mkOption {
+ type = types.nullOr (types.enum [ "cluster_ip" "load_balancer" "node_port" ]);
+ default = null;
+ description = "Service IP pool type";
+ };
+ };
+ };
+
+ portType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "Port name";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Port description";
+ };
+
+ ip_address = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Requested fixed IP address";
+ };
+
+ security_groups = mkOption {
+ type = types.listOf types.str;
+ default = [];
+ description = "Security group names attached to the port";
+ };
+
+ admin_state_up = mkOption {
+ type = types.nullOr types.bool;
+ default = null;
+ description = "Administrative state for the port";
+ };
+ };
+ };
+
+ subnetType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "Subnet name";
+ };
+
+ cidr_block = mkOption {
+ type = types.str;
+ description = "Subnet CIDR";
+ };
+
+ gateway_ip = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Gateway IP";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Subnet description";
+ };
+
+ dhcp_enabled = mkOption {
+ type = types.nullOr types.bool;
+ default = null;
+ description = "Enable DHCP for the subnet";
+ };
+
+ ports = mkOption {
+ type = types.listOf portType;
+ default = [];
+ description = "Ports within the subnet";
+ };
+ };
+ };
+
+ routerType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "Router name";
+ };
+
+ gateway_cidr = mkOption {
+ type = types.str;
+ description = "Gateway interface CIDR attached to the VPC";
+ };
+
+ mac_address = mkOption {
+ type = types.str;
+ description = "Router interface MAC address";
+ };
+
+ external_ip = mkOption {
+ type = types.str;
+ description = "SNAT external IPv4 address";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Router description";
+ };
+ };
+ };
+
+ vpcType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "VPC name";
+ };
+
+ cidr_block = mkOption {
+ type = types.str;
+ description = "VPC CIDR";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "VPC description";
+ };
+
+ router = mkOption {
+ type = types.nullOr routerType;
+ default = null;
+ description = "Optional tenant edge router for the VPC";
+ };
+
+ subnets = mkOption {
+ type = types.listOf subnetType;
+ default = [];
+ description = "Subnets within the VPC";
+ };
+ };
+ };
+
+ securityGroupRuleType = types.submodule {
+ options = {
+ direction = mkOption {
+ type = types.enum [ "ingress" "egress" ];
+ description = "Rule direction";
+ };
+
+ protocol = mkOption {
+ type = types.nullOr (types.enum [ "any" "tcp" "udp" "icmp" "icmpv6" ]);
+ default = null;
+ description = "IP protocol";
+ };
+
+ port_range_min = mkOption {
+ type = types.nullOr types.int;
+ default = null;
+ description = "Minimum port in range";
+ };
+
+ port_range_max = mkOption {
+ type = types.nullOr types.int;
+ default = null;
+ description = "Maximum port in range";
+ };
+
+ remote_cidr = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Remote CIDR";
+ };
+
+ remote_group = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Remote security group name";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Rule description";
+ };
+ };
+ };
+
+ securityGroupType = types.submodule {
+ options = {
+ name = mkOption {
+ type = types.str;
+ description = "Security group name";
+ };
+
+ description = mkOption {
+ type = types.nullOr types.str;
+ default = null;
+ description = "Security group description";
+ };
+
+ rules = mkOption {
+ type = types.listOf securityGroupRuleType;
+ default = [];
+ description = "Security group rules";
+ };
+ };
+ };
+
+ tenantType = types.submodule {
+ options = {
+ org_id = mkOption {
+ type = types.str;
+ description = "Tenant organization ID";
+ };
+
+ project_id = mkOption {
+ type = types.str;
+ description = "Tenant project ID";
+ };
+
+ security_groups = mkOption {
+ type = types.listOf securityGroupType;
+ default = [];
+ description = "Tenant-scoped security groups";
+ };
+
+ service_ip_pools = mkOption {
+ type = types.listOf serviceIpPoolType;
+ default = [];
+ description = "Tenant-scoped Service IP pools";
+ };
+
+ vpcs = mkOption {
+ type = types.listOf vpcType;
+ default = [];
+ description = "Tenant-scoped VPCs and their nested resources";
+ };
+ };
+ };
+
+ configFile = jsonFormat.generate "plasmacloud-tenant-networking.json" {
+ inherit (cfg) tenants;
+ };
+ configPath = cfg.configPath;
+ configRelative = removePrefix "/etc/" configPath;
+
+in {
+ options.plasmacloud.tenantNetworking = {
+ enable = mkEnableOption "tenant-scoped PrismNET declarations";
+
+ endpoint = mkOption {
+ type = types.str;
+ default = "http://127.0.0.1:50081";
+ description = "PrismNET gRPC endpoint";
+ };
+
+ iamEndpoint = mkOption {
+ type = types.str;
+ default = "http://127.0.0.1:50080";
+ description = "IAM gRPC endpoint used to mint tenant-scoped controller tokens";
+ };
+
+ controllerPrincipalId = mkOption {
+ type = types.str;
+ default = "plasmacloud-reconciler";
+ description = "Service account used by the reconciler when applying tenant declarations";
+ };
+
+ tenants = mkOption {
+ type = types.listOf tenantType;
+ default = [];
+ description = "Tenant-scoped network declarations. This is separate from platform networking under plasmacloud.network.";
+ };
+
+ configPath = mkOption {
+ type = types.str;
+ default = "/etc/plasmacloud/tenant-networking.json";
+ description = "Path for rendered tenant networking config";
+ };
+
+ applyOnBoot = mkOption {
+ type = types.bool;
+ default = true;
+ description = "Apply declarations at boot";
+ };
+
+ applyOnChange = mkOption {
+ type = types.bool;
+ default = true;
+ description = "Apply declarations when the config file changes";
+ };
+
+ prune = mkOption {
+ type = types.bool;
+ default = false;
+ description = "Delete tenant network resources not declared for managed tenants";
+ };
+
+ package = mkOption {
+ type = types.package;
+ default = pkgs.plasmacloud-reconciler or (throw "plasmacloud-reconciler package not found");
+ description = "Reconciler package for tenant networking declarations";
+ };
+ };
+
+ config = mkIf cfg.enable {
+ assertions = [
+ {
+ assertion = hasPrefix "/etc/" configPath;
+ message = "plasmacloud.tenantNetworking.configPath must be under /etc";
+ }
+ ];
+
+ environment.etc."${configRelative}".source = configFile;
+
+ systemd.services.plasmacloud-tenant-networking-apply = {
+ description = "Apply PlasmaCloud tenant networking declarations";
+ after =
+ [ "network-online.target" ]
+ ++ optional config.services.prismnet.enable "prismnet.service"
+ ++ optional config.services.iam.enable "iam.service";
+ wants =
+ [ "network-online.target" ]
+ ++ optional config.services.prismnet.enable "prismnet.service"
+ ++ optional config.services.iam.enable "iam.service";
+ wantedBy = optional cfg.applyOnBoot "multi-user.target";
+
+ serviceConfig = {
+ Type = "oneshot";
+ RemainAfterExit = true;
+ ExecStart =
+ "${cfg.package}/bin/plasmacloud-reconciler tenant-network"
+ + " --config ${configPath}"
+ + " --endpoint ${cfg.endpoint}"
+ + " --iam-endpoint ${cfg.iamEndpoint}"
+ + " --controller-principal-id ${cfg.controllerPrincipalId}"
+ + optionalString cfg.prune " --prune";
+ };
+ };
+
+ systemd.paths.plasmacloud-tenant-networking-apply = mkIf cfg.applyOnChange {
+ wantedBy = [ "multi-user.target" ];
+ pathConfig = {
+ PathChanged = configPath;
+ };
+ };
+ };
+}
diff --git a/nix/modules/plasmavmc.nix b/nix/modules/plasmavmc.nix
index 6ed8f1d..aa900e1 100644
--- a/nix/modules/plasmavmc.nix
+++ b/nix/modules/plasmavmc.nix
@@ -329,7 +329,7 @@ in
wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps;
wants = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps;
- path = [ pkgs.qemu pkgs.coreutils pkgs.curl ];
+ path = [ pkgs.qemu pkgs.coreutils pkgs.curl pkgs.iproute2 pkgs.dnsmasq ];
preStart =
lib.optionalString (localIamHealthUrl != null) ''
for _ in $(seq 1 90); do
@@ -377,13 +377,14 @@ in
# Security hardening - relaxed for KVM access
NoNewPrivileges = false; # Needed for KVM
+ AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
PrivateTmp = true;
ProtectSystem = "strict";
ProtectHome = true;
ReadWritePaths =
[ cfg.dataDir "/run/libvirt" cfg.managedVolumeRoot ]
++ lib.optionals (coronafsDataDir != null) [ coronafsDataDir ];
- DeviceAllow = [ "/dev/kvm rw" ];
+ DeviceAllow = [ "/dev/kvm rw" "/dev/net/tun rw" ];
# Start command
ExecStart = "${cfg.package}/bin/plasmavmc-server --config ${plasmavmcConfigFile}";
diff --git a/nix/modules/service-port-reservations.nix b/nix/modules/service-port-reservations.nix
new file mode 100644
index 0000000..7165312
--- /dev/null
+++ b/nix/modules/service-port-reservations.nix
@@ -0,0 +1,10 @@
+{ lib, ... }:
+
+{
+ boot.kernel.sysctl = {
+ # PhotonCloud control-plane services bind within this band. Reserve it from the
+ # ephemeral allocator so outbound peer/backend connections cannot steal a service
+ # port during boot and block the later listener bind.
+ "net.ipv4.ip_local_reserved_ports" = lib.mkDefault "50051-50090";
+ };
+}
diff --git a/nix/test-cluster/README.md b/nix/test-cluster/README.md
index 11a2fec..e63ddc3 100644
--- a/nix/test-cluster/README.md
+++ b/nix/test-cluster/README.md
@@ -45,6 +45,8 @@ nix run ./nix/test-cluster#cluster -- build
nix run ./nix/test-cluster#cluster -- start
nix run ./nix/test-cluster#cluster -- smoke
nix run ./nix/test-cluster#cluster -- fresh-smoke
+nix run ./nix/test-cluster#cluster -- demo-vm-webapp
+nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp
nix run ./nix/test-cluster#cluster -- matrix
nix run ./nix/test-cluster#cluster -- fresh-matrix
nix run ./nix/test-cluster#cluster -- bench-storage
@@ -61,6 +63,8 @@ Preferred entrypoint for publishable verification: `nix run ./nix/test-cluster#c
`make cluster-smoke` is a convenience wrapper for the same clean host-build VM validation flow.
+`nix run ./nix/test-cluster#cluster -- demo-vm-webapp` creates a PrismNet-attached VM, boots a tiny web app inside the guest, stores its state in SQLite on the attached data volume, and then proves that the counter survives guest restart plus cross-worker migration.
+
`nix run ./nix/test-cluster#cluster -- matrix` reuses the current running cluster to exercise composed service scenarios such as `prismnet + flashdns + fiberlb`, PrismNet-backed VM hosting with `plasmavmc + prismnet + coronafs + lightningstor`, the Kubernetes-style hosting bundle, and API-gateway-mediated `nightlight` / `creditservice` flows.
Preferred entrypoint for publishable matrix verification: `nix run ./nix/test-cluster#cluster -- fresh-matrix`
diff --git a/nix/test-cluster/node01.nix b/nix/test-cluster/node01.nix
index 405ac65..c04cbc3 100644
--- a/nix/test-cluster/node01.nix
+++ b/nix/test-cluster/node01.nix
@@ -11,6 +11,7 @@
../modules/flaredb.nix
../modules/iam.nix
../modules/prismnet.nix
+ ../modules/plasmacloud-tenant-networking.nix
../modules/flashdns.nix
../modules/fiberlb.nix
../modules/k8shost.nix
@@ -166,4 +167,91 @@
services.lightningstor.s3AccessKeyId = "photoncloud-test";
services.lightningstor.s3SecretKey = "photoncloud-test-secret";
+
+ plasmacloud.tenantNetworking = {
+ enable = true;
+ endpoint = "http://127.0.0.1:50081";
+ iamEndpoint = "http://127.0.0.1:50080";
+ controllerPrincipalId = "plasmacloud-reconciler";
+ prune = true;
+ tenants = [
+ {
+ org_id = "matrix-tenant-org";
+ project_id = "matrix-tenant-project";
+ security_groups = [
+ {
+ name = "vm-default";
+ description = "Default tenant SG for matrix VMs";
+ rules = [
+ {
+ direction = "ingress";
+ protocol = "tcp";
+ port_range_min = 22;
+ port_range_max = 22;
+ remote_cidr = "10.100.0.0/24";
+ description = "Allow SSH from the cluster network";
+ }
+ {
+ direction = "egress";
+ protocol = "any";
+ remote_cidr = "0.0.0.0/0";
+ description = "Allow outbound traffic";
+ }
+ ];
+ }
+ {
+ name = "web";
+ description = "HTTP ingress from default tenant members";
+ rules = [
+ {
+ direction = "ingress";
+ protocol = "tcp";
+ port_range_min = 80;
+ port_range_max = 80;
+ remote_group = "vm-default";
+ description = "Allow HTTP from vm-default members";
+ }
+ ];
+ }
+ ];
+ service_ip_pools = [
+ {
+ name = "cluster-services";
+ cidr_block = "10.62.200.0/24";
+ description = "ClusterIP allocations for matrix tenant services";
+ pool_type = "cluster_ip";
+ }
+ {
+ name = "public-services";
+ cidr_block = "10.62.210.0/24";
+ description = "Load balancer allocations for matrix tenant services";
+ pool_type = "load_balancer";
+ }
+ ];
+ vpcs = [
+ {
+ name = "matrix-vpc";
+ cidr_block = "10.62.0.0/16";
+ description = "Declarative PrismNET tenant network for VM matrix validation";
+ router = {
+ name = "matrix-router";
+ gateway_cidr = "10.62.0.1/24";
+ mac_address = "02:00:00:00:62:01";
+ external_ip = "203.0.113.62";
+ description = "Tenant edge router";
+ };
+ subnets = [
+ {
+ name = "matrix-subnet";
+ cidr_block = "10.62.10.0/24";
+ gateway_ip = "10.62.10.1";
+ description = "Primary VM subnet for matrix validation";
+ dhcp_enabled = true;
+ }
+ ];
+ }
+ ];
+ }
+ ];
+ };
}
diff --git a/nix/test-cluster/node06.nix b/nix/test-cluster/node06.nix
index f160b64..0127616 100644
--- a/nix/test-cluster/node06.nix
+++ b/nix/test-cluster/node06.nix
@@ -48,6 +48,26 @@
pathPrefix = "/api/v1/subnets";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
}
+ {
+ name = "prismnet-routers";
+ pathPrefix = "/api/v1/routers";
+ upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
+ }
+ {
+ name = "prismnet-security-groups";
+ pathPrefix = "/api/v1/security-groups";
+ upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
+ }
+ {
+ name = "prismnet-ports";
+ pathPrefix = "/api/v1/ports";
+ upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
+ }
+ {
+ name = "prismnet-service-ip-pools";
+ pathPrefix = "/api/v1/service-ip-pools";
+ upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
+ }
{
name = "plasmavmc-vms";
pathPrefix = "/api/v1/vms";
diff --git a/nix/test-cluster/run-cluster.sh b/nix/test-cluster/run-cluster.sh
index 87dc0b1..6d3ad09 100755
--- a/nix/test-cluster/run-cluster.sh
+++ b/nix/test-cluster/run-cluster.sh
@@ -35,7 +35,9 @@ SSH_PASSWORD="${PHOTON_VM_ROOT_PASSWORD:-test}"
SSH_CONNECT_TIMEOUT="${PHOTON_VM_SSH_CONNECT_TIMEOUT:-5}"
SSH_WAIT_TIMEOUT="${PHOTON_VM_SSH_WAIT_TIMEOUT:-300}"
UNIT_WAIT_TIMEOUT="${PHOTON_VM_UNIT_WAIT_TIMEOUT:-240}"
+UNIT_CHECK_TIMEOUT="${PHOTON_VM_UNIT_CHECK_TIMEOUT:-15}"
HTTP_WAIT_TIMEOUT="${PHOTON_VM_HTTP_WAIT_TIMEOUT:-180}"
+VM_DEMO_HTTP_PORT="${PHOTON_VM_DEMO_HTTP_PORT:-8080}"
KVM_WAIT_TIMEOUT="${PHOTON_VM_KVM_WAIT_TIMEOUT:-180}"
FLAREDB_WAIT_TIMEOUT="${PHOTON_VM_FLAREDB_WAIT_TIMEOUT:-180}"
GRPCURL_MAX_MSG_SIZE="${PHOTON_VM_GRPCURL_MAX_MSG_SIZE:-1073741824}"
@@ -83,6 +85,15 @@ PLASMAVMC_PROTO="${PLASMAVMC_PROTO_DIR}/plasmavmc.proto"
FLAREDB_PROTO_DIR="${REPO_ROOT}/flaredb/crates/flaredb-proto/src"
FLAREDB_PROTO="${FLAREDB_PROTO_DIR}/kvrpc.proto"
FLAREDB_SQL_PROTO="${FLAREDB_PROTO_DIR}/sqlrpc.proto"
+MATRIX_TENANT_ORG_ID="matrix-tenant-org"
+MATRIX_TENANT_PROJECT_ID="matrix-tenant-project"
+MATRIX_TENANT_VPC_NAME="matrix-vpc"
+MATRIX_TENANT_SUBNET_NAME="matrix-subnet"
+MATRIX_TENANT_ROUTER_NAME="matrix-router"
+MATRIX_TENANT_DEFAULT_SG_NAME="vm-default"
+MATRIX_TENANT_WEB_SG_NAME="web"
+MATRIX_TENANT_CLUSTER_POOL_NAME="cluster-services"
+MATRIX_TENANT_LB_POOL_NAME="public-services"
# shellcheck disable=SC2034
NODE_PHASES=(
@@ -530,6 +541,26 @@ wait_for_prismnet_port_detachment() {
done
}
+wait_for_prismnet_port_absent() {
+ local token="$1"
+ local org_id="$2"
+ local project_id="$3"
+ local subnet_id="$4"
+ local port_id="$5"
+ local timeout="${6:-${HTTP_WAIT_TIMEOUT}}"
+ local deadline=$((SECONDS + timeout))
+
+ while true; do
+ if ! prismnet_get_port_json "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null 2>&1; then
+ return 0
+ fi
+ if (( SECONDS >= deadline )); then
+ die "timed out waiting for PrismNet port ${port_id} to be deleted"
+ fi
+ sleep 2
+ done
+}
+
wait_for_vm_network_spec() {
local token="$1"
local get_vm_json="$2"
@@ -567,6 +598,28 @@ wait_for_vm_network_spec() {
done
}
+api_gateway_request() {
+ local method="$1"
+ local token="$2"
+ local path="$3"
+ local body="${4:-}"
+ local url="http://127.0.0.1:18080${path}"
+
+ if [[ -n "${body}" ]]; then
+ curl -fsS \
+ -X "${method}" \
+ -H "authorization: Bearer ${token}" \
+ -H "content-type: application/json" \
+ --data "${body}" \
+ "${url}"
+ else
+ curl -fsS \
+ -X "${method}" \
+ -H "authorization: Bearer ${token}" \
+ "${url}"
+ fi
+}
+
build_link() {
printf '%s/build-%s' "$(vm_dir)" "$1"
}
@@ -2149,10 +2202,15 @@ wait_for_unit() {
local deadline=$((SECONDS + timeout))
local stable_checks=0
local required_stable_checks=3
+ local ssh_port
+ ssh_port="$(ssh_port_for_node "${node}")"
log "Waiting for ${unit}.service on ${node}"
while (( stable_checks < required_stable_checks )); do
- if ssh_node "${node}" "state=\$(systemctl show --property=ActiveState --value ${unit}.service); sub=\$(systemctl show --property=SubState --value ${unit}.service); [[ \"\${state}\" == active && (\"\${sub}\" == running || \"\${sub}\" == exited) ]]" >/dev/null 2>&1; then
+ if timeout "${UNIT_CHECK_TIMEOUT}" \
+ sshpass -p "${SSH_PASSWORD}" \
+ ssh "${SSH_OPTS[@]}" -p "${ssh_port}" root@127.0.0.1 \
+ "systemctl is-active --quiet ${unit}.service" >/dev/null 2>&1; then
stable_checks=$((stable_checks + 1))
else
stable_checks=0
@@ -2243,6 +2301,55 @@ EOF
done
}
+vm_demo_url() {
+ local ip="$1"
+ local path="${2:-/}"
+ printf 'http://%s:%s%s\n' "${ip}" "${VM_DEMO_HTTP_PORT}" "${path}"
+}
+
+wait_for_vm_demo_http() {
+ local node="$1"
+ local ip="$2"
+ local timeout="${3:-${HTTP_WAIT_TIMEOUT}}"
+
+ wait_for_http "${node}" "$(vm_demo_url "${ip}" "/health")" "${timeout}"
+}
+
+vm_demo_request_json() {
+ local node="$1"
+ local method="$2"
+ local ip="$3"
+ local path="$4"
+
+ ssh_node_script "${node}" "${method}" "$(vm_demo_url "${ip}" "${path}")" <<'EOF'
+set -euo pipefail
+method="$1"
+url="$2"
+curl -fsS -X "${method}" "${url}"
+EOF
+}
+
+assert_vm_demo_state() {
+ local state_json="$1"
+ local expected_visits="$2"
+ local expected_root_boots="$3"
+ local expected_data_boots="$4"
+
+ printf '%s' "${state_json}" | jq -e \
+ --argjson visits "${expected_visits}" \
+ --argjson root_boots "${expected_root_boots}" \
+ --argjson data_boots "${expected_data_boots}" \
+ --argjson listen_port "${VM_DEMO_HTTP_PORT}" \
+ --arg db_path "/mnt/photon-vm-data/demo.sqlite3" '
+ .status == "ok"
+ and .visits == $visits
+ and .root_boot_count == $root_boots
+ and .data_boot_count == $data_boots
+ and .listen_port == $listen_port
+ and .db_path == $db_path
+ ' >/dev/null || die "unexpected VM demo payload: ${state_json}"
+}
+
wait_for_host_http() {
local url="$1"
local timeout="${2:-${HTTP_WAIT_TIMEOUT}}"
@@ -2577,10 +2684,10 @@ wait_for_qemu_volume_present() {
while true; do
qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)"
- if [[ "${qemu_processes}" == *"${volume_ref}"* ]]; then
+ if qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}"; then
return 0
fi
- if [[ -n "${alternate_ref}" && "${qemu_processes}" == *"${alternate_ref}"* ]]; then
+ if qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then
return 0
fi
if (( SECONDS >= deadline )); then
@@ -2601,7 +2708,8 @@ wait_for_qemu_volume_absent() {
while true; do
qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)"
- if [[ "${qemu_processes}" != *"${volume_ref}"* ]] && [[ -z "${alternate_ref}" || "${qemu_processes}" != *"${alternate_ref}"* ]]; then
+ if ! qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}" \
+ && ! qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then
return 0
fi
if (( SECONDS >= deadline )); then
@@ -2612,6 +2720,39 @@ wait_for_qemu_volume_absent() {
done
}
+qemu_processes_contain_ref() {
+ local qemu_processes="$1"
+ local ref="${2:-}"
+
+ [[ -n "${ref}" ]] || return 1
+ if [[ "${qemu_processes}" == *"${ref}"* ]]; then
+ return 0
+ fi
+
+ if [[ "${ref}" == nbd://* ]]; then
+ local authority host port
+ authority="${ref#nbd://}"
+ authority="${authority%%/*}"
+ if [[ "${authority}" == \[*\] ]]; then
+ host="${authority#\[}"
+ host="${host%\]}"
+ port="10809"
+ elif [[ "${authority}" == *:* ]]; then
+ host="${authority%:*}"
+ port="${authority##*:}"
+ else
+ host="${authority}"
+ port="10809"
+ fi
+ if [[ -n "${host}" && -n "${port}" ]] \
+ && [[ "${qemu_processes}" == *"\"host\":\"${host}\",\"port\":\"${port}\""* ]]; then
+ return 0
+ fi
+ fi
+
+ return 1
+}
+
try_get_vm_json() {
local token="$1"
local get_vm_json="$2"
@@ -2625,6 +2766,15 @@ try_get_vm_json() {
127.0.0.1:${vm_port} plasmavmc.v1.VmService/GetVm
}
+vm_disk_volume_id_from_json() {
+ local vm_json="$1"
+ local disk_id="$2"
+
+ printf '%s' "${vm_json}" | jq -r --arg disk_id "${disk_id}" '
+ (.spec.disks // [])[]? | select(.id == $disk_id) | .source.volumeId // empty
+ ' | head -n1
+}
+
try_get_volume_json() {
local token="$1"
local get_volume_json="$2"
@@ -3300,6 +3450,151 @@ validate_prismnet_flow() {
stop_ssh_tunnel node01 "${iam_tunnel}"
}
+validate_tenant_networking_flow() {
+ log "Validating declarative tenant networking via API gateway and PrismNet"
+
+ local iam_tunnel="" prism_tunnel="" gateway_tunnel=""
+ iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
+ prism_tunnel="$(start_ssh_tunnel node01 15081 50081)"
+ gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)"
+ trap 'stop_ssh_tunnel node06 "${gateway_tunnel}"; stop_ssh_tunnel node01 "${prism_tunnel}"; stop_ssh_tunnel node01 "${iam_tunnel}"' RETURN
+
+ wait_for_unit node01 plasmacloud-tenant-networking-apply 120
+ wait_for_http node06 http://127.0.0.1:8080/health
+
+ ssh_node node01 "systemctl start plasmacloud-tenant-networking-apply.service"
+ wait_for_unit node01 plasmacloud-tenant-networking-apply 120
+
+ local org_id="${MATRIX_TENANT_ORG_ID}"
+ local project_id="${MATRIX_TENANT_PROJECT_ID}"
+ local principal_id="tenant-networking-smoke-$(date +%s)"
+ local token
+ token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")"
+
+ local vpcs_json subnets_json routers_json security_groups_json pools_json
+ local vpc_id subnet_id router_id default_sg_id web_sg_id cluster_pool_id lb_pool_id
+ local allocate_response allocated_ip service_uid
+
+ vpcs_json="$(api_gateway_request GET "${token}" "/api/v1/vpcs")"
+ vpc_id="$(printf '%s' "${vpcs_json}" | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" '
+ .data.vpcs[] | select(.name == $name) | .id
+ ')"
+ [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "declarative tenant VPC ${MATRIX_TENANT_VPC_NAME} was not exposed through the API gateway"
+ printf '%s' "${vpcs_json}" | jq -e --arg name "${MATRIX_TENANT_VPC_NAME}" '
+ .data.vpcs | any(.name == $name and .cidr_block == "10.62.0.0/16" and .status == "active")
+ ' >/dev/null || die "unexpected VPC payload for declarative tenant network"
+
+ subnets_json="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}")"
+ subnet_id="$(printf '%s' "${subnets_json}" | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" '
+ .data.subnets[] | select(.name == $name) | .id
+ ')"
+ [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "declarative tenant subnet ${MATRIX_TENANT_SUBNET_NAME} was not exposed through the API gateway"
+ printf '%s' "${subnets_json}" | jq -e --arg name "${MATRIX_TENANT_SUBNET_NAME}" '
+ .data.subnets | any(
+ .name == $name and
+ .cidr_block == "10.62.10.0/24" and
+ .gateway_ip == "10.62.10.1" and
+ .status == "active"
+ )
+ ' >/dev/null || die "unexpected subnet payload for declarative tenant network"
+
+ routers_json="$(api_gateway_request GET "${token}" "/api/v1/routers?vpc_id=${vpc_id}")"
+ router_id="$(printf '%s' "${routers_json}" | jq -r --arg name "${MATRIX_TENANT_ROUTER_NAME}" '
+ .data.routers[] | select(.name == $name) | .id
+ ')"
+ [[ -n "${router_id}" && "${router_id}" != "null" ]] || die "declarative tenant router ${MATRIX_TENANT_ROUTER_NAME} was not exposed through the API gateway"
+ printf '%s' "${routers_json}" | jq -e --arg name "${MATRIX_TENANT_ROUTER_NAME}" '
+ .data.routers | any(
+ .name == $name and
+ .gateway_cidr == "10.62.0.1/24" and
+ .external_ip == "203.0.113.62" and
+ .status == "active"
+ )
+ ' >/dev/null || die "unexpected router payload for declarative tenant network"
+
+ security_groups_json="$(api_gateway_request GET "${token}" "/api/v1/security-groups")"
+ default_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" '
+ .data.security_groups[] | select(.name == $name) | .id
+ ')"
+ web_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" '
+ .data.security_groups[] | select(.name == $name) | .id
+ ')"
+ [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "default security group ${MATRIX_TENANT_DEFAULT_SG_NAME} missing from declarative tenant networking"
+ [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "security group ${MATRIX_TENANT_WEB_SG_NAME} missing from declarative tenant networking"
+ printf '%s' "${security_groups_json}" | jq -e \
+ --arg default_name "${MATRIX_TENANT_DEFAULT_SG_NAME}" \
+ --arg web_name "${MATRIX_TENANT_WEB_SG_NAME}" \
+ --arg default_id "${default_sg_id}" '
+ (.data.security_groups | any(.name == $default_name and (.rules | any(.direction == "egress"))))
+ and
+ (.data.security_groups | any(
+ .name == $web_name and
+ (.rules | any(
+ .direction == "ingress" and
+ .protocol == "tcp" and
+ .port_range_min == 80 and
+ .port_range_max == 80 and
+ .remote_group_id == $default_id
+ ))
+ ))
+ ' >/dev/null || die "declarative security group rules did not match expected shape"
+
+ pools_json="$(api_gateway_request GET "${token}" "/api/v1/service-ip-pools")"
+ cluster_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" '
+ .data.pools[] | select(.name == $name) | .id
+ ')"
+ lb_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_LB_POOL_NAME}" '
+ .data.pools[] | select(.name == $name) | .id
+ ')"
+ [[ -n "${cluster_pool_id}" && "${cluster_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_CLUSTER_POOL_NAME} missing from declarative tenant networking"
+ [[ -n "${lb_pool_id}" && "${lb_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_LB_POOL_NAME} missing from declarative tenant networking"
+ printf '%s' "${pools_json}" | jq -e \
+ --arg cluster_name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" \
+ --arg lb_name "${MATRIX_TENANT_LB_POOL_NAME}" '
+ (.data.pools | any(.name == $cluster_name and .pool_type == "cluster_ip" and .cidr_block == "10.62.200.0/24"))
+ and
+ (.data.pools | any(.name == $lb_name and .pool_type == "load_balancer" and .cidr_block == "10.62.210.0/24"))
+ ' >/dev/null || die "unexpected service IP pool payload for declarative tenant network"
+
+ service_uid="matrix-service-$(date +%s)"
+ allocate_response="$(grpcurl -plaintext \
+ -H "authorization: Bearer ${token}" \
+ -import-path "${PRISMNET_PROTO_DIR}" \
+ -proto "${PRISMNET_PROTO}" \
+ -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg pool "${cluster_pool_id}" --arg service_uid "${service_uid}" '{orgId:$org, projectId:$project, poolId:$pool, serviceUid:$service_uid, requestedIp:""}')" \
+ 127.0.0.1:15081 prismnet.IpamService/AllocateServiceIP)"
+ allocated_ip="$(printf '%s' "${allocate_response}" | jq -r '.ipAddress')"
+ [[ -n "${allocated_ip}" && "${allocated_ip}" != "null" ]] || die "failed to allocate a service IP from ${MATRIX_TENANT_CLUSTER_POOL_NAME}"
+
+ api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \
+ | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) != null' >/dev/null \
+ || die "allocated service IP ${allocated_ip} was not reflected in the REST pool view"
+
+ grpcurl -plaintext \
+ -H "authorization: Bearer ${token}" \
+ -import-path "${PRISMNET_PROTO_DIR}" \
+ -proto "${PRISMNET_PROTO}" \
+ -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg ip "${allocated_ip}" '{orgId:$org, projectId:$project, ipAddress:$ip}')" \
+ 127.0.0.1:15081 prismnet.IpamService/ReleaseServiceIP >/dev/null
+
+ local release_deadline=$((SECONDS + HTTP_WAIT_TIMEOUT))
+ while true; do
+ if api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \
+ | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) == null' >/dev/null; then
+ break
+ fi
+ if (( SECONDS >= release_deadline )); then
+ die "timed out waiting for released service IP ${allocated_ip} to disappear from ${MATRIX_TENANT_CLUSTER_POOL_NAME}"
+ fi
+ sleep 2
+ done
+
+ trap - RETURN
+ stop_ssh_tunnel node06 "${gateway_tunnel}"
+ stop_ssh_tunnel node01 "${prism_tunnel}"
+ stop_ssh_tunnel node01 "${iam_tunnel}"
+}
+
validate_flashdns_flow() {
log "Validating FlashDNS zone, record, and authoritative query flow"
@@ -4096,9 +4391,10 @@ validate_lightningstor_distributed_storage() {
validate_vm_storage_flow() {
log "Validating PlasmaVMC image import, shared-volume execution, and cross-node migration"
- local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel=""
+ local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel="" gateway_tunnel=""
local node04_coronafs_tunnel="" node05_coronafs_tunnel=""
local current_worker_coronafs_port="" peer_worker_coronafs_port=""
+ local demo_http_sg_id=""
local vm_port=15082
iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
prism_tunnel="$(start_ssh_tunnel node01 15081 50081)"
@@ -4107,10 +4403,11 @@ validate_vm_storage_flow() {
coronafs_tunnel="$(start_ssh_tunnel node01 15088 "${CORONAFS_API_PORT}")"
node04_coronafs_tunnel="$(start_ssh_tunnel node04 25088 "${CORONAFS_API_PORT}")"
node05_coronafs_tunnel="$(start_ssh_tunnel node05 35088 "${CORONAFS_API_PORT}")"
+ gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)"
local image_source_path=""
local vm_watch_output=""
local node01_proto_root="/var/lib/plasmavmc/test-protos"
- local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac=""
+ local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac="" default_sg_id="" web_sg_id=""
cleanup_vm_storage_flow() {
if [[ -n "${token:-}" && -n "${port_id:-}" && -n "${subnet_id:-}" ]]; then
grpcurl -plaintext \
@@ -4120,28 +4417,15 @@ validate_vm_storage_flow() {
-d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \
127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null 2>&1 || true
fi
- if [[ -n "${token:-}" && -n "${subnet_id:-}" && -n "${vpc_id:-}" ]]; then
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \
- 127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null 2>&1 || true
- fi
- if [[ -n "${token:-}" && -n "${vpc_id:-}" ]]; then
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \
- 127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null 2>&1 || true
- fi
if [[ -n "${image_source_path}" && "${image_source_path}" != /nix/store/* ]]; then
ssh_node node01 "rm -f ${image_source_path}" >/dev/null 2>&1 || true
fi
if [[ -n "${vm_watch_output}" ]]; then
ssh_node node01 "rm -f ${vm_watch_output} ${vm_watch_output}.pid ${vm_watch_output}.stderr" >/dev/null 2>&1 || true
fi
+ if [[ -n "${token:-}" && -n "${demo_http_sg_id:-}" ]]; then
+ api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null 2>&1 || true
+ fi
stop_ssh_tunnel node05 "${node05_coronafs_tunnel}"
stop_ssh_tunnel node04 "${node04_coronafs_tunnel}"
stop_ssh_tunnel node01 "${coronafs_tunnel}"
@@ -4149,48 +4433,64 @@ validate_vm_storage_flow() {
stop_ssh_tunnel node01 "${ls_tunnel}"
stop_ssh_tunnel node01 "${prism_tunnel}"
stop_ssh_tunnel node01 "${iam_tunnel}"
+ stop_ssh_tunnel node06 "${gateway_tunnel}"
}
trap cleanup_vm_storage_flow RETURN
wait_for_plasmavmc_workers_registered 15082
- local org_id="vm-smoke-org"
- local project_id="vm-smoke-project"
+ local org_id="${MATRIX_TENANT_ORG_ID}"
+ local project_id="${MATRIX_TENANT_PROJECT_ID}"
local principal_id="plasmavmc-smoke-$(date +%s)"
local token
+ local demo_state_json=""
+ local demo_visit_json=""
token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")"
- log "Matrix case: PlasmaVMC + PrismNet"
- vpc_id="$(create_prismnet_vpc_with_retry \
- "${token}" \
- "${org_id}" \
- "${project_id}" \
- "vm-network-vpc" \
- "vm storage matrix networking" \
- "10.62.0.0/16" | jq -r '.vpc.id')"
- [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to create PrismNet VPC for PlasmaVMC matrix"
-
- subnet_id="$(grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg vpc "${vpc_id}" '{vpcId:$vpc, name:"vm-network-subnet", description:"vm storage matrix subnet", cidrBlock:"10.62.10.0/24", gatewayIp:"10.62.10.1", dhcpEnabled:true}')" \
- 127.0.0.1:15081 prismnet.SubnetService/CreateSubnet | jq -r '.subnet.id')"
- [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to create PrismNet subnet for PlasmaVMC matrix"
-
- local prismnet_port_response
- prismnet_port_response="$(grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, name:"vm-network-port", description:"vm storage matrix port", ipAddress:""}')" \
- 127.0.0.1:15081 prismnet.PortService/CreatePort)"
- port_id="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.id')"
- port_ip="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.ipAddress')"
- port_mac="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.macAddress')"
- [[ -n "${port_id}" && "${port_id}" != "null" ]] || die "failed to create PrismNet port for PlasmaVMC matrix"
- [[ -n "${port_ip}" && "${port_ip}" != "null" ]] || die "PrismNet port ${port_id} did not return an IP address"
- [[ -n "${port_mac}" && "${port_mac}" != "null" ]] || die "PrismNet port ${port_id} did not return a MAC address"
+ log "Matrix case: PlasmaVMC + declarative PrismNet tenant networking"
+ vpc_id="$(api_gateway_request GET "${token}" "/api/v1/vpcs" \
+ | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" '.data.vpcs[] | select(.name == $name) | .id')"
+ [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to locate declarative PrismNet VPC ${MATRIX_TENANT_VPC_NAME} for PlasmaVMC matrix"
+ subnet_id="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}" \
+ | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" '.data.subnets[] | select(.name == $name) | .id')"
+ [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to locate declarative PrismNet subnet ${MATRIX_TENANT_SUBNET_NAME} for PlasmaVMC matrix"
+ default_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \
+ | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')"
+ web_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \
+ | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')"
+ [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_DEFAULT_SG_NAME} for PlasmaVMC matrix"
+ [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_WEB_SG_NAME} for PlasmaVMC matrix"
+ demo_http_sg_id="$(
+ api_gateway_request POST "${token}" "/api/v1/security-groups" "$(
+ jq -cn \
+ --arg name "vm-demo-web-$(date +%s)" \
+ --arg org "${org_id}" \
+ --arg project "${project_id}" \
+ '{
+ name:$name,
+ org_id:$org,
+ project_id:$project,
+ description:"temporary ingress for the VM web demo"
+ }'
+ )" | jq -r '.data.id'
+ )"
+ [[ -n "${demo_http_sg_id}" && "${demo_http_sg_id}" != "null" ]] || die "failed to create a temporary security group for the VM web demo"
+ api_gateway_request POST "${token}" "/api/v1/security-groups/${demo_http_sg_id}/rules" "$(
+ jq -cn \
+ --arg org "${org_id}" \
+ --arg project "${project_id}" \
+ --argjson port "${VM_DEMO_HTTP_PORT}" \
+ '{
+ org_id:$org,
+ project_id:$project,
+ direction:"ingress",
+ protocol:"tcp",
+ port_range_min:$port,
+ port_range_max:$port,
+ remote_cidr:"0.0.0.0/0",
+ description:"allow worker-originated HTTP checks for the VM web demo"
+ }'
+ )" >/dev/null
ensure_lightningstor_bucket 15086 "${token}" "plasmavmc-images" "${org_id}" "${project_id}"
wait_for_lightningstor_write_quorum 15086 "${token}" "plasmavmc-images" "PlasmaVMC image import"
@@ -4311,6 +4611,10 @@ EOS
--arg org "${org_id}" \
--arg project "${project_id}" \
--arg image_id "${image_id}" \
+ --arg subnet_id "${subnet_id}" \
+ --arg default_sg_id "${default_sg_id}" \
+ --arg web_sg_id "${web_sg_id}" \
+ --arg demo_http_sg_id "${demo_http_sg_id}" \
'{
name:$name,
org_id:$org,
@@ -4330,74 +4634,28 @@ EOS
source:{type:"blank"},
size_gib:2
}
+ ],
+ network:[
+ {
+ id:"tenant0",
+ subnet_id:$subnet_id,
+ model:"virtio-net",
+ security_groups:[$default_sg_id, $web_sg_id, $demo_http_sg_id]
+ }
]
}'
)"
- local create_vm_grpc_json
- create_vm_grpc_json="$(
- jq -cn \
- --arg name "$(printf '%s' "${create_vm_rest_json}" | jq -r '.name')" \
- --arg org "${org_id}" \
- --arg project "${project_id}" \
- --arg image_id "${image_id}" \
- --arg subnet_id "${subnet_id}" \
- --arg port_id "${port_id}" \
- '{
- name:$name,
- orgId:$org,
- projectId:$project,
- hypervisor:"HYPERVISOR_TYPE_KVM",
- spec:{
- cpu:{vcpus:1, coresPerSocket:1, sockets:1},
- memory:{sizeMib:1024},
- disks:[
- {
- id:"root",
- source:{imageId:$image_id},
- sizeGib:4,
- bus:"DISK_BUS_VIRTIO",
- cache:"DISK_CACHE_WRITEBACK",
- bootIndex:1
- },
- {
- id:"data",
- source:{blank:true},
- sizeGib:2,
- bus:"DISK_BUS_VIRTIO",
- cache:"DISK_CACHE_WRITEBACK"
- }
- ],
- network:[
- {
- id:"tenant0",
- subnetId:$subnet_id,
- portId:$port_id,
- model:"NIC_MODEL_VIRTIO_NET"
- }
- ]
- }
- }'
- )"
-
local create_response vm_id
- create_response="$(
- ssh_node_script node01 "${node01_proto_root}" "${token}" "$(printf '%s' "${create_vm_grpc_json}" | base64 | tr -d '\n')" <<'EOS'
-set -euo pipefail
-proto_root="$1"
-token="$2"
-request_b64="$3"
-request_json="$(printf '%s' "${request_b64}" | base64 -d)"
-grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${proto_root}/plasmavmc" \
- -proto "${proto_root}/plasmavmc/plasmavmc.proto" \
- -d "${request_json}" \
- 127.0.0.1:50082 plasmavmc.v1.VmService/CreateVm
-EOS
- )"
- vm_id="$(printf '%s' "${create_response}" | jq -r '.id')"
+ create_response="$(api_gateway_request POST "${token}" "/api/v1/vms" "${create_vm_rest_json}")"
+ vm_id="$(printf '%s' "${create_response}" | jq -r '.data.id')"
[[ -n "${vm_id}" && "${vm_id}" != "null" ]] || die "failed to create VM through PlasmaVMC"
+ port_id="$(printf '%s' "${create_response}" | jq -r '.data.network[0].port_id // empty')"
+ port_ip="$(printf '%s' "${create_response}" | jq -r '.data.network[0].ip_address // empty')"
+ port_mac="$(printf '%s' "${create_response}" | jq -r '.data.network[0].mac_address // empty')"
+ [[ -n "${port_id}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet port_id"
+ [[ -n "${port_ip}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet IP address"
+ [[ -n "${port_mac}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet MAC address"
vm_watch_output="/tmp/plasmavmc-watch-${vm_id}.json"
start_plasmavmc_vm_watch node01 "${node01_proto_root}" "${token}" "${org_id}" "${project_id}" "${vm_id}" "${vm_watch_output}"
sleep 2
@@ -4435,7 +4693,12 @@ EOS
current_worker_coronafs_port=35088
peer_worker_coronafs_port=25088
fi
- wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}" >/dev/null
+ local vm_spec_json volume_id data_volume_id
+ vm_spec_json="$(wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}")"
+ volume_id="$(vm_disk_volume_id_from_json "${vm_spec_json}" "root")"
+ data_volume_id="$(vm_disk_volume_id_from_json "${vm_spec_json}" "data")"
+ [[ -n "${volume_id}" ]] || die "failed to resolve root volume ID from VM spec"
+ [[ -n "${data_volume_id}" ]] || die "failed to resolve data volume ID from VM spec"
wait_for_prismnet_port_binding "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" "${vm_id}" >/dev/null
grpcurl -plaintext \
@@ -4465,8 +4728,6 @@ EOS
done
log "Matrix case: PlasmaVMC + PrismNet + CoronaFS + LightningStor"
- local volume_id="${vm_id}-root"
- local data_volume_id="${vm_id}-data"
local volume_path="${CORONAFS_VOLUME_ROOT}/${volume_id}.raw"
local data_volume_path="${CORONAFS_VOLUME_ROOT}/${data_volume_id}.raw"
local volume_export_json data_volume_export_json volume_uri data_volume_uri
@@ -4498,6 +4759,12 @@ EOS
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM startup"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=1"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=1"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=1"
+ wait_for_vm_demo_http "${node_id}" "${port_ip}"
+ demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
+ assert_vm_demo_state "${demo_state_json}" 0 1 1
+ demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
+ assert_vm_demo_state "${demo_visit_json}" 1 1 1
local get_root_volume_json get_data_volume_json
local root_volume_state_json data_volume_state_json
local root_attachment_generation data_attachment_generation
@@ -4604,6 +4871,12 @@ EOS
fi
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=2"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=2"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=2"
+ wait_for_vm_demo_http "${node_id}" "${port_ip}"
+ demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
+ assert_vm_demo_state "${demo_state_json}" 1 2 2
+ demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
+ assert_vm_demo_state "${demo_visit_json}" 2 2 2
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM restart"
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
@@ -4686,7 +4959,12 @@ EOS
wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}"
wait_for_qemu_volume_absent "${source_node}" "${volume_path}" "${source_volume_qemu_ref}"
wait_for_qemu_volume_absent "${source_node}" "${data_volume_path}" "${source_data_volume_qemu_ref}"
- wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=2"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=3"
+ wait_for_vm_demo_http "${node_id}" "${port_ip}"
+ demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
+ assert_vm_demo_state "${demo_state_json}" 2 3 3
+ demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
+ assert_vm_demo_state "${demo_visit_json}" 3 3 3
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
[[ "$(printf '%s' "${root_volume_state_json}" | jq -r '.attachedToNode // empty')" == "${node_id}" ]] || die "root volume ${volume_id} is not owned by migrated node ${node_id}"
@@ -4768,8 +5046,12 @@ EOS
[[ -n "${current_data_volume_qemu_ref}" ]] || die "worker ${node_id} did not republish an attachable local ref for ${data_volume_id} after post-migration restart"
wait_for_qemu_volume_present "${node_id}" "${volume_path}" "${current_volume_qemu_ref}"
wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}"
- wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=3"
- wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=3"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=4"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=4"
+ wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=4"
+ wait_for_vm_demo_http "${node_id}" "${port_ip}"
+ demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
+ assert_vm_demo_state "${demo_state_json}" 3 4 4
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM post-migration restart"
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
@@ -4830,7 +5112,10 @@ EOS
done
wait_for_plasmavmc_vm_watch_completion node01 "${vm_watch_output}" 60
assert_plasmavmc_vm_watch_events node01 "${vm_watch_output}" "${vm_id}"
- wait_for_prismnet_port_detachment "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null
+ wait_for_prismnet_port_absent "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null
+ port_id=""
+ api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null
+ demo_http_sg_id=""
ssh_node "${node_id}" "bash -lc '[[ ! -d $(printf '%q' "$(vm_runtime_dir_path "${vm_id}")") ]]'"
ssh_node node01 "bash -lc '[[ ! -f ${volume_path} ]]'"
@@ -4879,28 +5164,6 @@ EOS
die "shared-fs VM data volume unexpectedly persisted to LightningStor object storage"
fi
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \
- 127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null
- port_id=""
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \
- 127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null
- subnet_id=""
- grpcurl -plaintext \
- -H "authorization: Bearer ${token}" \
- -import-path "${PRISMNET_PROTO_DIR}" \
- -proto "${PRISMNET_PROTO}" \
- -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \
- 127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null
- vpc_id=""
-
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PLASMAVMC_PROTO_DIR}" \
@@ -6890,8 +7153,11 @@ benchmark_plasmavmc_guest_runtime() {
fi
local start_ns attach_ns ready_ns attach_sec ready_sec
- local root_volume_id="${vm_id}-root"
- local data_volume_id="${vm_id}-data"
+ local root_volume_id data_volume_id
+ root_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "root")"
+ data_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "data")"
+ [[ -n "${root_volume_id}" ]] || die "runtime benchmark VM did not expose a root volume ID"
+ [[ -n "${data_volume_id}" ]] || die "runtime benchmark VM did not expose a data volume ID"
local root_uri data_uri
start_ns="$(date +%s%N)"
@@ -7434,6 +7700,7 @@ validate_cluster() {
validate_control_plane
validate_iam_flow
validate_prismnet_flow
+ validate_tenant_networking_flow
validate_flashdns_flow
validate_fiberlb_flow
validate_workers
@@ -7484,6 +7751,16 @@ fresh_storage_smoke_requested() {
storage_smoke_requested
}
+demo_vm_webapp_requested() {
+ start_requested "$@"
+ validate_vm_storage_flow
+}
+
+fresh_demo_vm_webapp_requested() {
+ clean_requested "$@"
+ demo_vm_webapp_requested "$@"
+}
+
matrix_requested() {
start_requested "$@"
validate_component_matrix
@@ -7771,6 +8048,8 @@ Commands:
fresh-smoke clean local runtime state, rebuild on the host, start, and validate
storage-smoke start the storage lab (node01-05) and validate CoronaFS/LightningStor/PlasmaVMC
fresh-storage-smoke clean local runtime state, rebuild node01-05 on the host, start, and validate the storage lab
+ demo-vm-webapp start the cluster and run the VM web app demo with persistent volume state
+ fresh-demo-vm-webapp clean local runtime state, rebuild on the host, start, and run the VM web app demo
matrix Start the cluster and validate composed service configurations against the current running VMs
fresh-matrix clean local runtime state, rebuild on the host, start, and validate composed service configurations
bench-storage start the cluster and benchmark CoronaFS plus LightningStor against the current running VMs
@@ -7797,6 +8076,8 @@ Examples:
$0 fresh-smoke
$0 storage-smoke
$0 fresh-storage-smoke
+ $0 demo-vm-webapp
+ $0 fresh-demo-vm-webapp
$0 matrix
$0 fresh-matrix
$0 bench-storage
@@ -7830,6 +8111,8 @@ main() {
fresh-smoke) fresh_smoke_requested "$@" ;;
storage-smoke) storage_smoke_requested ;;
fresh-storage-smoke) fresh_storage_smoke_requested ;;
+ demo-vm-webapp) demo_vm_webapp_requested "$@" ;;
+ fresh-demo-vm-webapp) fresh_demo_vm_webapp_requested "$@" ;;
matrix) matrix_requested "$@" ;;
fresh-matrix) fresh_matrix_requested "$@" ;;
bench-storage) bench_storage_requested "$@" ;;
diff --git a/nix/test-cluster/vm-guest-image.nix b/nix/test-cluster/vm-guest-image.nix
index b9e0b9c..e758fab 100644
--- a/nix/test-cluster/vm-guest-image.nix
+++ b/nix/test-cluster/vm-guest-image.nix
@@ -1,6 +1,132 @@
{ modulesPath, lib, pkgs, ... }:
-{
+let
+ photonVmDemoApi = pkgs.writeText "photon-vm-demo-api.py" ''
+ import json
+ import os
+ import socket
+ import sqlite3
+ from http import HTTPStatus
+ from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
+
+ DATA_MOUNT = "/mnt/photon-vm-data"
+ DB_PATH = os.path.join(DATA_MOUNT, "demo.sqlite3")
+ ROOT_BOOT_COUNT_PATH = "/var/lib/photon-vm-smoke/boot-count"
+ DATA_BOOT_COUNT_PATH = os.path.join(DATA_MOUNT, "boot-count")
+ CONSOLE_PATH = "/dev/ttyS0"
+ LISTEN_HOST = "0.0.0.0"
+ LISTEN_PORT = 8080
+
+
+ def log_console(message: str) -> None:
+ try:
+ with open(CONSOLE_PATH, "a", encoding="utf-8") as console:
+ console.write(message + "\n")
+ except OSError:
+ pass
+
+
+ def read_int(path: str) -> int:
+ try:
+ with open(path, "r", encoding="utf-8") as handle:
+ return int(handle.read().strip() or "0")
+ except (FileNotFoundError, ValueError, OSError):
+ return 0
+
+
+ def init_db() -> None:
+ os.makedirs(DATA_MOUNT, exist_ok=True)
+ conn = sqlite3.connect(DB_PATH)
+ try:
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)"
+ )
+ conn.execute(
+ "INSERT INTO counters (name, value) VALUES ('visits', 0) "
+ "ON CONFLICT(name) DO NOTHING"
+ )
+ conn.commit()
+ finally:
+ conn.close()
+
+
+ def current_state(increment: bool = False) -> dict:
+ conn = sqlite3.connect(DB_PATH, timeout=30)
+ try:
+ conn.execute(
+ "CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)"
+ )
+ conn.execute(
+ "INSERT INTO counters (name, value) VALUES ('visits', 0) "
+ "ON CONFLICT(name) DO NOTHING"
+ )
+ if increment:
+ conn.execute(
+ "UPDATE counters SET value = value + 1 WHERE name = 'visits'"
+ )
+ visits = conn.execute(
+ "SELECT value FROM counters WHERE name = 'visits'"
+ ).fetchone()[0]
+ conn.commit()
+ finally:
+ conn.close()
+ return {
+ "status": "ok",
+ "hostname": socket.gethostname(),
+ "listen_port": LISTEN_PORT,
+ "db_path": DB_PATH,
+ "visits": visits,
+ "root_boot_count": read_int(ROOT_BOOT_COUNT_PATH),
+ "data_boot_count": read_int(DATA_BOOT_COUNT_PATH),
+ }
+
+
+ class Handler(BaseHTTPRequestHandler):
+ server_version = "PhotonVMDemo/1.0"
+
+ def log_message(self, format: str, *args) -> None:
+ return
+
+ def _send_json(self, payload: dict, status: int = HTTPStatus.OK) -> None:
+ body = json.dumps(payload, sort_keys=True).encode("utf-8")
+ self.send_response(status)
+ self.send_header("Content-Type", "application/json")
+ self.send_header("Content-Length", str(len(body)))
+ self.end_headers()
+ self.wfile.write(body)
+
+ def do_GET(self) -> None:
+ if self.path == "/health":
+ self._send_json({"status": "ok"})
+ return
+ if self.path == "/state":
+ self._send_json(current_state())
+ return
+ self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
+
+ def do_POST(self) -> None:
+ if self.path == "/visit":
+ payload = current_state(increment=True)
+ log_console("PHOTON_VM_DEMO_VISIT visits=%s" % payload["visits"])
+ self._send_json(payload)
+ return
+ self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
+
+
+ def main() -> None:
+ init_db()
+ server = ThreadingHTTPServer((LISTEN_HOST, LISTEN_PORT), Handler)
+ log_console(
+ "PHOTON_VM_DEMO_WEB_READY count=%s port=%s db=%s"
+ % (read_int(ROOT_BOOT_COUNT_PATH), LISTEN_PORT, DB_PATH)
+ )
+ server.serve_forever()
+
+
+ if __name__ == "__main__":
+ main()
+ '';
+in {
imports = [
(modulesPath + "/virtualisation/disk-image.nix")
(modulesPath + "/profiles/qemu-guest.nix")
@@ -18,6 +144,7 @@
networking.hostName = "photon-vm-smoke";
networking.useDHCP = lib.mkDefault true;
+ networking.firewall.enable = false;
services.getty.autologinUser = "root";
users.mutableUsers = false;
@@ -144,5 +271,35 @@
'';
};
+ systemd.services.photon-vm-demo-api = {
+ description = "PhotonCloud VM demo web app";
+ wantedBy = [ "multi-user.target" ];
+ wants = [ "network-online.target" "photon-vm-smoke.service" ];
+ after = [ "network-online.target" "photon-vm-smoke.service" ];
+ path = with pkgs; [
+ bash
+ coreutils
+ python3
+ util-linux
+ ];
+ serviceConfig = {
+ Type = "simple";
+ Restart = "always";
+ RestartSec = "1";
+ };
+ script = ''
+ deadline=$((SECONDS + 60))
+ while ! mountpoint -q /mnt/photon-vm-data; do
+ if [ "$SECONDS" -ge "$deadline" ]; then
+ echo "PHOTON_VM_DEMO_WEB_ERROR step=mount-timeout" >/dev/ttyS0
+ exit 1
+ fi
+ sleep 1
+ done
+
+ exec python3 ${photonVmDemoApi}
+ '';
+ };
+
system.stateVersion = "24.05";
}
diff --git a/plasmavmc/Cargo.lock b/plasmavmc/Cargo.lock
index 1d4a95b..8de1f77 100644
--- a/plasmavmc/Cargo.lock
+++ b/plasmavmc/Cargo.lock
@@ -37,17 +37,6 @@ dependencies = [
"subtle",
]
-[[package]]
-name = "ahash"
-version = "0.7.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9"
-dependencies = [
- "getrandom 0.2.17",
- "once_cell",
- "version_check",
-]
-
[[package]]
name = "ahash"
version = "0.8.12"
@@ -134,15 +123,6 @@ dependencies = [
"windows-sys 0.61.2",
]
-[[package]]
-name = "anyerror"
-version = "0.1.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71add24cc141a1e8326f249b74c41cfd217aeb2a67c9c6cf9134d175469afd49"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "anyhow"
version = "1.0.102"
@@ -172,12 +152,6 @@ dependencies = [
"password-hash",
]
-[[package]]
-name = "arrayvec"
-version = "0.7.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
-
[[package]]
name = "async-stream"
version = "0.3.6"
@@ -197,7 +171,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -208,7 +182,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -264,11 +238,9 @@ dependencies = [
"axum-core 0.4.5",
"bytes",
"futures-util",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"http-body-util",
- "hyper 1.8.1",
- "hyper-util",
"itoa",
"matchit 0.7.3",
"memchr",
@@ -277,15 +249,10 @@ dependencies = [
"pin-project-lite",
"rustversion",
"serde",
- "serde_json",
- "serde_path_to_error",
- "serde_urlencoded",
- "sync_wrapper 1.0.2",
- "tokio",
+ "sync_wrapper",
"tower 0.5.3",
"tower-layer",
"tower-service",
- "tracing",
]
[[package]]
@@ -298,10 +265,10 @@ dependencies = [
"bytes",
"form_urlencoded",
"futures-util",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"http-body-util",
- "hyper 1.8.1",
+ "hyper",
"hyper-util",
"itoa",
"matchit 0.8.4",
@@ -314,7 +281,7 @@ dependencies = [
"serde_json",
"serde_path_to_error",
"serde_urlencoded",
- "sync_wrapper 1.0.2",
+ "sync_wrapper",
"tokio",
"tower 0.5.3",
"tower-layer",
@@ -331,16 +298,15 @@ dependencies = [
"async-trait",
"bytes",
"futures-util",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"http-body-util",
"mime",
"pin-project-lite",
"rustversion",
- "sync_wrapper 1.0.2",
+ "sync_wrapper",
"tower-layer",
"tower-service",
- "tracing",
]
[[package]]
@@ -351,29 +317,17 @@ checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
dependencies = [
"bytes",
"futures-core",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"http-body-util",
"mime",
"pin-project-lite",
- "sync_wrapper 1.0.2",
+ "sync_wrapper",
"tower-layer",
"tower-service",
"tracing",
]
-[[package]]
-name = "base64"
-version = "0.13.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8"
-
-[[package]]
-name = "base64"
-version = "0.21.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
-
[[package]]
name = "base64"
version = "0.22.1"
@@ -386,57 +340,12 @@ version = "1.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2af50177e190e07a26ab74f8b1efbfe2ef87da2116221318cb1c2e82baf7de06"
-[[package]]
-name = "bincode"
-version = "1.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "bindgen"
-version = "0.72.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"
-dependencies = [
- "bitflags 2.11.0",
- "cexpr",
- "clang-sys",
- "itertools 0.13.0",
- "proc-macro2",
- "quote",
- "regex",
- "rustc-hash",
- "shlex",
- "syn 2.0.117",
-]
-
-[[package]]
-name = "bitflags"
-version = "1.3.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
-
[[package]]
name = "bitflags"
version = "2.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
-[[package]]
-name = "bitvec"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bc2832c24239b0141d5674bb9174f9d68a8b5b3f2753311927c172ca46f7e9c"
-dependencies = [
- "funty",
- "radium",
- "tap",
- "wyz",
-]
-
[[package]]
name = "blake2"
version = "0.10.6"
@@ -455,70 +364,12 @@ dependencies = [
"generic-array",
]
-[[package]]
-name = "borsh"
-version = "1.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cfd1e3f8955a5d7de9fab72fc8373fade9fb8a703968cb200ae3dc6cf08e185a"
-dependencies = [
- "borsh-derive",
- "bytes",
- "cfg_aliases",
-]
-
-[[package]]
-name = "borsh-derive"
-version = "1.6.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bfcfdc083699101d5a7965e49925975f2f55060f94f9a05e7187be95d530ca59"
-dependencies = [
- "once_cell",
- "proc-macro-crate",
- "proc-macro2",
- "quote",
- "syn 2.0.117",
-]
-
[[package]]
name = "bumpalo"
version = "3.20.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb"
-[[package]]
-name = "byte-unit"
-version = "5.2.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8c6d47a4e2961fb8721bcfc54feae6455f2f64e7054f9bc67e875f0e77f4c58d"
-dependencies = [
- "rust_decimal",
- "schemars",
- "serde",
- "utf8-width",
-]
-
-[[package]]
-name = "bytecheck"
-version = "0.6.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2"
-dependencies = [
- "bytecheck_derive",
- "ptr_meta",
- "simdutf8",
-]
-
-[[package]]
-name = "bytecheck_derive"
-version = "0.6.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "byteorder"
version = "1.5.0"
@@ -531,16 +382,6 @@ version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"
-[[package]]
-name = "bzip2-sys"
-version = "0.1.13+1.0.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "225bff33b2141874fe80d71e07d6eec4f85c5c216453dd96388240f96e1acc14"
-dependencies = [
- "cc",
- "pkg-config",
-]
-
[[package]]
name = "cc"
version = "1.2.57"
@@ -553,15 +394,6 @@ dependencies = [
"shlex",
]
-[[package]]
-name = "cexpr"
-version = "0.6.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766"
-dependencies = [
- "nom",
-]
-
[[package]]
name = "cfg-if"
version = "1.0.4"
@@ -574,26 +406,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"
-[[package]]
-name = "chainfire-api"
-version = "0.1.0"
-dependencies = [
- "async-trait",
- "bincode",
- "chainfire-raft",
- "chainfire-storage",
- "chainfire-types",
- "chainfire-watch",
- "futures",
- "prost",
- "prost-types",
- "tokio",
- "tokio-stream",
- "tonic",
- "tonic-build",
- "tracing",
-]
-
[[package]]
name = "chainfire-client"
version = "0.1.0"
@@ -610,24 +422,6 @@ dependencies = [
"tracing",
]
-[[package]]
-name = "chainfire-gossip"
-version = "0.1.0"
-dependencies = [
- "bincode",
- "bytes",
- "chainfire-types",
- "dashmap",
- "foca",
- "futures",
- "parking_lot",
- "rand 0.9.2",
- "serde",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
-]
-
[[package]]
name = "chainfire-proto"
version = "0.1.0"
@@ -641,77 +435,6 @@ dependencies = [
"tonic-build",
]
-[[package]]
-name = "chainfire-raft"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "async-trait",
- "bincode",
- "bytes",
- "chainfire-storage",
- "chainfire-types",
- "dashmap",
- "futures",
- "parking_lot",
- "rand 0.8.5",
- "serde",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
-]
-
-[[package]]
-name = "chainfire-server"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "async-trait",
- "axum 0.7.9",
- "chainfire-api",
- "chainfire-gossip",
- "chainfire-raft",
- "chainfire-storage",
- "chainfire-types",
- "chainfire-watch",
- "chrono",
- "clap",
- "config",
- "futures",
- "http 1.4.0",
- "http-body-util",
- "metrics",
- "metrics-exporter-prometheus",
- "reqwest 0.12.28",
- "serde",
- "serde_json",
- "tokio",
- "toml 0.8.23",
- "tonic",
- "tonic-health",
- "tower 0.5.3",
- "tower-http",
- "tracing",
- "tracing-subscriber",
- "uuid",
-]
-
-[[package]]
-name = "chainfire-storage"
-version = "0.1.0"
-dependencies = [
- "async-trait",
- "bincode",
- "bytes",
- "chainfire-types",
- "dashmap",
- "parking_lot",
- "rocksdb",
- "serde",
- "tokio",
- "tracing",
-]
-
[[package]]
name = "chainfire-types"
version = "0.1.0"
@@ -721,19 +444,6 @@ dependencies = [
"thiserror 1.0.69",
]
-[[package]]
-name = "chainfire-watch"
-version = "0.1.0"
-dependencies = [
- "chainfire-types",
- "dashmap",
- "futures",
- "parking_lot",
- "tokio",
- "tokio-stream",
- "tracing",
-]
-
[[package]]
name = "chrono"
version = "0.4.44"
@@ -758,16 +468,6 @@ dependencies = [
"inout",
]
-[[package]]
-name = "clang-sys"
-version = "1.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4"
-dependencies = [
- "glob",
- "libc",
-]
-
[[package]]
name = "clap"
version = "4.6.0"
@@ -799,7 +499,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -817,15 +517,6 @@ dependencies = [
"cc",
]
-[[package]]
-name = "cobs"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0fa961b519f0b462e3a3b4a34b64d119eeaca1d59af726fe450bbba07a9fc0a1"
-dependencies = [
- "thiserror 2.0.18",
-]
-
[[package]]
name = "colorchoice"
version = "1.0.5"
@@ -841,35 +532,6 @@ dependencies = [
"crossbeam-utils",
]
-[[package]]
-name = "config"
-version = "0.13.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "23738e11972c7643e4ec947840fc463b6a571afcd3e735bdfce7d03c7a784aca"
-dependencies = [
- "async-trait",
- "json5",
- "lazy_static",
- "nom",
- "pathdiff",
- "ron",
- "rust-ini",
- "serde",
- "serde_json",
- "toml 0.5.11",
- "yaml-rust",
-]
-
-[[package]]
-name = "core-foundation"
-version = "0.9.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
[[package]]
name = "core-foundation"
version = "0.10.1"
@@ -910,32 +572,6 @@ version = "2.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5"
-[[package]]
-name = "creditservice-api"
-version = "0.1.0"
-dependencies = [
- "apigateway-api",
- "async-trait",
- "chrono",
- "creditservice-proto",
- "creditservice-types",
- "flaredb-client",
- "iam-types",
- "photon-auth-client",
- "prost",
- "prost-types",
- "reqwest 0.11.27",
- "serde",
- "serde_json",
- "sqlx",
- "thiserror 1.0.69",
- "tokio",
- "tonic",
- "tonic-health",
- "tracing",
- "uuid",
-]
-
[[package]]
name = "creditservice-client"
version = "0.1.0"
@@ -958,17 +594,6 @@ dependencies = [
"tonic-build",
]
-[[package]]
-name = "creditservice-types"
-version = "0.1.0"
-dependencies = [
- "chrono",
- "rust_decimal",
- "serde",
- "thiserror 1.0.69",
- "uuid",
-]
-
[[package]]
name = "crossbeam-epoch"
version = "0.9.18"
@@ -1036,27 +661,6 @@ dependencies = [
"powerfmt",
]
-[[package]]
-name = "derive_more"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05"
-dependencies = [
- "derive_more-impl",
-]
-
-[[package]]
-name = "derive_more-impl"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.117",
- "unicode-xid",
-]
-
[[package]]
name = "digest"
version = "0.10.7"
@@ -1076,15 +680,9 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
-[[package]]
-name = "dlv-list"
-version = "0.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0688c2a7f92e427f44895cd63841bff7b29f8d7a1648b9e7e07a4a365b2e1257"
-
[[package]]
name = "dotenvy"
version = "0.15.7"
@@ -1097,12 +695,6 @@ version = "1.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
-[[package]]
-name = "dyn-clone"
-version = "1.0.20"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555"
-
[[package]]
name = "either"
version = "1.15.0"
@@ -1112,15 +704,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "encoding_rs"
-version = "0.8.35"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "75030f3c4f45dafd7586dd6780965a8c7e8e285a5ecb86713e63a79c5b2766f3"
-dependencies = [
- "cfg-if",
-]
-
[[package]]
name = "equivalent"
version = "1.0.2"
@@ -1200,93 +783,6 @@ dependencies = [
"tonic-build",
]
-[[package]]
-name = "flaredb-raft"
-version = "0.1.0"
-dependencies = [
- "bincode",
- "flaredb-proto",
- "flaredb-storage",
- "flaredb-types",
- "openraft",
- "serde",
- "serde_json",
- "tokio",
- "tonic",
- "tracing",
-]
-
-[[package]]
-name = "flaredb-server"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "async-trait",
- "axum 0.8.4",
- "chrono",
- "clap",
- "config",
- "flaredb-client",
- "flaredb-proto",
- "flaredb-raft",
- "flaredb-sql",
- "flaredb-storage",
- "flaredb-types",
- "futures",
- "metrics",
- "metrics-exporter-prometheus",
- "openraft",
- "prost",
- "rocksdb",
- "serde",
- "serde_json",
- "sha2",
- "tokio",
- "tokio-stream",
- "toml 0.8.23",
- "tonic",
- "tonic-health",
- "tracing",
- "tracing-subscriber",
- "uuid",
-]
-
-[[package]]
-name = "flaredb-sql"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "bincode",
- "bytes",
- "flaredb-client",
- "flaredb-proto",
- "serde",
- "serde_json",
- "sqlparser",
- "thiserror 1.0.69",
- "tokio",
- "tonic",
- "tracing",
-]
-
-[[package]]
-name = "flaredb-storage"
-version = "0.1.0"
-dependencies = [
- "async-trait",
- "rocksdb",
- "thiserror 1.0.69",
-]
-
-[[package]]
-name = "flaredb-types"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "serde",
- "thiserror 1.0.69",
-]
-
[[package]]
name = "flume"
version = "0.11.1"
@@ -1304,19 +800,6 @@ version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
-[[package]]
-name = "foca"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1f59e967f3f675997e4a4a6b99d2a75148d59d64c46211b78b4f34ebb951b273"
-dependencies = [
- "bytes",
- "postcard",
- "rand 0.9.2",
- "serde",
- "tracing",
-]
-
[[package]]
name = "foldhash"
version = "0.1.5"
@@ -1338,12 +821,6 @@ version = "1.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
-[[package]]
-name = "funty"
-version = "2.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c"
-
[[package]]
name = "futures"
version = "0.3.32"
@@ -1411,7 +888,7 @@ checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -1490,37 +967,12 @@ dependencies = [
"polyval",
]
-[[package]]
-name = "glob"
-version = "0.3.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280"
-
[[package]]
name = "glob-match"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9985c9503b412198aa4197559e9a318524ebc4519c229bfa05a535828c950b9d"
-[[package]]
-name = "h2"
-version = "0.3.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d"
-dependencies = [
- "bytes",
- "fnv",
- "futures-core",
- "futures-sink",
- "futures-util",
- "http 0.2.12",
- "indexmap 2.13.0",
- "slab",
- "tokio",
- "tokio-util",
- "tracing",
-]
-
[[package]]
name = "h2"
version = "0.4.13"
@@ -1532,7 +984,7 @@ dependencies = [
"fnv",
"futures-core",
"futures-sink",
- "http 1.4.0",
+ "http",
"indexmap 2.13.0",
"slab",
"tokio",
@@ -1545,9 +997,6 @@ name = "hashbrown"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"
-dependencies = [
- "ahash 0.7.8",
-]
[[package]]
name = "hashbrown"
@@ -1555,7 +1004,7 @@ version = "0.14.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"
dependencies = [
- "ahash 0.8.12",
+ "ahash",
]
[[package]]
@@ -1629,17 +1078,6 @@ dependencies = [
"windows-sys 0.61.2",
]
-[[package]]
-name = "http"
-version = "0.2.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1"
-dependencies = [
- "bytes",
- "fnv",
- "itoa",
-]
-
[[package]]
name = "http"
version = "1.4.0"
@@ -1650,17 +1088,6 @@ dependencies = [
"itoa",
]
-[[package]]
-name = "http-body"
-version = "0.4.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2"
-dependencies = [
- "bytes",
- "http 0.2.12",
- "pin-project-lite",
-]
-
[[package]]
name = "http-body"
version = "1.0.1"
@@ -1668,7 +1095,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184"
dependencies = [
"bytes",
- "http 1.4.0",
+ "http",
]
[[package]]
@@ -1679,8 +1106,8 @@ checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a"
dependencies = [
"bytes",
"futures-core",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"pin-project-lite",
]
@@ -1696,30 +1123,6 @@ version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
-[[package]]
-name = "hyper"
-version = "0.14.32"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41dfc780fdec9373c01bae43289ea34c972e40ee3c9f6b3c8801a35f35586ce7"
-dependencies = [
- "bytes",
- "futures-channel",
- "futures-core",
- "futures-util",
- "h2 0.3.27",
- "http 0.2.12",
- "http-body 0.4.6",
- "httparse",
- "httpdate",
- "itoa",
- "pin-project-lite",
- "socket2 0.5.10",
- "tokio",
- "tower-service",
- "tracing",
- "want",
-]
-
[[package]]
name = "hyper"
version = "1.8.1"
@@ -1730,9 +1133,9 @@ dependencies = [
"bytes",
"futures-channel",
"futures-core",
- "h2 0.4.13",
- "http 1.4.0",
- "http-body 1.0.1",
+ "h2",
+ "http",
+ "http-body",
"httparse",
"httpdate",
"itoa",
@@ -1743,35 +1146,21 @@ dependencies = [
"want",
]
-[[package]]
-name = "hyper-rustls"
-version = "0.24.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590"
-dependencies = [
- "futures-util",
- "http 0.2.12",
- "hyper 0.14.32",
- "rustls 0.21.12",
- "tokio",
- "tokio-rustls 0.24.1",
-]
-
[[package]]
name = "hyper-rustls"
version = "0.27.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58"
dependencies = [
- "http 1.4.0",
- "hyper 1.8.1",
+ "http",
+ "hyper",
"hyper-util",
"log",
- "rustls 0.23.37",
+ "rustls",
"rustls-native-certs",
"rustls-pki-types",
"tokio",
- "tokio-rustls 0.26.4",
+ "tokio-rustls",
"tower-service",
"webpki-roots 1.0.6",
]
@@ -1782,7 +1171,7 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0"
dependencies = [
- "hyper 1.8.1",
+ "hyper",
"hyper-util",
"pin-project-lite",
"tokio",
@@ -1795,13 +1184,13 @@ version = "0.1.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0"
dependencies = [
- "base64 0.22.1",
+ "base64",
"bytes",
"futures-channel",
"futures-util",
- "http 1.4.0",
- "http-body 1.0.1",
- "hyper 1.8.1",
+ "http",
+ "http-body",
+ "hyper",
"ipnet",
"libc",
"percent-encoding",
@@ -1820,7 +1209,7 @@ dependencies = [
"apigateway-api",
"argon2",
"async-trait",
- "base64 0.22.1",
+ "base64",
"iam-audit",
"iam-authn",
"iam-authz",
@@ -1860,12 +1249,12 @@ name = "iam-authn"
version = "0.1.0"
dependencies = [
"async-trait",
- "base64 0.22.1",
+ "base64",
"hmac",
"iam-types",
"jsonwebtoken",
"rand 0.8.5",
- "reqwest 0.12.28",
+ "reqwest",
"serde",
"serde_json",
"sha2",
@@ -1910,7 +1299,7 @@ dependencies = [
name = "iam-service-auth"
version = "0.1.0"
dependencies = [
- "http 1.4.0",
+ "http",
"iam-client",
"iam-types",
"serde_json",
@@ -2134,15 +1523,6 @@ version = "1.70.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695"
-[[package]]
-name = "itertools"
-version = "0.13.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
-dependencies = [
- "either",
-]
-
[[package]]
name = "itertools"
version = "0.14.0"
@@ -2178,24 +1558,13 @@ dependencies = [
"wasm-bindgen",
]
-[[package]]
-name = "json5"
-version = "0.4.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96b0db21af676c1ce64250b5f40f3ce2cf27e4e47cb91ed91eb6fe9350b430c1"
-dependencies = [
- "pest",
- "pest_derive",
- "serde",
-]
-
[[package]]
name = "jsonwebtoken"
version = "9.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a87cc7a48537badeae96744432de36f4be2b4a34a05a5ef32e9dd8a1c169dde"
dependencies = [
- "base64 0.22.1",
+ "base64",
"js-sys",
"pem",
"ring",
@@ -2222,27 +1591,12 @@ version = "0.1.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
"libc",
"plain",
"redox_syscall 0.7.3",
]
-[[package]]
-name = "librocksdb-sys"
-version = "0.17.3+10.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cef2a00ee60fe526157c9023edab23943fae1ce2ab6f4abb2a807c1746835de9"
-dependencies = [
- "bindgen",
- "bzip2-sys",
- "cc",
- "libc",
- "libz-sys",
- "lz4-sys",
- "zstd-sys",
-]
-
[[package]]
name = "libsqlite3-sys"
version = "0.30.1"
@@ -2254,17 +1608,6 @@ dependencies = [
"vcpkg",
]
-[[package]]
-name = "libz-sys"
-version = "1.1.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d52f4c29e2a68ac30c9087e1b772dc9f44a2b66ed44edf2266cf2be9b03dafc1"
-dependencies = [
- "cc",
- "pkg-config",
- "vcpkg",
-]
-
[[package]]
name = "lightningstor-api"
version = "0.1.0"
@@ -2291,12 +1634,6 @@ dependencies = [
"uuid",
]
-[[package]]
-name = "linked-hash-map"
-version = "0.5.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f"
-
[[package]]
name = "linux-raw-sys"
version = "0.12.1"
@@ -2330,22 +1667,6 @@ version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154"
-[[package]]
-name = "lz4-sys"
-version = "1.11.1+lz4-1.10.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6"
-dependencies = [
- "cc",
- "libc",
-]
-
-[[package]]
-name = "maplit"
-version = "1.0.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3e2e65a1a2e43cfcb47a895c4c8b10d1f4a61097f9f254f183aee60cad9c651d"
-
[[package]]
name = "matchers"
version = "0.2.0"
@@ -2389,7 +1710,7 @@ version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3045b4193fbdc5b5681f32f11070da9be3609f189a79f3390706d42587f46bb5"
dependencies = [
- "ahash 0.8.12",
+ "ahash",
"portable-atomic",
]
@@ -2399,10 +1720,10 @@ version = "0.15.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b4f0c8427b39666bf970460908b213ec09b3b350f20c0c2eabcbba51704a08e6"
dependencies = [
- "base64 0.22.1",
+ "base64",
"http-body-util",
- "hyper 1.8.1",
- "hyper-rustls 0.27.7",
+ "hyper",
+ "hyper-rustls",
"hyper-util",
"indexmap 2.13.0",
"ipnet",
@@ -2435,12 +1756,6 @@ version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a"
-[[package]]
-name = "minimal-lexical"
-version = "0.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
-
[[package]]
name = "mio"
version = "1.1.1"
@@ -2464,22 +1779,12 @@ version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
"cfg-if",
"cfg_aliases",
"libc",
]
-[[package]]
-name = "nom"
-version = "7.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
-dependencies = [
- "memchr",
- "minimal-lexical",
-]
-
[[package]]
name = "nu-ansi-term"
version = "0.50.3"
@@ -2551,58 +1856,12 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
-[[package]]
-name = "openraft"
-version = "0.9.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cc22bb6823c606299be05f3cc0d2ac30216412e05352eaf192a481c12ea055fc"
-dependencies = [
- "anyerror",
- "byte-unit",
- "chrono",
- "clap",
- "derive_more",
- "futures",
- "maplit",
- "openraft-macros",
- "rand 0.8.5",
- "serde",
- "thiserror 1.0.69",
- "tokio",
- "tracing",
- "tracing-futures",
- "validit",
-]
-
-[[package]]
-name = "openraft-macros"
-version = "0.9.21"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e8e5c7db6c8f2137b45a63096e09ac5a89177799b4bb0073915a5f41ee156651"
-dependencies = [
- "chrono",
- "proc-macro2",
- "quote",
- "semver",
- "syn 2.0.117",
-]
-
[[package]]
name = "openssl-probe"
version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
-[[package]]
-name = "ordered-multimap"
-version = "0.4.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ccd746e37177e1711c20dd619a1620f34f5c8b569c53590a72dedd5344d8924a"
-dependencies = [
- "dlv-list",
- "hashbrown 0.12.3",
-]
-
[[package]]
name = "parking"
version = "2.2.1"
@@ -2643,19 +1902,13 @@ dependencies = [
"subtle",
]
-[[package]]
-name = "pathdiff"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3"
-
[[package]]
name = "pem"
version = "3.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "38af38e8470ac9dee3ce1bae1af9c1671fffc44ddfd8bd1d0a3445bf349a8ef3"
dependencies = [
- "base64 0.22.1",
+ "base64",
"serde",
]
@@ -2665,49 +1918,6 @@ version = "2.3.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
-[[package]]
-name = "pest"
-version = "2.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e0848c601009d37dfa3430c4666e147e49cdcf1b92ecd3e63657d8a5f19da662"
-dependencies = [
- "memchr",
- "ucd-trie",
-]
-
-[[package]]
-name = "pest_derive"
-version = "2.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "11f486f1ea21e6c10ed15d5a7c77165d0ee443402f0780849d1768e7d9d6fe77"
-dependencies = [
- "pest",
- "pest_generator",
-]
-
-[[package]]
-name = "pest_generator"
-version = "2.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8040c4647b13b210a963c1ed407c1ff4fdfa01c31d6d2a098218702e6664f94f"
-dependencies = [
- "pest",
- "pest_meta",
- "proc-macro2",
- "quote",
- "syn 2.0.117",
-]
-
-[[package]]
-name = "pest_meta"
-version = "2.8.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "89815c69d36021a140146f26659a81d6c2afa33d216d736dd4be5381a7362220"
-dependencies = [
- "pest",
- "sha2",
-]
-
[[package]]
name = "petgraph"
version = "0.7.1"
@@ -2718,14 +1928,6 @@ dependencies = [
"indexmap 2.13.0",
]
-[[package]]
-name = "photon-auth-client"
-version = "0.1.0"
-dependencies = [
- "anyhow",
- "iam-service-auth",
-]
-
[[package]]
name = "pin-project"
version = "1.1.11"
@@ -2743,7 +1945,7 @@ checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2838,19 +2040,17 @@ dependencies = [
"axum 0.8.4",
"bytes",
"chainfire-client",
- "chainfire-server",
"chrono",
"clap",
- "creditservice-api",
"creditservice-client",
- "creditservice-proto",
"dashmap",
"flaredb-client",
- "flaredb-proto",
- "flaredb-server",
"iam-api",
+ "iam-authn",
+ "iam-authz",
"iam-client",
"iam-service-auth",
+ "iam-store",
"iam-types",
"lightningstor-api",
"metrics-exporter-prometheus",
@@ -2863,7 +2063,7 @@ dependencies = [
"prismnet-server",
"prismnet-types",
"prost",
- "reqwest 0.12.28",
+ "reqwest",
"serde",
"serde_json",
"sha2",
@@ -2871,7 +2071,7 @@ dependencies = [
"thiserror 1.0.69",
"tokio",
"tokio-stream",
- "toml 0.8.23",
+ "toml",
"tonic",
"tonic-health",
"tracing",
@@ -2907,16 +2107,6 @@ version = "1.13.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
-[[package]]
-name = "postcard"
-version = "1.1.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6764c3b5dd454e283a30e6dfe78e9b31096d9e32036b5d1eaac7a6119ccb9a24"
-dependencies = [
- "cobs",
- "serde",
-]
-
[[package]]
name = "potential_utf"
version = "0.1.4"
@@ -2948,7 +2138,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2984,7 +2174,7 @@ dependencies = [
"sqlx",
"thiserror 1.0.69",
"tokio",
- "toml 0.8.23",
+ "toml",
"tonic",
"tonic-health",
"tracing",
@@ -3000,15 +2190,6 @@ dependencies = [
"uuid",
]
-[[package]]
-name = "proc-macro-crate"
-version = "3.4.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983"
-dependencies = [
- "toml_edit 0.23.4",
-]
-
[[package]]
name = "proc-macro2"
version = "1.0.106"
@@ -3035,7 +2216,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf"
dependencies = [
"heck",
- "itertools 0.14.0",
+ "itertools",
"log",
"multimap",
"once_cell",
@@ -3044,7 +2225,7 @@ dependencies = [
"prost",
"prost-types",
"regex",
- "syn 2.0.117",
+ "syn",
"tempfile",
]
@@ -3055,10 +2236,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a56d757972c98b346a9b766e3f02746cde6dd1cd1d1d563472929fdd74bec4d"
dependencies = [
"anyhow",
- "itertools 0.14.0",
+ "itertools",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3134,26 +2315,6 @@ version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95067976aca6421a523e491fce939a3e65249bac4b977adee0ee9771568e8aa3"
-[[package]]
-name = "ptr_meta"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0738ccf7ea06b608c10564b31debd4f5bc5e197fc8bfe088f68ae5ce81e7a4f1"
-dependencies = [
- "ptr_meta_derive",
-]
-
-[[package]]
-name = "ptr_meta_derive"
-version = "0.1.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
[[package]]
name = "quanta"
version = "0.12.6"
@@ -3181,7 +2342,7 @@ dependencies = [
"quinn-proto",
"quinn-udp",
"rustc-hash",
- "rustls 0.23.37",
+ "rustls",
"socket2 0.6.3",
"thiserror 2.0.18",
"tokio",
@@ -3201,7 +2362,7 @@ dependencies = [
"rand 0.9.2",
"ring",
"rustc-hash",
- "rustls 0.23.37",
+ "rustls",
"rustls-pki-types",
"slab",
"thiserror 2.0.18",
@@ -3239,12 +2400,6 @@ version = "5.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f"
-[[package]]
-name = "radium"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09"
-
[[package]]
name = "rand"
version = "0.8.5"
@@ -3310,7 +2465,7 @@ version = "11.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
]
[[package]]
@@ -3319,7 +2474,7 @@ version = "0.5.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
]
[[package]]
@@ -3328,27 +2483,7 @@ version = "0.7.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16"
dependencies = [
- "bitflags 2.11.0",
-]
-
-[[package]]
-name = "ref-cast"
-version = "1.0.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f354300ae66f76f1c85c5f84693f0ce81d747e2c3f21a45fef496d89c960bf7d"
-dependencies = [
- "ref-cast-impl",
-]
-
-[[package]]
-name = "ref-cast-impl"
-version = "1.0.25"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.117",
+ "bitflags",
]
[[package]]
@@ -3380,84 +2515,34 @@ version = "0.8.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
-[[package]]
-name = "rend"
-version = "0.4.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c"
-dependencies = [
- "bytecheck",
-]
-
-[[package]]
-name = "reqwest"
-version = "0.11.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62"
-dependencies = [
- "base64 0.21.7",
- "bytes",
- "encoding_rs",
- "futures-core",
- "futures-util",
- "h2 0.3.27",
- "http 0.2.12",
- "http-body 0.4.6",
- "hyper 0.14.32",
- "hyper-rustls 0.24.2",
- "ipnet",
- "js-sys",
- "log",
- "mime",
- "once_cell",
- "percent-encoding",
- "pin-project-lite",
- "rustls 0.21.12",
- "rustls-pemfile 1.0.4",
- "serde",
- "serde_json",
- "serde_urlencoded",
- "sync_wrapper 0.1.2",
- "system-configuration",
- "tokio",
- "tokio-rustls 0.24.1",
- "tower-service",
- "url",
- "wasm-bindgen",
- "wasm-bindgen-futures",
- "web-sys",
- "webpki-roots 0.25.4",
- "winreg",
-]
-
[[package]]
name = "reqwest"
version = "0.12.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147"
dependencies = [
- "base64 0.22.1",
+ "base64",
"bytes",
"futures-core",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"http-body-util",
- "hyper 1.8.1",
- "hyper-rustls 0.27.7",
+ "hyper",
+ "hyper-rustls",
"hyper-util",
"js-sys",
"log",
"percent-encoding",
"pin-project-lite",
"quinn",
- "rustls 0.23.37",
+ "rustls",
"rustls-pki-types",
"serde",
"serde_json",
"serde_urlencoded",
- "sync_wrapper 1.0.2",
+ "sync_wrapper",
"tokio",
- "tokio-rustls 0.26.4",
+ "tokio-rustls",
"tower 0.5.3",
"tower-http",
"tower-service",
@@ -3482,82 +2567,6 @@ dependencies = [
"windows-sys 0.52.0",
]
-[[package]]
-name = "rkyv"
-version = "0.7.46"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2297bf9c81a3f0dc96bc9521370b88f054168c29826a75e89c55ff196e7ed6a1"
-dependencies = [
- "bitvec",
- "bytecheck",
- "bytes",
- "hashbrown 0.12.3",
- "ptr_meta",
- "rend",
- "rkyv_derive",
- "seahash",
- "tinyvec",
- "uuid",
-]
-
-[[package]]
-name = "rkyv_derive"
-version = "0.7.46"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "84d7b42d4b8d06048d3ac8db0eb31bcb942cbeb709f0b5f2b2ebde398d3038f5"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
-]
-
-[[package]]
-name = "rocksdb"
-version = "0.24.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ddb7af00d2b17dbd07d82c0063e25411959748ff03e8d4f96134c2ff41fce34f"
-dependencies = [
- "libc",
- "librocksdb-sys",
-]
-
-[[package]]
-name = "ron"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "88073939a61e5b7680558e6be56b419e208420c2adb92be54921fa6b72283f1a"
-dependencies = [
- "base64 0.13.1",
- "bitflags 1.3.2",
- "serde",
-]
-
-[[package]]
-name = "rust-ini"
-version = "0.18.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f6d5f2436026b4f6e79dc829837d467cc7e9a55ee40e750d716713540715a2df"
-dependencies = [
- "cfg-if",
- "ordered-multimap",
-]
-
-[[package]]
-name = "rust_decimal"
-version = "1.40.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0"
-dependencies = [
- "arrayvec",
- "borsh",
- "bytes",
- "num-traits",
- "rand 0.8.5",
- "rkyv",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "rustc-hash"
version = "2.1.1"
@@ -3570,25 +2579,13 @@ version = "1.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.61.2",
]
-[[package]]
-name = "rustls"
-version = "0.21.12"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e"
-dependencies = [
- "log",
- "ring",
- "rustls-webpki 0.101.7",
- "sct",
-]
-
[[package]]
name = "rustls"
version = "0.23.37"
@@ -3600,7 +2597,7 @@ dependencies = [
"once_cell",
"ring",
"rustls-pki-types",
- "rustls-webpki 0.103.10",
+ "rustls-webpki",
"subtle",
"zeroize",
]
@@ -3617,15 +2614,6 @@ dependencies = [
"security-framework",
]
-[[package]]
-name = "rustls-pemfile"
-version = "1.0.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c"
-dependencies = [
- "base64 0.21.7",
-]
-
[[package]]
name = "rustls-pemfile"
version = "2.2.0"
@@ -3645,16 +2633,6 @@ dependencies = [
"zeroize",
]
-[[package]]
-name = "rustls-webpki"
-version = "0.101.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765"
-dependencies = [
- "ring",
- "untrusted",
-]
-
[[package]]
name = "rustls-webpki"
version = "0.103.10"
@@ -3688,48 +2666,20 @@ dependencies = [
"windows-sys 0.61.2",
]
-[[package]]
-name = "schemars"
-version = "1.2.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a2b42f36aa1cd011945615b92222f6bf73c599a102a300334cd7f8dbeec726cc"
-dependencies = [
- "dyn-clone",
- "ref-cast",
- "serde",
- "serde_json",
-]
-
[[package]]
name = "scopeguard"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49"
-[[package]]
-name = "sct"
-version = "0.7.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414"
-dependencies = [
- "ring",
- "untrusted",
-]
-
-[[package]]
-name = "seahash"
-version = "4.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1c107b6f4780854c8b126e228ea8869f4d7b71260f962fefb57b996b8959ba6b"
-
[[package]]
name = "security-framework"
version = "3.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d"
dependencies = [
- "bitflags 2.11.0",
- "core-foundation 0.10.1",
+ "bitflags",
+ "core-foundation",
"core-foundation-sys",
"libc",
"security-framework-sys",
@@ -3745,12 +2695,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "semver"
-version = "1.0.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
-
[[package]]
name = "serde"
version = "1.0.219"
@@ -3768,7 +2712,7 @@ checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3850,12 +2794,6 @@ dependencies = [
"libc",
]
-[[package]]
-name = "simdutf8"
-version = "0.1.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e"
-
[[package]]
name = "simple_asn1"
version = "0.6.3"
@@ -3918,15 +2856,6 @@ dependencies = [
"lock_api",
]
-[[package]]
-name = "sqlparser"
-version = "0.39.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "743b4dc2cbde11890ccb254a8fc9d537fa41b36da00de2a1c5e9848c9bc42bd7"
-dependencies = [
- "log",
-]
-
[[package]]
name = "sqlx"
version = "0.8.6"
@@ -3945,7 +2874,7 @@ version = "0.8.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ee6798b1838b6a0f69c007c133b8df5866302197e404e8b6ee8ed3e3a5e68dc6"
dependencies = [
- "base64 0.22.1",
+ "base64",
"bytes",
"crc",
"crossbeam-queue",
@@ -3962,7 +2891,7 @@ dependencies = [
"memchr",
"once_cell",
"percent-encoding",
- "rustls 0.23.37",
+ "rustls",
"serde",
"serde_json",
"sha2",
@@ -3985,7 +2914,7 @@ dependencies = [
"quote",
"sqlx-core",
"sqlx-macros-core",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4007,7 +2936,7 @@ dependencies = [
"sqlx-core",
"sqlx-postgres",
"sqlx-sqlite",
- "syn 2.0.117",
+ "syn",
"tokio",
"url",
]
@@ -4019,8 +2948,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "db58fcd5a53cf07c184b154801ff91347e4c30d17a3562a635ff028ad5deda46"
dependencies = [
"atoi",
- "base64 0.22.1",
- "bitflags 2.11.0",
+ "base64",
+ "bitflags",
"byteorder",
"crc",
"dotenvy",
@@ -4102,17 +3031,6 @@ version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
-[[package]]
-name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
[[package]]
name = "syn"
version = "2.0.117"
@@ -4124,12 +3042,6 @@ dependencies = [
"unicode-ident",
]
-[[package]]
-name = "sync_wrapper"
-version = "0.1.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160"
-
[[package]]
name = "sync_wrapper"
version = "1.0.2"
@@ -4147,36 +3059,9 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
-[[package]]
-name = "system-configuration"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7"
-dependencies = [
- "bitflags 1.3.2",
- "core-foundation 0.9.4",
- "system-configuration-sys",
-]
-
-[[package]]
-name = "system-configuration-sys"
-version = "0.5.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a75fb188eb626b924683e3b95e3a48e63551fcfb51949de2f06a9d91dbee93c9"
-dependencies = [
- "core-foundation-sys",
- "libc",
-]
-
-[[package]]
-name = "tap"
-version = "1.0.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369"
-
[[package]]
name = "tempfile"
version = "3.27.0"
@@ -4216,7 +3101,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4227,7 +3112,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4320,17 +3205,7 @@ checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
-]
-
-[[package]]
-name = "tokio-rustls"
-version = "0.24.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c28327cf380ac148141087fbfb9de9d7bd4e84ab5d2c28fbc911d753de8a7081"
-dependencies = [
- "rustls 0.21.12",
- "tokio",
+ "syn",
]
[[package]]
@@ -4339,7 +3214,7 @@ version = "0.26.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61"
dependencies = [
- "rustls 0.23.37",
+ "rustls",
"tokio",
]
@@ -4367,15 +3242,6 @@ dependencies = [
"tokio",
]
-[[package]]
-name = "toml"
-version = "0.5.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "toml"
version = "0.8.23"
@@ -4384,8 +3250,8 @@ checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
dependencies = [
"serde",
"serde_spanned",
- "toml_datetime 0.6.11",
- "toml_edit 0.22.27",
+ "toml_datetime",
+ "toml_edit",
]
[[package]]
@@ -4397,15 +3263,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "toml_datetime"
-version = "0.7.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bade1c3e902f58d73d3f294cd7f20391c1cb2fbcb643b73566bc773971df91e3"
-dependencies = [
- "serde",
-]
-
[[package]]
name = "toml_edit"
version = "0.22.27"
@@ -4415,30 +3272,9 @@ dependencies = [
"indexmap 2.13.0",
"serde",
"serde_spanned",
- "toml_datetime 0.6.11",
+ "toml_datetime",
"toml_write",
- "winnow 0.7.15",
-]
-
-[[package]]
-name = "toml_edit"
-version = "0.23.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7211ff1b8f0d3adae1663b7da9ffe396eabe1ca25f0b0bee42b0da29a9ddce93"
-dependencies = [
- "indexmap 2.13.0",
- "toml_datetime 0.7.0",
- "toml_parser",
- "winnow 0.7.15",
-]
-
-[[package]]
-name = "toml_parser"
-version = "1.0.10+spec-1.1.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
-dependencies = [
- "winnow 1.0.0",
+ "winnow",
]
[[package]]
@@ -4456,23 +3292,23 @@ dependencies = [
"async-stream",
"async-trait",
"axum 0.7.9",
- "base64 0.22.1",
+ "base64",
"bytes",
- "h2 0.4.13",
- "http 1.4.0",
- "http-body 1.0.1",
+ "h2",
+ "http",
+ "http-body",
"http-body-util",
- "hyper 1.8.1",
+ "hyper",
"hyper-timeout",
"hyper-util",
"percent-encoding",
"pin-project",
"prost",
"rustls-native-certs",
- "rustls-pemfile 2.2.0",
+ "rustls-pemfile",
"socket2 0.5.10",
"tokio",
- "tokio-rustls 0.26.4",
+ "tokio-rustls",
"tokio-stream",
"tower 0.4.13",
"tower-layer",
@@ -4491,7 +3327,7 @@ dependencies = [
"prost-build",
"prost-types",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4536,7 +3372,7 @@ dependencies = [
"futures-core",
"futures-util",
"pin-project-lite",
- "sync_wrapper 1.0.2",
+ "sync_wrapper",
"tokio",
"tower-layer",
"tower-service",
@@ -4549,17 +3385,16 @@ version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
dependencies = [
- "bitflags 2.11.0",
+ "bitflags",
"bytes",
"futures-util",
- "http 1.4.0",
- "http-body 1.0.1",
+ "http",
+ "http-body",
"iri-string",
"pin-project-lite",
"tower 0.5.3",
"tower-layer",
"tower-service",
- "tracing",
]
[[package]]
@@ -4594,7 +3429,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4607,16 +3442,6 @@ dependencies = [
"valuable",
]
-[[package]]
-name = "tracing-futures"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "97d095ae15e245a057c8e8451bab9b3ee1e1f68e9ba2b4fbc18d0ac5237835f2"
-dependencies = [
- "pin-project",
- "tracing",
-]
-
[[package]]
name = "tracing-log"
version = "0.2.0"
@@ -4658,12 +3483,6 @@ version = "1.19.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb"
-[[package]]
-name = "ucd-trie"
-version = "0.1.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2896d95c02a80c6d6a5d6e953d479f5ddf2dfdb6a244441010e373ac0fb88971"
-
[[package]]
name = "unicode-bidi"
version = "0.3.18"
@@ -4691,12 +3510,6 @@ version = "0.1.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7df058c713841ad818f1dc5d3fd88063241cc61f49f5fbea4b951e8cf5a8d71d"
-[[package]]
-name = "unicode-xid"
-version = "0.2.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853"
-
[[package]]
name = "universal-hash"
version = "0.5.1"
@@ -4725,12 +3538,6 @@ dependencies = [
"serde",
]
-[[package]]
-name = "utf8-width"
-version = "0.1.8"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1292c0d970b54115d14f2492fe0170adf21d68a1de108eebc51c1df4f346a091"
-
[[package]]
name = "utf8_iter"
version = "1.0.4"
@@ -4755,15 +3562,6 @@ dependencies = [
"wasm-bindgen",
]
-[[package]]
-name = "validit"
-version = "0.2.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "4efba0434d5a0a62d4f22070b44ce055dc18cb64d4fa98276aa523dadfaba0e7"
-dependencies = [
- "anyerror",
-]
-
[[package]]
name = "valuable"
version = "0.1.1"
@@ -4858,7 +3656,7 @@ dependencies = [
"bumpalo",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
"wasm-bindgen-shared",
]
@@ -4891,12 +3689,6 @@ dependencies = [
"wasm-bindgen",
]
-[[package]]
-name = "webpki-roots"
-version = "0.25.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1"
-
[[package]]
name = "webpki-roots"
version = "0.26.11"
@@ -4968,7 +3760,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -4979,7 +3771,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -5237,22 +4029,6 @@ dependencies = [
"memchr",
]
-[[package]]
-name = "winnow"
-version = "1.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
-
-[[package]]
-name = "winreg"
-version = "0.50.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
-dependencies = [
- "cfg-if",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "wit-bindgen"
version = "0.51.0"
@@ -5265,24 +4041,6 @@ version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
-[[package]]
-name = "wyz"
-version = "0.5.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "05f360fc0b24296329c78fda852a1e9ae82de9cf7b27dae4b7f62f118f77b9ed"
-dependencies = [
- "tap",
-]
-
-[[package]]
-name = "yaml-rust"
-version = "0.4.5"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85"
-dependencies = [
- "linked-hash-map",
-]
-
[[package]]
name = "yoke"
version = "0.8.1"
@@ -5302,7 +4060,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
"synstructure",
]
@@ -5323,7 +4081,7 @@ checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -5343,7 +4101,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
"synstructure",
]
@@ -5383,15 +4141,5 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
-]
-
-[[package]]
-name = "zstd-sys"
-version = "2.0.16+zstd.1.5.7"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748"
-dependencies = [
- "cc",
- "pkg-config",
+ "syn",
]
diff --git a/plasmavmc/crates/plasmavmc-kvm/src/env.rs b/plasmavmc/crates/plasmavmc-kvm/src/env.rs
index 16726d9..f30d6ca 100644
--- a/plasmavmc/crates/plasmavmc-kvm/src/env.rs
+++ b/plasmavmc/crates/plasmavmc-kvm/src/env.rs
@@ -10,7 +10,6 @@ pub const ENV_INITRD_PATH: &str = "PLASMAVMC_INITRD_PATH";
pub const ENV_RUNTIME_DIR: &str = "PLASMAVMC_RUNTIME_DIR";
pub const ENV_QMP_TIMEOUT_SECS: &str = "PLASMAVMC_QMP_TIMEOUT_SECS";
pub const ENV_NBD_MAX_QUEUES: &str = "PLASMAVMC_NBD_MAX_QUEUES";
-pub const ENV_NBD_AIO_MODE: &str = "PLASMAVMC_NBD_AIO_MODE";
/// Resolve QEMU binary path, falling back to a provided default.
pub fn resolve_qemu_path(default: impl AsRef) -> PathBuf {
@@ -55,15 +54,6 @@ pub fn resolve_nbd_max_queues() -> u16 {
.unwrap_or(16)
}
-pub fn resolve_nbd_aio_mode() -> &'static str {
- match std::env::var(ENV_NBD_AIO_MODE).ok().as_deref() {
- Some("threads") => "threads",
- Some("native") => "native",
- Some("io_uring") => "io_uring",
- _ => "io_uring",
- }
-}
-
#[cfg(test)]
pub(crate) fn env_test_lock() -> &'static Mutex<()> {
static LOCK: OnceLock> = OnceLock::new();
@@ -161,29 +151,4 @@ mod tests {
assert_eq!(resolve_nbd_max_queues(), 12);
std::env::remove_var(ENV_NBD_MAX_QUEUES);
}
-
- #[test]
- fn resolve_nbd_aio_mode_defaults_to_io_uring() {
- let _guard = env_test_lock().lock().unwrap();
- std::env::remove_var(ENV_NBD_AIO_MODE);
- assert_eq!(resolve_nbd_aio_mode(), "io_uring");
- }
-
- #[test]
- fn resolve_nbd_aio_mode_accepts_supported_modes() {
- let _guard = env_test_lock().lock().unwrap();
- for mode in ["threads", "native", "io_uring"] {
- std::env::set_var(ENV_NBD_AIO_MODE, mode);
- assert_eq!(resolve_nbd_aio_mode(), mode);
- }
- std::env::remove_var(ENV_NBD_AIO_MODE);
- }
-
- #[test]
- fn resolve_nbd_aio_mode_falls_back_for_invalid_values() {
- let _guard = env_test_lock().lock().unwrap();
- std::env::set_var(ENV_NBD_AIO_MODE, "bogus");
- assert_eq!(resolve_nbd_aio_mode(), "io_uring");
- std::env::remove_var(ENV_NBD_AIO_MODE);
- }
}
diff --git a/plasmavmc/crates/plasmavmc-kvm/src/lib.rs b/plasmavmc/crates/plasmavmc-kvm/src/lib.rs
index 40d4d02..5618643 100644
--- a/plasmavmc/crates/plasmavmc-kvm/src/lib.rs
+++ b/plasmavmc/crates/plasmavmc-kvm/src/lib.rs
@@ -4,12 +4,17 @@
//! It uses QEMU with KVM acceleration to run virtual machines.
mod env;
+mod network;
mod qmp;
use async_trait::async_trait;
use env::{
- resolve_kernel_initrd, resolve_nbd_aio_mode, resolve_nbd_max_queues, resolve_qcow2_path,
- resolve_qemu_path, resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH,
+ resolve_kernel_initrd, resolve_nbd_max_queues, resolve_qcow2_path, resolve_qemu_path,
+ resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH,
+};
+use network::{
+ cleanup_vm_networks, decode_network_states, encode_network_states, ensure_vm_networks,
+ tap_name_for_nic, NETWORK_STATE_KEY,
};
use nix::sys::signal::{kill as nix_kill, Signal};
use nix::unistd::Pid;
@@ -76,7 +81,8 @@ fn disk_aio_mode(disk: &AttachedDisk) -> Option<&'static str> {
match (&disk.attachment, disk.cache) {
(DiskAttachment::File { .. }, DiskCache::None) => Some("native"),
(DiskAttachment::File { .. }, _) => Some("threads"),
- (DiskAttachment::Nbd { .. }, _) => Some(resolve_nbd_aio_mode()),
+ // QEMU's NBD blockdev backend does not accept an `aio` parameter.
+ (DiskAttachment::Nbd { .. }, _) => None,
(DiskAttachment::CephRbd { .. }, _) => None,
}
}
@@ -118,6 +124,23 @@ fn bootindex_suffix(boot_index: Option) -> String {
.unwrap_or_default()
}
+fn nic_device_driver(model: NicModel) -> &'static str {
+ match model {
+ NicModel::VirtioNet => "virtio-net-pci",
+ NicModel::E1000 => "e1000",
+ }
+}
+
+fn nic_device_component(nic: &NetworkSpec, fallback_index: usize) -> String {
+ sanitize_device_component(
+ nic.port_id
+ .as_deref()
+ .filter(|value| !value.is_empty())
+ .unwrap_or(&nic.id),
+ fallback_index,
+ )
+}
+
fn qmp_timeout() -> Duration {
Duration::from_secs(resolve_qmp_timeout_secs())
}
@@ -434,6 +457,7 @@ fn build_qemu_args(
"-S".into(),
];
args.extend(build_disk_args(vm, disks)?);
+ args.extend(build_network_args(vm));
if let Some(kernel) = kernel {
args.push("-kernel".into());
@@ -449,6 +473,31 @@ fn build_qemu_args(
Ok(args)
}
+fn build_network_args(vm: &VirtualMachine) -> Vec {
+ let mut args = Vec::new();
+ for (index, nic) in vm.spec.network.iter().enumerate() {
+ let device_id = nic_device_component(nic, index);
+ let tap_name = tap_name_for_nic(nic);
+ args.push("-netdev".into());
+ args.push(format!(
+ "tap,id=netdev-{id},ifname={tap},script=no,downscript=no",
+ id = device_id,
+ tap = tap_name
+ ));
+ args.push("-device".into());
+ let mut device = format!(
+ "{driver},id=net-{id},netdev=netdev-{id}",
+ driver = nic_device_driver(nic.model),
+ id = device_id
+ );
+ if let Some(mac) = nic.mac_address.as_deref() {
+ device.push_str(&format!(",mac={mac}"));
+ }
+ args.push(device);
+ }
+ args
+}
+
/// Build QEMU args for an incoming migration listener.
fn build_qemu_args_incoming(
vm: &VirtualMachine,
@@ -568,6 +617,11 @@ impl HypervisorBackend for KvmBackend {
tokio::fs::create_dir_all(&runtime_dir)
.await
.map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?;
+ tokio::fs::create_dir_all(&self.runtime_dir)
+ .await
+ .map_err(|e| {
+ Error::HypervisorError(format!("Failed to create backend runtime root: {e}"))
+ })?;
let qmp_socket = runtime_dir.join("qmp.sock");
let console_log = runtime_dir.join("console.log");
@@ -575,16 +629,23 @@ impl HypervisorBackend for KvmBackend {
let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = tokio::fs::remove_file(&console_log).await;
let qemu_bin = resolve_qemu_path(&self.qemu_path);
+ let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?;
let (kernel_path, initrd_path) = resolve_kernel_initrd();
- let args = build_qemu_args(
+ let args = match build_qemu_args(
vm,
disks,
&qmp_socket,
&console_log,
kernel_path.as_deref(),
initrd_path.as_deref(),
- )?;
+ ) {
+ Ok(args) => args,
+ Err(error) => {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(error);
+ }
+ };
let mut cmd = Command::new(&qemu_bin);
cmd.args(&args);
@@ -597,9 +658,15 @@ impl HypervisorBackend for KvmBackend {
"Spawning KVM QEMU"
);
- let mut child = cmd
- .spawn()
- .map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?;
+ let mut child = match cmd.spawn() {
+ Ok(child) => child,
+ Err(error) => {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(Error::HypervisorError(format!(
+ "Failed to spawn QEMU: {error}"
+ )));
+ }
+ };
let pid = child.id().map(|p| p);
// Wait for QMP readiness before detaching so slow nested workers do not leave orphans.
@@ -614,6 +681,7 @@ impl HypervisorBackend for KvmBackend {
let _ = child.start_kill();
let _ = child.wait().await;
let _ = tokio::fs::remove_file(&qmp_socket).await;
+ let _ = cleanup_vm_networks(&network_states).await;
return Err(err);
}
@@ -629,6 +697,10 @@ impl HypervisorBackend for KvmBackend {
handle
.backend_state
.insert("console_log".into(), console_log.display().to_string());
+ handle.backend_state.insert(
+ NETWORK_STATE_KEY.into(),
+ encode_network_states(&network_states)?,
+ );
handle.pid = pid;
handle.attached_disks = disks.to_vec();
@@ -789,15 +861,21 @@ impl HypervisorBackend for KvmBackend {
tokio::fs::create_dir_all(&runtime_dir)
.await
.map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?;
+ tokio::fs::create_dir_all(&self.runtime_dir)
+ .await
+ .map_err(|e| {
+ Error::HypervisorError(format!("Failed to create backend runtime root: {e}"))
+ })?;
let qmp_socket = runtime_dir.join("qmp.sock");
let console_log = runtime_dir.join("console.log");
let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = tokio::fs::remove_file(&console_log).await;
let qemu_bin = resolve_qemu_path(&self.qemu_path);
+ let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?;
let (kernel_path, initrd_path) = resolve_kernel_initrd();
- let args = build_qemu_args_incoming(
+ let args = match build_qemu_args_incoming(
vm,
disks,
&qmp_socket,
@@ -805,7 +883,13 @@ impl HypervisorBackend for KvmBackend {
kernel_path.as_deref(),
initrd_path.as_deref(),
listen_uri,
- )?;
+ ) {
+ Ok(args) => args,
+ Err(error) => {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(error);
+ }
+ };
let mut cmd = Command::new(&qemu_bin);
cmd.args(&args);
@@ -818,9 +902,15 @@ impl HypervisorBackend for KvmBackend {
"Spawning QEMU for incoming migration"
);
- let mut child = cmd
- .spawn()
- .map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?;
+ let mut child = match cmd.spawn() {
+ Ok(child) => child,
+ Err(error) => {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(Error::HypervisorError(format!(
+ "Failed to spawn QEMU: {error}"
+ )));
+ }
+ };
let pid = child.id().map(|p| p);
if let Err(err) = wait_for_qmp(&qmp_socket, qmp_timeout()).await {
@@ -834,6 +924,7 @@ impl HypervisorBackend for KvmBackend {
let _ = child.start_kill();
let _ = child.wait().await;
let _ = tokio::fs::remove_file(&qmp_socket).await;
+ let _ = cleanup_vm_networks(&network_states).await;
return Err(err);
}
@@ -848,6 +939,10 @@ impl HypervisorBackend for KvmBackend {
handle
.backend_state
.insert("console_log".into(), console_log.display().to_string());
+ handle.backend_state.insert(
+ NETWORK_STATE_KEY.into(),
+ encode_network_states(&network_states)?,
+ );
handle.pid = pid;
handle.attached_disks = disks.to_vec();
@@ -913,6 +1008,7 @@ impl HypervisorBackend for KvmBackend {
async fn delete(&self, handle: &VmHandle) -> Result<()> {
tracing::info!(vm_id = %handle.vm_id, "Deleting VM resources");
+ let network_states = decode_network_states(handle.backend_state.get(NETWORK_STATE_KEY))?;
if handle.pid.is_some() || self.qmp_socket_path(handle).exists() {
let _ = self.kill(handle).await;
@@ -940,6 +1036,7 @@ impl HypervisorBackend for KvmBackend {
Error::HypervisorError(format!("Failed to remove runtime dir: {e}"))
})?;
}
+ cleanup_vm_networks(&network_states).await?;
tracing::info!(vm_id = %handle.vm_id, "Deleted VM resources");
@@ -1054,6 +1151,10 @@ impl HypervisorBackend for KvmBackend {
let qmp_socket = self.qmp_socket_path(handle);
wait_for_qmp(&qmp_socket, qmp_timeout()).await?;
let mut client = QmpClient::connect(&qmp_socket).await?;
+ let network_states =
+ ensure_vm_networks(&self.runtime_dir, std::slice::from_ref(nic)).await?;
+ let tap_name = tap_name_for_nic(nic);
+ let device_id = nic_device_component(nic, 0);
// Generate MAC address if not provided
let mac_addr = nic
@@ -1068,23 +1169,29 @@ impl HypervisorBackend for KvmBackend {
// Step 1: Add network backend via netdev_add
let netdev_args = serde_json::json!({
"type": "tap",
- "id": format!("netdev-{}", nic.id),
- "ifname": format!("tap-{}", nic.id),
+ "id": format!("netdev-{}", device_id),
+ "ifname": tap_name,
"script": "no",
"downscript": "no"
});
- client.command("netdev_add", Some(netdev_args)).await?;
+ if let Err(error) = client.command("netdev_add", Some(netdev_args)).await {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(error);
+ }
// Step 2: Add virtio-net-pci frontend device
let device_args = serde_json::json!({
- "driver": "virtio-net-pci",
- "id": format!("net-{}", nic.id),
- "netdev": format!("netdev-{}", nic.id),
+ "driver": nic_device_driver(nic.model),
+ "id": format!("net-{}", device_id),
+ "netdev": format!("netdev-{}", device_id),
"mac": mac_addr
});
- client.command("device_add", Some(device_args)).await?;
+ if let Err(error) = client.command("device_add", Some(device_args)).await {
+ let _ = cleanup_vm_networks(&network_states).await;
+ return Err(error);
+ }
tracing::info!(
vm_id = %handle.vm_id,
@@ -1106,6 +1213,7 @@ impl HypervisorBackend for KvmBackend {
let qmp_socket = self.qmp_socket_path(handle);
wait_for_qmp(&qmp_socket, qmp_timeout()).await?;
let mut client = QmpClient::connect(&qmp_socket).await?;
+ let nic_id = sanitize_device_component(nic_id, 0);
// Remove the virtio-net-pci device (netdev backend will be cleaned up automatically)
let device_args = serde_json::json!({
@@ -1281,8 +1389,6 @@ mod tests {
#[test]
fn build_qemu_args_coerces_writeback_cache_to_none_for_nbd_disks() {
- let _guard = crate::env::env_test_lock().lock().unwrap();
- std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
let disks = vec![AttachedDisk {
id: "root".into(),
@@ -1300,13 +1406,11 @@ mod tests {
let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
let args_joined = args.join(" ");
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
- assert!(args_joined.contains("\"aio\":\"io_uring\""));
+ assert!(!args_joined.contains("\"aio\":"));
}
#[test]
- fn build_qemu_args_uses_io_uring_for_nbd_none_cache_by_default() {
- let _guard = crate::env::env_test_lock().lock().unwrap();
- std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
+ fn build_qemu_args_does_not_set_aio_for_nbd_disks() {
let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
let disks = vec![AttachedDisk {
id: "root".into(),
@@ -1324,32 +1428,7 @@ mod tests {
let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
let args_joined = args.join(" ");
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
- assert!(args_joined.contains("\"aio\":\"io_uring\""));
- }
-
- #[test]
- fn build_qemu_args_honors_nbd_aio_override() {
- let _guard = crate::env::env_test_lock().lock().unwrap();
- std::env::set_var(crate::env::ENV_NBD_AIO_MODE, "threads");
- let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
- let disks = vec![AttachedDisk {
- id: "root".into(),
- attachment: DiskAttachment::Nbd {
- uri: "nbd://10.100.0.11:11000".into(),
- format: VolumeFormat::Raw,
- },
- bus: DiskBus::Virtio,
- cache: DiskCache::None,
- boot_index: Some(1),
- read_only: false,
- }];
- let qmp = PathBuf::from("/tmp/qmp.sock");
- let console = PathBuf::from("/tmp/console.log");
- let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
- let args_joined = args.join(" ");
- assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
- assert!(args_joined.contains("\"aio\":\"threads\""));
- std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
+ assert!(!args_joined.contains("\"aio\":"));
}
#[test]
diff --git a/plasmavmc/crates/plasmavmc-kvm/src/network.rs b/plasmavmc/crates/plasmavmc-kvm/src/network.rs
new file mode 100644
index 0000000..f7f26b0
--- /dev/null
+++ b/plasmavmc/crates/plasmavmc-kvm/src/network.rs
@@ -0,0 +1,678 @@
+use nix::sys::signal::{kill as nix_kill, Signal};
+use nix::unistd::Pid;
+use plasmavmc_types::{Error, NetworkSpec, Result};
+use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
+use std::net::Ipv4Addr;
+use std::os::unix::fs::MetadataExt;
+use std::path::{Path, PathBuf};
+use tokio::fs;
+use tokio::process::Command;
+use tokio::time::{sleep, Duration, Instant};
+
+pub const NETWORK_STATE_KEY: &str = "network_state";
+const DNSMASQ_START_TIMEOUT: Duration = Duration::from_secs(5);
+
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
+pub struct NetworkRuntimeState {
+ pub nic_id: String,
+ pub subnet_id: String,
+ pub port_id: String,
+ pub bridge_name: String,
+ pub tap_name: String,
+ pub mac_address: String,
+ pub ip_address: String,
+ pub cidr_block: String,
+ pub gateway_ip: String,
+ pub dhcp_enabled: bool,
+ pub network_dir: String,
+ pub dnsmasq_conf: String,
+ pub hosts_file: String,
+ pub lease_file: String,
+ pub pid_file: String,
+ pub host_alias: String,
+}
+
+#[derive(Debug, Clone)]
+struct NicDataplaneConfig {
+ state: NetworkRuntimeState,
+ gateway_prefix: String,
+ dhcp_range_start: String,
+ dhcp_range_end: String,
+}
+
+pub fn tap_name_for_nic(nic: &NetworkSpec) -> String {
+ let seed = nic
+ .port_id
+ .as_deref()
+ .or(nic.subnet_id.as_deref())
+ .unwrap_or(&nic.id);
+ interface_name("pct", seed)
+}
+
+pub fn encode_network_states(states: &[NetworkRuntimeState]) -> Result<String> {
+ serde_json::to_string(states)
+ .map_err(|error| Error::HypervisorError(format!("failed to encode network state: {error}")))
+}
+
+pub fn decode_network_states(serialized: Option<&String>) -> Result<Vec<NetworkRuntimeState>> {
+ match serialized {
+ Some(value) if !value.trim().is_empty() => serde_json::from_str(value).map_err(|error| {
+ Error::HypervisorError(format!("failed to decode network state: {error}"))
+ }),
+ _ => Ok(Vec::new()),
+ }
+}
+
+pub async fn ensure_vm_networks(
+ runtime_root: &Path,
+ nics: &[NetworkSpec],
+) -> Result<Vec<NetworkRuntimeState>> {
+ let mut states = Vec::with_capacity(nics.len());
+ for nic in nics {
+ let config = dataplane_config(runtime_root, nic)?;
+ states.push(config.state.clone());
+ if let Err(error) = ensure_bridge(&config).await {
+ let _ = cleanup_vm_networks(&states).await;
+ return Err(error);
+ }
+ if let Err(error) = ensure_dnsmasq(&config).await {
+ let _ = cleanup_vm_networks(&states).await;
+ return Err(error);
+ }
+ if let Err(error) = ensure_tap(runtime_root, &config).await {
+ let _ = cleanup_vm_networks(&states).await;
+ return Err(error);
+ }
+ }
+ Ok(states)
+}
+
+pub async fn cleanup_vm_networks(states: &[NetworkRuntimeState]) -> Result<()> {
+ let mut errors = Vec::new();
+ let mut seen_bridges = HashSet::new();
+
+ for state in states.iter().rev() {
+ if let Err(error) = delete_interface_if_present(&state.tap_name).await {
+ errors.push(error.to_string());
+ }
+
+ if let Err(error) = remove_host_entry(state).await {
+ errors.push(error.to_string());
+ }
+
+ if !seen_bridges.insert(state.bridge_name.clone()) {
+ continue;
+ }
+
+ match bridge_has_hosts(state).await {
+ Ok(true) => {
+ if let Err(error) = reload_dnsmasq(state).await {
+ errors.push(error.to_string());
+ }
+ }
+ Ok(false) => {
+ if let Err(error) = stop_dnsmasq(state).await {
+ errors.push(error.to_string());
+ }
+ if let Err(error) = delete_interface_if_present(&state.bridge_name).await {
+ errors.push(error.to_string());
+ }
+ let _ = fs::remove_dir_all(&state.network_dir).await;
+ }
+ Err(error) => errors.push(error.to_string()),
+ }
+ }
+
+ if errors.is_empty() {
+ Ok(())
+ } else {
+ Err(Error::HypervisorError(format!(
+ "network cleanup failed: {}",
+ errors.join("; ")
+ )))
+ }
+}
+
+fn dataplane_config(runtime_root: &Path, nic: &NetworkSpec) -> Result<NicDataplaneConfig> {
+ let subnet_id = nic
+ .subnet_id
+ .clone()
+ .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires subnet_id".into()))?;
+ let port_id = nic.port_id.clone().unwrap_or_else(|| nic.id.clone());
+ let mac_address = nic
+ .mac_address
+ .clone()
+ .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires mac_address".into()))?;
+ let ip_address = nic
+ .ip_address
+ .clone()
+ .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires ip_address".into()))?;
+ let cidr_block = nic
+ .cidr_block
+ .clone()
+ .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires cidr_block".into()))?;
+ let gateway_ip = nic
+ .gateway_ip
+ .clone()
+ .ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires gateway_ip".into()))?;
+
+ let (cidr_ip, prefix) = parse_ipv4_cidr(&cidr_block)?;
+ let gateway = parse_ipv4(&gateway_ip, "gateway_ip")?;
+ if !cidr_contains_ip(cidr_ip, prefix, gateway) {
+ return Err(Error::HypervisorError(format!(
+ "gateway {gateway_ip} is outside subnet {cidr_block}"
+ )));
+ }
+ let (dhcp_range_start, dhcp_range_end) = dhcp_range(cidr_ip, prefix, gateway)?;
+ let bridge_name = interface_name("pcbr", &subnet_id);
+ let tap_name = tap_name_for_nic(nic);
+ let network_dir = runtime_root.join("networks").join(&subnet_id);
+ let host_alias = format!("port-{}", compact_id(&port_id, 12));
+ let state = NetworkRuntimeState {
+ nic_id: nic.id.clone(),
+ subnet_id,
+ port_id,
+ bridge_name,
+ tap_name,
+ mac_address,
+ ip_address,
+ cidr_block,
+ gateway_ip: gateway_ip.clone(),
+ dhcp_enabled: nic.dhcp_enabled,
+ network_dir: network_dir.display().to_string(),
+ dnsmasq_conf: network_dir.join("dnsmasq.conf").display().to_string(),
+ hosts_file: network_dir.join("hosts").display().to_string(),
+ lease_file: network_dir.join("leases").display().to_string(),
+ pid_file: network_dir.join("dnsmasq.pid").display().to_string(),
+ host_alias,
+ };
+
+ Ok(NicDataplaneConfig {
+ gateway_prefix: format!("{gateway_ip}/{prefix}"),
+ dhcp_range_start,
+ dhcp_range_end,
+ state,
+ })
+}
+
+async fn ensure_bridge(config: &NicDataplaneConfig) -> Result<()> {
+ if !link_exists(&config.state.bridge_name).await? {
+ run_command(
+ "ip",
+ [
+ "link",
+ "add",
+ "name",
+ config.state.bridge_name.as_str(),
+ "type",
+ "bridge",
+ ],
+ )
+ .await?;
+ }
+
+ run_command(
+ "ip",
+ [
+ "addr",
+ "replace",
+ config.gateway_prefix.as_str(),
+ "dev",
+ config.state.bridge_name.as_str(),
+ ],
+ )
+ .await?;
+ run_command(
+ "ip",
+ [
+ "link",
+ "set",
+ "dev",
+ config.state.bridge_name.as_str(),
+ "up",
+ ],
+ )
+ .await
+}
+
+async fn ensure_dnsmasq(config: &NicDataplaneConfig) -> Result<()> {
+ fs::create_dir_all(&config.state.network_dir)
+ .await
+ .map_err(|error| {
+ Error::HypervisorError(format!(
+ "failed to create network runtime directory {}: {error}",
+ config.state.network_dir
+ ))
+ })?;
+ write_hosts_file(&config.state).await?;
+
+ let dnsmasq_conf = format!(
+ "interface={bridge}\n\
+bind-interfaces\n\
+except-interface=lo\n\
+port=0\n\
+dhcp-authoritative\n\
+dhcp-option=option:router,{gateway}\n\
+dhcp-range={range_start},{range_end},{mask},1h\n\
+dhcp-hostsfile={hosts_file}\n\
+dhcp-leasefile={lease_file}\n\
+pid-file={pid_file}\n",
+ bridge = config.state.bridge_name,
+ gateway = config.state.gateway_ip,
+ range_start = config.dhcp_range_start,
+ range_end = config.dhcp_range_end,
+ mask = cidr_mask(&config.state.cidr_block)?,
+ hosts_file = config.state.hosts_file,
+ lease_file = config.state.lease_file,
+ pid_file = config.state.pid_file,
+ );
+ fs::write(&config.state.dnsmasq_conf, dnsmasq_conf)
+ .await
+ .map_err(|error| {
+ Error::HypervisorError(format!(
+ "failed to write dnsmasq config {}: {error}",
+ config.state.dnsmasq_conf
+ ))
+ })?;
+
+ if dnsmasq_running(&config.state).await? {
+ reload_dnsmasq(&config.state).await?;
+ return Ok(());
+ }
+
+ let mut command = Command::new("dnsmasq");
+ command.arg(format!("--conf-file={}", config.state.dnsmasq_conf));
+ let output = command
+ .output()
+ .await
+ .map_err(|error| Error::HypervisorError(format!("failed to spawn dnsmasq: {error}")))?;
+ if !output.status.success() {
+ return Err(command_failed("dnsmasq", &[], &output));
+ }
+
+ let deadline = Instant::now() + DNSMASQ_START_TIMEOUT;
+ while Instant::now() < deadline {
+ if dnsmasq_running(&config.state).await? {
+ return Ok(());
+ }
+ sleep(Duration::from_millis(100)).await;
+ }
+
+ Err(Error::HypervisorError(format!(
+ "dnsmasq did not start for bridge {}",
+ config.state.bridge_name
+ )))
+}
+
+async fn ensure_tap(runtime_root: &Path, config: &NicDataplaneConfig) -> Result<()> {
+ let _ = delete_interface_if_present(&config.state.tap_name).await;
+ let metadata = fs::metadata(runtime_root).await.map_err(|error| {
+ Error::HypervisorError(format!(
+ "failed to inspect runtime root {}: {error}",
+ runtime_root.display()
+ ))
+ })?;
+ let uid = metadata.uid().to_string();
+ let gid = metadata.gid().to_string();
+
+ run_command(
+ "ip",
+ [
+ "tuntap",
+ "add",
+ "dev",
+ config.state.tap_name.as_str(),
+ "mode",
+ "tap",
+ "user",
+ uid.as_str(),
+ "group",
+ gid.as_str(),
+ ],
+ )
+ .await?;
+ run_command(
+ "ip",
+ [
+ "link",
+ "set",
+ "dev",
+ config.state.tap_name.as_str(),
+ "master",
+ config.state.bridge_name.as_str(),
+ ],
+ )
+ .await?;
+ run_command(
+ "ip",
+ ["link", "set", "dev", config.state.tap_name.as_str(), "up"],
+ )
+ .await
+}
+
+async fn write_hosts_file(state: &NetworkRuntimeState) -> Result<()> {
+ let hosts_path = PathBuf::from(&state.hosts_file);
+ let existing = match fs::read_to_string(&hosts_path).await {
+ Ok(contents) => contents,
+ Err(error) if error.kind() == std::io::ErrorKind::NotFound => String::new(),
+ Err(error) => {
+ return Err(Error::HypervisorError(format!(
+ "failed to read dnsmasq hosts file {}: {error}",
+ hosts_path.display()
+ )))
+ }
+ };
+
+    let mut lines: Vec<String> = existing
+ .lines()
+ .filter(|line| !line.trim().is_empty() && !line.contains(&state.host_alias))
+ .map(ToOwned::to_owned)
+ .collect();
+ lines.push(format!(
+ "{mac},{ip},{alias}",
+ mac = state.mac_address,
+ ip = state.ip_address,
+ alias = state.host_alias
+ ));
+ let mut rendered = lines.join("\n");
+ if !rendered.is_empty() {
+ rendered.push('\n');
+ }
+ fs::write(&hosts_path, rendered).await.map_err(|error| {
+ Error::HypervisorError(format!(
+ "failed to write dnsmasq hosts file {}: {error}",
+ hosts_path.display()
+ ))
+ })
+}
+
+async fn remove_host_entry(state: &NetworkRuntimeState) -> Result<()> {
+ let hosts_path = PathBuf::from(&state.hosts_file);
+ let existing = match fs::read_to_string(&hosts_path).await {
+ Ok(contents) => contents,
+ Err(error) if error.kind() == std::io::ErrorKind::NotFound => return Ok(()),
+ Err(error) => {
+ return Err(Error::HypervisorError(format!(
+ "failed to read dnsmasq hosts file {}: {error}",
+ hosts_path.display()
+ )))
+ }
+ };
+ let filtered: Vec<&str> = existing
+ .lines()
+ .filter(|line| !line.contains(&state.host_alias))
+ .collect();
+ let mut rendered = filtered.join("\n");
+ if !rendered.is_empty() {
+ rendered.push('\n');
+ }
+ fs::write(&hosts_path, rendered).await.map_err(|error| {
+ Error::HypervisorError(format!(
+ "failed to update dnsmasq hosts file {}: {error}",
+ hosts_path.display()
+ ))
+ })
+}
+
+async fn bridge_has_hosts(state: &NetworkRuntimeState) -> Result<bool> {
+ match fs::read_to_string(&state.hosts_file).await {
+ Ok(contents) => Ok(contents.lines().any(|line| !line.trim().is_empty())),
+ Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false),
+ Err(error) => Err(Error::HypervisorError(format!(
+ "failed to inspect dnsmasq hosts file {}: {error}",
+ state.hosts_file
+ ))),
+ }
+}
+
+async fn dnsmasq_running(state: &NetworkRuntimeState) -> Result<bool> {
+ let pid = read_pid_file(&state.pid_file).await?;
+ if let Some(pid) = pid {
+ return Ok(pid_running(pid));
+ }
+ Ok(false)
+}
+
+async fn reload_dnsmasq(state: &NetworkRuntimeState) -> Result<()> {
+ if let Some(pid) = read_pid_file(&state.pid_file).await? {
+ signal_pid(pid, Signal::SIGHUP)?;
+ }
+ Ok(())
+}
+
+async fn stop_dnsmasq(state: &NetworkRuntimeState) -> Result<()> {
+ if let Some(pid) = read_pid_file(&state.pid_file).await? {
+ signal_pid(pid, Signal::SIGTERM)?;
+ let deadline = Instant::now() + Duration::from_secs(2);
+ while pid_running(pid) && Instant::now() < deadline {
+ sleep(Duration::from_millis(50)).await;
+ }
+ if pid_running(pid) {
+ signal_pid(pid, Signal::SIGKILL)?;
+ }
+ }
+ let _ = fs::remove_file(&state.pid_file).await;
+ Ok(())
+}
+
+async fn read_pid_file(path: &str) -> Result<Option<Pid>>