Implement declarative tenant networking and local VM dataplane

Add tenant-scoped PrismNET routing, security-group, port, and service-IP APIs plus a deployer reconciler and Nix module that apply declarative tenant network state.

Teach PlasmaVMC to realize PrismNET NICs as a concrete local worker dataplane with Linux bridges, dnsmasq-backed DHCP, tap devices, richer network metadata, stable managed-volume IDs, and file:// image imports.

Expand the VM cluster validation around the new path, including the guest webapp demo, restart and cross-node migration checks, IAM listener reservation hardening, and a flake workspace-source-root audit so Nix builds keep path dependencies complete.
This commit is contained in:
centra 2026-04-04 00:07:43 +09:00
parent 83c34f8453
commit 4ab47b1726
Signed by: centra
GPG key ID: 0C09689D20B25ACA
48 changed files with 9016 additions and 1904 deletions

14
deployer/Cargo.lock generated
View file

@ -2039,6 +2039,9 @@ dependencies = [
"deployer-types", "deployer-types",
"fiberlb-api", "fiberlb-api",
"flashdns-api", "flashdns-api",
"iam-client",
"iam-types",
"prismnet-api",
"serde", "serde",
"serde_json", "serde_json",
"tokio", "tokio",
@ -2093,6 +2096,17 @@ dependencies = [
"syn", "syn",
] ]
[[package]]
name = "prismnet-api"
version = "0.1.0"
dependencies = [
"prost",
"prost-types",
"protoc-bin-vendored",
"tonic",
"tonic-build",
]
[[package]] [[package]]
name = "proc-macro2" name = "proc-macro2"
version = "1.0.106" version = "1.0.106"

View file

@ -45,3 +45,4 @@ fiberlb-api = { path = "../fiberlb/crates/fiberlb-api" }
flashdns-api = { path = "../flashdns/crates/flashdns-api" } flashdns-api = { path = "../flashdns/crates/flashdns-api" }
iam-client = { path = "../iam/crates/iam-client" } iam-client = { path = "../iam/crates/iam-client" }
iam-types = { path = "../iam/crates/iam-types" } iam-types = { path = "../iam/crates/iam-types" }
prismnet-api = { path = "../prismnet/crates/prismnet-api" }

View file

@ -19,5 +19,8 @@ tracing-subscriber.workspace = true
fiberlb-api.workspace = true fiberlb-api.workspace = true
flashdns-api.workspace = true flashdns-api.workspace = true
deployer-types.workspace = true deployer-types.workspace = true
iam-client.workspace = true
iam-types.workspace = true
prismnet-api.workspace = true
clap = { version = "4.5", features = ["derive"] } clap = { version = "4.5", features = ["derive"] }
tonic = "0.12" tonic = "0.12"

View file

@ -0,0 +1,79 @@
use anyhow::Result;
use iam_client::client::IamClientConfig;
use iam_client::IamClient;
use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope};
use tonic::metadata::MetadataValue;
use tonic::Request;
/// Wrap `message` in a tonic [`Request`] that carries a bearer-token
/// `authorization` metadata header.
///
/// # Panics
/// Panics if `token` contains bytes that are not valid gRPC metadata
/// (ASCII-only is required by tonic).
pub fn authorized_request<T>(message: T, token: &str) -> Request<T> {
    let bearer = format!("Bearer {}", token);
    let header_value =
        MetadataValue::try_from(bearer.as_str()).expect("valid bearer token metadata");
    let mut request = Request::new(message);
    request.metadata_mut().insert("authorization", header_value);
    request
}
/// Mint a one-hour, project-scoped `roles/ProjectAdmin` token for the
/// reconciler's controller service account.
///
/// On first use this creates the service account (id doubles as the
/// display name) and idempotently grants it the ProjectAdmin binding
/// before issuing the token.
///
/// # Errors
/// Propagates any IAM client connect / lookup / create / issue failure.
pub async fn issue_controller_token(
    iam_server_addr: &str,
    principal_id: &str,
    org_id: &str,
    project_id: &str,
) -> Result<String> {
    let mut config = IamClientConfig::new(iam_server_addr).with_timeout(5000);
    // Only explicit https endpoints use TLS; http:// or bare host:port fall
    // back to plaintext. (The previous `http:// || !https://` check was
    // redundant: starting with "http://" already implies not "https://".)
    if !iam_server_addr.starts_with("https://") {
        config = config.without_tls();
    }
    let client = IamClient::connect(config).await?;

    // Reuse the service account when it already exists; create it otherwise.
    let principal_ref = PrincipalRef::service_account(principal_id);
    let principal = match client.get_principal(&principal_ref).await? {
        Some(existing) => existing,
        None => {
            client
                .create_service_account(principal_id, principal_id, org_id, project_id)
                .await?
        }
    };

    // Make sure the ProjectAdmin grant exists before requesting a token
    // that claims that role.
    ensure_project_admin_binding(&client, &principal, org_id, project_id).await?;

    let scope = Scope::project(project_id, org_id);
    client
        .issue_token(
            &principal,
            vec!["roles/ProjectAdmin".to_string()],
            scope,
            3600, // token lifetime in seconds (one hour)
        )
        .await
        .map_err(Into::into)
}
/// Grant `roles/ProjectAdmin` on the project scope to `principal`,
/// doing nothing when an identical binding is already present.
async fn ensure_project_admin_binding(
    client: &IamClient,
    principal: &Principal,
    org_id: &str,
    project_id: &str,
) -> Result<()> {
    let scope = Scope::project(project_id, org_id);

    // Look for an existing binding with the same role and scope.
    let existing = client
        .list_bindings_for_principal(&principal.to_ref())
        .await?;
    let has_binding = existing
        .iter()
        .any(|b| b.role_ref == "roles/ProjectAdmin" && b.scope == scope);
    if has_binding {
        return Ok(());
    }

    // Deterministic binding id so repeated runs stay idempotent.
    let binding_id = format!("{}-project-admin-{}-{}", principal.id, org_id, project_id);
    let binding = PolicyBinding::new(binding_id, principal.to_ref(), "roles/ProjectAdmin", scope)
        .with_created_by("plasmacloud-reconciler");
    client.create_binding(&binding).await?;
    Ok(())
}

View file

@ -39,7 +39,9 @@ use flashdns_api::proto::{
ZoneInfo, ZoneInfo,
}; };
mod auth;
mod hosts; mod hosts;
mod tenant_network;
mod watcher; mod watcher;
#[derive(Parser)] #[derive(Parser)]
@ -75,6 +77,9 @@ enum Command {
prune: bool, prune: bool,
}, },
/// Apply tenant-scoped PrismNET declarations
TenantNetwork(tenant_network::TenantNetworkCommand),
/// Reconcile host deployments into per-node desired-system state /// Reconcile host deployments into per-node desired-system state
Hosts(hosts::HostsCommand), Hosts(hosts::HostsCommand),
} }
@ -300,6 +305,9 @@ async fn main() -> Result<()> {
let spec: DnsConfig = read_json(&config).await?; let spec: DnsConfig = read_json(&config).await?;
reconcile_dns(spec, endpoint, prune).await?; reconcile_dns(spec, endpoint, prune).await?;
} }
Command::TenantNetwork(command) => {
tenant_network::run(command).await?;
}
Command::Hosts(command) => { Command::Hosts(command) => {
hosts::run(command).await?; hosts::run(command).await?;
} }

File diff suppressed because it is too large Load diff

View file

@ -19,14 +19,16 @@ This flow:
```bash ```bash
nix run ./nix/test-cluster#cluster -- fresh-smoke nix run ./nix/test-cluster#cluster -- fresh-smoke
nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp
nix run ./nix/test-cluster#cluster -- fresh-matrix nix run ./nix/test-cluster#cluster -- fresh-matrix
nix run ./nix/test-cluster#cluster -- fresh-bench-storage nix run ./nix/test-cluster#cluster -- fresh-bench-storage
nix build .#checks.x86_64-linux.deployer-vm-smoke nix build .#checks.x86_64-linux.deployer-vm-smoke
``` ```
Use these three commands as the release-facing local proof set: Use these commands as the release-facing local proof set:
- `fresh-smoke`: whole-cluster readiness, core behavior, and fault injection - `fresh-smoke`: whole-cluster readiness, core behavior, and fault injection
- `fresh-demo-vm-webapp`: focused VM demo showing a web app inside the guest with SQLite state persisted on the attached PhotonCloud volume across restart and migration
- `fresh-matrix`: composed service scenarios such as `prismnet + flashdns + fiberlb` and PrismNet-backed VM hosting bundles with `plasmavmc + coronafs + lightningstor` - `fresh-matrix`: composed service scenarios such as `prismnet + flashdns + fiberlb` and PrismNet-backed VM hosting bundles with `plasmavmc + coronafs + lightningstor`
- `fresh-bench-storage`: CoronaFS local-vs-shared-volume throughput, cross-worker volume visibility, and LightningStor large/small-object throughput capture - `fresh-bench-storage`: CoronaFS local-vs-shared-volume throughput, cross-worker volume visibility, and LightningStor large/small-object throughput capture
- `deployer-vm-smoke`: prebuilt NixOS system closure handoff into `nix-agent`, proving host rollout can activate a host-built target without guest-side compilation - `deployer-vm-smoke`: prebuilt NixOS system closure handoff into `nix-agent`, proving host rollout can activate a host-built target without guest-side compilation
@ -37,6 +39,7 @@ Use these three commands as the release-facing local proof set:
nix run ./nix/test-cluster#cluster -- status nix run ./nix/test-cluster#cluster -- status
nix run ./nix/test-cluster#cluster -- logs node01 nix run ./nix/test-cluster#cluster -- logs node01
nix run ./nix/test-cluster#cluster -- ssh node04 nix run ./nix/test-cluster#cluster -- ssh node04
nix run ./nix/test-cluster#cluster -- demo-vm-webapp
nix run ./nix/test-cluster#cluster -- matrix nix run ./nix/test-cluster#cluster -- matrix
nix run ./nix/test-cluster#cluster -- bench-storage nix run ./nix/test-cluster#cluster -- bench-storage
nix run ./nix/test-cluster#cluster -- fresh-matrix nix run ./nix/test-cluster#cluster -- fresh-matrix

122
flake.nix
View file

@ -203,6 +203,7 @@
"flaredb" "flaredb"
"flashdns" "flashdns"
"iam" "iam"
"prismnet"
]; ];
}; };
@ -1064,6 +1065,127 @@
}; };
checks = { checks = {
workspace-source-roots-audit = pkgs.runCommand "workspace-source-roots-audit" {
nativeBuildInputs = [ pkgs.python3 ];
} ''
${pkgs.python3}/bin/python - <<'PY' ${./.}
from __future__ import annotations
import re
import sys
import tomllib
from pathlib import Path
from typing import Any
def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]:
    # Scrape the `workspaceSourceRoots = { ... };` attrset out of flake.nix
    # with regexes; each entry has the shape `name = [ "root" ... ];`.
    text = flake_path.read_text()
    block = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", text, re.S)
    if block is None:
        raise ValueError(f"Could not find workspaceSourceRoots in {flake_path}")
    entries = re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", block.group(1), re.S)
    return {name: re.findall(r'"([^"]+)"', body) for name, body in entries}
def collect_path_dependencies(value: Any) -> list[str]:
    # Depth-first walk over a parsed Cargo.toml structure, gathering every
    # string-valued `path` key in encounter order (a dict's own "path"
    # comes before anything found in its nested values).
    paths: list[str] = []
    if isinstance(value, dict):
        own = value.get("path")
        if isinstance(own, str):
            paths.append(own)
        children = list(value.values())
    elif isinstance(value, list):
        children = list(value)
    else:
        children = []
    for child in children:
        paths.extend(collect_path_dependencies(child))
    return paths
def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]:
workspace_manifest = repo_root / workspace_name / "Cargo.toml"
manifests = [workspace_manifest]
workspace_data = tomllib.loads(workspace_manifest.read_text())
members = workspace_data.get("workspace", {}).get("members", [])
for member in members:
for candidate in workspace_manifest.parent.glob(member):
manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml"
if manifest.is_file():
manifests.append(manifest)
unique_manifests: list[Path] = []
seen: set[Path] = set()
for manifest in manifests:
resolved = manifest.resolve()
if resolved in seen:
continue
seen.add(resolved)
unique_manifests.append(manifest)
return unique_manifests
def required_root(dep_rel: Path) -> str:
    # Derive the source-root entry that would cover a repo-relative
    # dependency path: a top-level "crates/<name>" path keeps two
    # components, everything else collapses to its first component.
    segments = dep_rel.parts
    if not segments:
        return ""
    head = segments[0]
    if head == "crates" and len(segments) > 1:
        return f"{head}/{segments[1]}"
    return head
def is_covered(dep_rel: str, configured_roots: list[str]) -> bool:
    # A dependency is covered when it equals a configured root exactly or
    # lives underneath one (component-aligned prefix, hence the "/").
    for root in configured_roots:
        if dep_rel == root or dep_rel.startswith(root + "/"):
            return True
    return False
def main() -> int:
    # Audit every workspace declared in flake.nix: each Cargo `path`
    # dependency that resolves inside the repo must be covered by one of
    # that workspace's configured source roots, otherwise Nix builds would
    # copy an incomplete source tree.
    repo_root = Path(sys.argv[1]).resolve()
    declared = extract_workspace_source_roots(repo_root / "flake.nix")
    failures: list[str] = []
    for workspace_name, configured_roots in sorted(declared.items()):
        if not (repo_root / workspace_name / "Cargo.toml").is_file():
            continue  # listed in flake.nix but absent on disk
        for manifest in workspace_manifests(repo_root, workspace_name):
            parsed = tomllib.loads(manifest.read_text())
            for dep_path in collect_path_dependencies(parsed):
                target = (manifest.parent / dep_path).resolve()
                try:
                    dep_rel = target.relative_to(repo_root)
                except ValueError:
                    continue  # dependency points outside the repository
                dep_rel_str = dep_rel.as_posix()
                if not is_covered(dep_rel_str, configured_roots):
                    failures.append(
                        f"{workspace_name}: missing source root '{required_root(dep_rel)}' "
                        f"for dependency '{dep_rel_str}' referenced by "
                        f"{manifest.relative_to(repo_root).as_posix()}"
                    )
    if failures:
        print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr)
        for failure in failures:
            print(f" - {failure}", file=sys.stderr)
        return 1
    print("workspaceSourceRoots covers all workspace path dependencies.")
    return 0
raise SystemExit(main())
PY
touch "$out"
'';
first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest ( first-boot-topology-vm-smoke = pkgs.testers.runNixOSTest (
import ./nix/tests/first-boot-topology-vm-smoke.nix { import ./nix/tests/first-boot-topology-vm-smoke.nix {
inherit pkgs; inherit pkgs;

1
iam/Cargo.lock generated
View file

@ -1294,6 +1294,7 @@ dependencies = [
"serde_json", "serde_json",
"thiserror 1.0.69", "thiserror 1.0.69",
"tokio", "tokio",
"tokio-stream",
"toml", "toml",
"tonic", "tonic",
"tonic-health", "tonic-health",

View file

@ -21,6 +21,7 @@ serde = { workspace = true }
serde_json = { workspace = true } serde_json = { workspace = true }
thiserror = { workspace = true } thiserror = { workspace = true }
tokio = { workspace = true, features = ["full"] } tokio = { workspace = true, features = ["full"] }
tokio-stream = { workspace = true, features = ["net"] }
tracing = { workspace = true } tracing = { workspace = true }
tracing-subscriber = { workspace = true } tracing-subscriber = { workspace = true }
tonic = { workspace = true } tonic = { workspace = true }

View file

@ -16,6 +16,7 @@ use tonic::service::Interceptor;
use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig}; use tonic::transport::{Certificate, Identity, Server, ServerTlsConfig};
use tonic::{metadata::MetadataMap, Request, Status}; use tonic::{metadata::MetadataMap, Request, Status};
use tonic_health::server::health_reporter; use tonic_health::server::health_reporter;
use tokio_stream::wrappers::TcpListenerStream;
use tracing::{info, warn}; use tracing::{info, warn};
use iam_api::{ use iam_api::{
@ -283,6 +284,20 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
info!("Starting IAM server on {}", config.server.addr); info!("Starting IAM server on {}", config.server.addr);
// Reserve the public listeners before opening outbound cluster/backend connections.
// Without this, a peer connection can claim the service port as an ephemeral source port
// and make the later gRPC bind fail with EADDRINUSE.
let grpc_addr = config.server.addr;
let http_addr = config.server.http_addr;
let grpc_listener = tokio::net::TcpListener::bind(grpc_addr).await?;
let http_listener = tokio::net::TcpListener::bind(http_addr).await?;
info!(
grpc_addr = %grpc_addr,
http_addr = %http_addr,
"IAM listeners reserved"
);
if let Some(endpoint) = &config.cluster.chainfire_endpoint { if let Some(endpoint) = &config.cluster.chainfire_endpoint {
let normalized = normalize_chainfire_endpoint(endpoint); let normalized = normalize_chainfire_endpoint(endpoint);
info!( info!(
@ -514,17 +529,15 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
.add_service(IamCredentialServer::new(credential_service)) .add_service(IamCredentialServer::new(credential_service))
.add_service(GatewayAuthServiceServer::new(gateway_auth_service)) .add_service(GatewayAuthServiceServer::new(gateway_auth_service))
.add_service(admin_server) .add_service(admin_server)
.serve(config.server.addr); .serve_with_incoming(TcpListenerStream::new(grpc_listener));
// HTTP REST API server // HTTP REST API server
let http_addr = config.server.http_addr;
let rest_state = rest::RestApiState { let rest_state = rest::RestApiState {
server_addr: config.server.addr.to_string(), server_addr: grpc_addr.to_string(),
tls_enabled: config.server.tls.is_some(), tls_enabled: config.server.tls.is_some(),
admin_token: admin_token.clone(), admin_token: admin_token.clone(),
}; };
let rest_app = rest::build_router(rest_state); let rest_app = rest::build_router(rest_state);
let http_listener = tokio::net::TcpListener::bind(&http_addr).await?;
info!(http_addr = %http_addr, "HTTP REST API server starting"); info!(http_addr = %http_addr, "HTTP REST API server starting");

14
k8shost/Cargo.lock generated
View file

@ -2217,6 +2217,18 @@ version = "0.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084"
[[package]]
name = "nix"
version = "0.29.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46"
dependencies = [
"bitflags 2.11.0",
"cfg-if",
"cfg_aliases",
"libc",
]
[[package]] [[package]]
name = "nom" name = "nom"
version = "7.1.3" version = "7.1.3"
@ -2539,6 +2551,7 @@ name = "plasmavmc-kvm"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-trait", "async-trait",
"nix",
"plasmavmc-hypervisor", "plasmavmc-hypervisor",
"plasmavmc-types", "plasmavmc-types",
"serde", "serde",
@ -2576,6 +2589,7 @@ dependencies = [
"reqwest 0.12.28", "reqwest 0.12.28",
"serde", "serde",
"serde_json", "serde_json",
"sha2",
"thiserror 1.0.69", "thiserror 1.0.69",
"tokio", "tokio",
"tokio-stream", "tokio-stream",

View file

@ -3,6 +3,7 @@
./chainfire.nix ./chainfire.nix
./plasmacloud-cluster.nix ./plasmacloud-cluster.nix
./install-target.nix ./install-target.nix
./service-port-reservations.nix
./creditservice.nix ./creditservice.nix
./coronafs.nix ./coronafs.nix
./flaredb.nix ./flaredb.nix
@ -11,6 +12,9 @@
./prismnet.nix ./prismnet.nix
./flashdns.nix ./flashdns.nix
./fiberlb.nix ./fiberlb.nix
./plasmacloud-network.nix
./plasmacloud-resources.nix
./plasmacloud-tenant-networking.nix
./lightningstor.nix ./lightningstor.nix
./k8shost.nix ./k8shost.nix
./nightlight.nix ./nightlight.nix

View file

@ -0,0 +1,373 @@
{ config, lib, pkgs, ... }:
with lib;
let
cfg = config.plasmacloud.tenantNetworking;
jsonFormat = pkgs.formats.json {};
# Submodule types describing the tenant-scoped PrismNET object model.
# Option names deliberately use snake_case: these values are serialized
# verbatim into the JSON file consumed by the reconciler.

# A tenant-scoped pool from which service IPs are allocated.
serviceIpPoolType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "Service IP pool name";
    };
    cidr_block = mkOption {
      type = types.str;
      description = "Service IP pool CIDR";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Service IP pool description";
    };
    pool_type = mkOption {
      type = types.nullOr (types.enum [ "cluster_ip" "load_balancer" "node_port" ]);
      default = null;
      description = "Service IP pool type";
    };
  };
};

# A port (NIC attachment point) declared inside a subnet.
portType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "Port name";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Port description";
    };
    ip_address = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Requested fixed IP address";
    };
    security_groups = mkOption {
      type = types.listOf types.str;
      default = [];
      description = "Security group names attached to the port";
    };
    admin_state_up = mkOption {
      type = types.nullOr types.bool;
      default = null;
      description = "Administrative state for the port";
    };
  };
};

# A subnet within a VPC, optionally carrying declared ports.
subnetType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "Subnet name";
    };
    cidr_block = mkOption {
      type = types.str;
      description = "Subnet CIDR";
    };
    gateway_ip = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Gateway IP";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Subnet description";
    };
    dhcp_enabled = mkOption {
      type = types.nullOr types.bool;
      default = null;
      description = "Enable DHCP for the subnet";
    };
    ports = mkOption {
      type = types.listOf portType;
      default = [];
      description = "Ports within the subnet";
    };
  };
};

# Optional tenant edge router attached to a VPC (SNAT to external_ip).
routerType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "Router name";
    };
    gateway_cidr = mkOption {
      type = types.str;
      description = "Gateway interface CIDR attached to the VPC";
    };
    mac_address = mkOption {
      type = types.str;
      description = "Router interface MAC address";
    };
    external_ip = mkOption {
      type = types.str;
      description = "SNAT external IPv4 address";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Router description";
    };
  };
};

# A tenant VPC with its nested router and subnets.
vpcType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "VPC name";
    };
    cidr_block = mkOption {
      type = types.str;
      description = "VPC CIDR";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "VPC description";
    };
    router = mkOption {
      type = types.nullOr routerType;
      default = null;
      description = "Optional tenant edge router for the VPC";
    };
    subnets = mkOption {
      type = types.listOf subnetType;
      default = [];
      description = "Subnets within the VPC";
    };
  };
};

# A single security-group rule; remote_cidr and remote_group are
# alternative remote selectors.
securityGroupRuleType = types.submodule {
  options = {
    direction = mkOption {
      type = types.enum [ "ingress" "egress" ];
      description = "Rule direction";
    };
    protocol = mkOption {
      type = types.nullOr (types.enum [ "any" "tcp" "udp" "icmp" "icmpv6" ]);
      default = null;
      description = "IP protocol";
    };
    port_range_min = mkOption {
      type = types.nullOr types.int;
      default = null;
      description = "Minimum port in range";
    };
    port_range_max = mkOption {
      type = types.nullOr types.int;
      default = null;
      description = "Maximum port in range";
    };
    remote_cidr = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Remote CIDR";
    };
    remote_group = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Remote security group name";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Rule description";
    };
  };
};

# A named security group and its rules.
securityGroupType = types.submodule {
  options = {
    name = mkOption {
      type = types.str;
      description = "Security group name";
    };
    description = mkOption {
      type = types.nullOr types.str;
      default = null;
      description = "Security group description";
    };
    rules = mkOption {
      type = types.listOf securityGroupRuleType;
      default = [];
      description = "Security group rules";
    };
  };
};

# The top-level per-tenant declaration: org/project identity plus the
# tenant's security groups, service IP pools, and VPCs.
tenantType = types.submodule {
  options = {
    org_id = mkOption {
      type = types.str;
      description = "Tenant organization ID";
    };
    project_id = mkOption {
      type = types.str;
      description = "Tenant project ID";
    };
    security_groups = mkOption {
      type = types.listOf securityGroupType;
      default = [];
      description = "Tenant-scoped security groups";
    };
    service_ip_pools = mkOption {
      type = types.listOf serviceIpPoolType;
      default = [];
      description = "Tenant-scoped Service IP pools";
    };
    vpcs = mkOption {
      type = types.listOf vpcType;
      default = [];
      description = "Tenant-scoped VPCs and their nested resources";
    };
  };
};
configFile = jsonFormat.generate "plasmacloud-tenant-networking.json" {
inherit (cfg) tenants;
};
configPath = cfg.configPath;
configRelative = removePrefix "/etc/" configPath;
in {
options.plasmacloud.tenantNetworking = {
  enable = mkEnableOption "tenant-scoped PrismNET declarations";
  # Control-plane endpoints the reconciler talks to when applying state.
  endpoint = mkOption {
    type = types.str;
    default = "http://127.0.0.1:50081";
    description = "PrismNET gRPC endpoint";
  };
  iamEndpoint = mkOption {
    type = types.str;
    default = "http://127.0.0.1:50080";
    description = "IAM gRPC endpoint used to mint tenant-scoped controller tokens";
  };
  controllerPrincipalId = mkOption {
    type = types.str;
    default = "plasmacloud-reconciler";
    description = "Service account used by the reconciler when applying tenant declarations";
  };
  tenants = mkOption {
    type = types.listOf tenantType;
    default = [];
    description = "Tenant-scoped network declarations. This is separate from platform networking under plasmacloud.network.";
  };
  # Must live under /etc (enforced by an assertion) so the rendered file
  # can be managed via environment.etc and watched by the path unit.
  configPath = mkOption {
    type = types.str;
    default = "/etc/plasmacloud/tenant-networking.json";
    description = "Path for rendered tenant networking config";
  };
  applyOnBoot = mkOption {
    type = types.bool;
    default = true;
    description = "Apply declarations at boot";
  };
  applyOnChange = mkOption {
    type = types.bool;
    default = true;
    description = "Apply declarations when the config file changes";
  };
  # Off by default: pruning deletes undeclared resources for managed tenants.
  prune = mkOption {
    type = types.bool;
    default = false;
    description = "Delete tenant network resources not declared for managed tenants";
  };
  package = mkOption {
    type = types.package;
    default = pkgs.plasmacloud-reconciler or (throw "plasmacloud-reconciler package not found");
    description = "Reconciler package for tenant networking declarations";
  };
};
config = mkIf cfg.enable {
  assertions = [
    {
      assertion = hasPrefix "/etc/" configPath;
      message = "plasmacloud.tenantNetworking.configPath must be under /etc";
    }
  ];

  # Render the tenant declarations into /etc so the path unit below can
  # watch the file for changes.
  environment.etc."${configRelative}".source = configFile;

  systemd.services.plasmacloud-tenant-networking-apply = {
    description = "Apply PlasmaCloud tenant networking declarations";
    # Order after the local control-plane services when they run on this
    # host; the reconciler needs both PrismNET and IAM reachable.
    after =
      [ "network-online.target" ]
      ++ optional config.services.prismnet.enable "prismnet.service"
      ++ optional config.services.iam.enable "iam.service";
    wants =
      [ "network-online.target" ]
      ++ optional config.services.prismnet.enable "prismnet.service"
      ++ optional config.services.iam.enable "iam.service";
    wantedBy = optional cfg.applyOnBoot "multi-user.target";
    serviceConfig = {
      Type = "oneshot";
      # BUGFIX: a oneshot that stays "active" via RemainAfterExit=true can
      # NOT be re-triggered by the path unit below — systemd treats a start
      # request for an already-active unit as a no-op, so applyOnChange
      # would never re-apply. Only keep the unit active when change-driven
      # re-application is disabled.
      RemainAfterExit = !cfg.applyOnChange;
      ExecStart =
        "${cfg.package}/bin/plasmacloud-reconciler tenant-network"
        + " --config ${configPath}"
        + " --endpoint ${cfg.endpoint}"
        + " --iam-endpoint ${cfg.iamEndpoint}"
        + " --controller-principal-id ${cfg.controllerPrincipalId}"
        + optionalString cfg.prune " --prune";
    };
  };

  # Re-run the apply service whenever the rendered config changes on disk.
  systemd.paths.plasmacloud-tenant-networking-apply = mkIf cfg.applyOnChange {
    wantedBy = [ "multi-user.target" ];
    pathConfig = {
      PathChanged = configPath;
    };
  };
};
}

View file

@ -329,7 +329,7 @@ in
wantedBy = [ "multi-user.target" ]; wantedBy = [ "multi-user.target" ];
after = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps; after = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps;
wants = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps; wants = [ "network-online.target" "prismnet.service" "flaredb.service" "chainfire.service" ] ++ localIamDeps;
path = [ pkgs.qemu pkgs.coreutils pkgs.curl ]; path = [ pkgs.qemu pkgs.coreutils pkgs.curl pkgs.iproute2 pkgs.dnsmasq ];
preStart = preStart =
lib.optionalString (localIamHealthUrl != null) '' lib.optionalString (localIamHealthUrl != null) ''
for _ in $(seq 1 90); do for _ in $(seq 1 90); do
@ -377,13 +377,14 @@ in
# Security hardening - relaxed for KVM access # Security hardening - relaxed for KVM access
NoNewPrivileges = false; # Needed for KVM NoNewPrivileges = false; # Needed for KVM
AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
PrivateTmp = true; PrivateTmp = true;
ProtectSystem = "strict"; ProtectSystem = "strict";
ProtectHome = true; ProtectHome = true;
ReadWritePaths = ReadWritePaths =
[ cfg.dataDir "/run/libvirt" cfg.managedVolumeRoot ] [ cfg.dataDir "/run/libvirt" cfg.managedVolumeRoot ]
++ lib.optionals (coronafsDataDir != null) [ coronafsDataDir ]; ++ lib.optionals (coronafsDataDir != null) [ coronafsDataDir ];
DeviceAllow = [ "/dev/kvm rw" ]; DeviceAllow = [ "/dev/kvm rw" "/dev/net/tun rw" ];
# Start command # Start command
ExecStart = "${cfg.package}/bin/plasmavmc-server --config ${plasmavmcConfigFile}"; ExecStart = "${cfg.package}/bin/plasmavmc-server --config ${plasmavmcConfigFile}";

View file

@ -0,0 +1,10 @@
{ lib, ... }:
{
  # PhotonCloud control-plane services bind within this band. Reserve it from
  # the ephemeral allocator so outbound peer/backend connections cannot steal
  # a service port during boot and block the later listener bind.
  boot.kernel.sysctl."net.ipv4.ip_local_reserved_ports" = lib.mkDefault "50051-50090";
}

View file

@ -45,6 +45,8 @@ nix run ./nix/test-cluster#cluster -- build
nix run ./nix/test-cluster#cluster -- start nix run ./nix/test-cluster#cluster -- start
nix run ./nix/test-cluster#cluster -- smoke nix run ./nix/test-cluster#cluster -- smoke
nix run ./nix/test-cluster#cluster -- fresh-smoke nix run ./nix/test-cluster#cluster -- fresh-smoke
nix run ./nix/test-cluster#cluster -- demo-vm-webapp
nix run ./nix/test-cluster#cluster -- fresh-demo-vm-webapp
nix run ./nix/test-cluster#cluster -- matrix nix run ./nix/test-cluster#cluster -- matrix
nix run ./nix/test-cluster#cluster -- fresh-matrix nix run ./nix/test-cluster#cluster -- fresh-matrix
nix run ./nix/test-cluster#cluster -- bench-storage nix run ./nix/test-cluster#cluster -- bench-storage
@ -61,6 +63,8 @@ Preferred entrypoint for publishable verification: `nix run ./nix/test-cluster#c
`make cluster-smoke` is a convenience wrapper for the same clean host-build VM validation flow. `make cluster-smoke` is a convenience wrapper for the same clean host-build VM validation flow.
`nix run ./nix/test-cluster#cluster -- demo-vm-webapp` creates a PrismNet-attached VM, boots a tiny web app inside the guest, stores its state in SQLite on the attached data volume, and then proves that the counter survives guest restart plus cross-worker migration.
`nix run ./nix/test-cluster#cluster -- matrix` reuses the current running cluster to exercise composed service scenarios such as `prismnet + flashdns + fiberlb`, PrismNet-backed VM hosting with `plasmavmc + prismnet + coronafs + lightningstor`, the Kubernetes-style hosting bundle, and API-gateway-mediated `nightlight` / `creditservice` flows. `nix run ./nix/test-cluster#cluster -- matrix` reuses the current running cluster to exercise composed service scenarios such as `prismnet + flashdns + fiberlb`, PrismNet-backed VM hosting with `plasmavmc + prismnet + coronafs + lightningstor`, the Kubernetes-style hosting bundle, and API-gateway-mediated `nightlight` / `creditservice` flows.
Preferred entrypoint for publishable matrix verification: `nix run ./nix/test-cluster#cluster -- fresh-matrix` Preferred entrypoint for publishable matrix verification: `nix run ./nix/test-cluster#cluster -- fresh-matrix`

View file

@ -11,6 +11,7 @@
../modules/flaredb.nix ../modules/flaredb.nix
../modules/iam.nix ../modules/iam.nix
../modules/prismnet.nix ../modules/prismnet.nix
../modules/plasmacloud-tenant-networking.nix
../modules/flashdns.nix ../modules/flashdns.nix
../modules/fiberlb.nix ../modules/fiberlb.nix
../modules/k8shost.nix ../modules/k8shost.nix
@ -166,4 +167,91 @@
services.lightningstor.s3AccessKeyId = "photoncloud-test"; services.lightningstor.s3AccessKeyId = "photoncloud-test";
services.lightningstor.s3SecretKey = "photoncloud-test-secret"; services.lightningstor.s3SecretKey = "photoncloud-test-secret";
plasmacloud.tenantNetworking = {
  enable = true;
  # Control-plane endpoints local to node01 in the test cluster.
  endpoint = "http://127.0.0.1:50081";
  iamEndpoint = "http://127.0.0.1:50080";
  controllerPrincipalId = "plasmacloud-reconciler";
  # Prune so resources removed from this declaration are also deleted,
  # keeping the matrix runs reproducible.
  prune = true;
  tenants = [
    {
      org_id = "matrix-tenant-org";
      project_id = "matrix-tenant-project";
      security_groups = [
        {
          name = "vm-default";
          description = "Default tenant SG for matrix VMs";
          rules = [
            {
              direction = "ingress";
              protocol = "tcp";
              port_range_min = 22;
              port_range_max = 22;
              remote_cidr = "10.100.0.0/24";
              description = "Allow SSH from the cluster network";
            }
            {
              direction = "egress";
              protocol = "any";
              remote_cidr = "0.0.0.0/0";
              description = "Allow outbound traffic";
            }
          ];
        }
        {
          name = "web";
          description = "HTTP ingress from default tenant members";
          rules = [
            {
              direction = "ingress";
              protocol = "tcp";
              port_range_min = 80;
              port_range_max = 80;
              # remote_group instead of remote_cidr: only members of the
              # vm-default group may reach port 80.
              remote_group = "vm-default";
              description = "Allow HTTP from vm-default members";
            }
          ];
        }
      ];
      service_ip_pools = [
        {
          name = "cluster-services";
          cidr_block = "10.62.200.0/24";
          description = "ClusterIP allocations for matrix tenant services";
          pool_type = "cluster_ip";
        }
        {
          name = "public-services";
          cidr_block = "10.62.210.0/24";
          description = "Load balancer allocations for matrix tenant services";
          pool_type = "load_balancer";
        }
      ];
      vpcs = [
        {
          name = "matrix-vpc";
          cidr_block = "10.62.0.0/16";
          description = "Declarative PrismNET tenant network for VM matrix validation";
          router = {
            name = "matrix-router";
            gateway_cidr = "10.62.0.1/24";
            mac_address = "02:00:00:00:62:01";
            # 203.0.113.0/24 is the TEST-NET-3 documentation range.
            external_ip = "203.0.113.62";
            description = "Tenant edge router";
          };
          subnets = [
            {
              name = "matrix-subnet";
              cidr_block = "10.62.10.0/24";
              gateway_ip = "10.62.10.1";
              description = "Primary VM subnet for matrix validation";
              dhcp_enabled = true;
            }
          ];
        }
      ];
    }
  ];
};
} }

View file

@ -48,6 +48,26 @@
pathPrefix = "/api/v1/subnets"; pathPrefix = "/api/v1/subnets";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087"; upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
} }
{
name = "prismnet-routers";
pathPrefix = "/api/v1/routers";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
}
{
name = "prismnet-security-groups";
pathPrefix = "/api/v1/security-groups";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
}
{
name = "prismnet-ports";
pathPrefix = "/api/v1/ports";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
}
{
name = "prismnet-service-ip-pools";
pathPrefix = "/api/v1/service-ip-pools";
upstream = "http://${config.plasmacloud.cluster.nodes.node01.ip}:8087";
}
{ {
name = "plasmavmc-vms"; name = "plasmavmc-vms";
pathPrefix = "/api/v1/vms"; pathPrefix = "/api/v1/vms";

View file

@ -35,7 +35,9 @@ SSH_PASSWORD="${PHOTON_VM_ROOT_PASSWORD:-test}"
SSH_CONNECT_TIMEOUT="${PHOTON_VM_SSH_CONNECT_TIMEOUT:-5}" SSH_CONNECT_TIMEOUT="${PHOTON_VM_SSH_CONNECT_TIMEOUT:-5}"
SSH_WAIT_TIMEOUT="${PHOTON_VM_SSH_WAIT_TIMEOUT:-300}" SSH_WAIT_TIMEOUT="${PHOTON_VM_SSH_WAIT_TIMEOUT:-300}"
UNIT_WAIT_TIMEOUT="${PHOTON_VM_UNIT_WAIT_TIMEOUT:-240}" UNIT_WAIT_TIMEOUT="${PHOTON_VM_UNIT_WAIT_TIMEOUT:-240}"
UNIT_CHECK_TIMEOUT="${PHOTON_VM_UNIT_CHECK_TIMEOUT:-15}"
HTTP_WAIT_TIMEOUT="${PHOTON_VM_HTTP_WAIT_TIMEOUT:-180}" HTTP_WAIT_TIMEOUT="${PHOTON_VM_HTTP_WAIT_TIMEOUT:-180}"
VM_DEMO_HTTP_PORT="${PHOTON_VM_DEMO_HTTP_PORT:-8080}"
KVM_WAIT_TIMEOUT="${PHOTON_VM_KVM_WAIT_TIMEOUT:-180}" KVM_WAIT_TIMEOUT="${PHOTON_VM_KVM_WAIT_TIMEOUT:-180}"
FLAREDB_WAIT_TIMEOUT="${PHOTON_VM_FLAREDB_WAIT_TIMEOUT:-180}" FLAREDB_WAIT_TIMEOUT="${PHOTON_VM_FLAREDB_WAIT_TIMEOUT:-180}"
GRPCURL_MAX_MSG_SIZE="${PHOTON_VM_GRPCURL_MAX_MSG_SIZE:-1073741824}" GRPCURL_MAX_MSG_SIZE="${PHOTON_VM_GRPCURL_MAX_MSG_SIZE:-1073741824}"
@ -83,6 +85,15 @@ PLASMAVMC_PROTO="${PLASMAVMC_PROTO_DIR}/plasmavmc.proto"
FLAREDB_PROTO_DIR="${REPO_ROOT}/flaredb/crates/flaredb-proto/src" FLAREDB_PROTO_DIR="${REPO_ROOT}/flaredb/crates/flaredb-proto/src"
FLAREDB_PROTO="${FLAREDB_PROTO_DIR}/kvrpc.proto" FLAREDB_PROTO="${FLAREDB_PROTO_DIR}/kvrpc.proto"
FLAREDB_SQL_PROTO="${FLAREDB_PROTO_DIR}/sqlrpc.proto" FLAREDB_SQL_PROTO="${FLAREDB_PROTO_DIR}/sqlrpc.proto"
# Identity and resource names of the declaratively applied matrix tenant
# network. These strings must match the plasmacloud.tenantNetworking
# definition in the cluster Nix configuration — the validation flows look the
# resources up by these names through the API gateway.
MATRIX_TENANT_ORG_ID="matrix-tenant-org"
MATRIX_TENANT_PROJECT_ID="matrix-tenant-project"
MATRIX_TENANT_VPC_NAME="matrix-vpc"
MATRIX_TENANT_SUBNET_NAME="matrix-subnet"
MATRIX_TENANT_ROUTER_NAME="matrix-router"
MATRIX_TENANT_DEFAULT_SG_NAME="vm-default"
MATRIX_TENANT_WEB_SG_NAME="web"
MATRIX_TENANT_CLUSTER_POOL_NAME="cluster-services"
MATRIX_TENANT_LB_POOL_NAME="public-services"
# shellcheck disable=SC2034 # shellcheck disable=SC2034
NODE_PHASES=( NODE_PHASES=(
@ -530,6 +541,26 @@ wait_for_prismnet_port_detachment() {
done done
} }
# Poll PrismNet until the given port no longer exists, dying on timeout.
#
# $1 token, $2 org_id, $3 project_id, $4 subnet_id, $5 port_id,
# $6 optional timeout in seconds (defaults to HTTP_WAIT_TIMEOUT).
wait_for_prismnet_port_absent() {
  local token="$1" org_id="$2" project_id="$3" subnet_id="$4" port_id="$5"
  local timeout="${6:-${HTTP_WAIT_TIMEOUT}}"
  local deadline=$((SECONDS + timeout))
  # Keep polling for as long as the port lookup still succeeds.
  while prismnet_get_port_json "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null 2>&1; do
    if (( SECONDS >= deadline )); then
      die "timed out waiting for PrismNet port ${port_id} to be deleted"
    fi
    sleep 2
  done
  return 0
}
wait_for_vm_network_spec() { wait_for_vm_network_spec() {
local token="$1" local token="$1"
local get_vm_json="$2" local get_vm_json="$2"
@ -567,6 +598,28 @@ wait_for_vm_network_spec() {
done done
} }
# Issue an authenticated REST request against the tunneled API gateway
# (local port 18080) and print the response body.
#
# $1 HTTP method, $2 bearer token, $3 request path, $4 optional JSON body.
api_gateway_request() {
  local method="$1" token="$2" path="$3"
  local body="${4:-}"
  local url="http://127.0.0.1:18080${path}"
  local curl_args=(-fsS -X "${method}" -H "authorization: Bearer ${token}")
  # Only attach a content-type header and payload when a body was supplied.
  if [[ -n "${body}" ]]; then
    curl_args+=(-H "content-type: application/json" --data "${body}")
  fi
  curl "${curl_args[@]}" "${url}"
}
build_link() { build_link() {
printf '%s/build-%s' "$(vm_dir)" "$1" printf '%s/build-%s' "$(vm_dir)" "$1"
} }
@ -2149,10 +2202,15 @@ wait_for_unit() {
local deadline=$((SECONDS + timeout)) local deadline=$((SECONDS + timeout))
local stable_checks=0 local stable_checks=0
local required_stable_checks=3 local required_stable_checks=3
local ssh_port
ssh_port="$(ssh_port_for_node "${node}")"
log "Waiting for ${unit}.service on ${node}" log "Waiting for ${unit}.service on ${node}"
while (( stable_checks < required_stable_checks )); do while (( stable_checks < required_stable_checks )); do
if ssh_node "${node}" "state=\$(systemctl show --property=ActiveState --value ${unit}.service); sub=\$(systemctl show --property=SubState --value ${unit}.service); [[ \"\${state}\" == active && (\"\${sub}\" == running || \"\${sub}\" == exited) ]]" >/dev/null 2>&1; then if timeout "${UNIT_CHECK_TIMEOUT}" \
sshpass -p "${SSH_PASSWORD}" \
ssh "${SSH_OPTS[@]}" -p "${ssh_port}" root@127.0.0.1 \
"systemctl is-active --quiet ${unit}.service" >/dev/null 2>&1; then
stable_checks=$((stable_checks + 1)) stable_checks=$((stable_checks + 1))
else else
stable_checks=0 stable_checks=0
@ -2243,6 +2301,55 @@ EOF
done done
} }
# Print the URL of the in-guest demo webapp for a VM IP and optional path
# (defaults to "/"); the port comes from VM_DEMO_HTTP_PORT.
vm_demo_url() {
  local ip="$1"
  local path="${2:-/}"
  printf 'http://%s:%s%s\n' "${ip}" "${VM_DEMO_HTTP_PORT}" "${path}"
}
# Block until the guest demo webapp answers its /health endpoint.
#
# $1 node to probe from, $2 guest IP, $3 optional timeout
# (defaults to HTTP_WAIT_TIMEOUT).
wait_for_vm_demo_http() {
  local node="$1" ip="$2"
  local timeout="${3:-${HTTP_WAIT_TIMEOUT}}"
  wait_for_http "${node}" "$(vm_demo_url "${ip}" "/health")" "${timeout}"
}
# Execute one HTTP request against the guest demo webapp from inside a node
# (so the call originates on the tenant dataplane) and print the response.
#
# $1 node, $2 HTTP method, $3 guest IP, $4 request path.
vm_demo_request_json() {
  local node="$1"
  local method="$2"
  local ip="$3"
  local path="$4"
  # Method and URL are forwarded as positional args into the quoted heredoc
  # script, which runs remotely via ssh_node_script.
  ssh_node_script "${node}" "${method}" "$(vm_demo_url "${ip}" "${path}")" <<'EOF'
set -euo pipefail
method="$1"
url="$2"
curl -fsS -X "${method}" "${url}"
EOF
}
# Assert the demo webapp state payload matches the expected counters plus the
# fixed listen port and sqlite path; dies with the payload on any mismatch.
#
# $1 state JSON, $2 expected visits, $3 expected root boot count,
# $4 expected data boot count.
assert_vm_demo_state() {
  local state_json="$1"
  local expected_visits="$2" expected_root_boots="$3" expected_data_boots="$4"
  jq -e \
    --argjson visits "${expected_visits}" \
    --argjson root_boots "${expected_root_boots}" \
    --argjson data_boots "${expected_data_boots}" \
    --argjson listen_port "${VM_DEMO_HTTP_PORT}" \
    --arg db_path "/mnt/photon-vm-data/demo.sqlite3" '
.status == "ok"
and .visits == $visits
and .root_boot_count == $root_boots
and .data_boot_count == $data_boots
and .listen_port == $listen_port
and .db_path == $db_path
' >/dev/null <<<"${state_json}" || die "unexpected VM demo payload: ${state_json}"
}
wait_for_host_http() { wait_for_host_http() {
local url="$1" local url="$1"
local timeout="${2:-${HTTP_WAIT_TIMEOUT}}" local timeout="${2:-${HTTP_WAIT_TIMEOUT}}"
@ -2577,10 +2684,10 @@ wait_for_qemu_volume_present() {
while true; do while true; do
qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)" qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)"
if [[ "${qemu_processes}" == *"${volume_ref}"* ]]; then if qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}"; then
return 0 return 0
fi fi
if [[ -n "${alternate_ref}" && "${qemu_processes}" == *"${alternate_ref}"* ]]; then if qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then
return 0 return 0
fi fi
if (( SECONDS >= deadline )); then if (( SECONDS >= deadline )); then
@ -2601,7 +2708,8 @@ wait_for_qemu_volume_absent() {
while true; do while true; do
qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)" qemu_processes="$(ssh_node "${node}" "pgrep -fa '[q]emu-system' || true" 2>/dev/null || true)"
if [[ "${qemu_processes}" != *"${volume_ref}"* ]] && [[ -z "${alternate_ref}" || "${qemu_processes}" != *"${alternate_ref}"* ]]; then if ! qemu_processes_contain_ref "${qemu_processes}" "${volume_ref}" \
&& ! qemu_processes_contain_ref "${qemu_processes}" "${alternate_ref}"; then
return 0 return 0
fi fi
if (( SECONDS >= deadline )); then if (( SECONDS >= deadline )); then
@ -2612,6 +2720,39 @@ wait_for_qemu_volume_absent() {
done done
} }
# Report whether a qemu process listing references a volume, either verbatim
# or — for nbd:// URIs — via qemu's expanded "host"/"port" JSON form.
# An empty reference never matches.
#
# $1 pgrep output for qemu processes, $2 volume reference string.
qemu_processes_contain_ref() {
  local procs="$1"
  local ref="${2:-}"
  [[ -n "${ref}" ]] || return 1
  # Fast path: the reference appears literally on a command line.
  case "${procs}" in
    *"${ref}"*) return 0 ;;
  esac
  # Only nbd:// references have an alternate JSON representation.
  [[ "${ref}" == nbd://* ]] || return 1
  local authority="${ref#nbd://}"
  authority="${authority%%/*}"
  local host="" port="10809"
  case "${authority}" in
    \[*\])
      # Bracketed IPv6 literal; strip the brackets, keep the default port.
      host="${authority#\[}"
      host="${host%\]}"
      ;;
    *:*)
      # host:port — split on the last colon.
      host="${authority%:*}"
      port="${authority##*:}"
      ;;
    *)
      host="${authority}"
      ;;
  esac
  if [[ -n "${host}" && -n "${port}" ]]; then
    # qemu renders nbd drives as JSON with adjacent host/port fields.
    case "${procs}" in
      *"\"host\":\"${host}\",\"port\":\"${port}\""*) return 0 ;;
    esac
  fi
  return 1
}
try_get_vm_json() { try_get_vm_json() {
local token="$1" local token="$1"
local get_vm_json="$2" local get_vm_json="$2"
@ -2625,6 +2766,15 @@ try_get_vm_json() {
127.0.0.1:${vm_port} plasmavmc.v1.VmService/GetVm 127.0.0.1:${vm_port} plasmavmc.v1.VmService/GetVm
} }
# Print the backing volume ID for one disk of a VM spec JSON document.
# Emits the first matching .source.volumeId, or nothing if absent.
#
# $1 VM JSON, $2 disk id (e.g. "root" or "data").
vm_disk_volume_id_from_json() {
  local vm_json="$1"
  local disk_id="$2"
  jq -r --arg disk_id "${disk_id}" '
(.spec.disks // [])[]? | select(.id == $disk_id) | .source.volumeId // empty
' <<<"${vm_json}" | head -n1
}
try_get_volume_json() { try_get_volume_json() {
local token="$1" local token="$1"
local get_volume_json="$2" local get_volume_json="$2"
@ -3300,6 +3450,151 @@ validate_prismnet_flow() {
stop_ssh_tunnel node01 "${iam_tunnel}" stop_ssh_tunnel node01 "${iam_tunnel}"
} }
# End-to-end validation that the declaratively applied PrismNet tenant
# network (VPC, subnet, router, security groups, service IP pools) is
# visible through the REST API gateway, and that a service IP can be
# allocated from and released back to the declared cluster_ip pool.
validate_tenant_networking_flow() {
  log "Validating declarative tenant networking via API gateway and PrismNet"
  local iam_tunnel="" prism_tunnel="" gateway_tunnel=""
  # Local forwards: IAM (node01:50080 -> 15080), PrismNet gRPC
  # (node01:50081 -> 15081), REST API gateway (node06:8080 -> 18080).
  iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
  prism_tunnel="$(start_ssh_tunnel node01 15081 50081)"
  gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)"
  # Tear the tunnels down on every exit path from this function.
  trap 'stop_ssh_tunnel node06 "${gateway_tunnel}"; stop_ssh_tunnel node01 "${prism_tunnel}"; stop_ssh_tunnel node01 "${iam_tunnel}"' RETURN
  # Wait for the reconciler unit once, then start it again and wait once
  # more so the apply runs twice before state is inspected (idempotence).
  wait_for_unit node01 plasmacloud-tenant-networking-apply 120
  wait_for_http node06 http://127.0.0.1:8080/health
  ssh_node node01 "systemctl start plasmacloud-tenant-networking-apply.service"
  wait_for_unit node01 plasmacloud-tenant-networking-apply 120
  local org_id="${MATRIX_TENANT_ORG_ID}"
  local project_id="${MATRIX_TENANT_PROJECT_ID}"
  # Unique principal per run avoids collisions across repeated invocations.
  local principal_id="tenant-networking-smoke-$(date +%s)"
  local token
  token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")"
  local vpcs_json subnets_json routers_json security_groups_json pools_json
  local vpc_id subnet_id router_id default_sg_id web_sg_id cluster_pool_id lb_pool_id
  local allocate_response allocated_ip service_uid
  # --- VPC: present with the declared CIDR and active status. ---
  vpcs_json="$(api_gateway_request GET "${token}" "/api/v1/vpcs")"
  vpc_id="$(printf '%s' "${vpcs_json}" | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" '
    .data.vpcs[] | select(.name == $name) | .id
  ')"
  [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "declarative tenant VPC ${MATRIX_TENANT_VPC_NAME} was not exposed through the API gateway"
  printf '%s' "${vpcs_json}" | jq -e --arg name "${MATRIX_TENANT_VPC_NAME}" '
    .data.vpcs | any(.name == $name and .cidr_block == "10.62.0.0/16" and .status == "active")
  ' >/dev/null || die "unexpected VPC payload for declarative tenant network"
  # --- Subnet under the VPC, with declared CIDR and gateway. ---
  subnets_json="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}")"
  subnet_id="$(printf '%s' "${subnets_json}" | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" '
    .data.subnets[] | select(.name == $name) | .id
  ')"
  [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "declarative tenant subnet ${MATRIX_TENANT_SUBNET_NAME} was not exposed through the API gateway"
  printf '%s' "${subnets_json}" | jq -e --arg name "${MATRIX_TENANT_SUBNET_NAME}" '
    .data.subnets | any(
      .name == $name and
      .cidr_block == "10.62.10.0/24" and
      .gateway_ip == "10.62.10.1" and
      .status == "active"
    )
  ' >/dev/null || die "unexpected subnet payload for declarative tenant network"
  # --- Router attached to the VPC, with gateway CIDR and external IP. ---
  routers_json="$(api_gateway_request GET "${token}" "/api/v1/routers?vpc_id=${vpc_id}")"
  router_id="$(printf '%s' "${routers_json}" | jq -r --arg name "${MATRIX_TENANT_ROUTER_NAME}" '
    .data.routers[] | select(.name == $name) | .id
  ')"
  [[ -n "${router_id}" && "${router_id}" != "null" ]] || die "declarative tenant router ${MATRIX_TENANT_ROUTER_NAME} was not exposed through the API gateway"
  printf '%s' "${routers_json}" | jq -e --arg name "${MATRIX_TENANT_ROUTER_NAME}" '
    .data.routers | any(
      .name == $name and
      .gateway_cidr == "10.62.0.1/24" and
      .external_ip == "203.0.113.62" and
      .status == "active"
    )
  ' >/dev/null || die "unexpected router payload for declarative tenant network"
  # --- Security groups: default SG has an egress rule; web SG's ingress
  # rule must reference the default SG by resolved id (remote_group_id). ---
  security_groups_json="$(api_gateway_request GET "${token}" "/api/v1/security-groups")"
  default_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" '
    .data.security_groups[] | select(.name == $name) | .id
  ')"
  web_sg_id="$(printf '%s' "${security_groups_json}" | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" '
    .data.security_groups[] | select(.name == $name) | .id
  ')"
  [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "default security group ${MATRIX_TENANT_DEFAULT_SG_NAME} missing from declarative tenant networking"
  [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "security group ${MATRIX_TENANT_WEB_SG_NAME} missing from declarative tenant networking"
  printf '%s' "${security_groups_json}" | jq -e \
    --arg default_name "${MATRIX_TENANT_DEFAULT_SG_NAME}" \
    --arg web_name "${MATRIX_TENANT_WEB_SG_NAME}" \
    --arg default_id "${default_sg_id}" '
    (.data.security_groups | any(.name == $default_name and (.rules | any(.direction == "egress"))))
    and
    (.data.security_groups | any(
      .name == $web_name and
      (.rules | any(
        .direction == "ingress" and
        .protocol == "tcp" and
        .port_range_min == 80 and
        .port_range_max == 80 and
        .remote_group_id == $default_id
      ))
    ))
  ' >/dev/null || die "declarative security group rules did not match expected shape"
  # --- Service IP pools: one cluster_ip and one load_balancer pool with
  # the declared CIDR blocks. ---
  pools_json="$(api_gateway_request GET "${token}" "/api/v1/service-ip-pools")"
  cluster_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" '
    .data.pools[] | select(.name == $name) | .id
  ')"
  lb_pool_id="$(printf '%s' "${pools_json}" | jq -r --arg name "${MATRIX_TENANT_LB_POOL_NAME}" '
    .data.pools[] | select(.name == $name) | .id
  ')"
  [[ -n "${cluster_pool_id}" && "${cluster_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_CLUSTER_POOL_NAME} missing from declarative tenant networking"
  [[ -n "${lb_pool_id}" && "${lb_pool_id}" != "null" ]] || die "service IP pool ${MATRIX_TENANT_LB_POOL_NAME} missing from declarative tenant networking"
  printf '%s' "${pools_json}" | jq -e \
    --arg cluster_name "${MATRIX_TENANT_CLUSTER_POOL_NAME}" \
    --arg lb_name "${MATRIX_TENANT_LB_POOL_NAME}" '
    (.data.pools | any(.name == $cluster_name and .pool_type == "cluster_ip" and .cidr_block == "10.62.200.0/24"))
    and
    (.data.pools | any(.name == $lb_name and .pool_type == "load_balancer" and .cidr_block == "10.62.210.0/24"))
  ' >/dev/null || die "unexpected service IP pool payload for declarative tenant network"
  # --- Allocate a service IP over gRPC, confirm it appears in the REST pool
  # view, release it, then poll until the REST view drops it. ---
  service_uid="matrix-service-$(date +%s)"
  allocate_response="$(grpcurl -plaintext \
    -H "authorization: Bearer ${token}" \
    -import-path "${PRISMNET_PROTO_DIR}" \
    -proto "${PRISMNET_PROTO}" \
    -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg pool "${cluster_pool_id}" --arg service_uid "${service_uid}" '{orgId:$org, projectId:$project, poolId:$pool, serviceUid:$service_uid, requestedIp:""}')" \
    127.0.0.1:15081 prismnet.IpamService/AllocateServiceIP)"
  allocated_ip="$(printf '%s' "${allocate_response}" | jq -r '.ipAddress')"
  [[ -n "${allocated_ip}" && "${allocated_ip}" != "null" ]] || die "failed to allocate a service IP from ${MATRIX_TENANT_CLUSTER_POOL_NAME}"
  api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \
    | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) != null' >/dev/null \
    || die "allocated service IP ${allocated_ip} was not reflected in the REST pool view"
  grpcurl -plaintext \
    -H "authorization: Bearer ${token}" \
    -import-path "${PRISMNET_PROTO_DIR}" \
    -proto "${PRISMNET_PROTO}" \
    -d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg ip "${allocated_ip}" '{orgId:$org, projectId:$project, ipAddress:$ip}')" \
    127.0.0.1:15081 prismnet.IpamService/ReleaseServiceIP >/dev/null
  # The release is observed asynchronously through the REST view; poll with
  # the standard HTTP wait budget.
  local release_deadline=$((SECONDS + HTTP_WAIT_TIMEOUT))
  while true; do
    if api_gateway_request GET "${token}" "/api/v1/service-ip-pools/${cluster_pool_id}" \
      | jq -e --arg ip "${allocated_ip}" '.data.allocated_ips | index($ip) == null' >/dev/null; then
      break
    fi
    if (( SECONDS >= release_deadline )); then
      die "timed out waiting for released service IP ${allocated_ip} to disappear from ${MATRIX_TENANT_CLUSTER_POOL_NAME}"
    fi
    sleep 2
  done
  # Success path: clear the RETURN trap and close tunnels explicitly.
  trap - RETURN
  stop_ssh_tunnel node06 "${gateway_tunnel}"
  stop_ssh_tunnel node01 "${prism_tunnel}"
  stop_ssh_tunnel node01 "${iam_tunnel}"
}
validate_flashdns_flow() { validate_flashdns_flow() {
log "Validating FlashDNS zone, record, and authoritative query flow" log "Validating FlashDNS zone, record, and authoritative query flow"
@ -4096,9 +4391,10 @@ validate_lightningstor_distributed_storage() {
validate_vm_storage_flow() { validate_vm_storage_flow() {
log "Validating PlasmaVMC image import, shared-volume execution, and cross-node migration" log "Validating PlasmaVMC image import, shared-volume execution, and cross-node migration"
local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel="" local iam_tunnel="" prism_tunnel="" ls_tunnel="" vm_tunnel="" coronafs_tunnel="" gateway_tunnel=""
local node04_coronafs_tunnel="" node05_coronafs_tunnel="" local node04_coronafs_tunnel="" node05_coronafs_tunnel=""
local current_worker_coronafs_port="" peer_worker_coronafs_port="" local current_worker_coronafs_port="" peer_worker_coronafs_port=""
local demo_http_sg_id=""
local vm_port=15082 local vm_port=15082
iam_tunnel="$(start_ssh_tunnel node01 15080 50080)" iam_tunnel="$(start_ssh_tunnel node01 15080 50080)"
prism_tunnel="$(start_ssh_tunnel node01 15081 50081)" prism_tunnel="$(start_ssh_tunnel node01 15081 50081)"
@ -4107,10 +4403,11 @@ validate_vm_storage_flow() {
coronafs_tunnel="$(start_ssh_tunnel node01 15088 "${CORONAFS_API_PORT}")" coronafs_tunnel="$(start_ssh_tunnel node01 15088 "${CORONAFS_API_PORT}")"
node04_coronafs_tunnel="$(start_ssh_tunnel node04 25088 "${CORONAFS_API_PORT}")" node04_coronafs_tunnel="$(start_ssh_tunnel node04 25088 "${CORONAFS_API_PORT}")"
node05_coronafs_tunnel="$(start_ssh_tunnel node05 35088 "${CORONAFS_API_PORT}")" node05_coronafs_tunnel="$(start_ssh_tunnel node05 35088 "${CORONAFS_API_PORT}")"
gateway_tunnel="$(start_ssh_tunnel node06 18080 8080)"
local image_source_path="" local image_source_path=""
local vm_watch_output="" local vm_watch_output=""
local node01_proto_root="/var/lib/plasmavmc/test-protos" local node01_proto_root="/var/lib/plasmavmc/test-protos"
local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac="" local vpc_id="" subnet_id="" port_id="" port_ip="" port_mac="" default_sg_id="" web_sg_id=""
cleanup_vm_storage_flow() { cleanup_vm_storage_flow() {
if [[ -n "${token:-}" && -n "${port_id:-}" && -n "${subnet_id:-}" ]]; then if [[ -n "${token:-}" && -n "${port_id:-}" && -n "${subnet_id:-}" ]]; then
grpcurl -plaintext \ grpcurl -plaintext \
@ -4120,28 +4417,15 @@ validate_vm_storage_flow() {
-d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \ -d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \
127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null 2>&1 || true 127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null 2>&1 || true
fi fi
if [[ -n "${token:-}" && -n "${subnet_id:-}" && -n "${vpc_id:-}" ]]; then
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PRISMNET_PROTO_DIR}" \
-proto "${PRISMNET_PROTO}" \
-d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \
127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null 2>&1 || true
fi
if [[ -n "${token:-}" && -n "${vpc_id:-}" ]]; then
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PRISMNET_PROTO_DIR}" \
-proto "${PRISMNET_PROTO}" \
-d "$(jq -cn --arg org "${org_id:-}" --arg project "${project_id:-}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \
127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null 2>&1 || true
fi
if [[ -n "${image_source_path}" && "${image_source_path}" != /nix/store/* ]]; then if [[ -n "${image_source_path}" && "${image_source_path}" != /nix/store/* ]]; then
ssh_node node01 "rm -f ${image_source_path}" >/dev/null 2>&1 || true ssh_node node01 "rm -f ${image_source_path}" >/dev/null 2>&1 || true
fi fi
if [[ -n "${vm_watch_output}" ]]; then if [[ -n "${vm_watch_output}" ]]; then
ssh_node node01 "rm -f ${vm_watch_output} ${vm_watch_output}.pid ${vm_watch_output}.stderr" >/dev/null 2>&1 || true ssh_node node01 "rm -f ${vm_watch_output} ${vm_watch_output}.pid ${vm_watch_output}.stderr" >/dev/null 2>&1 || true
fi fi
if [[ -n "${token:-}" && -n "${demo_http_sg_id:-}" ]]; then
api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null 2>&1 || true
fi
stop_ssh_tunnel node05 "${node05_coronafs_tunnel}" stop_ssh_tunnel node05 "${node05_coronafs_tunnel}"
stop_ssh_tunnel node04 "${node04_coronafs_tunnel}" stop_ssh_tunnel node04 "${node04_coronafs_tunnel}"
stop_ssh_tunnel node01 "${coronafs_tunnel}" stop_ssh_tunnel node01 "${coronafs_tunnel}"
@ -4149,48 +4433,64 @@ validate_vm_storage_flow() {
stop_ssh_tunnel node01 "${ls_tunnel}" stop_ssh_tunnel node01 "${ls_tunnel}"
stop_ssh_tunnel node01 "${prism_tunnel}" stop_ssh_tunnel node01 "${prism_tunnel}"
stop_ssh_tunnel node01 "${iam_tunnel}" stop_ssh_tunnel node01 "${iam_tunnel}"
stop_ssh_tunnel node06 "${gateway_tunnel}"
} }
trap cleanup_vm_storage_flow RETURN trap cleanup_vm_storage_flow RETURN
wait_for_plasmavmc_workers_registered 15082 wait_for_plasmavmc_workers_registered 15082
local org_id="vm-smoke-org" local org_id="${MATRIX_TENANT_ORG_ID}"
local project_id="vm-smoke-project" local project_id="${MATRIX_TENANT_PROJECT_ID}"
local principal_id="plasmavmc-smoke-$(date +%s)" local principal_id="plasmavmc-smoke-$(date +%s)"
local token local token
local demo_state_json=""
local demo_visit_json=""
token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")" token="$(issue_project_admin_token 15080 "${org_id}" "${project_id}" "${principal_id}")"
log "Matrix case: PlasmaVMC + PrismNet" log "Matrix case: PlasmaVMC + declarative PrismNet tenant networking"
vpc_id="$(create_prismnet_vpc_with_retry \ vpc_id="$(api_gateway_request GET "${token}" "/api/v1/vpcs" \
"${token}" \ | jq -r --arg name "${MATRIX_TENANT_VPC_NAME}" '.data.vpcs[] | select(.name == $name) | .id')"
"${org_id}" \ [[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to locate declarative PrismNet VPC ${MATRIX_TENANT_VPC_NAME} for PlasmaVMC matrix"
"${project_id}" \ subnet_id="$(api_gateway_request GET "${token}" "/api/v1/subnets?vpc_id=${vpc_id}" \
"vm-network-vpc" \ | jq -r --arg name "${MATRIX_TENANT_SUBNET_NAME}" '.data.subnets[] | select(.name == $name) | .id')"
"vm storage matrix networking" \ [[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to locate declarative PrismNet subnet ${MATRIX_TENANT_SUBNET_NAME} for PlasmaVMC matrix"
"10.62.0.0/16" | jq -r '.vpc.id')" default_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \
[[ -n "${vpc_id}" && "${vpc_id}" != "null" ]] || die "failed to create PrismNet VPC for PlasmaVMC matrix" | jq -r --arg name "${MATRIX_TENANT_DEFAULT_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')"
web_sg_id="$(api_gateway_request GET "${token}" "/api/v1/security-groups" \
subnet_id="$(grpcurl -plaintext \ | jq -r --arg name "${MATRIX_TENANT_WEB_SG_NAME}" '.data.security_groups[] | select(.name == $name) | .id')"
-H "authorization: Bearer ${token}" \ [[ -n "${default_sg_id}" && "${default_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_DEFAULT_SG_NAME} for PlasmaVMC matrix"
-import-path "${PRISMNET_PROTO_DIR}" \ [[ -n "${web_sg_id}" && "${web_sg_id}" != "null" ]] || die "failed to locate security group ${MATRIX_TENANT_WEB_SG_NAME} for PlasmaVMC matrix"
-proto "${PRISMNET_PROTO}" \ demo_http_sg_id="$(
-d "$(jq -cn --arg vpc "${vpc_id}" '{vpcId:$vpc, name:"vm-network-subnet", description:"vm storage matrix subnet", cidrBlock:"10.62.10.0/24", gatewayIp:"10.62.10.1", dhcpEnabled:true}')" \ api_gateway_request POST "${token}" "/api/v1/security-groups" "$(
127.0.0.1:15081 prismnet.SubnetService/CreateSubnet | jq -r '.subnet.id')" jq -cn \
[[ -n "${subnet_id}" && "${subnet_id}" != "null" ]] || die "failed to create PrismNet subnet for PlasmaVMC matrix" --arg name "vm-demo-web-$(date +%s)" \
--arg org "${org_id}" \
local prismnet_port_response --arg project "${project_id}" \
prismnet_port_response="$(grpcurl -plaintext \ '{
-H "authorization: Bearer ${token}" \ name:$name,
-import-path "${PRISMNET_PROTO_DIR}" \ org_id:$org,
-proto "${PRISMNET_PROTO}" \ project_id:$project,
-d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, name:"vm-network-port", description:"vm storage matrix port", ipAddress:""}')" \ description:"temporary ingress for the VM web demo"
127.0.0.1:15081 prismnet.PortService/CreatePort)" }'
port_id="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.id')" )" | jq -r '.data.id'
port_ip="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.ipAddress')" )"
port_mac="$(printf '%s' "${prismnet_port_response}" | jq -r '.port.macAddress')" [[ -n "${demo_http_sg_id}" && "${demo_http_sg_id}" != "null" ]] || die "failed to create a temporary security group for the VM web demo"
[[ -n "${port_id}" && "${port_id}" != "null" ]] || die "failed to create PrismNet port for PlasmaVMC matrix" api_gateway_request POST "${token}" "/api/v1/security-groups/${demo_http_sg_id}/rules" "$(
[[ -n "${port_ip}" && "${port_ip}" != "null" ]] || die "PrismNet port ${port_id} did not return an IP address" jq -cn \
[[ -n "${port_mac}" && "${port_mac}" != "null" ]] || die "PrismNet port ${port_id} did not return a MAC address" --arg org "${org_id}" \
--arg project "${project_id}" \
--argjson port "${VM_DEMO_HTTP_PORT}" \
'{
org_id:$org,
project_id:$project,
direction:"ingress",
protocol:"tcp",
port_range_min:$port,
port_range_max:$port,
remote_cidr:"0.0.0.0/0",
description:"allow worker-originated HTTP checks for the VM web demo"
}'
)" >/dev/null
ensure_lightningstor_bucket 15086 "${token}" "plasmavmc-images" "${org_id}" "${project_id}" ensure_lightningstor_bucket 15086 "${token}" "plasmavmc-images" "${org_id}" "${project_id}"
wait_for_lightningstor_write_quorum 15086 "${token}" "plasmavmc-images" "PlasmaVMC image import" wait_for_lightningstor_write_quorum 15086 "${token}" "plasmavmc-images" "PlasmaVMC image import"
@ -4311,6 +4611,10 @@ EOS
--arg org "${org_id}" \ --arg org "${org_id}" \
--arg project "${project_id}" \ --arg project "${project_id}" \
--arg image_id "${image_id}" \ --arg image_id "${image_id}" \
--arg subnet_id "${subnet_id}" \
--arg default_sg_id "${default_sg_id}" \
--arg web_sg_id "${web_sg_id}" \
--arg demo_http_sg_id "${demo_http_sg_id}" \
'{ '{
name:$name, name:$name,
org_id:$org, org_id:$org,
@ -4330,74 +4634,28 @@ EOS
source:{type:"blank"}, source:{type:"blank"},
size_gib:2 size_gib:2
} }
]
}'
)"
local create_vm_grpc_json
create_vm_grpc_json="$(
jq -cn \
--arg name "$(printf '%s' "${create_vm_rest_json}" | jq -r '.name')" \
--arg org "${org_id}" \
--arg project "${project_id}" \
--arg image_id "${image_id}" \
--arg subnet_id "${subnet_id}" \
--arg port_id "${port_id}" \
'{
name:$name,
orgId:$org,
projectId:$project,
hypervisor:"HYPERVISOR_TYPE_KVM",
spec:{
cpu:{vcpus:1, coresPerSocket:1, sockets:1},
memory:{sizeMib:1024},
disks:[
{
id:"root",
source:{imageId:$image_id},
sizeGib:4,
bus:"DISK_BUS_VIRTIO",
cache:"DISK_CACHE_WRITEBACK",
bootIndex:1
},
{
id:"data",
source:{blank:true},
sizeGib:2,
bus:"DISK_BUS_VIRTIO",
cache:"DISK_CACHE_WRITEBACK"
}
], ],
network:[ network:[
{ {
id:"tenant0", id:"tenant0",
subnetId:$subnet_id, subnet_id:$subnet_id,
portId:$port_id, model:"virtio-net",
model:"NIC_MODEL_VIRTIO_NET" security_groups:[$default_sg_id, $web_sg_id, $demo_http_sg_id]
} }
] ]
}
}' }'
)" )"
local create_response vm_id local create_response vm_id
create_response="$( create_response="$(api_gateway_request POST "${token}" "/api/v1/vms" "${create_vm_rest_json}")"
ssh_node_script node01 "${node01_proto_root}" "${token}" "$(printf '%s' "${create_vm_grpc_json}" | base64 | tr -d '\n')" <<'EOS' vm_id="$(printf '%s' "${create_response}" | jq -r '.data.id')"
set -euo pipefail
proto_root="$1"
token="$2"
request_b64="$3"
request_json="$(printf '%s' "${request_b64}" | base64 -d)"
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${proto_root}/plasmavmc" \
-proto "${proto_root}/plasmavmc/plasmavmc.proto" \
-d "${request_json}" \
127.0.0.1:50082 plasmavmc.v1.VmService/CreateVm
EOS
)"
vm_id="$(printf '%s' "${create_response}" | jq -r '.id')"
[[ -n "${vm_id}" && "${vm_id}" != "null" ]] || die "failed to create VM through PlasmaVMC" [[ -n "${vm_id}" && "${vm_id}" != "null" ]] || die "failed to create VM through PlasmaVMC"
port_id="$(printf '%s' "${create_response}" | jq -r '.data.network[0].port_id // empty')"
port_ip="$(printf '%s' "${create_response}" | jq -r '.data.network[0].ip_address // empty')"
port_mac="$(printf '%s' "${create_response}" | jq -r '.data.network[0].mac_address // empty')"
[[ -n "${port_id}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet port_id"
[[ -n "${port_ip}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet IP address"
[[ -n "${port_mac}" ]] || die "REST CreateVm response did not include an auto-managed PrismNet MAC address"
vm_watch_output="/tmp/plasmavmc-watch-${vm_id}.json" vm_watch_output="/tmp/plasmavmc-watch-${vm_id}.json"
start_plasmavmc_vm_watch node01 "${node01_proto_root}" "${token}" "${org_id}" "${project_id}" "${vm_id}" "${vm_watch_output}" start_plasmavmc_vm_watch node01 "${node01_proto_root}" "${token}" "${org_id}" "${project_id}" "${vm_id}" "${vm_watch_output}"
sleep 2 sleep 2
@ -4435,7 +4693,12 @@ EOS
current_worker_coronafs_port=35088 current_worker_coronafs_port=35088
peer_worker_coronafs_port=25088 peer_worker_coronafs_port=25088
fi fi
wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}" >/dev/null local vm_spec_json volume_id data_volume_id
vm_spec_json="$(wait_for_vm_network_spec "${token}" "${get_vm_json}" "${port_id}" "${subnet_id}" "${port_mac}" "${port_ip}" "${vm_port}")"
volume_id="$(vm_disk_volume_id_from_json "${vm_spec_json}" "root")"
data_volume_id="$(vm_disk_volume_id_from_json "${vm_spec_json}" "data")"
[[ -n "${volume_id}" ]] || die "failed to resolve root volume ID from VM spec"
[[ -n "${data_volume_id}" ]] || die "failed to resolve data volume ID from VM spec"
wait_for_prismnet_port_binding "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" "${vm_id}" >/dev/null wait_for_prismnet_port_binding "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" "${vm_id}" >/dev/null
grpcurl -plaintext \ grpcurl -plaintext \
@ -4465,8 +4728,6 @@ EOS
done done
log "Matrix case: PlasmaVMC + PrismNet + CoronaFS + LightningStor" log "Matrix case: PlasmaVMC + PrismNet + CoronaFS + LightningStor"
local volume_id="${vm_id}-root"
local data_volume_id="${vm_id}-data"
local volume_path="${CORONAFS_VOLUME_ROOT}/${volume_id}.raw" local volume_path="${CORONAFS_VOLUME_ROOT}/${volume_id}.raw"
local data_volume_path="${CORONAFS_VOLUME_ROOT}/${data_volume_id}.raw" local data_volume_path="${CORONAFS_VOLUME_ROOT}/${data_volume_id}.raw"
local volume_export_json data_volume_export_json volume_uri data_volume_uri local volume_export_json data_volume_export_json volume_uri data_volume_uri
@ -4498,6 +4759,12 @@ EOS
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM startup" wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM startup"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=1" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=1"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=1" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=1"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=1"
wait_for_vm_demo_http "${node_id}" "${port_ip}"
demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
assert_vm_demo_state "${demo_state_json}" 0 1 1
demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
assert_vm_demo_state "${demo_visit_json}" 1 1 1
local get_root_volume_json get_data_volume_json local get_root_volume_json get_data_volume_json
local root_volume_state_json data_volume_state_json local root_volume_state_json data_volume_state_json
local root_attachment_generation data_attachment_generation local root_attachment_generation data_attachment_generation
@ -4604,6 +4871,12 @@ EOS
fi fi
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=2" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=2"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=2" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=2"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=2"
wait_for_vm_demo_http "${node_id}" "${port_ip}"
demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
assert_vm_demo_state "${demo_state_json}" 1 2 2
demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
assert_vm_demo_state "${demo_visit_json}" 2 2 2
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM restart" wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM restart"
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
@ -4686,7 +4959,12 @@ EOS
wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}" wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}"
wait_for_qemu_volume_absent "${source_node}" "${volume_path}" "${source_volume_qemu_ref}" wait_for_qemu_volume_absent "${source_node}" "${volume_path}" "${source_volume_qemu_ref}"
wait_for_qemu_volume_absent "${source_node}" "${data_volume_path}" "${source_data_volume_qemu_ref}" wait_for_qemu_volume_absent "${source_node}" "${data_volume_path}" "${source_data_volume_qemu_ref}"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=2" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_HEARTBEAT count=3"
wait_for_vm_demo_http "${node_id}" "${port_ip}"
demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
assert_vm_demo_state "${demo_state_json}" 2 3 3
demo_visit_json="$(vm_demo_request_json "${node_id}" POST "${port_ip}" "/visit")"
assert_vm_demo_state "${demo_visit_json}" 3 3 3
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
[[ "$(printf '%s' "${root_volume_state_json}" | jq -r '.attachedToNode // empty')" == "${node_id}" ]] || die "root volume ${volume_id} is not owned by migrated node ${node_id}" [[ "$(printf '%s' "${root_volume_state_json}" | jq -r '.attachedToNode // empty')" == "${node_id}" ]] || die "root volume ${volume_id} is not owned by migrated node ${node_id}"
@ -4768,8 +5046,12 @@ EOS
[[ -n "${current_data_volume_qemu_ref}" ]] || die "worker ${node_id} did not republish an attachable local ref for ${data_volume_id} after post-migration restart" [[ -n "${current_data_volume_qemu_ref}" ]] || die "worker ${node_id} did not republish an attachable local ref for ${data_volume_id} after post-migration restart"
wait_for_qemu_volume_present "${node_id}" "${volume_path}" "${current_volume_qemu_ref}" wait_for_qemu_volume_present "${node_id}" "${volume_path}" "${current_volume_qemu_ref}"
wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}" wait_for_qemu_volume_present "${node_id}" "${data_volume_path}" "${current_data_volume_qemu_ref}"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=3" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_READY count=4"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=3" wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_SMOKE_DATA_READY count=4"
wait_for_vm_console_pattern "${node_id}" "${vm_id}" "PHOTON_VM_DEMO_WEB_READY count=4"
wait_for_vm_demo_http "${node_id}" "${port_ip}"
demo_state_json="$(vm_demo_request_json "${node_id}" GET "${port_ip}" "/state")"
assert_vm_demo_state "${demo_state_json}" 3 4 4
wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM post-migration restart" wait_for_lightningstor_counts_equal "${image_after_node01}" "${image_after_node04}" "${image_after_node05}" "shared-fs VM post-migration restart"
root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")" root_volume_state_json="$(try_get_volume_json "${token}" "${get_root_volume_json}")"
data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")" data_volume_state_json="$(try_get_volume_json "${token}" "${get_data_volume_json}")"
@ -4830,7 +5112,10 @@ EOS
done done
wait_for_plasmavmc_vm_watch_completion node01 "${vm_watch_output}" 60 wait_for_plasmavmc_vm_watch_completion node01 "${vm_watch_output}" 60
assert_plasmavmc_vm_watch_events node01 "${vm_watch_output}" "${vm_id}" assert_plasmavmc_vm_watch_events node01 "${vm_watch_output}" "${vm_id}"
wait_for_prismnet_port_detachment "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null wait_for_prismnet_port_absent "${token}" "${org_id}" "${project_id}" "${subnet_id}" "${port_id}" >/dev/null
port_id=""
api_gateway_request DELETE "${token}" "/api/v1/security-groups/${demo_http_sg_id}" >/dev/null
demo_http_sg_id=""
ssh_node "${node_id}" "bash -lc '[[ ! -d $(printf '%q' "$(vm_runtime_dir_path "${vm_id}")") ]]'" ssh_node "${node_id}" "bash -lc '[[ ! -d $(printf '%q' "$(vm_runtime_dir_path "${vm_id}")") ]]'"
ssh_node node01 "bash -lc '[[ ! -f ${volume_path} ]]'" ssh_node node01 "bash -lc '[[ ! -f ${volume_path} ]]'"
@ -4879,28 +5164,6 @@ EOS
die "shared-fs VM data volume unexpectedly persisted to LightningStor object storage" die "shared-fs VM data volume unexpectedly persisted to LightningStor object storage"
fi fi
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PRISMNET_PROTO_DIR}" \
-proto "${PRISMNET_PROTO}" \
-d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg subnet "${subnet_id}" --arg id "${port_id}" '{orgId:$org, projectId:$project, subnetId:$subnet, id:$id}')" \
127.0.0.1:15081 prismnet.PortService/DeletePort >/dev/null
port_id=""
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PRISMNET_PROTO_DIR}" \
-proto "${PRISMNET_PROTO}" \
-d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg vpc "${vpc_id}" --arg id "${subnet_id}" '{orgId:$org, projectId:$project, vpcId:$vpc, id:$id}')" \
127.0.0.1:15081 prismnet.SubnetService/DeleteSubnet >/dev/null
subnet_id=""
grpcurl -plaintext \
-H "authorization: Bearer ${token}" \
-import-path "${PRISMNET_PROTO_DIR}" \
-proto "${PRISMNET_PROTO}" \
-d "$(jq -cn --arg org "${org_id}" --arg project "${project_id}" --arg id "${vpc_id}" '{orgId:$org, projectId:$project, id:$id}')" \
127.0.0.1:15081 prismnet.VpcService/DeleteVpc >/dev/null
vpc_id=""
grpcurl -plaintext \ grpcurl -plaintext \
-H "authorization: Bearer ${token}" \ -H "authorization: Bearer ${token}" \
-import-path "${PLASMAVMC_PROTO_DIR}" \ -import-path "${PLASMAVMC_PROTO_DIR}" \
@ -6890,8 +7153,11 @@ benchmark_plasmavmc_guest_runtime() {
fi fi
local start_ns attach_ns ready_ns attach_sec ready_sec local start_ns attach_ns ready_ns attach_sec ready_sec
local root_volume_id="${vm_id}-root" local root_volume_id data_volume_id
local data_volume_id="${vm_id}-data" root_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "root")"
data_volume_id="$(vm_disk_volume_id_from_json "${vm_json}" "data")"
[[ -n "${root_volume_id}" ]] || die "runtime benchmark VM did not expose a root volume ID"
[[ -n "${data_volume_id}" ]] || die "runtime benchmark VM did not expose a data volume ID"
local root_uri data_uri local root_uri data_uri
start_ns="$(date +%s%N)" start_ns="$(date +%s%N)"
@ -7434,6 +7700,7 @@ validate_cluster() {
validate_control_plane validate_control_plane
validate_iam_flow validate_iam_flow
validate_prismnet_flow validate_prismnet_flow
validate_tenant_networking_flow
validate_flashdns_flow validate_flashdns_flow
validate_fiberlb_flow validate_fiberlb_flow
validate_workers validate_workers
@ -7484,6 +7751,16 @@ fresh_storage_smoke_requested() {
storage_smoke_requested storage_smoke_requested
} }
demo_vm_webapp_requested() {
start_requested "$@"
validate_vm_storage_flow
}
fresh_demo_vm_webapp_requested() {
clean_requested "$@"
demo_vm_webapp_requested "$@"
}
matrix_requested() { matrix_requested() {
start_requested "$@" start_requested "$@"
validate_component_matrix validate_component_matrix
@ -7771,6 +8048,8 @@ Commands:
fresh-smoke clean local runtime state, rebuild on the host, start, and validate fresh-smoke clean local runtime state, rebuild on the host, start, and validate
storage-smoke start the storage lab (node01-05) and validate CoronaFS/LightningStor/PlasmaVMC storage-smoke start the storage lab (node01-05) and validate CoronaFS/LightningStor/PlasmaVMC
fresh-storage-smoke clean local runtime state, rebuild node01-05 on the host, start, and validate the storage lab fresh-storage-smoke clean local runtime state, rebuild node01-05 on the host, start, and validate the storage lab
demo-vm-webapp start the cluster and run the VM web app demo with persistent volume state
fresh-demo-vm-webapp clean local runtime state, rebuild on the host, start, and run the VM web app demo
matrix Start the cluster and validate composed service configurations against the current running VMs matrix Start the cluster and validate composed service configurations against the current running VMs
fresh-matrix clean local runtime state, rebuild on the host, start, and validate composed service configurations fresh-matrix clean local runtime state, rebuild on the host, start, and validate composed service configurations
bench-storage start the cluster and benchmark CoronaFS plus LightningStor against the current running VMs bench-storage start the cluster and benchmark CoronaFS plus LightningStor against the current running VMs
@ -7797,6 +8076,8 @@ Examples:
$0 fresh-smoke $0 fresh-smoke
$0 storage-smoke $0 storage-smoke
$0 fresh-storage-smoke $0 fresh-storage-smoke
$0 demo-vm-webapp
$0 fresh-demo-vm-webapp
$0 matrix $0 matrix
$0 fresh-matrix $0 fresh-matrix
$0 bench-storage $0 bench-storage
@ -7830,6 +8111,8 @@ main() {
fresh-smoke) fresh_smoke_requested "$@" ;; fresh-smoke) fresh_smoke_requested "$@" ;;
storage-smoke) storage_smoke_requested ;; storage-smoke) storage_smoke_requested ;;
fresh-storage-smoke) fresh_storage_smoke_requested ;; fresh-storage-smoke) fresh_storage_smoke_requested ;;
demo-vm-webapp) demo_vm_webapp_requested "$@" ;;
fresh-demo-vm-webapp) fresh_demo_vm_webapp_requested "$@" ;;
matrix) matrix_requested "$@" ;; matrix) matrix_requested "$@" ;;
fresh-matrix) fresh_matrix_requested "$@" ;; fresh-matrix) fresh_matrix_requested "$@" ;;
bench-storage) bench_storage_requested "$@" ;; bench-storage) bench_storage_requested "$@" ;;

View file

@ -1,6 +1,132 @@
{ modulesPath, lib, pkgs, ... }: { modulesPath, lib, pkgs, ... }:
{ let
photonVmDemoApi = pkgs.writeText "photon-vm-demo-api.py" ''
import json
import os
import socket
import sqlite3
from http import HTTPStatus
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
DATA_MOUNT = "/mnt/photon-vm-data"
DB_PATH = os.path.join(DATA_MOUNT, "demo.sqlite3")
ROOT_BOOT_COUNT_PATH = "/var/lib/photon-vm-smoke/boot-count"
DATA_BOOT_COUNT_PATH = os.path.join(DATA_MOUNT, "boot-count")
CONSOLE_PATH = "/dev/ttyS0"
LISTEN_HOST = "0.0.0.0"
LISTEN_PORT = 8080
def log_console(message: str) -> None:
try:
with open(CONSOLE_PATH, "a", encoding="utf-8") as console:
console.write(message + "\n")
except OSError:
pass
def read_int(path: str) -> int:
try:
with open(path, "r", encoding="utf-8") as handle:
return int(handle.read().strip() or "0")
except (FileNotFoundError, ValueError, OSError):
return 0
def init_db() -> None:
os.makedirs(DATA_MOUNT, exist_ok=True)
conn = sqlite3.connect(DB_PATH)
try:
conn.execute(
"CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)"
)
conn.execute(
"INSERT INTO counters (name, value) VALUES ('visits', 0) "
"ON CONFLICT(name) DO NOTHING"
)
conn.commit()
finally:
conn.close()
def current_state(increment: bool = False) -> dict:
conn = sqlite3.connect(DB_PATH, timeout=30)
try:
conn.execute(
"CREATE TABLE IF NOT EXISTS counters (name TEXT PRIMARY KEY, value INTEGER NOT NULL)"
)
conn.execute(
"INSERT INTO counters (name, value) VALUES ('visits', 0) "
"ON CONFLICT(name) DO NOTHING"
)
if increment:
conn.execute(
"UPDATE counters SET value = value + 1 WHERE name = 'visits'"
)
visits = conn.execute(
"SELECT value FROM counters WHERE name = 'visits'"
).fetchone()[0]
conn.commit()
finally:
conn.close()
return {
"status": "ok",
"hostname": socket.gethostname(),
"listen_port": LISTEN_PORT,
"db_path": DB_PATH,
"visits": visits,
"root_boot_count": read_int(ROOT_BOOT_COUNT_PATH),
"data_boot_count": read_int(DATA_BOOT_COUNT_PATH),
}
class Handler(BaseHTTPRequestHandler):
server_version = "PhotonVMDemo/1.0"
def log_message(self, format: str, *args) -> None:
return
def _send_json(self, payload: dict, status: int = HTTPStatus.OK) -> None:
body = json.dumps(payload, sort_keys=True).encode("utf-8")
self.send_response(status)
self.send_header("Content-Type", "application/json")
self.send_header("Content-Length", str(len(body)))
self.end_headers()
self.wfile.write(body)
def do_GET(self) -> None:
if self.path == "/health":
self._send_json({"status": "ok"})
return
if self.path == "/state":
self._send_json(current_state())
return
self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
def do_POST(self) -> None:
if self.path == "/visit":
payload = current_state(increment=True)
log_console("PHOTON_VM_DEMO_VISIT visits=%s" % payload["visits"])
self._send_json(payload)
return
self._send_json({"error": "not_found"}, HTTPStatus.NOT_FOUND)
def main() -> None:
init_db()
server = ThreadingHTTPServer((LISTEN_HOST, LISTEN_PORT), Handler)
log_console(
"PHOTON_VM_DEMO_WEB_READY count=%s port=%s db=%s"
% (read_int(ROOT_BOOT_COUNT_PATH), LISTEN_PORT, DB_PATH)
)
server.serve_forever()
if __name__ == "__main__":
main()
'';
in {
imports = [ imports = [
(modulesPath + "/virtualisation/disk-image.nix") (modulesPath + "/virtualisation/disk-image.nix")
(modulesPath + "/profiles/qemu-guest.nix") (modulesPath + "/profiles/qemu-guest.nix")
@ -18,6 +144,7 @@
networking.hostName = "photon-vm-smoke"; networking.hostName = "photon-vm-smoke";
networking.useDHCP = lib.mkDefault true; networking.useDHCP = lib.mkDefault true;
networking.firewall.enable = false;
services.getty.autologinUser = "root"; services.getty.autologinUser = "root";
users.mutableUsers = false; users.mutableUsers = false;
@ -144,5 +271,35 @@
''; '';
}; };
systemd.services.photon-vm-demo-api = {
description = "PhotonCloud VM demo web app";
wantedBy = [ "multi-user.target" ];
wants = [ "network-online.target" "photon-vm-smoke.service" ];
after = [ "network-online.target" "photon-vm-smoke.service" ];
path = with pkgs; [
bash
coreutils
python3
util-linux
];
serviceConfig = {
Type = "simple";
Restart = "always";
RestartSec = "1";
};
script = ''
deadline=$((SECONDS + 60))
while ! mountpoint -q /mnt/photon-vm-data; do
if [ "$SECONDS" -ge "$deadline" ]; then
echo "PHOTON_VM_DEMO_WEB_ERROR step=mount-timeout" >/dev/ttyS0
exit 1
fi
sleep 1
done
exec python3 ${photonVmDemoApi}
'';
};
system.stateVersion = "24.05"; system.stateVersion = "24.05";
} }

1476
plasmavmc/Cargo.lock generated

File diff suppressed because it is too large Load diff

View file

@ -10,7 +10,6 @@ pub const ENV_INITRD_PATH: &str = "PLASMAVMC_INITRD_PATH";
pub const ENV_RUNTIME_DIR: &str = "PLASMAVMC_RUNTIME_DIR"; pub const ENV_RUNTIME_DIR: &str = "PLASMAVMC_RUNTIME_DIR";
pub const ENV_QMP_TIMEOUT_SECS: &str = "PLASMAVMC_QMP_TIMEOUT_SECS"; pub const ENV_QMP_TIMEOUT_SECS: &str = "PLASMAVMC_QMP_TIMEOUT_SECS";
pub const ENV_NBD_MAX_QUEUES: &str = "PLASMAVMC_NBD_MAX_QUEUES"; pub const ENV_NBD_MAX_QUEUES: &str = "PLASMAVMC_NBD_MAX_QUEUES";
pub const ENV_NBD_AIO_MODE: &str = "PLASMAVMC_NBD_AIO_MODE";
/// Resolve QEMU binary path, falling back to a provided default. /// Resolve QEMU binary path, falling back to a provided default.
pub fn resolve_qemu_path(default: impl AsRef<Path>) -> PathBuf { pub fn resolve_qemu_path(default: impl AsRef<Path>) -> PathBuf {
@ -55,15 +54,6 @@ pub fn resolve_nbd_max_queues() -> u16 {
.unwrap_or(16) .unwrap_or(16)
} }
pub fn resolve_nbd_aio_mode() -> &'static str {
match std::env::var(ENV_NBD_AIO_MODE).ok().as_deref() {
Some("threads") => "threads",
Some("native") => "native",
Some("io_uring") => "io_uring",
_ => "io_uring",
}
}
#[cfg(test)] #[cfg(test)]
pub(crate) fn env_test_lock() -> &'static Mutex<()> { pub(crate) fn env_test_lock() -> &'static Mutex<()> {
static LOCK: OnceLock<Mutex<()>> = OnceLock::new(); static LOCK: OnceLock<Mutex<()>> = OnceLock::new();
@ -161,29 +151,4 @@ mod tests {
assert_eq!(resolve_nbd_max_queues(), 12); assert_eq!(resolve_nbd_max_queues(), 12);
std::env::remove_var(ENV_NBD_MAX_QUEUES); std::env::remove_var(ENV_NBD_MAX_QUEUES);
} }
#[test]
fn resolve_nbd_aio_mode_defaults_to_io_uring() {
let _guard = env_test_lock().lock().unwrap();
std::env::remove_var(ENV_NBD_AIO_MODE);
assert_eq!(resolve_nbd_aio_mode(), "io_uring");
}
#[test]
fn resolve_nbd_aio_mode_accepts_supported_modes() {
let _guard = env_test_lock().lock().unwrap();
for mode in ["threads", "native", "io_uring"] {
std::env::set_var(ENV_NBD_AIO_MODE, mode);
assert_eq!(resolve_nbd_aio_mode(), mode);
}
std::env::remove_var(ENV_NBD_AIO_MODE);
}
#[test]
fn resolve_nbd_aio_mode_falls_back_for_invalid_values() {
let _guard = env_test_lock().lock().unwrap();
std::env::set_var(ENV_NBD_AIO_MODE, "bogus");
assert_eq!(resolve_nbd_aio_mode(), "io_uring");
std::env::remove_var(ENV_NBD_AIO_MODE);
}
} }

View file

@ -4,12 +4,17 @@
//! It uses QEMU with KVM acceleration to run virtual machines. //! It uses QEMU with KVM acceleration to run virtual machines.
mod env; mod env;
mod network;
mod qmp; mod qmp;
use async_trait::async_trait; use async_trait::async_trait;
use env::{ use env::{
resolve_kernel_initrd, resolve_nbd_aio_mode, resolve_nbd_max_queues, resolve_qcow2_path, resolve_kernel_initrd, resolve_nbd_max_queues, resolve_qcow2_path, resolve_qemu_path,
resolve_qemu_path, resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH, resolve_qmp_timeout_secs, resolve_runtime_dir, ENV_QCOW2_PATH,
};
use network::{
cleanup_vm_networks, decode_network_states, encode_network_states, ensure_vm_networks,
tap_name_for_nic, NETWORK_STATE_KEY,
}; };
use nix::sys::signal::{kill as nix_kill, Signal}; use nix::sys::signal::{kill as nix_kill, Signal};
use nix::unistd::Pid; use nix::unistd::Pid;
@ -76,7 +81,8 @@ fn disk_aio_mode(disk: &AttachedDisk) -> Option<&'static str> {
match (&disk.attachment, disk.cache) { match (&disk.attachment, disk.cache) {
(DiskAttachment::File { .. }, DiskCache::None) => Some("native"), (DiskAttachment::File { .. }, DiskCache::None) => Some("native"),
(DiskAttachment::File { .. }, _) => Some("threads"), (DiskAttachment::File { .. }, _) => Some("threads"),
(DiskAttachment::Nbd { .. }, _) => Some(resolve_nbd_aio_mode()), // QEMU's NBD blockdev backend does not accept an `aio` parameter.
(DiskAttachment::Nbd { .. }, _) => None,
(DiskAttachment::CephRbd { .. }, _) => None, (DiskAttachment::CephRbd { .. }, _) => None,
} }
} }
@ -118,6 +124,23 @@ fn bootindex_suffix(boot_index: Option<u32>) -> String {
.unwrap_or_default() .unwrap_or_default()
} }
fn nic_device_driver(model: NicModel) -> &'static str {
match model {
NicModel::VirtioNet => "virtio-net-pci",
NicModel::E1000 => "e1000",
}
}
fn nic_device_component(nic: &NetworkSpec, fallback_index: usize) -> String {
sanitize_device_component(
nic.port_id
.as_deref()
.filter(|value| !value.is_empty())
.unwrap_or(&nic.id),
fallback_index,
)
}
fn qmp_timeout() -> Duration { fn qmp_timeout() -> Duration {
Duration::from_secs(resolve_qmp_timeout_secs()) Duration::from_secs(resolve_qmp_timeout_secs())
} }
@ -434,6 +457,7 @@ fn build_qemu_args(
"-S".into(), "-S".into(),
]; ];
args.extend(build_disk_args(vm, disks)?); args.extend(build_disk_args(vm, disks)?);
args.extend(build_network_args(vm));
if let Some(kernel) = kernel { if let Some(kernel) = kernel {
args.push("-kernel".into()); args.push("-kernel".into());
@ -449,6 +473,31 @@ fn build_qemu_args(
Ok(args) Ok(args)
} }
fn build_network_args(vm: &VirtualMachine) -> Vec<String> {
let mut args = Vec::new();
for (index, nic) in vm.spec.network.iter().enumerate() {
let device_id = nic_device_component(nic, index);
let tap_name = tap_name_for_nic(nic);
args.push("-netdev".into());
args.push(format!(
"tap,id=netdev-{id},ifname={tap},script=no,downscript=no",
id = device_id,
tap = tap_name
));
args.push("-device".into());
let mut device = format!(
"{driver},id=net-{id},netdev=netdev-{id}",
driver = nic_device_driver(nic.model),
id = device_id
);
if let Some(mac) = nic.mac_address.as_deref() {
device.push_str(&format!(",mac={mac}"));
}
args.push(device);
}
args
}
/// Build QEMU args for an incoming migration listener. /// Build QEMU args for an incoming migration listener.
fn build_qemu_args_incoming( fn build_qemu_args_incoming(
vm: &VirtualMachine, vm: &VirtualMachine,
@ -568,6 +617,11 @@ impl HypervisorBackend for KvmBackend {
tokio::fs::create_dir_all(&runtime_dir) tokio::fs::create_dir_all(&runtime_dir)
.await .await
.map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?; .map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?;
tokio::fs::create_dir_all(&self.runtime_dir)
.await
.map_err(|e| {
Error::HypervisorError(format!("Failed to create backend runtime root: {e}"))
})?;
let qmp_socket = runtime_dir.join("qmp.sock"); let qmp_socket = runtime_dir.join("qmp.sock");
let console_log = runtime_dir.join("console.log"); let console_log = runtime_dir.join("console.log");
@ -575,16 +629,23 @@ impl HypervisorBackend for KvmBackend {
let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = tokio::fs::remove_file(&console_log).await; let _ = tokio::fs::remove_file(&console_log).await;
let qemu_bin = resolve_qemu_path(&self.qemu_path); let qemu_bin = resolve_qemu_path(&self.qemu_path);
let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?;
let (kernel_path, initrd_path) = resolve_kernel_initrd(); let (kernel_path, initrd_path) = resolve_kernel_initrd();
let args = build_qemu_args( let args = match build_qemu_args(
vm, vm,
disks, disks,
&qmp_socket, &qmp_socket,
&console_log, &console_log,
kernel_path.as_deref(), kernel_path.as_deref(),
initrd_path.as_deref(), initrd_path.as_deref(),
)?; ) {
Ok(args) => args,
Err(error) => {
let _ = cleanup_vm_networks(&network_states).await;
return Err(error);
}
};
let mut cmd = Command::new(&qemu_bin); let mut cmd = Command::new(&qemu_bin);
cmd.args(&args); cmd.args(&args);
@ -597,9 +658,15 @@ impl HypervisorBackend for KvmBackend {
"Spawning KVM QEMU" "Spawning KVM QEMU"
); );
let mut child = cmd let mut child = match cmd.spawn() {
.spawn() Ok(child) => child,
.map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?; Err(error) => {
let _ = cleanup_vm_networks(&network_states).await;
return Err(Error::HypervisorError(format!(
"Failed to spawn QEMU: {error}"
)));
}
};
let pid = child.id().map(|p| p); let pid = child.id().map(|p| p);
// Wait for QMP readiness before detaching so slow nested workers do not leave orphans. // Wait for QMP readiness before detaching so slow nested workers do not leave orphans.
@ -614,6 +681,7 @@ impl HypervisorBackend for KvmBackend {
let _ = child.start_kill(); let _ = child.start_kill();
let _ = child.wait().await; let _ = child.wait().await;
let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = cleanup_vm_networks(&network_states).await;
return Err(err); return Err(err);
} }
@ -629,6 +697,10 @@ impl HypervisorBackend for KvmBackend {
handle handle
.backend_state .backend_state
.insert("console_log".into(), console_log.display().to_string()); .insert("console_log".into(), console_log.display().to_string());
handle.backend_state.insert(
NETWORK_STATE_KEY.into(),
encode_network_states(&network_states)?,
);
handle.pid = pid; handle.pid = pid;
handle.attached_disks = disks.to_vec(); handle.attached_disks = disks.to_vec();
@ -789,15 +861,21 @@ impl HypervisorBackend for KvmBackend {
tokio::fs::create_dir_all(&runtime_dir) tokio::fs::create_dir_all(&runtime_dir)
.await .await
.map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?; .map_err(|e| Error::HypervisorError(format!("Failed to create runtime dir: {e}")))?;
tokio::fs::create_dir_all(&self.runtime_dir)
.await
.map_err(|e| {
Error::HypervisorError(format!("Failed to create backend runtime root: {e}"))
})?;
let qmp_socket = runtime_dir.join("qmp.sock"); let qmp_socket = runtime_dir.join("qmp.sock");
let console_log = runtime_dir.join("console.log"); let console_log = runtime_dir.join("console.log");
let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = tokio::fs::remove_file(&console_log).await; let _ = tokio::fs::remove_file(&console_log).await;
let qemu_bin = resolve_qemu_path(&self.qemu_path); let qemu_bin = resolve_qemu_path(&self.qemu_path);
let network_states = ensure_vm_networks(&self.runtime_dir, &vm.spec.network).await?;
let (kernel_path, initrd_path) = resolve_kernel_initrd(); let (kernel_path, initrd_path) = resolve_kernel_initrd();
let args = build_qemu_args_incoming( let args = match build_qemu_args_incoming(
vm, vm,
disks, disks,
&qmp_socket, &qmp_socket,
@ -805,7 +883,13 @@ impl HypervisorBackend for KvmBackend {
kernel_path.as_deref(), kernel_path.as_deref(),
initrd_path.as_deref(), initrd_path.as_deref(),
listen_uri, listen_uri,
)?; ) {
Ok(args) => args,
Err(error) => {
let _ = cleanup_vm_networks(&network_states).await;
return Err(error);
}
};
let mut cmd = Command::new(&qemu_bin); let mut cmd = Command::new(&qemu_bin);
cmd.args(&args); cmd.args(&args);
@ -818,9 +902,15 @@ impl HypervisorBackend for KvmBackend {
"Spawning QEMU for incoming migration" "Spawning QEMU for incoming migration"
); );
let mut child = cmd let mut child = match cmd.spawn() {
.spawn() Ok(child) => child,
.map_err(|e| Error::HypervisorError(format!("Failed to spawn QEMU: {e}")))?; Err(error) => {
let _ = cleanup_vm_networks(&network_states).await;
return Err(Error::HypervisorError(format!(
"Failed to spawn QEMU: {error}"
)));
}
};
let pid = child.id().map(|p| p); let pid = child.id().map(|p| p);
if let Err(err) = wait_for_qmp(&qmp_socket, qmp_timeout()).await { if let Err(err) = wait_for_qmp(&qmp_socket, qmp_timeout()).await {
@ -834,6 +924,7 @@ impl HypervisorBackend for KvmBackend {
let _ = child.start_kill(); let _ = child.start_kill();
let _ = child.wait().await; let _ = child.wait().await;
let _ = tokio::fs::remove_file(&qmp_socket).await; let _ = tokio::fs::remove_file(&qmp_socket).await;
let _ = cleanup_vm_networks(&network_states).await;
return Err(err); return Err(err);
} }
@ -848,6 +939,10 @@ impl HypervisorBackend for KvmBackend {
handle handle
.backend_state .backend_state
.insert("console_log".into(), console_log.display().to_string()); .insert("console_log".into(), console_log.display().to_string());
handle.backend_state.insert(
NETWORK_STATE_KEY.into(),
encode_network_states(&network_states)?,
);
handle.pid = pid; handle.pid = pid;
handle.attached_disks = disks.to_vec(); handle.attached_disks = disks.to_vec();
@ -913,6 +1008,7 @@ impl HypervisorBackend for KvmBackend {
async fn delete(&self, handle: &VmHandle) -> Result<()> { async fn delete(&self, handle: &VmHandle) -> Result<()> {
tracing::info!(vm_id = %handle.vm_id, "Deleting VM resources"); tracing::info!(vm_id = %handle.vm_id, "Deleting VM resources");
let network_states = decode_network_states(handle.backend_state.get(NETWORK_STATE_KEY))?;
if handle.pid.is_some() || self.qmp_socket_path(handle).exists() { if handle.pid.is_some() || self.qmp_socket_path(handle).exists() {
let _ = self.kill(handle).await; let _ = self.kill(handle).await;
@ -940,6 +1036,7 @@ impl HypervisorBackend for KvmBackend {
Error::HypervisorError(format!("Failed to remove runtime dir: {e}")) Error::HypervisorError(format!("Failed to remove runtime dir: {e}"))
})?; })?;
} }
cleanup_vm_networks(&network_states).await?;
tracing::info!(vm_id = %handle.vm_id, "Deleted VM resources"); tracing::info!(vm_id = %handle.vm_id, "Deleted VM resources");
@ -1054,6 +1151,10 @@ impl HypervisorBackend for KvmBackend {
let qmp_socket = self.qmp_socket_path(handle); let qmp_socket = self.qmp_socket_path(handle);
wait_for_qmp(&qmp_socket, qmp_timeout()).await?; wait_for_qmp(&qmp_socket, qmp_timeout()).await?;
let mut client = QmpClient::connect(&qmp_socket).await?; let mut client = QmpClient::connect(&qmp_socket).await?;
let network_states =
ensure_vm_networks(&self.runtime_dir, std::slice::from_ref(nic)).await?;
let tap_name = tap_name_for_nic(nic);
let device_id = nic_device_component(nic, 0);
// Generate MAC address if not provided // Generate MAC address if not provided
let mac_addr = nic let mac_addr = nic
@ -1068,23 +1169,29 @@ impl HypervisorBackend for KvmBackend {
// Step 1: Add network backend via netdev_add // Step 1: Add network backend via netdev_add
let netdev_args = serde_json::json!({ let netdev_args = serde_json::json!({
"type": "tap", "type": "tap",
"id": format!("netdev-{}", nic.id), "id": format!("netdev-{}", device_id),
"ifname": format!("tap-{}", nic.id), "ifname": tap_name,
"script": "no", "script": "no",
"downscript": "no" "downscript": "no"
}); });
client.command("netdev_add", Some(netdev_args)).await?; if let Err(error) = client.command("netdev_add", Some(netdev_args)).await {
let _ = cleanup_vm_networks(&network_states).await;
return Err(error);
}
// Step 2: Add virtio-net-pci frontend device // Step 2: Add virtio-net-pci frontend device
let device_args = serde_json::json!({ let device_args = serde_json::json!({
"driver": "virtio-net-pci", "driver": nic_device_driver(nic.model),
"id": format!("net-{}", nic.id), "id": format!("net-{}", device_id),
"netdev": format!("netdev-{}", nic.id), "netdev": format!("netdev-{}", device_id),
"mac": mac_addr "mac": mac_addr
}); });
client.command("device_add", Some(device_args)).await?; if let Err(error) = client.command("device_add", Some(device_args)).await {
let _ = cleanup_vm_networks(&network_states).await;
return Err(error);
}
tracing::info!( tracing::info!(
vm_id = %handle.vm_id, vm_id = %handle.vm_id,
@ -1106,6 +1213,7 @@ impl HypervisorBackend for KvmBackend {
let qmp_socket = self.qmp_socket_path(handle); let qmp_socket = self.qmp_socket_path(handle);
wait_for_qmp(&qmp_socket, qmp_timeout()).await?; wait_for_qmp(&qmp_socket, qmp_timeout()).await?;
let mut client = QmpClient::connect(&qmp_socket).await?; let mut client = QmpClient::connect(&qmp_socket).await?;
let nic_id = sanitize_device_component(nic_id, 0);
// Remove the virtio-net-pci device (netdev backend will be cleaned up automatically) // Remove the virtio-net-pci device (netdev backend will be cleaned up automatically)
let device_args = serde_json::json!({ let device_args = serde_json::json!({
@ -1281,8 +1389,6 @@ mod tests {
#[test] #[test]
fn build_qemu_args_coerces_writeback_cache_to_none_for_nbd_disks() { fn build_qemu_args_coerces_writeback_cache_to_none_for_nbd_disks() {
let _guard = crate::env::env_test_lock().lock().unwrap();
std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default()); let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
let disks = vec![AttachedDisk { let disks = vec![AttachedDisk {
id: "root".into(), id: "root".into(),
@ -1300,13 +1406,11 @@ mod tests {
let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap(); let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
let args_joined = args.join(" "); let args_joined = args.join(" ");
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}")); assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
assert!(args_joined.contains("\"aio\":\"io_uring\"")); assert!(!args_joined.contains("\"aio\":"));
} }
#[test] #[test]
fn build_qemu_args_uses_io_uring_for_nbd_none_cache_by_default() { fn build_qemu_args_does_not_set_aio_for_nbd_disks() {
let _guard = crate::env::env_test_lock().lock().unwrap();
std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default()); let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
let disks = vec![AttachedDisk { let disks = vec![AttachedDisk {
id: "root".into(), id: "root".into(),
@ -1324,32 +1428,7 @@ mod tests {
let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap(); let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
let args_joined = args.join(" "); let args_joined = args.join(" ");
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}")); assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
assert!(args_joined.contains("\"aio\":\"io_uring\"")); assert!(!args_joined.contains("\"aio\":"));
}
#[test]
fn build_qemu_args_honors_nbd_aio_override() {
let _guard = crate::env::env_test_lock().lock().unwrap();
std::env::set_var(crate::env::ENV_NBD_AIO_MODE, "threads");
let vm = VirtualMachine::new("vm1", "org", "proj", VmSpec::default());
let disks = vec![AttachedDisk {
id: "root".into(),
attachment: DiskAttachment::Nbd {
uri: "nbd://10.100.0.11:11000".into(),
format: VolumeFormat::Raw,
},
bus: DiskBus::Virtio,
cache: DiskCache::None,
boot_index: Some(1),
read_only: false,
}];
let qmp = PathBuf::from("/tmp/qmp.sock");
let console = PathBuf::from("/tmp/console.log");
let args = build_qemu_args(&vm, &disks, &qmp, &console, None, None).unwrap();
let args_joined = args.join(" ");
assert!(args_joined.contains("\"cache\":{\"direct\":true,\"no-flush\":false}"));
assert!(args_joined.contains("\"aio\":\"threads\""));
std::env::remove_var(crate::env::ENV_NBD_AIO_MODE);
} }
#[test] #[test]

View file

@ -0,0 +1,678 @@
use nix::sys::signal::{kill as nix_kill, Signal};
use nix::unistd::Pid;
use plasmavmc_types::{Error, NetworkSpec, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::net::Ipv4Addr;
use std::os::unix::fs::MetadataExt;
use std::path::{Path, PathBuf};
use tokio::fs;
use tokio::process::Command;
use tokio::time::{sleep, Duration, Instant};
/// Metadata key under which the serialized `NetworkRuntimeState` list for a
/// VM is stored (see `encode_network_states` / `decode_network_states`).
pub const NETWORK_STATE_KEY: &str = "network_state";

/// Upper bound on how long `ensure_dnsmasq` waits for a freshly spawned
/// dnsmasq to report itself running (via its pid file) before failing.
const DNSMASQ_START_TIMEOUT: Duration = Duration::from_secs(5);
/// Persisted per-NIC dataplane state: everything needed to tear the local
/// bridge/tap/dnsmasq resources back down after a restart.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct NetworkRuntimeState {
    /// NIC identifier from the VM spec.
    pub nic_id: String,
    /// Subnet the NIC belongs to; one bridge + dnsmasq instance per subnet.
    pub subnet_id: String,
    /// Port identifier (falls back to the NIC id when the spec has none).
    pub port_id: String,
    /// Linux bridge device name (`pcbr` + compacted subnet id).
    pub bridge_name: String,
    /// Tap device name attached to the bridge (`pct` + compacted seed).
    pub tap_name: String,
    /// MAC address used for the DHCP reservation.
    pub mac_address: String,
    /// IPv4 address reserved for the NIC.
    pub ip_address: String,
    /// Subnet CIDR, e.g. `10.62.10.0/24`.
    pub cidr_block: String,
    /// Gateway address configured on the bridge.
    pub gateway_ip: String,
    /// Whether DHCP was requested for this NIC.
    pub dhcp_enabled: bool,
    /// Per-subnet runtime directory holding the dnsmasq files below.
    pub network_dir: String,
    /// Path to the generated dnsmasq configuration file.
    pub dnsmasq_conf: String,
    /// Path to the shared dnsmasq `dhcp-hostsfile` with per-port entries.
    pub hosts_file: String,
    /// Path to the dnsmasq lease file.
    pub lease_file: String,
    /// Path to the dnsmasq pid file (used for reload/stop signalling).
    pub pid_file: String,
    /// Stable alias identifying this port's line in the hosts file
    /// (`port-` + compacted port id).
    pub host_alias: String,
}
/// Transient configuration computed for one NIC before resources are
/// created; `state` is the part that gets persisted.
#[derive(Debug, Clone)]
struct NicDataplaneConfig {
    // Persisted runtime state for this NIC.
    state: NetworkRuntimeState,
    // Gateway address with prefix length (`a.b.c.d/nn`) for `ip addr replace`.
    gateway_prefix: String,
    // First address dnsmasq may hand out.
    dhcp_range_start: String,
    // Last address dnsmasq may hand out.
    dhcp_range_end: String,
}
/// Derive the tap device name for a NIC, preferring the stable port id,
/// then the subnet id, then the NIC id itself as the naming seed.
pub fn tap_name_for_nic(nic: &NetworkSpec) -> String {
    let seed = match (nic.port_id.as_deref(), nic.subnet_id.as_deref()) {
        (Some(port), _) => port,
        (None, Some(subnet)) => subnet,
        (None, None) => nic.id.as_str(),
    };
    interface_name("pct", seed)
}
pub fn encode_network_states(states: &[NetworkRuntimeState]) -> Result<String> {
serde_json::to_string(states)
.map_err(|error| Error::HypervisorError(format!("failed to encode network state: {error}")))
}
pub fn decode_network_states(serialized: Option<&String>) -> Result<Vec<NetworkRuntimeState>> {
match serialized {
Some(value) if !value.trim().is_empty() => serde_json::from_str(value).map_err(|error| {
Error::HypervisorError(format!("failed to decode network state: {error}"))
}),
_ => Ok(Vec::new()),
}
}
/// Bring up the local dataplane (bridge, dnsmasq DHCP, tap device) for each
/// NIC of a VM and return the resulting runtime states.
///
/// Each NIC's state is recorded before its resources are created so that a
/// failure part-way through rolls back everything built so far (best
/// effort) before the error is returned.
pub async fn ensure_vm_networks(
    runtime_root: &Path,
    nics: &[NetworkSpec],
) -> Result<Vec<NetworkRuntimeState>> {
    let mut states = Vec::with_capacity(nics.len());
    for nic in nics {
        let config = dataplane_config(runtime_root, nic)?;
        states.push(config.state.clone());
        let setup = async {
            ensure_bridge(&config).await?;
            ensure_dnsmasq(&config).await?;
            ensure_tap(runtime_root, &config).await
        };
        if let Err(error) = setup.await {
            // Best-effort rollback, including the partially-built NIC
            // whose state was just pushed.
            let _ = cleanup_vm_networks(&states).await;
            return Err(error);
        }
    }
    Ok(states)
}
/// Tear down the local dataplane for the given NIC states (reverse order),
/// collecting every failure and reporting them together at the end so one
/// broken resource does not leave the rest behind.
///
/// Per-bridge cleanup runs once per distinct bridge: if other ports still
/// have hosts-file entries the dnsmasq instance is just reloaded; otherwise
/// dnsmasq is stopped and the bridge and its runtime directory removed.
pub async fn cleanup_vm_networks(states: &[NetworkRuntimeState]) -> Result<()> {
    let mut errors = Vec::new();
    let mut seen_bridges = HashSet::new();
    for state in states.iter().rev() {
        // Per-NIC resources: tap device and this port's DHCP reservation.
        if let Err(error) = delete_interface_if_present(&state.tap_name).await {
            errors.push(error.to_string());
        }
        if let Err(error) = remove_host_entry(state).await {
            errors.push(error.to_string());
        }
        // Handle each shared bridge only once even if several NICs used it.
        if !seen_bridges.insert(state.bridge_name.clone()) {
            continue;
        }
        match bridge_has_hosts(state).await {
            Ok(true) => {
                // Other ports remain on this subnet: keep dnsmasq running,
                // just make it re-read the trimmed hosts file.
                if let Err(error) = reload_dnsmasq(state).await {
                    errors.push(error.to_string());
                }
            }
            Ok(false) => {
                // Last port gone: stop dnsmasq, drop the bridge, and remove
                // the runtime directory (best effort).
                if let Err(error) = stop_dnsmasq(state).await {
                    errors.push(error.to_string());
                }
                if let Err(error) = delete_interface_if_present(&state.bridge_name).await {
                    errors.push(error.to_string());
                }
                let _ = fs::remove_dir_all(&state.network_dir).await;
            }
            Err(error) => errors.push(error.to_string()),
        }
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(Error::HypervisorError(format!(
            "network cleanup failed: {}",
            errors.join("; ")
        )))
    }
}
fn dataplane_config(runtime_root: &Path, nic: &NetworkSpec) -> Result<NicDataplaneConfig> {
let subnet_id = nic
.subnet_id
.clone()
.ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires subnet_id".into()))?;
let port_id = nic.port_id.clone().unwrap_or_else(|| nic.id.clone());
let mac_address = nic
.mac_address
.clone()
.ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires mac_address".into()))?;
let ip_address = nic
.ip_address
.clone()
.ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires ip_address".into()))?;
let cidr_block = nic
.cidr_block
.clone()
.ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires cidr_block".into()))?;
let gateway_ip = nic
.gateway_ip
.clone()
.ok_or_else(|| Error::UnsupportedFeature("KVM NIC requires gateway_ip".into()))?;
let (cidr_ip, prefix) = parse_ipv4_cidr(&cidr_block)?;
let gateway = parse_ipv4(&gateway_ip, "gateway_ip")?;
if !cidr_contains_ip(cidr_ip, prefix, gateway) {
return Err(Error::HypervisorError(format!(
"gateway {gateway_ip} is outside subnet {cidr_block}"
)));
}
let (dhcp_range_start, dhcp_range_end) = dhcp_range(cidr_ip, prefix, gateway)?;
let bridge_name = interface_name("pcbr", &subnet_id);
let tap_name = tap_name_for_nic(nic);
let network_dir = runtime_root.join("networks").join(&subnet_id);
let host_alias = format!("port-{}", compact_id(&port_id, 12));
let state = NetworkRuntimeState {
nic_id: nic.id.clone(),
subnet_id,
port_id,
bridge_name,
tap_name,
mac_address,
ip_address,
cidr_block,
gateway_ip: gateway_ip.clone(),
dhcp_enabled: nic.dhcp_enabled,
network_dir: network_dir.display().to_string(),
dnsmasq_conf: network_dir.join("dnsmasq.conf").display().to_string(),
hosts_file: network_dir.join("hosts").display().to_string(),
lease_file: network_dir.join("leases").display().to_string(),
pid_file: network_dir.join("dnsmasq.pid").display().to_string(),
host_alias,
};
Ok(NicDataplaneConfig {
gateway_prefix: format!("{gateway_ip}/{prefix}"),
dhcp_range_start,
dhcp_range_end,
state,
})
}
/// Make sure the subnet's Linux bridge exists, carries the gateway address,
/// and is administratively up. Idempotent: `ip addr replace` and re-setting
/// the link up are safe on an existing bridge.
async fn ensure_bridge(config: &NicDataplaneConfig) -> Result<()> {
    let bridge = config.state.bridge_name.as_str();
    if !link_exists(bridge).await? {
        run_command("ip", ["link", "add", "name", bridge, "type", "bridge"]).await?;
    }
    run_command(
        "ip",
        ["addr", "replace", config.gateway_prefix.as_str(), "dev", bridge],
    )
    .await?;
    run_command("ip", ["link", "set", "dev", bridge, "up"]).await
}
/// Write the per-subnet dnsmasq configuration and make sure an instance is
/// serving the bridge: an already-running dnsmasq is reloaded (SIGHUP) so it
/// re-reads the hosts file, otherwise a new one is spawned and we wait for
/// its pid file to confirm startup (bounded by `DNSMASQ_START_TIMEOUT`).
async fn ensure_dnsmasq(config: &NicDataplaneConfig) -> Result<()> {
    fs::create_dir_all(&config.state.network_dir)
        .await
        .map_err(|error| {
            Error::HypervisorError(format!(
                "failed to create network runtime directory {}: {error}",
                config.state.network_dir
            ))
        })?;
    // Publish this NIC's MAC/IP reservation before (re)starting dnsmasq so
    // the daemon sees it on first read of the hosts file.
    write_hosts_file(&config.state).await?;
    // DHCP-only configuration bound to the bridge; `port=0` disables the
    // DNS listener per dnsmasq's documented behavior.
    let dnsmasq_conf = format!(
        "interface={bridge}\n\
         bind-interfaces\n\
         except-interface=lo\n\
         port=0\n\
         dhcp-authoritative\n\
         dhcp-option=option:router,{gateway}\n\
         dhcp-range={range_start},{range_end},{mask},1h\n\
         dhcp-hostsfile={hosts_file}\n\
         dhcp-leasefile={lease_file}\n\
         pid-file={pid_file}\n",
        bridge = config.state.bridge_name,
        gateway = config.state.gateway_ip,
        range_start = config.dhcp_range_start,
        range_end = config.dhcp_range_end,
        mask = cidr_mask(&config.state.cidr_block)?,
        hosts_file = config.state.hosts_file,
        lease_file = config.state.lease_file,
        pid_file = config.state.pid_file,
    );
    fs::write(&config.state.dnsmasq_conf, dnsmasq_conf)
        .await
        .map_err(|error| {
            Error::HypervisorError(format!(
                "failed to write dnsmasq config {}: {error}",
                config.state.dnsmasq_conf
            ))
        })?;
    // Reuse a live instance: SIGHUP makes dnsmasq re-read the hosts file.
    if dnsmasq_running(&config.state).await? {
        reload_dnsmasq(&config.state).await?;
        return Ok(());
    }
    let mut command = Command::new("dnsmasq");
    command.arg(format!("--conf-file={}", config.state.dnsmasq_conf));
    let output = command
        .output()
        .await
        .map_err(|error| Error::HypervisorError(format!("failed to spawn dnsmasq: {error}")))?;
    if !output.status.success() {
        return Err(command_failed("dnsmasq", &[], &output));
    }
    // dnsmasq daemonizes; poll its pid file until the daemon is confirmed
    // running or the startup timeout elapses.
    let deadline = Instant::now() + DNSMASQ_START_TIMEOUT;
    while Instant::now() < deadline {
        if dnsmasq_running(&config.state).await? {
            return Ok(());
        }
        sleep(Duration::from_millis(100)).await;
    }
    Err(Error::HypervisorError(format!(
        "dnsmasq did not start for bridge {}",
        config.state.bridge_name
    )))
}
async fn ensure_tap(runtime_root: &Path, config: &NicDataplaneConfig) -> Result<()> {
let _ = delete_interface_if_present(&config.state.tap_name).await;
let metadata = fs::metadata(runtime_root).await.map_err(|error| {
Error::HypervisorError(format!(
"failed to inspect runtime root {}: {error}",
runtime_root.display()
))
})?;
let uid = metadata.uid().to_string();
let gid = metadata.gid().to_string();
run_command(
"ip",
[
"tuntap",
"add",
"dev",
config.state.tap_name.as_str(),
"mode",
"tap",
"user",
uid.as_str(),
"group",
gid.as_str(),
],
)
.await?;
run_command(
"ip",
[
"link",
"set",
"dev",
config.state.tap_name.as_str(),
"master",
config.state.bridge_name.as_str(),
],
)
.await?;
run_command(
"ip",
["link", "set", "dev", config.state.tap_name.as_str(), "up"],
)
.await
}
/// Add (or refresh) this port's DHCP reservation in the bridge's shared
/// dnsmasq hosts file. Entries are `mac,ip,alias` CSV lines; any existing
/// line for the same alias is replaced.
async fn write_hosts_file(state: &NetworkRuntimeState) -> Result<()> {
    let hosts_path = PathBuf::from(&state.hosts_file);
    let existing = match fs::read_to_string(&hosts_path).await {
        Ok(contents) => contents,
        // First port on this subnet: start from an empty file.
        Err(error) if error.kind() == std::io::ErrorKind::NotFound => String::new(),
        Err(error) => {
            return Err(Error::HypervisorError(format!(
                "failed to read dnsmasq hosts file {}: {error}",
                hosts_path.display()
            )))
        }
    };
    // Match the alias as the exact third CSV field rather than with a
    // substring search, so e.g. alias "port-abc" never clobbers the
    // entry for "port-abc1".
    let mut lines: Vec<String> = existing
        .lines()
        .filter(|line| {
            !line.trim().is_empty()
                && line.split(',').nth(2).map(str::trim) != Some(state.host_alias.as_str())
        })
        .map(ToOwned::to_owned)
        .collect();
    lines.push(format!(
        "{mac},{ip},{alias}",
        mac = state.mac_address,
        ip = state.ip_address,
        alias = state.host_alias
    ));
    let mut rendered = lines.join("\n");
    if !rendered.is_empty() {
        rendered.push('\n');
    }
    fs::write(&hosts_path, rendered).await.map_err(|error| {
        Error::HypervisorError(format!(
            "failed to write dnsmasq hosts file {}: {error}",
            hosts_path.display()
        ))
    })
}
/// Remove this port's reservation line from the shared dnsmasq hosts file,
/// leaving entries for other ports untouched. A missing file is treated as
/// already removed.
async fn remove_host_entry(state: &NetworkRuntimeState) -> Result<()> {
    let hosts_path = PathBuf::from(&state.hosts_file);
    let existing = match fs::read_to_string(&hosts_path).await {
        Ok(contents) => contents,
        Err(error) if error.kind() == std::io::ErrorKind::NotFound => return Ok(()),
        Err(error) => {
            return Err(Error::HypervisorError(format!(
                "failed to read dnsmasq hosts file {}: {error}",
                hosts_path.display()
            )))
        }
    };
    // Compare the alias against the exact third CSV field of each entry:
    // a plain substring match could drop another port whose alias merely
    // contains this one as a prefix.
    let filtered: Vec<&str> = existing
        .lines()
        .filter(|line| line.split(',').nth(2).map(str::trim) != Some(state.host_alias.as_str()))
        .collect();
    let mut rendered = filtered.join("\n");
    if !rendered.is_empty() {
        rendered.push('\n');
    }
    fs::write(&hosts_path, rendered).await.map_err(|error| {
        Error::HypervisorError(format!(
            "failed to update dnsmasq hosts file {}: {error}",
            hosts_path.display()
        ))
    })
}
async fn bridge_has_hosts(state: &NetworkRuntimeState) -> Result<bool> {
match fs::read_to_string(&state.hosts_file).await {
Ok(contents) => Ok(contents.lines().any(|line| !line.trim().is_empty())),
Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(false),
Err(error) => Err(Error::HypervisorError(format!(
"failed to inspect dnsmasq hosts file {}: {error}",
state.hosts_file
))),
}
}
async fn dnsmasq_running(state: &NetworkRuntimeState) -> Result<bool> {
let pid = read_pid_file(&state.pid_file).await?;
if let Some(pid) = pid {
return Ok(pid_running(pid));
}
Ok(false)
}
/// Ask a running dnsmasq to re-read its hosts file via SIGHUP; a missing
/// pid file is a no-op.
async fn reload_dnsmasq(state: &NetworkRuntimeState) -> Result<()> {
    match read_pid_file(&state.pid_file).await? {
        Some(pid) => signal_pid(pid, Signal::SIGHUP),
        None => Ok(()),
    }
}
/// Stop the subnet's dnsmasq: SIGTERM first, then escalate to SIGKILL if it
/// is still alive after a two-second grace period. The pid file is removed
/// (best effort) either way.
async fn stop_dnsmasq(state: &NetworkRuntimeState) -> Result<()> {
    if let Some(pid) = read_pid_file(&state.pid_file).await? {
        signal_pid(pid, Signal::SIGTERM)?;
        let give_up_at = Instant::now() + Duration::from_secs(2);
        // Poll until the process exits or the grace period runs out.
        while pid_running(pid) && Instant::now() < give_up_at {
            sleep(Duration::from_millis(50)).await;
        }
        // Re-check before escalating so a just-exited process is not
        // signalled again.
        if pid_running(pid) {
            signal_pid(pid, Signal::SIGKILL)?;
        }
    }
    let _ = fs::remove_file(&state.pid_file).await;
    Ok(())
}
async fn read_pid_file(path: &str) -> Result<Option<u32>> {
match fs::read_to_string(path).await {
Ok(contents) => Ok(contents.trim().parse::<u32>().ok()),
Err(error) if error.kind() == std::io::ErrorKind::NotFound => Ok(None),
Err(error) => Err(Error::HypervisorError(format!(
"failed to read pid file {path}: {error}"
))),
}
}
fn signal_pid(pid: u32, signal: Signal) -> Result<()> {
nix_kill(Pid::from_raw(pid as i32), signal).map_err(|error| {
Error::HypervisorError(format!(
"failed to signal pid {pid} with {signal:?}: {error}"
))
})
}
/// Probe whether a process exists using the null signal (`kill(pid, 0)`):
/// success or EPERM means it is alive; ESRCH or any other error means gone.
fn pid_running(pid: u32) -> bool {
    matches!(
        nix_kill(Pid::from_raw(pid as i32), None::<Signal>),
        Ok(()) | Err(nix::errno::Errno::EPERM)
    )
}
async fn link_exists(name: &str) -> Result<bool> {
let output = Command::new("ip")
.args(["link", "show", "dev", name])
.output()
.await
.map_err(|error| Error::HypervisorError(format!("failed to query link {name}: {error}")))?;
Ok(output.status.success())
}
/// Bring down and delete the named interface when it exists; silently does
/// nothing otherwise.
async fn delete_interface_if_present(name: &str) -> Result<()> {
    if link_exists(name).await? {
        run_command("ip", ["link", "set", "dev", name, "down"]).await?;
        run_command("ip", ["link", "delete", "dev", name]).await?;
    }
    Ok(())
}
async fn run_command<const N: usize>(program: &str, args: [&str; N]) -> Result<()> {
let output = Command::new(program)
.args(args)
.output()
.await
.map_err(|error| Error::HypervisorError(format!("failed to spawn {program}: {error}")))?;
if output.status.success() {
Ok(())
} else {
Err(command_failed(program, &args, &output))
}
}
/// Build the error for a failed external command, preferring stderr, then
/// stdout, then the raw exit code as the failure detail.
fn command_failed(program: &str, args: &[&str], output: &std::process::Output) -> Error {
    let non_empty = |bytes: &[u8]| {
        let text = String::from_utf8_lossy(bytes).trim().to_string();
        if text.is_empty() {
            None
        } else {
            Some(text)
        }
    };
    let detail = non_empty(&output.stderr)
        .or_else(|| non_empty(&output.stdout))
        .unwrap_or_else(|| format!("exit code {:?}", output.status.code()));
    Error::HypervisorError(format!("{program} {} failed: {detail}", args.join(" ")))
}
fn parse_ipv4(value: &str, field: &str) -> Result<Ipv4Addr> {
value
.parse()
.map_err(|error| Error::HypervisorError(format!("invalid {field} {value}: {error}")))
}
/// Split an `a.b.c.d/nn` CIDR string into its address and prefix length,
/// validating both halves.
fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8)> {
    let Some((ip_text, prefix_text)) = cidr.split_once('/') else {
        return Err(Error::HypervisorError(format!(
            "invalid cidr_block {cidr}: missing prefix"
        )));
    };
    let ip = parse_ipv4(ip_text, "cidr_block ip")?;
    let prefix: u8 = prefix_text
        .parse()
        .map_err(|error| Error::HypervisorError(format!("invalid cidr_block {cidr}: {error}")))?;
    if prefix > 32 {
        return Err(Error::HypervisorError(format!(
            "invalid cidr_block {cidr}: prefix must be <= 32"
        )));
    }
    Ok((ip, prefix))
}
/// True when `ip` falls inside the network defined by `cidr_ip`/`prefix`.
/// A zero prefix matches every address (shifting by 32 would be UB-adjacent,
/// so it is special-cased).
fn cidr_contains_ip(cidr_ip: Ipv4Addr, prefix: u8, ip: Ipv4Addr) -> bool {
    let mask = match prefix {
        0 => 0u32,
        bits => u32::MAX << (32 - bits),
    };
    (u32::from(cidr_ip) & mask) == (u32::from(ip) & mask)
}
/// Render the dotted-quad netmask (e.g. `255.255.255.0`) for a CIDR string.
fn cidr_mask(cidr: &str) -> Result<String> {
    let (_, prefix) = parse_ipv4_cidr(cidr)?;
    let mask = match prefix {
        0 => 0u32,
        bits => u32::MAX << (32 - bits),
    };
    Ok(Ipv4Addr::from(mask).to_string())
}
/// Compute the usable DHCP pool for a subnet: all host addresses between the
/// network and broadcast addresses, trimmed at either end if the gateway
/// occupies it. Subnets of /31 or smaller have no usable pool and are
/// rejected up front.
fn dhcp_range(cidr_ip: Ipv4Addr, prefix: u8, gateway: Ipv4Addr) -> Result<(String, String)> {
    if prefix >= 31 {
        return Err(Error::UnsupportedFeature(
            "KVM local bridge dataplane requires an IPv4 subnet larger than /31".into(),
        ));
    }
    let mask = match prefix {
        0 => 0u32,
        bits => u32::MAX << (32 - bits),
    };
    let network_addr = u32::from(cidr_ip) & mask;
    let broadcast_addr = network_addr | !mask;
    let gateway_addr = u32::from(gateway);
    let mut first = network_addr + 1;
    let mut last = broadcast_addr - 1;
    // Step around the gateway when it sits at either edge of the pool.
    if first == gateway_addr {
        first += 1;
    }
    if last == gateway_addr {
        last = last.saturating_sub(1);
    }
    if first > last {
        return Err(Error::HypervisorError(
            "subnet does not have enough usable DHCP addresses".into(),
        ));
    }
    Ok((
        Ipv4Addr::from(first).to_string(),
        Ipv4Addr::from(last).to_string(),
    ))
}
/// Reduce an identifier to at most `limit` lowercase ASCII alphanumerics.
/// An identifier with no usable characters yields a run of `'0'`s instead
/// (at least one, even when `limit` is zero) so callers always get a seed.
fn compact_id(value: &str, limit: usize) -> String {
    let normalized: String = value
        .chars()
        .filter_map(|ch| ch.is_ascii_alphanumeric().then(|| ch.to_ascii_lowercase()))
        .collect();
    if normalized.is_empty() {
        "0".repeat(limit.max(1))
    } else {
        normalized.chars().take(limit).collect()
    }
}
/// Build an interface name from a fixed prefix plus a compacted seed,
/// budgeted so the result fits the 15-character Linux interface-name limit.
fn interface_name(prefix: &str, seed: &str) -> String {
    let seed_budget = 15usize.saturating_sub(prefix.len()).max(1);
    let mut name = String::from(prefix);
    name.push_str(&compact_id(seed, seed_budget));
    name
}
#[cfg(test)]
mod tests {
    use super::*;

    // Tap names must be seeded from the stable port ID when one exists, so
    // the device name survives across NIC respecification.
    #[test]
    fn tap_name_prefers_port_id() {
        let nic = NetworkSpec {
            id: "tenant0".into(),
            port_id: Some("12345678-1234-1234-1234-1234567890ab".into()),
            ..NetworkSpec::default()
        };
        assert_eq!(tap_name_for_nic(&nic), "pct123456781234");
    }

    // Generated names must respect the kernel's 15-character interface-name
    // limit even for long UUID-style seeds.
    #[test]
    fn interface_names_fit_kernel_limit() {
        let name = interface_name("pcbr", "12345678-1234-1234-1234-1234567890ab");
        assert!(name.len() <= 15);
        assert_eq!(name, "pcbr12345678123");
    }

    // The DHCP pool must exclude the gateway address at the low end of the
    // subnet and stop short of the broadcast address.
    #[test]
    fn dhcp_range_skips_gateway() {
        let (start, end) = dhcp_range(
            "10.62.10.0".parse().unwrap(),
            24,
            "10.62.10.1".parse().unwrap(),
        )
        .unwrap();
        assert_eq!(start, "10.62.10.2");
        assert_eq!(end, "10.62.10.254");
    }

    // Serialization round-trip: persisted network state must decode back to
    // an identical value.
    #[test]
    fn encode_and_decode_network_state_round_trip() {
        let state = NetworkRuntimeState {
            nic_id: "tenant0".into(),
            subnet_id: "subnet".into(),
            port_id: "port".into(),
            bridge_name: "pcbrsubnet".into(),
            tap_name: "pctport".into(),
            mac_address: "02:00:00:00:62:10".into(),
            ip_address: "10.62.10.10".into(),
            cidr_block: "10.62.10.0/24".into(),
            gateway_ip: "10.62.10.1".into(),
            dhcp_enabled: true,
            network_dir: "/run/libvirt/plasmavmc/networks/subnet".into(),
            dnsmasq_conf: "/run/libvirt/plasmavmc/networks/subnet/dnsmasq.conf".into(),
            hosts_file: "/run/libvirt/plasmavmc/networks/subnet/hosts".into(),
            lease_file: "/run/libvirt/plasmavmc/networks/subnet/leases".into(),
            pid_file: "/run/libvirt/plasmavmc/networks/subnet/dnsmasq.pid".into(),
            host_alias: "port-port".into(),
        };
        let encoded = encode_network_states(&[state.clone()]).unwrap();
        let decoded = decode_network_states(Some(&encoded)).unwrap();
        assert_eq!(decoded, vec![state]);
    }
}

View file

@ -51,14 +51,12 @@ chrono = { version = "0.4", features = ["serde"] }
[dev-dependencies] [dev-dependencies]
tempfile = { workspace = true } tempfile = { workspace = true }
chrono = "0.4" chrono = "0.4"
chainfire-server = { path = "../../../chainfire/crates/chainfire-server" }
flaredb-server = { path = "../../../flaredb/crates/flaredb-server" }
flaredb-proto = { path = "../../../flaredb/crates/flaredb-proto" }
iam-api = { path = "../../../iam/crates/iam-api" } iam-api = { path = "../../../iam/crates/iam-api" }
iam-authn = { path = "../../../iam/crates/iam-authn" }
iam-authz = { path = "../../../iam/crates/iam-authz" }
iam-store = { path = "../../../iam/crates/iam-store" }
prismnet-server = { path = "../../../prismnet/crates/prismnet-server" } prismnet-server = { path = "../../../prismnet/crates/prismnet-server" }
prismnet-types = { path = "../../../prismnet/crates/prismnet-types" } prismnet-types = { path = "../../../prismnet/crates/prismnet-types" }
creditservice-api = { path = "../../../creditservice/crates/creditservice-api" }
creditservice-proto = { path = "../../../creditservice/crates/creditservice-proto" }
[lints] [lints]
workspace = true workspace = true

View file

@ -82,7 +82,7 @@ struct ValidatedImportUrl {
struct ImportedImageSource { struct ImportedImageSource {
source_type: String, source_type: String,
host: String, host: Option<String>,
} }
impl ArtifactStore { impl ArtifactStore {
@ -177,7 +177,7 @@ impl ArtifactStore {
&staging_path, &staging_path,
source_format, source_format,
source_type, source_type,
Some(host), host,
) )
.await .await
} }
@ -750,14 +750,70 @@ impl ArtifactStore {
.map_err(|e| Status::internal(format!("failed to create {parent:?}: {e}")))?; .map_err(|e| Status::internal(format!("failed to create {parent:?}: {e}")))?;
} }
if let Some(local_path) = local_source_path(source_url)? {
self.copy_local_source(&local_path, path).await?;
return Ok(ImportedImageSource {
source_type: "file".to_string(),
host: None,
});
}
let validated = self.validate_import_url(source_url).await?; let validated = self.validate_import_url(source_url).await?;
self.download_https_source(&validated, path).await?; self.download_https_source(&validated, path).await?;
Ok(ImportedImageSource { Ok(ImportedImageSource {
source_type: "https".to_string(), source_type: "https".to_string(),
host: validated.host, host: Some(validated.host),
}) })
} }
    /// Copy a local filesystem image (resolved from a `file://` or
    /// absolute-path `source_url`) into the cache at `path`.
    ///
    /// The source must be a regular file no larger than the configured
    /// import limit. The copy goes through a `.local` temp file and is
    /// renamed into place, then cache permissions are applied.
    async fn copy_local_source(&self, source_path: &Path, path: &Path) -> Result<(), Status> {
        // Resolve symlinks first so the checks below apply to the real file.
        let source = tokio::fs::canonicalize(source_path).await.map_err(|e| {
            Status::invalid_argument(format!(
                "failed to access local source_url path {}: {e}",
                source_path.display()
            ))
        })?;
        let metadata = tokio::fs::metadata(&source).await.map_err(|e| {
            Status::invalid_argument(format!(
                "failed to stat local source_url path {}: {e}",
                source.display()
            ))
        })?;
        if !metadata.is_file() {
            return Err(Status::invalid_argument(format!(
                "local source_url path {} is not a regular file",
                source.display()
            )));
        }
        // Enforce the same size cap as remote imports.
        if metadata.len() > self.max_image_import_size_bytes {
            return Err(Status::resource_exhausted(format!(
                "local source_url exceeds the configured maximum size of {} bytes",
                self.max_image_import_size_bytes
            )));
        }
        // Copy into a sibling temp file, removing any stale leftover first,
        // then rename so `path` only ever appears fully written.
        let temp_path = path.with_extension("local");
        if tokio::fs::try_exists(&temp_path).await.unwrap_or(false) {
            let _ = tokio::fs::remove_file(&temp_path).await;
        }
        tokio::fs::copy(&source, &temp_path).await.map_err(|e| {
            Status::internal(format!(
                "failed to copy local source_url {} into {}: {e}",
                source.display(),
                temp_path.display()
            ))
        })?;
        tokio::fs::rename(&temp_path, path).await.map_err(|e| {
            Status::internal(format!(
                "failed to finalize local source_url copy into {}: {e}",
                path.display()
            ))
        })?;
        ensure_cache_file_permissions(path).await?;
        Ok(())
    }
async fn convert_to_qcow2(&self, source: &Path, destination: &Path) -> Result<(), Status> { async fn convert_to_qcow2(&self, source: &Path, destination: &Path) -> Result<(), Status> {
if tokio::fs::try_exists(destination) if tokio::fs::try_exists(destination)
.await .await
@ -917,12 +973,6 @@ impl ArtifactStore {
} }
async fn validate_import_url(&self, source_url: &str) -> Result<ValidatedImportUrl, Status> { async fn validate_import_url(&self, source_url: &str) -> Result<ValidatedImportUrl, Status> {
if source_url.starts_with("file://") || source_url.starts_with('/') {
return Err(Status::invalid_argument(
"source_url must use https:// and may not reference local files",
));
}
let url = Url::parse(source_url) let url = Url::parse(source_url)
.map_err(|e| Status::invalid_argument(format!("invalid source_url: {e}")))?; .map_err(|e| Status::invalid_argument(format!("invalid source_url: {e}")))?;
if url.scheme() != "https" { if url.scheme() != "https" {
@ -1413,6 +1463,21 @@ fn resolve_binary_path(
Ok(candidate) Ok(candidate)
} }
/// Interpret `source_url` as a local filesystem path when it uses the
/// `file://` scheme or is a bare absolute path; returns `Ok(None)` for any
/// other form (e.g. an https URL) so remote import handling can take over.
fn local_source_path(source_url: &str) -> Result<Option<PathBuf>, Status> {
    if source_url.starts_with("file://") {
        let url = Url::parse(source_url)
            .map_err(|e| Status::invalid_argument(format!("invalid source_url: {e}")))?;
        // `to_file_path` rejects relative or host-qualified file URLs.
        let path = url
            .to_file_path()
            .map_err(|_| Status::invalid_argument("source_url file:// path must be absolute"))?;
        return Ok(Some(path));
    }
    if source_url.starts_with('/') {
        return Ok(Some(PathBuf::from(source_url)));
    }
    Ok(None)
}
async fn ensure_cache_dir_permissions(path: &Path) -> Result<(), Status> { async fn ensure_cache_dir_permissions(path: &Path) -> Result<(), Status> {
#[cfg(unix)] #[cfg(unix)]
{ {
@ -1513,4 +1578,20 @@ mod tests {
"org/project/11111111-1111-1111-1111-111111111111.qcow2" "org/project/11111111-1111-1111-1111-111111111111.qcow2"
); );
} }
#[test]
fn local_source_path_accepts_local_files_and_ignores_https_urls() {
assert_eq!(
local_source_path("file:///tmp/source.qcow2").unwrap(),
Some(PathBuf::from("/tmp/source.qcow2"))
);
assert_eq!(
local_source_path("/var/lib/source.qcow2").unwrap(),
Some(PathBuf::from("/var/lib/source.qcow2"))
);
assert_eq!(
local_source_path("https://example.com/source.qcow2").unwrap(),
None
);
}
} }

View file

@ -1,8 +1,9 @@
//! PrismNET client for port management //! PrismNET client for port management
use prismnet_api::proto::{ use prismnet_api::proto::{
port_service_client::PortServiceClient, GetPortRequest, AttachDeviceRequest, port_service_client::PortServiceClient, subnet_service_client::SubnetServiceClient,
DetachDeviceRequest, AttachDeviceRequest, CreatePortRequest, DeletePortRequest, DetachDeviceRequest, GetPortRequest,
GetSubnetRequest,
}; };
use tonic::metadata::MetadataValue; use tonic::metadata::MetadataValue;
use tonic::transport::Channel; use tonic::transport::Channel;
@ -11,6 +12,7 @@ use tonic::transport::Channel;
pub struct PrismNETClient { pub struct PrismNETClient {
auth_token: String, auth_token: String,
port_client: PortServiceClient<Channel>, port_client: PortServiceClient<Channel>,
subnet_client: SubnetServiceClient<Channel>,
} }
impl PrismNETClient { impl PrismNETClient {
@ -18,21 +20,21 @@ impl PrismNETClient {
pub async fn new( pub async fn new(
endpoint: String, endpoint: String,
auth_token: String, auth_token: String,
) -> Result<Self, Box<dyn std::error::Error>> { ) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
let channel = Channel::from_shared(endpoint)? let channel = Channel::from_shared(endpoint)?.connect().await?;
.connect() let port_client = PortServiceClient::new(channel.clone());
.await?; let subnet_client = SubnetServiceClient::new(channel);
let port_client = PortServiceClient::new(channel);
Ok(Self { Ok(Self {
auth_token, auth_token,
port_client, port_client,
subnet_client,
}) })
} }
fn request_with_auth<T>( fn request_with_auth<T>(
auth_token: &str, auth_token: &str,
payload: T, payload: T,
) -> Result<tonic::Request<T>, Box<dyn std::error::Error>> { ) -> Result<tonic::Request<T>, Box<dyn std::error::Error + Send + Sync>> {
let mut request = tonic::Request::new(payload); let mut request = tonic::Request::new(payload);
let token_value = MetadataValue::try_from(auth_token)?; let token_value = MetadataValue::try_from(auth_token)?;
request request
@ -48,15 +50,74 @@ impl PrismNETClient {
project_id: &str, project_id: &str,
subnet_id: &str, subnet_id: &str,
port_id: &str, port_id: &str,
) -> Result<prismnet_api::proto::Port, Box<dyn std::error::Error>> { ) -> Result<prismnet_api::proto::Port, Box<dyn std::error::Error + Send + Sync>> {
let request = Self::request_with_auth(&self.auth_token, GetPortRequest { let request = Self::request_with_auth(
&self.auth_token,
GetPortRequest {
org_id: org_id.to_string(), org_id: org_id.to_string(),
project_id: project_id.to_string(), project_id: project_id.to_string(),
subnet_id: subnet_id.to_string(), subnet_id: subnet_id.to_string(),
id: port_id.to_string(), id: port_id.to_string(),
})?; },
)?;
let response = self.port_client.get_port(request).await?; let response = self.port_client.get_port(request).await?;
Ok(response.into_inner().port.ok_or("Port not found in response")?) Ok(response
.into_inner()
.port
.ok_or("Port not found in response")?)
}
    /// Get subnet details, resolving by subnet ID when VPC ID is not known locally.
    ///
    /// The request is sent with an empty `vpc_id`; the server resolves the
    /// subnet by its ID alone. NOTE(review): confirm the PrismNET subnet
    /// service accepts an empty `vpc_id` for lookups.
    pub async fn get_subnet(
        &mut self,
        org_id: &str,
        project_id: &str,
        subnet_id: &str,
    ) -> Result<prismnet_api::proto::Subnet, Box<dyn std::error::Error + Send + Sync>> {
        let request = Self::request_with_auth(
            &self.auth_token,
            GetSubnetRequest {
                org_id: org_id.to_string(),
                project_id: project_id.to_string(),
                // Deliberately empty: lookup is keyed on the subnet id only.
                vpc_id: String::new(),
                id: subnet_id.to_string(),
            },
        )?;
        let response = self.subnet_client.get_subnet(request).await?;
        Ok(response
            .into_inner()
            .subnet
            .ok_or("Subnet not found in response")?)
    }
/// Create a port for a VM-managed NIC
pub async fn create_port(
&mut self,
org_id: &str,
project_id: &str,
subnet_id: &str,
name: &str,
description: Option<&str>,
ip_address: Option<&str>,
security_group_ids: Vec<String>,
) -> Result<prismnet_api::proto::Port, Box<dyn std::error::Error + Send + Sync>> {
let request = Self::request_with_auth(
&self.auth_token,
CreatePortRequest {
org_id: org_id.to_string(),
project_id: project_id.to_string(),
subnet_id: subnet_id.to_string(),
name: name.to_string(),
description: description.unwrap_or_default().to_string(),
ip_address: ip_address.unwrap_or_default().to_string(),
security_group_ids,
},
)?;
let response = self.port_client.create_port(request).await?;
Ok(response
.into_inner()
.port
.ok_or("Port not found in response")?)
} }
/// Attach a device to a port /// Attach a device to a port
@ -68,15 +129,18 @@ impl PrismNETClient {
port_id: &str, port_id: &str,
device_id: &str, device_id: &str,
device_type: i32, device_type: i32,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let request = Self::request_with_auth(&self.auth_token, AttachDeviceRequest { let request = Self::request_with_auth(
&self.auth_token,
AttachDeviceRequest {
org_id: org_id.to_string(), org_id: org_id.to_string(),
project_id: project_id.to_string(), project_id: project_id.to_string(),
subnet_id: subnet_id.to_string(), subnet_id: subnet_id.to_string(),
port_id: port_id.to_string(), port_id: port_id.to_string(),
device_id: device_id.to_string(), device_id: device_id.to_string(),
device_type, device_type,
})?; },
)?;
self.port_client.attach_device(request).await?; self.port_client.attach_device(request).await?;
Ok(()) Ok(())
} }
@ -88,16 +152,40 @@ impl PrismNETClient {
project_id: &str, project_id: &str,
subnet_id: &str, subnet_id: &str,
port_id: &str, port_id: &str,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let request = Self::request_with_auth(&self.auth_token, DetachDeviceRequest { let request = Self::request_with_auth(
&self.auth_token,
DetachDeviceRequest {
org_id: org_id.to_string(), org_id: org_id.to_string(),
project_id: project_id.to_string(), project_id: project_id.to_string(),
subnet_id: subnet_id.to_string(), subnet_id: subnet_id.to_string(),
port_id: port_id.to_string(), port_id: port_id.to_string(),
})?; },
)?;
self.port_client.detach_device(request).await?; self.port_client.detach_device(request).await?;
Ok(()) Ok(())
} }
/// Delete a port
pub async fn delete_port(
&mut self,
org_id: &str,
project_id: &str,
subnet_id: &str,
port_id: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let request = Self::request_with_auth(
&self.auth_token,
DeletePortRequest {
org_id: org_id.to_string(),
project_id: project_id.to_string(),
subnet_id: subnet_id.to_string(),
id: port_id.to_string(),
},
)?;
self.port_client.delete_port(request).await?;
Ok(())
}
} }
#[cfg(test)] #[cfg(test)]

View file

@ -11,23 +11,22 @@
use axum::{ use axum::{
extract::{Path, State}, extract::{Path, State},
http::StatusCode,
http::HeaderMap, http::HeaderMap,
http::StatusCode,
routing::{get, post}, routing::{get, post},
Json, Router, Json, Router,
}; };
use plasmavmc_api::proto::{ use plasmavmc_api::proto::{
CreateVmRequest, DeleteVmRequest, GetVmRequest, ListVmsRequest, vm_service_server::VmService, CreateVmRequest, DeleteVmRequest, GetVmRequest, ListVmsRequest,
StartVmRequest, StopVmRequest, MigrateVmRequest, VirtualMachine as ProtoVm, MigrateVmRequest, StartVmRequest, StopVmRequest, VirtualMachine as ProtoVm,
vm_service_server::VmService,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::sync::Arc; use std::sync::Arc;
use tonic::Request;
use tonic::Code; use tonic::Code;
use tonic::Request;
use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext};
use crate::VmServiceImpl; use crate::VmServiceImpl;
use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext};
/// REST API state /// REST API state
#[derive(Clone)] #[derive(Clone)]
@ -93,6 +92,8 @@ pub struct CreateVmRequestRest {
pub hypervisor: Option<String>, pub hypervisor: Option<String>,
#[serde(default)] #[serde(default)]
pub disks: Vec<DiskSpecRest>, pub disks: Vec<DiskSpecRest>,
#[serde(default)]
pub network: Vec<NetworkSpecRest>,
} }
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
@ -113,6 +114,22 @@ pub enum DiskSourceRest {
Blank, Blank,
} }
#[derive(Debug, Deserialize)]
pub struct NetworkSpecRest {
pub id: Option<String>,
pub network_id: Option<String>,
pub subnet_id: Option<String>,
pub port_id: Option<String>,
pub mac_address: Option<String>,
pub ip_address: Option<String>,
pub cidr_block: Option<String>,
pub gateway_ip: Option<String>,
pub dhcp_enabled: Option<bool>,
pub model: Option<String>,
#[serde(default)]
pub security_groups: Vec<String>,
}
/// VM migration request /// VM migration request
#[derive(Debug, Deserialize)] #[derive(Debug, Deserialize)]
pub struct MigrateVmRequestRest { pub struct MigrateVmRequestRest {
@ -126,23 +143,115 @@ pub struct MigrateVmRequestRest {
pub struct VmResponse { pub struct VmResponse {
pub id: String, pub id: String,
pub name: String, pub name: String,
pub org_id: String,
pub project_id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub node_id: Option<String>,
pub state: String, pub state: String,
pub hypervisor: String,
pub cpus: u32, pub cpus: u32,
pub memory_mb: u64, pub memory_mb: u64,
pub network: Vec<VmNetworkResponse>,
}
#[derive(Debug, Serialize)]
pub struct VmNetworkResponse {
pub id: String,
pub network_id: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub subnet_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub port_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub mac_address: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub ip_address: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub cidr_block: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub gateway_ip: Option<String>,
pub dhcp_enabled: bool,
pub model: String,
pub security_groups: Vec<String>,
}
fn nic_model_to_string(model: i32) -> String {
match plasmavmc_api::proto::NicModel::try_from(model)
.unwrap_or(plasmavmc_api::proto::NicModel::Unspecified)
{
plasmavmc_api::proto::NicModel::VirtioNet => "virtio-net".to_string(),
plasmavmc_api::proto::NicModel::E1000 => "e1000".to_string(),
plasmavmc_api::proto::NicModel::Unspecified => "unspecified".to_string(),
}
}
fn hypervisor_to_string(hypervisor: i32) -> String {
match plasmavmc_api::proto::HypervisorType::try_from(hypervisor)
.unwrap_or(plasmavmc_api::proto::HypervisorType::Unspecified)
{
plasmavmc_api::proto::HypervisorType::Kvm => "kvm".to_string(),
plasmavmc_api::proto::HypervisorType::Firecracker => "firecracker".to_string(),
plasmavmc_api::proto::HypervisorType::Mvisor => "mvisor".to_string(),
plasmavmc_api::proto::HypervisorType::Unspecified => "unspecified".to_string(),
}
}
impl From<plasmavmc_api::proto::NetworkSpec> for VmNetworkResponse {
fn from(network: plasmavmc_api::proto::NetworkSpec) -> Self {
Self {
id: network.id,
network_id: network.network_id,
subnet_id: (!network.subnet_id.is_empty()).then_some(network.subnet_id),
port_id: (!network.port_id.is_empty()).then_some(network.port_id),
mac_address: (!network.mac_address.is_empty()).then_some(network.mac_address),
ip_address: (!network.ip_address.is_empty()).then_some(network.ip_address),
cidr_block: (!network.cidr_block.is_empty()).then_some(network.cidr_block),
gateway_ip: (!network.gateway_ip.is_empty()).then_some(network.gateway_ip),
dhcp_enabled: network.dhcp_enabled,
model: nic_model_to_string(network.model),
security_groups: network.security_groups,
}
}
} }
impl From<ProtoVm> for VmResponse { impl From<ProtoVm> for VmResponse {
fn from(vm: ProtoVm) -> Self { fn from(vm: ProtoVm) -> Self {
let cpus = vm.spec.as_ref().and_then(|s| s.cpu.as_ref()).map(|c| c.vcpus).unwrap_or(1); let cpus = vm
let memory_mb = vm.spec.as_ref().and_then(|s| s.memory.as_ref()).map(|m| m.size_mib).unwrap_or(512); .spec
.as_ref()
.and_then(|s| s.cpu.as_ref())
.map(|c| c.vcpus)
.unwrap_or(1);
let memory_mb = vm
.spec
.as_ref()
.and_then(|s| s.memory.as_ref())
.map(|m| m.size_mib)
.unwrap_or(512);
let state = format!("{:?}", vm.state()); let state = format!("{:?}", vm.state());
let network = vm
.spec
.as_ref()
.map(|spec| {
spec.network
.clone()
.into_iter()
.map(VmNetworkResponse::from)
.collect()
})
.unwrap_or_default();
Self { Self {
id: vm.id, id: vm.id,
name: vm.name, name: vm.name,
org_id: vm.org_id,
project_id: vm.project_id,
node_id: (!vm.node_id.is_empty()).then_some(vm.node_id),
state, state,
hypervisor: hypervisor_to_string(vm.hypervisor),
cpus, cpus,
memory_mb, memory_mb,
network,
} }
} }
} }
@ -169,7 +278,9 @@ pub fn build_router(state: RestApiState) -> Router {
async fn health_check() -> (StatusCode, Json<SuccessResponse<serde_json::Value>>) { async fn health_check() -> (StatusCode, Json<SuccessResponse<serde_json::Value>>) {
( (
StatusCode::OK, StatusCode::OK,
Json(SuccessResponse::new(serde_json::json!({ "status": "healthy" }))), Json(SuccessResponse::new(
serde_json::json!({ "status": "healthy" }),
)),
) )
} }
@ -188,11 +299,18 @@ async fn list_vms(
}); });
req.extensions_mut().insert(tenant); req.extensions_mut().insert(tenant);
let response = state.vm_service.list_vms(req) let response = state
.vm_service
.list_vms(req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "LIST_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
let vms: Vec<VmResponse> = response.into_inner().vms.into_iter().map(VmResponse::from).collect(); let vms: Vec<VmResponse> = response
.into_inner()
.vms
.into_iter()
.map(VmResponse::from)
.collect();
Ok(Json(SuccessResponse::new(VmsResponse { vms }))) Ok(Json(SuccessResponse::new(VmsResponse { vms })))
} }
@ -204,19 +322,29 @@ async fn create_vm(
Json(req): Json<CreateVmRequestRest>, Json(req): Json<CreateVmRequestRest>,
) -> Result<(StatusCode, Json<SuccessResponse<VmResponse>>), (StatusCode, Json<ErrorResponse>)> { ) -> Result<(StatusCode, Json<SuccessResponse<VmResponse>>), (StatusCode, Json<ErrorResponse>)> {
use plasmavmc_api::proto::{ use plasmavmc_api::proto::{
disk_source, CpuSpec, DiskBus, DiskCache, DiskSource, DiskSpec, HypervisorType, disk_source, CpuSpec, DiskBus, DiskCache, DiskSource, DiskSpec, HypervisorType, MemorySpec,
MemorySpec, NicModel as ProtoNicModel,
}; };
let hypervisor_type = match req.hypervisor.as_deref() { let CreateVmRequestRest {
name,
org_id,
project_id,
vcpus,
memory_mib,
hypervisor,
disks,
network,
} = req;
let hypervisor_type = match hypervisor.as_deref() {
Some("kvm") => HypervisorType::Kvm, Some("kvm") => HypervisorType::Kvm,
Some("firecracker") => HypervisorType::Firecracker, Some("firecracker") => HypervisorType::Firecracker,
Some("mvisor") => HypervisorType::Mvisor, Some("mvisor") => HypervisorType::Mvisor,
_ => HypervisorType::Unspecified, _ => HypervisorType::Unspecified,
}; };
let disks = req let disks = disks
.disks
.into_iter() .into_iter()
.map(|disk| DiskSpec { .map(|disk| DiskSpec {
id: disk.id, id: disk.id,
@ -245,26 +373,46 @@ async fn create_vm(
}) })
.collect(); .collect();
let network = network
.into_iter()
.enumerate()
.map(|(index, nic)| plasmavmc_api::proto::NetworkSpec {
id: nic.id.unwrap_or_else(|| format!("nic{}", index)),
network_id: nic.network_id.unwrap_or_else(|| "default".to_string()),
subnet_id: nic.subnet_id.unwrap_or_default(),
port_id: nic.port_id.unwrap_or_default(),
mac_address: nic.mac_address.unwrap_or_default(),
ip_address: nic.ip_address.unwrap_or_default(),
cidr_block: nic.cidr_block.unwrap_or_default(),
gateway_ip: nic.gateway_ip.unwrap_or_default(),
dhcp_enabled: nic.dhcp_enabled.unwrap_or(false),
model: match nic.model.as_deref() {
Some("e1000") => ProtoNicModel::E1000 as i32,
_ => ProtoNicModel::VirtioNet as i32,
},
security_groups: nic.security_groups,
})
.collect();
let tenant = let tenant =
resolve_rest_tenant(&state, &headers, req.org_id.as_deref(), req.project_id.as_deref()) resolve_rest_tenant(&state, &headers, org_id.as_deref(), project_id.as_deref()).await?;
.await?;
let mut grpc_req = Request::new(CreateVmRequest { let mut grpc_req = Request::new(CreateVmRequest {
name: req.name, name,
org_id: tenant.org_id.clone(), org_id: tenant.org_id.clone(),
project_id: tenant.project_id.clone(), project_id: tenant.project_id.clone(),
spec: Some(plasmavmc_api::proto::VmSpec { spec: Some(plasmavmc_api::proto::VmSpec {
cpu: Some(CpuSpec { cpu: Some(CpuSpec {
vcpus: req.vcpus.unwrap_or(1), vcpus: vcpus.unwrap_or(1),
cores_per_socket: 1, cores_per_socket: 1,
sockets: 1, sockets: 1,
cpu_model: String::new(), cpu_model: String::new(),
}), }),
memory: Some(MemorySpec { memory: Some(MemorySpec {
size_mib: req.memory_mib.unwrap_or(512), size_mib: memory_mib.unwrap_or(512),
hugepages: false, hugepages: false,
}), }),
disks, disks,
network: vec![], network,
boot: None, boot: None,
security: None, security: None,
}), }),
@ -274,13 +422,17 @@ async fn create_vm(
}); });
grpc_req.extensions_mut().insert(tenant); grpc_req.extensions_mut().insert(tenant);
let response = state.vm_service.create_vm(grpc_req) let response = state
.vm_service
.create_vm(grpc_req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "CREATE_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
Ok(( Ok((
StatusCode::CREATED, StatusCode::CREATED,
Json(SuccessResponse::new(VmResponse::from(response.into_inner()))), Json(SuccessResponse::new(VmResponse::from(
response.into_inner(),
))),
)) ))
} }
@ -298,17 +450,15 @@ async fn get_vm(
}); });
req.extensions_mut().insert(tenant); req.extensions_mut().insert(tenant);
let response = state.vm_service.get_vm(req) let response = state
.vm_service
.get_vm(req)
.await .await
.map_err(|e| { .map_err(map_tonic_status)?;
if e.code() == tonic::Code::NotFound {
error_response(StatusCode::NOT_FOUND, "NOT_FOUND", "VM not found")
} else {
error_response(StatusCode::INTERNAL_SERVER_ERROR, "GET_FAILED", &e.message())
}
})?;
Ok(Json(SuccessResponse::new(VmResponse::from(response.into_inner())))) Ok(Json(SuccessResponse::new(VmResponse::from(
response.into_inner(),
))))
} }
/// DELETE /api/v1/vms/{id} - Delete VM /// DELETE /api/v1/vms/{id} - Delete VM
@ -316,7 +466,8 @@ async fn delete_vm(
State(state): State<RestApiState>, State(state): State<RestApiState>,
Path(id): Path<String>, Path(id): Path<String>,
headers: HeaderMap, headers: HeaderMap,
) -> Result<(StatusCode, Json<SuccessResponse<serde_json::Value>>), (StatusCode, Json<ErrorResponse>)> { ) -> Result<(StatusCode, Json<SuccessResponse<serde_json::Value>>), (StatusCode, Json<ErrorResponse>)>
{
let tenant = resolve_rest_tenant(&state, &headers, None, None).await?; let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
let mut req = Request::new(DeleteVmRequest { let mut req = Request::new(DeleteVmRequest {
org_id: tenant.org_id.clone(), org_id: tenant.org_id.clone(),
@ -326,13 +477,17 @@ async fn delete_vm(
}); });
req.extensions_mut().insert(tenant); req.extensions_mut().insert(tenant);
state.vm_service.delete_vm(req) state
.vm_service
.delete_vm(req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "DELETE_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
Ok(( Ok((
StatusCode::OK, StatusCode::OK,
Json(SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }))), Json(SuccessResponse::new(
serde_json::json!({ "id": id, "deleted": true }),
)),
)) ))
} }
@ -350,11 +505,15 @@ async fn start_vm(
}); });
req.extensions_mut().insert(tenant); req.extensions_mut().insert(tenant);
state.vm_service.start_vm(req) state
.vm_service
.start_vm(req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "START_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
Ok(Json(SuccessResponse::new(serde_json::json!({ "id": id, "action": "started" })))) Ok(Json(SuccessResponse::new(
serde_json::json!({ "id": id, "action": "started" }),
)))
} }
/// POST /api/v1/vms/{id}/stop - Stop VM /// POST /api/v1/vms/{id}/stop - Stop VM
@ -373,11 +532,15 @@ async fn stop_vm(
}); });
req.extensions_mut().insert(tenant); req.extensions_mut().insert(tenant);
state.vm_service.stop_vm(req) state
.vm_service
.stop_vm(req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "STOP_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
Ok(Json(SuccessResponse::new(serde_json::json!({ "id": id, "action": "stopped" })))) Ok(Json(SuccessResponse::new(
serde_json::json!({ "id": id, "action": "stopped" }),
)))
} }
/// POST /api/v1/vms/{id}/migrate - Migrate VM /// POST /api/v1/vms/{id}/migrate - Migrate VM
@ -402,9 +565,11 @@ async fn migrate_vm(
.vm_service .vm_service
.migrate_vm(grpc_req) .migrate_vm(grpc_req)
.await .await
.map_err(|e| error_response(StatusCode::INTERNAL_SERVER_ERROR, "MIGRATE_FAILED", &e.message()))?; .map_err(map_tonic_status)?;
Ok(Json(SuccessResponse::new(VmResponse::from(response.into_inner())))) Ok(Json(SuccessResponse::new(VmResponse::from(
response.into_inner(),
))))
} }
/// Helper to create error response /// Helper to create error response
@ -448,11 +613,18 @@ async fn resolve_rest_tenant(
} }
fn map_auth_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) { fn map_auth_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
map_tonic_status(status)
}
fn map_tonic_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
let status_code = match status.code() { let status_code = match status.code() {
Code::Unauthenticated => StatusCode::UNAUTHORIZED, Code::Unauthenticated => StatusCode::UNAUTHORIZED,
Code::PermissionDenied => StatusCode::FORBIDDEN, Code::PermissionDenied => StatusCode::FORBIDDEN,
Code::InvalidArgument => StatusCode::BAD_REQUEST, Code::InvalidArgument => StatusCode::BAD_REQUEST,
Code::NotFound => StatusCode::NOT_FOUND, Code::NotFound => StatusCode::NOT_FOUND,
Code::AlreadyExists => StatusCode::CONFLICT,
Code::ResourceExhausted => StatusCode::TOO_MANY_REQUESTS,
Code::FailedPrecondition => StatusCode::UNPROCESSABLE_ENTITY,
_ => StatusCode::INTERNAL_SERVER_ERROR, _ => StatusCode::INTERNAL_SERVER_ERROR,
}; };
let code = match status.code() { let code = match status.code() {
@ -460,8 +632,102 @@ fn map_auth_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
Code::PermissionDenied => "FORBIDDEN", Code::PermissionDenied => "FORBIDDEN",
Code::InvalidArgument => "INVALID_ARGUMENT", Code::InvalidArgument => "INVALID_ARGUMENT",
Code::NotFound => "NOT_FOUND", Code::NotFound => "NOT_FOUND",
Code::AlreadyExists => "ALREADY_EXISTS",
Code::ResourceExhausted => "RESOURCE_EXHAUSTED",
Code::FailedPrecondition => "FAILED_PRECONDITION",
_ => "INTERNAL", _ => "INTERNAL",
}; };
error_response(status_code, code, status.message()) error_response(status_code, code, status.message())
} }
#[cfg(test)]
mod tests {
use super::*;
use plasmavmc_api::proto::{
CpuSpec, HypervisorType, MemorySpec, NetworkSpec, NicModel, VirtualMachine as ProtoVm,
VmSpec,
};
#[test]
fn map_tonic_status_preserves_client_error_categories() {
let (status, Json(body)) = map_tonic_status(tonic::Status::not_found("missing vm"));
assert_eq!(status, StatusCode::NOT_FOUND);
assert_eq!(body.error.code, "NOT_FOUND");
let (status, Json(body)) = map_tonic_status(tonic::Status::invalid_argument("bad nic"));
assert_eq!(status, StatusCode::BAD_REQUEST);
assert_eq!(body.error.code, "INVALID_ARGUMENT");
let (status, Json(body)) =
map_tonic_status(tonic::Status::failed_precondition("network attach failed"));
assert_eq!(status, StatusCode::UNPROCESSABLE_ENTITY);
assert_eq!(body.error.code, "FAILED_PRECONDITION");
}
#[test]
fn vm_response_exposes_network_details() {
let response = VmResponse::from(ProtoVm {
id: "vm-1".to_string(),
name: "vm-1".to_string(),
org_id: "org-1".to_string(),
project_id: "proj-1".to_string(),
state: plasmavmc_api::proto::VmState::Running as i32,
spec: Some(VmSpec {
cpu: Some(CpuSpec {
vcpus: 2,
cores_per_socket: 1,
sockets: 1,
cpu_model: String::new(),
}),
memory: Some(MemorySpec {
size_mib: 2048,
hugepages: false,
}),
disks: vec![],
network: vec![NetworkSpec {
id: "nic0".to_string(),
network_id: "default".to_string(),
mac_address: "02:00:00:00:00:01".to_string(),
ip_address: "10.62.10.15".to_string(),
cidr_block: "10.62.10.0/24".to_string(),
gateway_ip: "10.62.10.1".to_string(),
dhcp_enabled: true,
model: NicModel::VirtioNet as i32,
security_groups: vec!["sg-1".to_string()],
port_id: "port-1".to_string(),
subnet_id: "subnet-1".to_string(),
}],
boot: None,
security: None,
}),
status: None,
node_id: "node04".to_string(),
hypervisor: HypervisorType::Kvm as i32,
created_at: 0,
updated_at: 0,
created_by: String::new(),
metadata: Default::default(),
labels: Default::default(),
});
assert_eq!(response.hypervisor, "kvm");
assert_eq!(response.node_id.as_deref(), Some("node04"));
assert_eq!(response.network.len(), 1);
assert_eq!(response.network[0].port_id.as_deref(), Some("port-1"));
assert_eq!(response.network[0].subnet_id.as_deref(), Some("subnet-1"));
assert_eq!(
response.network[0].ip_address.as_deref(),
Some("10.62.10.15")
);
assert_eq!(
response.network[0].cidr_block.as_deref(),
Some("10.62.10.0/24")
);
assert_eq!(
response.network[0].gateway_ip.as_deref(),
Some("10.62.10.1")
);
assert!(response.network[0].dhcp_enabled);
}
}

View file

@ -45,6 +45,7 @@ use plasmavmc_types::{
NetworkSpec, NicModel, Node, NodeCapacity, NodeId, NodeState, OsType, Visibility, VmId, NetworkSpec, NicModel, Node, NodeCapacity, NodeId, NodeState, OsType, Visibility, VmId,
VmState, Volume, VolumeBacking, VolumeDriverKind, VolumeFormat, VolumeStatus, VmState, Volume, VolumeBacking, VolumeDriverKind, VolumeFormat, VolumeStatus,
}; };
use serde::{Deserialize, Serialize};
use std::collections::HashSet; use std::collections::HashSet;
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::sync::Arc; use std::sync::Arc;
@ -75,6 +76,7 @@ const NODE_ENDPOINT_LABEL: &str = "plasmavmc_endpoint";
const FAILOVER_META_KEY: &str = "failover_at"; const FAILOVER_META_KEY: &str = "failover_at";
const FAILOVER_TARGET_KEY: &str = "failover_target"; const FAILOVER_TARGET_KEY: &str = "failover_target";
const PRISMNET_VM_DEVICE_TYPE: i32 = prismnet_api::proto::DeviceType::Vm as i32; const PRISMNET_VM_DEVICE_TYPE: i32 = prismnet_api::proto::DeviceType::Vm as i32;
const PRISMNET_AUTO_PORTS_METADATA_KEY: &str = "plasmavmc.prismnet.auto_ports";
const STORE_OP_TIMEOUT: Duration = Duration::from_secs(5); const STORE_OP_TIMEOUT: Duration = Duration::from_secs(5);
/// VM Service implementation /// VM Service implementation
@ -113,6 +115,13 @@ struct TenantKey {
vm_id: String, vm_id: String,
} }
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct AutoPrismnetPort {
nic_id: String,
subnet_id: String,
port_id: String,
}
impl PartialEq for TenantKey { impl PartialEq for TenantKey {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.org_id == other.org_id self.org_id == other.org_id
@ -332,6 +341,50 @@ impl VmServiceImpl {
Ok(()) Ok(())
} }
fn validate_network_reference(network: &plasmavmc_types::NetworkSpec) -> Result<(), Status> {
if network.port_id.is_some() && network.subnet_id.is_none() {
return Err(Status::invalid_argument(
"subnet_id is required when port_id is specified",
));
}
if !network.security_groups.is_empty() && network.subnet_id.is_none() {
return Err(Status::invalid_argument(
"subnet_id is required when security_groups are specified",
));
}
if let Some(subnet_id) = network.subnet_id.as_deref() {
Self::require_uuid(subnet_id, "subnet_id")?;
}
if let Some(port_id) = network.port_id.as_deref() {
Self::require_uuid(port_id, "port_id")?;
}
for security_group in &network.security_groups {
Self::require_uuid(security_group, "security_group_id")?;
}
Ok(())
}
fn validate_vm_network_references(spec: &plasmavmc_types::VmSpec) -> Result<(), Status> {
for network in &spec.network {
Self::validate_network_reference(network)?;
}
Ok(())
}
fn map_prismnet_error(
error: &(dyn std::error::Error + Send + Sync + 'static),
action: &str,
) -> Status {
if let Some(status) = error.downcast_ref::<tonic::Status>() {
return Status::new(
status.code(),
format!("failed to {action}: {}", status.message()),
);
}
Status::failed_precondition(format!("failed to {action}: {error}"))
}
fn ensure_internal_rpc(tenant: &TenantContext) -> Result<(), Status> { fn ensure_internal_rpc(tenant: &TenantContext) -> Result<(), Status> {
if tenant.principal_kind != PrincipalKind::ServiceAccount if tenant.principal_kind != PrincipalKind::ServiceAccount
|| !tenant.principal_id.starts_with("plasmavmc-") || !tenant.principal_id.starts_with("plasmavmc-")
@ -563,6 +616,28 @@ impl VmServiceImpl {
.collect() .collect()
} }
fn load_auto_prismnet_ports(vm: &plasmavmc_types::VirtualMachine) -> Vec<AutoPrismnetPort> {
vm.metadata
.get(PRISMNET_AUTO_PORTS_METADATA_KEY)
.and_then(|value| serde_json::from_str(value).ok())
.unwrap_or_default()
}
fn record_auto_prismnet_port(
vm: &mut plasmavmc_types::VirtualMachine,
auto_port: AutoPrismnetPort,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let mut auto_ports = Self::load_auto_prismnet_ports(vm);
if auto_ports.iter().all(|entry| entry != &auto_port) {
auto_ports.push(auto_port);
vm.metadata.insert(
PRISMNET_AUTO_PORTS_METADATA_KEY.to_string(),
serde_json::to_string(&auto_ports)?,
);
}
Ok(())
}
async fn ensure_nodes_loaded(&self) { async fn ensure_nodes_loaded(&self) {
if !self.nodes.is_empty() { if !self.nodes.is_empty() {
return; return;
@ -813,6 +888,17 @@ impl VmServiceImpl {
} else { } else {
Some(n.ip_address) Some(n.ip_address)
}, },
cidr_block: if n.cidr_block.is_empty() {
None
} else {
Some(n.cidr_block)
},
gateway_ip: if n.gateway_ip.is_empty() {
None
} else {
Some(n.gateway_ip)
},
dhcp_enabled: n.dhcp_enabled,
model: Self::map_nic_model(n.model), model: Self::map_nic_model(n.model),
security_groups: n.security_groups, security_groups: n.security_groups,
} }
@ -889,6 +975,17 @@ impl VmServiceImpl {
} else { } else {
Some(n.ip_address) Some(n.ip_address)
}, },
cidr_block: if n.cidr_block.is_empty() {
None
} else {
Some(n.cidr_block)
},
gateway_ip: if n.gateway_ip.is_empty() {
None
} else {
Some(n.gateway_ip)
},
dhcp_enabled: n.dhcp_enabled,
model: Self::map_nic_model(n.model), model: Self::map_nic_model(n.model),
security_groups: n.security_groups, security_groups: n.security_groups,
}) })
@ -1014,6 +1111,9 @@ impl VmServiceImpl {
port_id: n.port_id.clone().unwrap_or_default(), port_id: n.port_id.clone().unwrap_or_default(),
mac_address: n.mac_address.clone().unwrap_or_default(), mac_address: n.mac_address.clone().unwrap_or_default(),
ip_address: n.ip_address.clone().unwrap_or_default(), ip_address: n.ip_address.clone().unwrap_or_default(),
cidr_block: n.cidr_block.clone().unwrap_or_default(),
gateway_ip: n.gateway_ip.clone().unwrap_or_default(),
dhcp_enabled: n.dhcp_enabled,
model: match n.model { model: match n.model {
NicModel::VirtioNet => ProtoNicModel::VirtioNet as i32, NicModel::VirtioNet => ProtoNicModel::VirtioNet as i32,
NicModel::E1000 => ProtoNicModel::E1000 as i32, NicModel::E1000 => ProtoNicModel::E1000 as i32,
@ -1625,7 +1725,7 @@ impl VmServiceImpl {
async fn attach_prismnet_ports( async fn attach_prismnet_ports(
&self, &self,
vm: &mut plasmavmc_types::VirtualMachine, vm: &mut plasmavmc_types::VirtualMachine,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let Some(ref endpoint) = self.prismnet_endpoint else { let Some(ref endpoint) = self.prismnet_endpoint else {
return Ok(()); return Ok(());
}; };
@ -1635,30 +1735,62 @@ impl VmServiceImpl {
.await?; .await?;
let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?; let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?;
for net_spec in &mut vm.spec.network { for nic_index in 0..vm.spec.network.len() {
if let (Some(ref subnet_id), Some(ref port_id)) = let nic = vm.spec.network[nic_index].clone();
(&net_spec.subnet_id, &net_spec.port_id) let Some(subnet_id) = nic.subnet_id.clone() else {
{ continue;
// Get port details from PrismNET };
let port = if let Some(port_id) = nic.port_id.clone() {
client
.get_port(&vm.org_id, &vm.project_id, &subnet_id, &port_id)
.await?
} else {
let port = client let port = client
.get_port(&vm.org_id, &vm.project_id, subnet_id, port_id) .create_port(
&vm.org_id,
&vm.project_id,
&subnet_id,
&format!("{}-{}", vm.name, nic.id),
Some(&format!("Auto-managed port for VM {}", vm.id)),
nic.ip_address.as_deref(),
nic.security_groups.clone(),
)
.await?;
Self::record_auto_prismnet_port(
vm,
AutoPrismnetPort {
nic_id: nic.id.clone(),
subnet_id: subnet_id.clone(),
port_id: port.id.clone(),
},
)?;
port
};
let subnet = client
.get_subnet(&vm.org_id, &vm.project_id, &subnet_id)
.await?; .await?;
// Update network spec with port information {
let net_spec = &mut vm.spec.network[nic_index];
net_spec.port_id = Some(port.id.clone());
net_spec.mac_address = Some(port.mac_address.clone()); net_spec.mac_address = Some(port.mac_address.clone());
net_spec.ip_address = if port.ip_address.is_empty() { net_spec.ip_address = if port.ip_address.is_empty() {
None None
} else { } else {
Some(port.ip_address.clone()) Some(port.ip_address.clone())
}; };
net_spec.cidr_block = (!subnet.cidr_block.is_empty()).then_some(subnet.cidr_block);
net_spec.gateway_ip = (!subnet.gateway_ip.is_empty()).then_some(subnet.gateway_ip);
net_spec.dhcp_enabled = subnet.dhcp_enabled;
}
// Attach VM to the PrismNET port using the generated enum value.
client client
.attach_device( .attach_device(
&vm.org_id, &vm.org_id,
&vm.project_id, &vm.project_id,
subnet_id, &subnet_id,
port_id, &port.id,
&vm.id.to_string(), &vm.id.to_string(),
PRISMNET_VM_DEVICE_TYPE, PRISMNET_VM_DEVICE_TYPE,
) )
@ -1666,12 +1798,12 @@ impl VmServiceImpl {
tracing::info!( tracing::info!(
vm_id = %vm.id, vm_id = %vm.id,
port_id = %port_id, nic_id = %nic.id,
port_id = %port.id,
mac = %port.mac_address, mac = %port.mac_address,
"Attached VM to PrismNET port" "Attached VM to PrismNET port"
); );
} }
}
Ok(()) Ok(())
} }
@ -1679,7 +1811,7 @@ impl VmServiceImpl {
async fn detach_prismnet_ports( async fn detach_prismnet_ports(
&self, &self,
vm: &plasmavmc_types::VirtualMachine, vm: &plasmavmc_types::VirtualMachine,
) -> Result<(), Box<dyn std::error::Error>> { ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let Some(ref endpoint) = self.prismnet_endpoint else { let Some(ref endpoint) = self.prismnet_endpoint else {
return Ok(()); return Ok(());
}; };
@ -1708,6 +1840,53 @@ impl VmServiceImpl {
Ok(()) Ok(())
} }
async fn delete_auto_prismnet_ports(
&self,
vm: &plasmavmc_types::VirtualMachine,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
let Some(ref endpoint) = self.prismnet_endpoint else {
return Ok(());
};
let auto_ports = Self::load_auto_prismnet_ports(vm);
if auto_ports.is_empty() {
return Ok(());
}
let auth_token = self
.issue_internal_token(&vm.org_id, &vm.project_id)
.await?;
let mut client = PrismNETClient::new(endpoint.clone(), auth_token).await?;
for auto_port in auto_ports {
let result = client
.delete_port(
&vm.org_id,
&vm.project_id,
&auto_port.subnet_id,
&auto_port.port_id,
)
.await;
if let Err(error) = result {
if error
.downcast_ref::<tonic::Status>()
.is_some_and(|status| status.code() == tonic::Code::NotFound)
{
continue;
}
return Err(error);
}
tracing::info!(
vm_id = %vm.id,
nic_id = %auto_port.nic_id,
port_id = %auto_port.port_id,
"Deleted auto-managed PrismNET port"
);
}
Ok(())
}
async fn rollback_prepared_vm_resources( async fn rollback_prepared_vm_resources(
&self, &self,
vm: &plasmavmc_types::VirtualMachine, vm: &plasmavmc_types::VirtualMachine,
@ -1720,6 +1899,13 @@ impl VmServiceImpl {
"Failed to detach PrismNET ports during VM rollback" "Failed to detach PrismNET ports during VM rollback"
); );
} }
if let Err(error) = self.delete_auto_prismnet_ports(vm).await {
tracing::warn!(
vm_id = %vm.id,
error = %error,
"Failed to delete auto-managed PrismNET ports during VM rollback"
);
}
if let Err(error) = self if let Err(error) = self
.volume_manager .volume_manager
.rollback_vm_volumes(vm, delete_auto_delete_volumes) .rollback_vm_volumes(vm, delete_auto_delete_volumes)
@ -2064,7 +2250,339 @@ impl VmServiceImpl {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use plasmavmc_types::VmSpec; use async_trait::async_trait;
use iam_api::{
iam_admin_server::IamAdminServer, iam_authz_server::IamAuthzServer,
iam_token_server::IamTokenServer, IamAdminService, IamAuthzService, IamTokenService,
};
use iam_authn::{InternalTokenConfig, InternalTokenService, SigningKey};
use iam_authz::{PolicyCache, PolicyEvaluator};
use iam_store::{
Backend, BindingStore, GroupStore, OrgStore, PrincipalStore, ProjectStore, RoleStore,
TokenStore,
};
use iam_types::{PolicyBinding, Principal, PrincipalRef, Scope};
use plasmavmc_api::proto::{
CpuSpec as ProtoCpuSpec, HypervisorType as ProtoHypervisorType,
MemorySpec as ProtoMemorySpec, NetworkSpec as ProtoNetworkSpec,
};
use plasmavmc_hypervisor::{BackendCapabilities, HypervisorBackend, UnsupportedReason};
use plasmavmc_types::{AttachedDisk, VmHandle, VmSpec, VmStatus};
use prismnet_api::{
port_service_server::PortServiceServer, subnet_service_server::SubnetServiceServer,
};
use prismnet_server::{NetworkMetadataStore, OvnClient, PortServiceImpl, SubnetServiceImpl};
use prismnet_types::{
DeviceType as PrismnetDeviceType, Port, PortId as PrismnetPortId, Subnet, Vpc,
};
use std::net::SocketAddr;
use std::sync::Mutex;
use tempfile::{tempdir, TempDir};
use tokio_stream::wrappers::TcpListenerStream;
use tonic::transport::Server;
    /// In-memory hypervisor backend used by the service tests.
    ///
    /// Records every VM handed to `create`/`prepare_incoming` and every VM id
    /// handed to `delete`, so tests can assert on exactly what the service
    /// asked the hypervisor layer to do without touching a real hypervisor.
    #[derive(Default)]
    struct MockBackend {
        // VMs passed to `create` / `prepare_incoming`, in call order.
        created: Mutex<Vec<plasmavmc_types::VirtualMachine>>,
        // VM ids passed to `delete`, in call order.
        deleted: Mutex<Vec<String>>,
    }
impl MockBackend {
fn last_created_vm(&self) -> Option<plasmavmc_types::VirtualMachine> {
self.created.lock().unwrap().last().cloned()
}
}
    /// Trait implementation for the test double: every lifecycle operation
    /// succeeds immediately; only `create`/`prepare_incoming` and `delete`
    /// record their arguments for later assertions.
    #[async_trait]
    impl HypervisorBackend for MockBackend {
        // Tests register this backend as the KVM backend.
        fn backend_type(&self) -> HypervisorType {
            HypervisorType::Kvm
        }
        fn capabilities(&self) -> BackendCapabilities {
            BackendCapabilities::default()
        }
        // Accept every spec so validation failures under test come from the
        // service layer, never from the backend.
        fn supports(&self, _spec: &VmSpec) -> std::result::Result<(), UnsupportedReason> {
            Ok(())
        }
        /// Records the VM and returns a synthetic handle rooted at /tmp/<id>
        /// that carries the attached disks it was given.
        async fn create(
            &self,
            vm: &plasmavmc_types::VirtualMachine,
            disks: &[AttachedDisk],
        ) -> plasmavmc_types::Result<VmHandle> {
            self.created.lock().unwrap().push(vm.clone());
            let mut handle = VmHandle::new(vm.id, format!("/tmp/{}", vm.id));
            handle.attached_disks = disks.to_vec();
            Ok(handle)
        }
        async fn start(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn stop(
            &self,
            _handle: &VmHandle,
            _timeout: Duration,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn kill(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn reboot(&self, _handle: &VmHandle) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn migrate(
            &self,
            _handle: &VmHandle,
            _destination_uri: &str,
            _timeout: Duration,
            _wait: bool,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        // Incoming migration is modeled as an ordinary create, so the VM is
        // recorded in `created` here as well.
        async fn prepare_incoming(
            &self,
            vm: &plasmavmc_types::VirtualMachine,
            _listen_uri: &str,
            disks: &[AttachedDisk],
        ) -> plasmavmc_types::Result<VmHandle> {
            self.create(vm, disks).await
        }
        /// Records the deleted VM id for later assertions.
        async fn delete(&self, handle: &VmHandle) -> plasmavmc_types::Result<()> {
            self.deleted.lock().unwrap().push(handle.vm_id.to_string());
            Ok(())
        }
        // Always reports the VM as stopped; other status fields stay default.
        async fn status(&self, _handle: &VmHandle) -> plasmavmc_types::Result<VmStatus> {
            Ok(VmStatus {
                actual_state: VmState::Stopped,
                ..VmStatus::default()
            })
        }
        async fn attach_disk(
            &self,
            _handle: &VmHandle,
            _disk: &AttachedDisk,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn detach_disk(
            &self,
            _handle: &VmHandle,
            _disk_id: &str,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn attach_nic(
            &self,
            _handle: &VmHandle,
            _nic: &NetworkSpec,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
        async fn detach_nic(
            &self,
            _handle: &VmHandle,
            _nic_id: &str,
        ) -> plasmavmc_types::Result<()> {
            Ok(())
        }
    }
async fn wait_for_test_tcp(addr: SocketAddr) {
let deadline = tokio::time::Instant::now() + Duration::from_secs(2);
loop {
if tokio::net::TcpStream::connect(addr).await.is_ok() {
return;
}
assert!(
tokio::time::Instant::now() < deadline,
"timed out waiting for test listener {}",
addr
);
tokio::time::sleep(Duration::from_millis(25)).await;
}
}
    /// Boots a full in-memory IAM gRPC server (authz + token + admin) on an
    /// ephemeral port and returns its `http://host:port` endpoint.
    ///
    /// Seeds one principal, `user-1`, bound as `roles/ProjectAdmin` on
    /// `proj-1` in `org-1` — matching the tenant produced by `test_tenant()`.
    /// The server task is detached; it lives for the rest of the test process.
    async fn start_test_iam_server() -> String {
        // All stores share one in-memory backend; `project_store` takes the
        // last clone by value.
        let backend = Arc::new(Backend::memory());
        let principal_store = Arc::new(PrincipalStore::new(backend.clone()));
        let role_store = Arc::new(RoleStore::new(backend.clone()));
        let binding_store = Arc::new(BindingStore::new(backend.clone()));
        let token_store = Arc::new(TokenStore::new(backend.clone()));
        let group_store = Arc::new(GroupStore::new(backend.clone()));
        let org_store = Arc::new(OrgStore::new(backend.clone()));
        let project_store = Arc::new(ProjectStore::new(backend));
        // Built-in roles must exist before the ProjectAdmin binding below can
        // resolve.
        role_store.init_builtin_roles().await.unwrap();
        principal_store
            .create(&Principal::new_user("user-1", "User One"))
            .await
            .unwrap();
        binding_store
            .create(&PolicyBinding::new(
                "binding-user-1-project-admin",
                PrincipalRef::user("user-1"),
                "roles/ProjectAdmin",
                Scope::project("proj-1", "org-1"),
            ))
            .await
            .unwrap();
        let cache = Arc::new(PolicyCache::default_config());
        let evaluator = Arc::new(PolicyEvaluator::with_group_store(
            binding_store.clone(),
            role_store.clone(),
            group_store.clone(),
            cache,
        ));
        // Internal token signing uses a throwaway generated key; nothing here
        // persists beyond the test process.
        let token_service = Arc::new(InternalTokenService::new(InternalTokenConfig::new(
            SigningKey::generate("vm-service-test-key"),
            "vm-service-test",
        )));
        let authz_service = IamAuthzService::new(evaluator.clone(), principal_store.clone());
        let token_grpc_service =
            IamTokenService::new(token_service, principal_store.clone(), token_store, None);
        let admin_service = IamAdminService::new(
            principal_store,
            role_store,
            binding_store,
            org_store,
            project_store,
            group_store,
        )
        .with_evaluator(evaluator);
        // Bind port 0 so parallel tests never collide on a fixed port.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        tokio::spawn(async move {
            Server::builder()
                .add_service(IamAuthzServer::new(authz_service))
                .add_service(IamTokenServer::new(token_grpc_service))
                .add_service(IamAdminServer::new(admin_service))
                .serve_with_incoming(TcpListenerStream::new(listener))
                .await
                .unwrap();
        });
        // Don't return until the listener actually accepts connections.
        wait_for_test_tcp(addr).await;
        format!("http://{}", addr)
    }
    /// Boots a PrismNET gRPC server (subnet + port services, mock OVN) backed
    /// by the supplied metadata store and returns its endpoint string.
    ///
    /// Authentication is delegated to the IAM server at `iam_endpoint`; every
    /// request passes through an interceptor that authenticates and injects
    /// the tenant context.
    async fn start_test_prismnet_server(
        iam_endpoint: &str,
        metadata: Arc<NetworkMetadataStore>,
    ) -> String {
        let auth_service = Arc::new(AuthService::new(iam_endpoint).await.unwrap());
        let ovn = Arc::new(OvnClient::new_mock());
        let port_service = PortServiceImpl::new(metadata.clone(), ovn, auth_service.clone());
        let subnet_service = SubnetServiceImpl::new(metadata, auth_service.clone());
        let auth_handle = tokio::runtime::Handle::current();
        // Tonic interceptors are synchronous, but authentication is async, so
        // bridge with block_in_place + block_on. block_in_place requires a
        // multi-thread runtime — hence the tests that call this helper use
        // #[tokio::test(flavor = "multi_thread")].
        let interceptor = move |mut req: Request<()>| -> Result<Request<()>, Status> {
            let auth = auth_service.clone();
            tokio::task::block_in_place(|| {
                auth_handle.block_on(async move {
                    let tenant_context = auth.authenticate_request(&req).await?;
                    req.extensions_mut().insert(tenant_context);
                    Ok(req)
                })
            })
        };
        // Ephemeral port so parallel tests never collide.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let addr = listener.local_addr().unwrap();
        tokio::spawn(async move {
            Server::builder()
                .add_service(SubnetServiceServer::with_interceptor(
                    subnet_service,
                    interceptor.clone(),
                ))
                .add_service(PortServiceServer::with_interceptor(
                    port_service,
                    interceptor,
                ))
                .serve_with_incoming(TcpListenerStream::new(listener))
                .await
                .unwrap();
        });
        // Don't return until the listener actually accepts connections.
        wait_for_test_tcp(addr).await;
        format!("http://{}", addr)
    }
    /// Builds a `VmServiceImpl` wired to the mock hypervisor backend, with
    /// file-backed state and managed volumes rooted in a fresh tempdir.
    ///
    /// Returns the tempdir (keep it alive — dropping it deletes the state
    /// directory), the service under test, and the mock backend for
    /// assertions. `prismnet_endpoint` toggles PrismNET integration.
    async fn new_test_vm_service(
        iam_endpoint: &str,
        prismnet_endpoint: Option<String>,
    ) -> (TempDir, VmServiceImpl, Arc<MockBackend>) {
        let tempdir = tempdir().unwrap();
        let registry = Arc::new(HypervisorRegistry::new());
        let backend = Arc::new(MockBackend::default());
        registry.register(backend.clone());
        let mut config = ServerConfig::default();
        config.storage.backend = crate::config::StorageBackendKind::File;
        config.storage.state_path = Some(tempdir.path().join("state"));
        config.integrations.prismnet_endpoint = prismnet_endpoint;
        config.volumes.managed_volume_root = tempdir.path().join("managed-volumes");
        // Point qemu-img at the test binary itself: an existing executable
        // path that satisfies configuration without invoking real qemu-img.
        config.volumes.qemu_img_path = Some(std::env::current_exe().unwrap());
        let auth = Arc::new(AuthService::new(iam_endpoint).await.unwrap());
        let service = VmServiceImpl::new(registry, auth, iam_endpoint.to_string(), &config)
            .await
            .unwrap();
        (tempdir, service, backend)
    }
fn test_tenant() -> TenantContext {
TenantContext {
org_id: "org-1".to_string(),
project_id: "proj-1".to_string(),
principal_id: "user-1".to_string(),
principal_name: "User One".to_string(),
principal_kind: PrincipalKind::User,
node_id: None,
}
}
fn test_vm_request(network: Vec<ProtoNetworkSpec>) -> CreateVmRequest {
CreateVmRequest {
name: "test-vm".to_string(),
org_id: "org-1".to_string(),
project_id: "proj-1".to_string(),
spec: Some(ProtoVmSpec {
cpu: Some(ProtoCpuSpec {
vcpus: 1,
cores_per_socket: 1,
sockets: 1,
cpu_model: String::new(),
}),
memory: Some(ProtoMemorySpec {
size_mib: 512,
hugepages: false,
}),
disks: vec![],
network,
boot: None,
security: None,
}),
hypervisor: ProtoHypervisorType::Kvm as i32,
metadata: Default::default(),
labels: Default::default(),
}
}
#[test] #[test]
fn unspecified_disk_cache_defaults_to_writeback() { fn unspecified_disk_cache_defaults_to_writeback() {
@ -2163,6 +2681,216 @@ mod tests {
assert!(VmServiceImpl::validate_vm_disk_references(&spec).is_err()); assert!(VmServiceImpl::validate_vm_disk_references(&spec).is_err());
} }
    /// A NIC that names a port_id but no subnet_id must be rejected with
    /// InvalidArgument before anything reaches the hypervisor backend.
    #[tokio::test]
    async fn vm_network_reference_validation_requires_subnet_for_port_id() {
        let iam_endpoint = start_test_iam_server().await;
        // No PrismNET endpoint: validation alone must reject this request.
        let (_tempdir, service, backend) = new_test_vm_service(&iam_endpoint, None).await;
        let mut request = Request::new(test_vm_request(vec![ProtoNetworkSpec {
            id: "nic0".to_string(),
            network_id: "default".to_string(),
            // port_id set while subnet_id is empty — the invalid combination.
            subnet_id: String::new(),
            port_id: Uuid::new_v4().to_string(),
            mac_address: String::new(),
            ip_address: String::new(),
            cidr_block: String::new(),
            gateway_ip: String::new(),
            dhcp_enabled: false,
            model: ProtoNicModel::VirtioNet as i32,
            security_groups: vec![],
        }]));
        request.extensions_mut().insert(test_tenant());
        let error = service.create_vm(request).await.unwrap_err();
        assert_eq!(error.code(), tonic::Code::InvalidArgument);
        // The backend must never have been asked to create anything.
        assert!(backend.last_created_vm().is_none());
    }
    /// Referencing a non-existent security group must fail the create with
    /// NotFound and leave no VM and no leftover PrismNET ports behind.
    // multi_thread flavor: the PrismNET test server's auth interceptor uses
    // block_in_place, which panics on a current-thread runtime.
    #[tokio::test(flavor = "multi_thread")]
    async fn create_vm_rejects_unknown_security_group_reference() {
        let iam_endpoint = start_test_iam_server().await;
        // Seed a real VPC + subnet so only the security group is unknown.
        let metadata = Arc::new(NetworkMetadataStore::new_in_memory());
        let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16");
        metadata.create_vpc(vpc.clone()).await.unwrap();
        let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24");
        metadata.create_subnet(subnet.clone()).await.unwrap();
        let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await;
        let (_tempdir, service, backend) =
            new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await;
        let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec {
            id: "nic0".to_string(),
            network_id: "default".to_string(),
            subnet_id: subnet.id.to_string(),
            port_id: String::new(),
            mac_address: String::new(),
            ip_address: String::new(),
            cidr_block: String::new(),
            gateway_ip: String::new(),
            dhcp_enabled: false,
            model: ProtoNicModel::VirtioNet as i32,
            // A random UUID that matches no security group in the store.
            security_groups: vec![Uuid::new_v4().to_string()],
        }]));
        create_request.extensions_mut().insert(test_tenant());
        let error = service.create_vm(create_request).await.unwrap_err();
        assert_eq!(error.code(), tonic::Code::NotFound);
        // Neither a VM nor any auto-created port may survive the failure.
        assert!(backend.last_created_vm().is_none());
        assert!(metadata
            .list_ports(Some(&subnet.id), None)
            .await
            .unwrap()
            .is_empty());
    }
    /// End-to-end lifecycle of an auto-managed port: a NIC with a subnet but
    /// no port_id gets a port created for it on create_vm (recorded in VM
    /// metadata), and that port is deleted again on delete_vm.
    // multi_thread flavor: the PrismNET test server's auth interceptor uses
    // block_in_place, which panics on a current-thread runtime.
    #[tokio::test(flavor = "multi_thread")]
    async fn create_vm_auto_manages_prismnet_port_lifecycle() {
        let iam_endpoint = start_test_iam_server().await;
        let metadata = Arc::new(NetworkMetadataStore::new_in_memory());
        let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16");
        metadata.create_vpc(vpc.clone()).await.unwrap();
        let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24");
        metadata.create_subnet(subnet.clone()).await.unwrap();
        let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await;
        let (_tempdir, service, backend) =
            new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await;
        // Empty port_id + valid subnet_id => the service must create a port.
        let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec {
            id: "nic0".to_string(),
            network_id: "default".to_string(),
            subnet_id: subnet.id.to_string(),
            port_id: String::new(),
            mac_address: String::new(),
            ip_address: String::new(),
            cidr_block: String::new(),
            gateway_ip: String::new(),
            dhcp_enabled: false,
            model: ProtoNicModel::VirtioNet as i32,
            security_groups: vec![],
        }]));
        create_request.extensions_mut().insert(test_tenant());
        let vm = service
            .create_vm(create_request)
            .await
            .unwrap()
            .into_inner();
        // The returned NIC must be fully populated by the auto-created port.
        let nic = vm.spec.as_ref().unwrap().network.first().unwrap();
        assert!(!nic.port_id.is_empty());
        assert!(!nic.mac_address.is_empty());
        assert!(!nic.ip_address.is_empty());
        // The VM handed to the hypervisor carries the auto-port bookkeeping
        // metadata and the same resolved port id.
        let backend_vm = backend.last_created_vm().unwrap();
        assert!(backend_vm
            .metadata
            .contains_key(PRISMNET_AUTO_PORTS_METADATA_KEY));
        assert_eq!(
            backend_vm
                .spec
                .network
                .first()
                .and_then(|entry| entry.port_id.as_deref()),
            Some(nic.port_id.as_str())
        );
        // The port in the metadata store is bound to this VM.
        let port_id = PrismnetPortId::from_uuid(Uuid::parse_str(&nic.port_id).unwrap());
        let port = metadata
            .get_port(&subnet.id, &port_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(port.device_id.as_deref(), Some(vm.id.as_str()));
        assert_eq!(port.device_type, PrismnetDeviceType::Vm);
        // Deleting the VM must also delete the auto-managed port.
        let mut delete_request = Request::new(DeleteVmRequest {
            org_id: "org-1".to_string(),
            project_id: "proj-1".to_string(),
            vm_id: vm.id.clone(),
            force: true,
        });
        delete_request.extensions_mut().insert(test_tenant());
        service.delete_vm(delete_request).await.unwrap();
        assert!(metadata
            .get_port(&subnet.id, &port_id)
            .await
            .unwrap()
            .is_none());
    }
    /// A port the tenant created themselves must survive VM deletion: the VM
    /// attaches to it, and delete_vm only detaches (clears device binding)
    /// rather than deleting the port.
    // multi_thread flavor: the PrismNET test server's auth interceptor uses
    // block_in_place, which panics on a current-thread runtime.
    #[tokio::test(flavor = "multi_thread")]
    async fn delete_vm_preserves_tenant_managed_prismnet_port() {
        let iam_endpoint = start_test_iam_server().await;
        let metadata = Arc::new(NetworkMetadataStore::new_in_memory());
        let vpc = Vpc::new("tenant-vpc", "org-1", "proj-1", "10.0.0.0/16");
        metadata.create_vpc(vpc.clone()).await.unwrap();
        let subnet = Subnet::new("tenant-subnet", vpc.id, "10.0.1.0/24");
        metadata.create_subnet(subnet.clone()).await.unwrap();
        // Pre-create the tenant-managed port with a fixed IP.
        let mut existing_port = Port::new("tenant-managed-port", subnet.id);
        existing_port.ip_address = Some("10.0.1.25".to_string());
        let existing_port_id = existing_port.id;
        metadata.create_port(existing_port).await.unwrap();
        let prismnet_endpoint = start_test_prismnet_server(&iam_endpoint, metadata.clone()).await;
        let (_tempdir, service, backend) =
            new_test_vm_service(&iam_endpoint, Some(prismnet_endpoint)).await;
        // Explicit port_id => the service attaches to the existing port
        // instead of auto-creating one.
        let mut create_request = Request::new(test_vm_request(vec![ProtoNetworkSpec {
            id: "nic0".to_string(),
            network_id: "default".to_string(),
            subnet_id: subnet.id.to_string(),
            port_id: existing_port_id.to_string(),
            mac_address: String::new(),
            ip_address: String::new(),
            cidr_block: String::new(),
            gateway_ip: String::new(),
            dhcp_enabled: false,
            model: ProtoNicModel::VirtioNet as i32,
            security_groups: vec![],
        }]));
        create_request.extensions_mut().insert(test_tenant());
        let vm = service
            .create_vm(create_request)
            .await
            .unwrap()
            .into_inner();
        // The NIC reflects the existing port's identity and IP.
        let nic = vm.spec.as_ref().unwrap().network.first().unwrap();
        assert_eq!(nic.port_id, existing_port_id.to_string());
        assert!(!nic.mac_address.is_empty());
        assert_eq!(nic.ip_address.as_str(), "10.0.1.25");
        // No auto-port bookkeeping: this port is tenant-managed.
        let backend_vm = backend.last_created_vm().unwrap();
        assert!(!backend_vm
            .metadata
            .contains_key(PRISMNET_AUTO_PORTS_METADATA_KEY));
        // While the VM exists the port is bound to it.
        let port = metadata
            .get_port(&subnet.id, &existing_port_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(port.device_id.as_deref(), Some(vm.id.as_str()));
        assert_eq!(port.device_type, PrismnetDeviceType::Vm);
        let mut delete_request = Request::new(DeleteVmRequest {
            org_id: "org-1".to_string(),
            project_id: "proj-1".to_string(),
            vm_id: vm.id.clone(),
            force: true,
        });
        delete_request.extensions_mut().insert(test_tenant());
        service.delete_vm(delete_request).await.unwrap();
        // After deletion the port still exists, but with its device binding
        // cleared — not deleted.
        let port = metadata
            .get_port(&subnet.id, &existing_port_id)
            .await
            .unwrap()
            .unwrap();
        assert_eq!(port.device_id, None);
        assert_eq!(port.device_type, PrismnetDeviceType::None);
    }
} }
impl StateSink for VmServiceImpl { impl StateSink for VmServiceImpl {
@ -2245,6 +2973,7 @@ impl VmService for VmServiceImpl {
} }
let spec = Self::proto_spec_to_types(req.spec.clone()); let spec = Self::proto_spec_to_types(req.spec.clone());
Self::validate_vm_disk_references(&spec)?; Self::validate_vm_disk_references(&spec)?;
Self::validate_vm_network_references(&spec)?;
if self.is_control_plane_scheduler() { if self.is_control_plane_scheduler() {
if let Some(target) = self if let Some(target) = self
.select_target_node(hv, &req.org_id, &req.project_id, &spec) .select_target_node(hv, &req.org_id, &req.project_id, &spec)
@ -2369,10 +3098,23 @@ impl VmService for VmServiceImpl {
} }
}; };
// Attach to PrismNET ports if configured
if let Err(e) = self.attach_prismnet_ports(&mut vm).await { if let Err(e) = self.attach_prismnet_ports(&mut vm).await {
tracing::warn!("Failed to attach PrismNET ports: {}", e); if let (Some(ref credit_svc), Some(ref res_id)) =
// Continue anyway - network attachment is optional (&self.credit_service, &reservation_id)
{
let mut client = credit_svc.write().await;
if let Err(release_err) = client
.release_reservation(res_id, format!("VM network attachment failed: {}", e))
.await
{
tracing::warn!("Failed to release reservation {}: {}", res_id, release_err);
}
}
self.rollback_prepared_vm_resources(&vm, true).await;
return Err(Self::map_prismnet_error(
e.as_ref(),
"attach PrismNET ports",
));
} }
// Create VM // Create VM
@ -2700,6 +3442,9 @@ impl VmService for VmServiceImpl {
tracing::warn!("Failed to detach PrismNET ports: {}", e); tracing::warn!("Failed to detach PrismNET ports: {}", e);
// Continue anyway - we still want to delete the VM // Continue anyway - we still want to delete the VM
} }
if let Err(e) = self.delete_auto_prismnet_ports(&vm).await {
tracing::warn!("Failed to delete auto-managed PrismNET ports: {}", e);
}
} }
if self.is_control_plane_scheduler() { if self.is_control_plane_scheduler() {
@ -3535,6 +4280,7 @@ impl VmService for VmServiceImpl {
let spec = Self::proto_spec_to_types(req.spec); let spec = Self::proto_spec_to_types(req.spec);
Self::validate_vm_disk_references(&spec)?; Self::validate_vm_disk_references(&spec)?;
Self::validate_vm_network_references(&spec)?;
let name = if req.name.is_empty() { let name = if req.name.is_empty() {
req.vm_id.clone() req.vm_id.clone()
} else { } else {
@ -3635,7 +4381,11 @@ impl VmService for VmServiceImpl {
let attached_disks = self.volume_manager.prepare_vm_volumes(&mut vm).await?; let attached_disks = self.volume_manager.prepare_vm_volumes(&mut vm).await?;
if let Err(e) = self.attach_prismnet_ports(&mut vm).await { if let Err(e) = self.attach_prismnet_ports(&mut vm).await {
tracing::warn!("Failed to attach PrismNET ports: {}", e); self.rollback_prepared_vm_resources(&vm, false).await;
return Err(Self::map_prismnet_error(
e.as_ref(),
"attach PrismNET ports",
));
} }
let handle = match backend.create(&vm, &attached_disks).await { let handle = match backend.create(&vm, &attached_disks).await {
@ -4352,9 +5102,6 @@ impl ImageService for VmServiceImpl {
if req.source_url.trim().is_empty() { if req.source_url.trim().is_empty() {
return Err(Status::invalid_argument("source_url is required")); return Err(Status::invalid_argument("source_url is required"));
} }
if !req.source_url.starts_with("https://") {
return Err(Status::invalid_argument("source_url must use https://"));
}
let Some(store) = self.artifact_store.as_ref() else { let Some(store) = self.artifact_store.as_ref() else {
return Err(Status::failed_precondition( return Err(Status::failed_precondition(
"LightningStor artifact backing is required for image imports", "LightningStor artifact backing is required for image imports",

View file

@ -6,6 +6,7 @@ use plasmavmc_types::{
VolumeDriverKind, VolumeFormat, VolumeStatus, VolumeDriverKind, VolumeFormat, VolumeStatus,
}; };
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::net::IpAddr; use std::net::IpAddr;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
@ -1757,7 +1758,12 @@ fn volume_has_pending_coronafs_image_seed(volume: &Volume) -> bool {
} }
fn derived_volume_id(vm_id: &str, disk_id: &str) -> String { fn derived_volume_id(vm_id: &str, disk_id: &str) -> String {
format!("{vm_id}-{disk_id}") let digest = Sha256::digest(format!("photoncloud-vm-disk:{vm_id}:{disk_id}").as_bytes());
let mut bytes = [0u8; 16];
bytes.copy_from_slice(&digest[..16]);
bytes[6] = (bytes[6] & 0x0f) | 0x50;
bytes[8] = (bytes[8] & 0x3f) | 0x80;
Uuid::from_bytes(bytes).to_string()
} }
fn volume_is_auto_delete(volume: &Volume) -> bool { fn volume_is_auto_delete(volume: &Volume) -> bool {
@ -2443,6 +2449,20 @@ mod tests {
); );
} }
#[test]
fn derived_volume_id_is_stable_uuid() {
let volume_id = derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "root");
assert_eq!(
volume_id,
derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "root")
);
assert_ne!(
volume_id,
derived_volume_id("d1d891a9-7dd1-442d-bf71-50672f150afe", "data")
);
Uuid::parse_str(&volume_id).unwrap();
}
#[test] #[test]
fn normalize_coronafs_endpoint_supports_comma_separated_values() { fn normalize_coronafs_endpoint_supports_comma_separated_values() {
assert_eq!( assert_eq!(

View file

@ -262,6 +262,12 @@ pub struct NetworkSpec {
pub mac_address: Option<String>, pub mac_address: Option<String>,
/// IP address (DHCP if None) /// IP address (DHCP if None)
pub ip_address: Option<String>, pub ip_address: Option<String>,
/// Attached subnet CIDR (required for the local KVM dataplane)
pub cidr_block: Option<String>,
/// Attached subnet gateway IP (required for the local KVM dataplane)
pub gateway_ip: Option<String>,
/// Whether DHCP should be enabled on the attached subnet
pub dhcp_enabled: bool,
/// NIC model /// NIC model
pub model: NicModel, pub model: NicModel,
/// Security groups /// Security groups
@ -277,6 +283,9 @@ impl Default for NetworkSpec {
port_id: None, port_id: None,
mac_address: None, mac_address: None,
ip_address: None, ip_address: None,
cidr_block: None,
gateway_ip: None,
dhcp_enabled: false,
model: NicModel::VirtioNet, model: NicModel::VirtioNet,
security_groups: Vec::new(), security_groups: Vec::new(),
} }
@ -304,8 +313,7 @@ pub struct SecuritySpec {
} }
/// Complete VM specification /// Complete VM specification
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize, Default)]
#[derive(Default)]
pub struct VmSpec { pub struct VmSpec {
/// CPU configuration /// CPU configuration
pub cpu: CpuSpec, pub cpu: CpuSpec,
@ -321,7 +329,6 @@ pub struct VmSpec {
pub security: SecuritySpec, pub security: SecuritySpec,
} }
/// Resource usage statistics /// Resource usage statistics
#[derive(Debug, Clone, Default, Serialize, Deserialize)] #[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ResourceUsage { pub struct ResourceUsage {

View file

@ -210,6 +210,9 @@ message NetworkSpec {
repeated string security_groups = 6; repeated string security_groups = 6;
string port_id = 7; // PrismNET port ID for OVN integration string port_id = 7; // PrismNET port ID for OVN integration
string subnet_id = 8; // PrismNET subnet ID for OVN integration string subnet_id = 8; // PrismNET subnet ID for OVN integration
string cidr_block = 9; // Effective subnet CIDR for the realized dataplane
string gateway_ip = 10; // Effective subnet gateway for the realized dataplane
bool dhcp_enabled = 11;
} }
enum NicModel { enum NicModel {

View file

@ -184,6 +184,100 @@ message DeleteSubnetRequest {
message DeleteSubnetResponse {} message DeleteSubnetResponse {}
// =============================================================================
// Router Service
// =============================================================================

// Tenant-scoped CRUD for VPC routers. All requests carry org_id/project_id
// for tenant scoping.
service RouterService {
  rpc CreateRouter(CreateRouterRequest) returns (CreateRouterResponse);
  rpc GetRouter(GetRouterRequest) returns (GetRouterResponse);
  rpc ListRouters(ListRoutersRequest) returns (ListRoutersResponse);
  rpc UpdateRouter(UpdateRouterRequest) returns (UpdateRouterResponse);
  rpc DeleteRouter(DeleteRouterRequest) returns (DeleteRouterResponse);
}

// A router record attached to a VPC.
message Router {
  string id = 1;
  string org_id = 2;
  string project_id = 3;
  string vpc_id = 4;          // VPC this router belongs to
  string name = 5;
  string description = 6;
  string gateway_cidr = 7;    // gateway address in CIDR notation
  string mac_address = 8;
  string external_ip = 9;
  RouterStatus status = 10;
  uint64 created_at = 11;     // unix timestamp (seconds)
  uint64 updated_at = 12;     // unix timestamp (seconds)
}

// Lifecycle state of a router.
enum RouterStatus {
  ROUTER_STATUS_UNSPECIFIED = 0;
  ROUTER_STATUS_PROVISIONING = 1;
  ROUTER_STATUS_ACTIVE = 2;
  ROUTER_STATUS_UPDATING = 3;
  ROUTER_STATUS_DELETING = 4;
  ROUTER_STATUS_ERROR = 5;
}

message CreateRouterRequest {
  string org_id = 1;
  string project_id = 2;
  string vpc_id = 3;
  string name = 4;
  string description = 5;
  string gateway_cidr = 6;
  string mac_address = 7;     // optional; server may assign if empty — confirm
  string external_ip = 8;     // optional; server may assign if empty — confirm
}

message CreateRouterResponse {
  Router router = 1;
}

message GetRouterRequest {
  string org_id = 1;
  string project_id = 2;
  string id = 3;
}

message GetRouterResponse {
  Router router = 1;
}

message ListRoutersRequest {
  string org_id = 1;
  string project_id = 2;
  string vpc_id = 3;          // optional filter by VPC
  int32 page_size = 4;
  string page_token = 5;
}

message ListRoutersResponse {
  repeated Router routers = 1;
  string next_page_token = 2; // empty when there are no further pages
}

// Only name and description are mutable after creation.
message UpdateRouterRequest {
  string org_id = 1;
  string project_id = 2;
  string id = 3;
  string name = 4;
  string description = 5;
}

message UpdateRouterResponse {
  Router router = 1;
}

message DeleteRouterRequest {
  string org_id = 1;
  string project_id = 2;
  string id = 3;
}

message DeleteRouterResponse {}
// ============================================================================= // =============================================================================
// Port Service // Port Service
// ============================================================================= // =============================================================================
@ -464,6 +558,12 @@ service IpamService {
// List Service IP Pools // List Service IP Pools
rpc ListServiceIPPools(ListServiceIPPoolsRequest) returns (ListServiceIPPoolsResponse); rpc ListServiceIPPools(ListServiceIPPoolsRequest) returns (ListServiceIPPoolsResponse);
// Update Service IP Pool metadata
rpc UpdateServiceIPPool(UpdateServiceIPPoolRequest) returns (UpdateServiceIPPoolResponse);
// Delete Service IP Pool
rpc DeleteServiceIPPool(DeleteServiceIPPoolRequest) returns (DeleteServiceIPPoolResponse);
// Allocate IP from pool // Allocate IP from pool
rpc AllocateServiceIP(AllocateServiceIPRequest) returns (AllocateServiceIPResponse); rpc AllocateServiceIP(AllocateServiceIPRequest) returns (AllocateServiceIPResponse);
@ -550,6 +650,26 @@ message ListServiceIPPoolsResponse {
string next_page_token = 2; string next_page_token = 2;
} }
// Updates metadata (name/description) of a Service IP Pool; allocations are
// not affected.
message UpdateServiceIPPoolRequest {
  string org_id = 1;
  string project_id = 2;
  string id = 3;
  string name = 4;
  string description = 5;
}

message UpdateServiceIPPoolResponse {
  ServiceIPPool pool = 1;
}

// Deletes a Service IP Pool identified by tenant scope and id.
message DeleteServiceIPPoolRequest {
  string org_id = 1;
  string project_id = 2;
  string id = 3;
}

message DeleteServiceIPPoolResponse {}
message AllocateServiceIPRequest { message AllocateServiceIPRequest {
string org_id = 1; string org_id = 1;
string project_id = 2; string project_id = 2;

View file

@ -10,5 +10,6 @@ pub use config::ServerConfig;
pub use metadata::NetworkMetadataStore; pub use metadata::NetworkMetadataStore;
pub use ovn::OvnClient; pub use ovn::OvnClient;
pub use services::{ pub use services::{
IpamServiceImpl, PortServiceImpl, SecurityGroupServiceImpl, SubnetServiceImpl, VpcServiceImpl, IpamServiceImpl, PortServiceImpl, RouterServiceImpl, SecurityGroupServiceImpl,
SubnetServiceImpl, VpcServiceImpl,
}; };

View file

@ -7,12 +7,13 @@ use iam_service_auth::AuthService;
use metrics_exporter_prometheus::PrometheusBuilder; use metrics_exporter_prometheus::PrometheusBuilder;
use prismnet_api::{ use prismnet_api::{
ipam_service_server::IpamServiceServer, port_service_server::PortServiceServer, ipam_service_server::IpamServiceServer, port_service_server::PortServiceServer,
router_service_server::RouterServiceServer,
security_group_service_server::SecurityGroupServiceServer, security_group_service_server::SecurityGroupServiceServer,
subnet_service_server::SubnetServiceServer, vpc_service_server::VpcServiceServer, subnet_service_server::SubnetServiceServer, vpc_service_server::VpcServiceServer,
}; };
use prismnet_server::{ use prismnet_server::{
config::MetadataBackend, IpamServiceImpl, NetworkMetadataStore, OvnClient, PortServiceImpl, config::MetadataBackend, IpamServiceImpl, NetworkMetadataStore, OvnClient, PortServiceImpl,
SecurityGroupServiceImpl, ServerConfig, SubnetServiceImpl, VpcServiceImpl, RouterServiceImpl, SecurityGroupServiceImpl, ServerConfig, SubnetServiceImpl, VpcServiceImpl,
}; };
use std::net::SocketAddr; use std::net::SocketAddr;
use std::path::PathBuf; use std::path::PathBuf;
@ -186,6 +187,11 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
ovn.clone(), ovn.clone(),
auth_service.clone(), auth_service.clone(),
)); ));
let router_service = Arc::new(RouterServiceImpl::new(
metadata.clone(),
ovn.clone(),
auth_service.clone(),
));
let sg_service = Arc::new(SecurityGroupServiceImpl::new( let sg_service = Arc::new(SecurityGroupServiceImpl::new(
metadata.clone(), metadata.clone(),
ovn.clone(), ovn.clone(),
@ -204,6 +210,9 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
health_reporter health_reporter
.set_serving::<PortServiceServer<PortServiceImpl>>() .set_serving::<PortServiceServer<PortServiceImpl>>()
.await; .await;
health_reporter
.set_serving::<RouterServiceServer<RouterServiceImpl>>()
.await;
health_reporter health_reporter
.set_serving::<SecurityGroupServiceServer<SecurityGroupServiceImpl>>() .set_serving::<SecurityGroupServiceServer<SecurityGroupServiceImpl>>()
.await; .await;
@ -259,6 +268,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
PortServiceServer::new(port_service.as_ref().clone()), PortServiceServer::new(port_service.as_ref().clone()),
make_interceptor(auth_service.clone()), make_interceptor(auth_service.clone()),
)) ))
.add_service(tonic::codegen::InterceptedService::new(
RouterServiceServer::new(router_service.as_ref().clone()),
make_interceptor(auth_service.clone()),
))
.add_service(tonic::codegen::InterceptedService::new( .add_service(tonic::codegen::InterceptedService::new(
SecurityGroupServiceServer::new(sg_service.as_ref().clone()), SecurityGroupServiceServer::new(sg_service.as_ref().clone()),
make_interceptor(auth_service.clone()), make_interceptor(auth_service.clone()),
@ -274,6 +287,10 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
let rest_state = prismnet_server::rest::RestApiState { let rest_state = prismnet_server::rest::RestApiState {
vpc_service: vpc_service.clone(), vpc_service: vpc_service.clone(),
subnet_service: subnet_service.clone(), subnet_service: subnet_service.clone(),
port_service: port_service.clone(),
router_service: router_service.clone(),
security_group_service: sg_service.clone(),
ipam_service: ipam_service.clone(),
auth_service: auth_service.clone(), auth_service: auth_service.clone(),
}; };
let rest_app = prismnet_server::rest::build_router(rest_state); let rest_app = prismnet_server::rest::build_router(rest_state);

View file

@ -3,8 +3,9 @@
use dashmap::DashMap; use dashmap::DashMap;
use flaredb_client::RdbClient; use flaredb_client::RdbClient;
use prismnet_types::{ use prismnet_types::{
IPAllocation, Port, PortId, SecurityGroup, SecurityGroupId, SecurityGroupRule, IPAllocation, Port, PortId, Router, RouterId, SecurityGroup, SecurityGroupId,
SecurityGroupRuleId, ServiceIPPool, ServiceIPPoolId, Subnet, SubnetId, Vpc, VpcId, SecurityGroupRule, SecurityGroupRuleId, ServiceIPPool, ServiceIPPoolId, Subnet, SubnetId,
Vpc, VpcId,
}; };
use sqlx::pool::PoolOptions; use sqlx::pool::PoolOptions;
use sqlx::{Pool, Postgres, Sqlite}; use sqlx::{Pool, Postgres, Sqlite};
@ -406,6 +407,14 @@ impl NetworkMetadataStore {
format!("/prismnet/subnets/{}/", vpc_id) format!("/prismnet/subnets/{}/", vpc_id)
} }
fn router_key(org_id: &str, project_id: &str, router_id: &RouterId) -> String {
format!("/prismnet/routers/{}/{}/{}", org_id, project_id, router_id)
}
fn router_prefix(org_id: &str, project_id: &str) -> String {
format!("/prismnet/routers/{}/{}/", org_id, project_id)
}
fn port_key(subnet_id: &SubnetId, port_id: &PortId) -> String { fn port_key(subnet_id: &SubnetId, port_id: &PortId) -> String {
format!("/prismnet/ports/{}/{}", subnet_id, port_id) format!("/prismnet/ports/{}/{}", subnet_id, port_id)
} }
@ -645,6 +654,94 @@ impl NetworkMetadataStore {
// Port Operations // Port Operations
// ========================================================================= // =========================================================================
/// Persist a new [`Router`] record and return its identifier.
pub async fn create_router(&self, router: Router) -> Result<RouterId> {
    let router_id = router.id;
    let serialized = serde_json::to_string(&router)
        .map_err(|e| MetadataError::Serialization(e.to_string()))?;
    let storage_key = Self::router_key(&router.org_id, &router.project_id, &router_id);
    self.put(&storage_key, &serialized).await?;
    Ok(router_id)
}
/// Fetch one router by tenant scope and id; `Ok(None)` when absent.
pub async fn get_router(
    &self,
    org_id: &str,
    project_id: &str,
    id: &RouterId,
) -> Result<Option<Router>> {
    let key = Self::router_key(org_id, project_id, id);
    match self.get(&key).await? {
        Some(raw) => {
            let router = serde_json::from_str::<Router>(&raw)
                .map_err(|e| MetadataError::Serialization(e.to_string()))?;
            Ok(Some(router))
        }
        None => Ok(None),
    }
}
/// List every router stored under the tenant's prefix.
///
/// Entries that fail to deserialize are skipped rather than failing
/// the whole listing.
pub async fn list_routers(&self, org_id: &str, project_id: &str) -> Result<Vec<Router>> {
    let prefix = Self::router_prefix(org_id, project_id);
    let routers = self
        .get_prefix(&prefix)
        .await?
        .into_iter()
        .filter_map(|(_, raw)| serde_json::from_str::<Router>(&raw).ok())
        .collect();
    Ok(routers)
}
/// Apply partial updates (name and/or description) to a stored router.
///
/// Fields passed as `None` are left untouched; `updated_at` is refreshed
/// on every successful update. Returns `Ok(None)` when the router does
/// not exist in the given tenant scope.
pub async fn update_router(
    &self,
    org_id: &str,
    project_id: &str,
    id: &RouterId,
    name: Option<String>,
    description: Option<String>,
) -> Result<Option<Router>> {
    let router_opt = self.get_router(org_id, project_id, id).await?;
    if let Some(mut router) = router_opt {
        if let Some(name) = name {
            router.name = name;
        }
        if let Some(description) = description {
            router.description = Some(description);
        }
        // Seconds since the Unix epoch; a pre-1970 clock degrades to 0.
        router.updated_at = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let key = Self::router_key(org_id, project_id, id);
        let value = serde_json::to_string(&router)
            .map_err(|e| MetadataError::Serialization(e.to_string()))?;
        // NOTE(review): read-modify-write without compare-and-swap; two
        // concurrent updates to the same router can race — confirm the
        // caller serializes writes per key.
        self.put(&key, &value).await?;
        Ok(Some(router))
    } else {
        Ok(None)
    }
}
/// Remove a router record, returning the deleted value if it existed.
pub async fn delete_router(
    &self,
    org_id: &str,
    project_id: &str,
    id: &RouterId,
) -> Result<Option<Router>> {
    match self.get_router(org_id, project_id, id).await? {
        Some(router) => {
            let key = Self::router_key(org_id, project_id, id);
            self.delete_key(&key).await?;
            Ok(Some(router))
        }
        None => Ok(None),
    }
}
// =========================================================================
// Port Operations
// =========================================================================
pub async fn create_port(&self, port: Port) -> Result<PortId> { pub async fn create_port(&self, port: Port) -> Result<PortId> {
let id = port.id; let id = port.id;
let key = Self::port_key(&port.subnet_id, &id); let key = Self::port_key(&port.subnet_id, &id);
@ -963,6 +1060,53 @@ impl NetworkMetadataStore {
Ok(pools) Ok(pools)
} }
/// Apply partial updates (name and/or description) to a Service IP Pool.
///
/// `None` fields are left untouched; `updated_at` is refreshed on every
/// successful update. Returns `Ok(None)` when the pool does not exist in
/// the given tenant scope.
pub async fn update_service_ip_pool(
    &self,
    org_id: &str,
    project_id: &str,
    pool_id: &ServiceIPPoolId,
    name: Option<String>,
    description: Option<String>,
) -> Result<Option<ServiceIPPool>> {
    let pool_opt = self.get_service_ip_pool(org_id, project_id, pool_id).await?;
    if let Some(mut pool) = pool_opt {
        if let Some(name) = name {
            pool.name = name;
        }
        if let Some(description) = description {
            pool.description = Some(description);
        }
        // Seconds since the Unix epoch; a pre-1970 clock degrades to 0.
        pool.updated_at = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        let key = Self::service_ip_pool_key(org_id, project_id, pool_id);
        let value = serde_json::to_string(&pool)
            .map_err(|e| MetadataError::Serialization(e.to_string()))?;
        // NOTE(review): non-atomic read-modify-write, same caveat as the
        // router update path — confirm single-writer assumptions.
        self.put(&key, &value).await?;
        Ok(Some(pool))
    } else {
        Ok(None)
    }
}
/// Remove a Service IP Pool record, returning it if it existed.
pub async fn delete_service_ip_pool(
    &self,
    org_id: &str,
    project_id: &str,
    pool_id: &ServiceIPPoolId,
) -> Result<Option<ServiceIPPool>> {
    match self.get_service_ip_pool(org_id, project_id, pool_id).await? {
        Some(pool) => {
            let key = Self::service_ip_pool_key(org_id, project_id, pool_id);
            self.delete_key(&key).await?;
            Ok(Some(pool))
        }
        None => Ok(None),
    }
}
pub async fn allocate_service_ip( pub async fn allocate_service_ip(
&self, &self,
pool_id: &ServiceIPPoolId, pool_id: &ServiceIPPoolId,
@ -1212,7 +1356,10 @@ fn normalize_transport_addr(endpoint: &str) -> String {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use prismnet_types::{IpProtocol, RuleDirection, SecurityGroup, SecurityGroupRule, Vpc}; use prismnet_types::{
IpProtocol, Router, RuleDirection, SecurityGroup, SecurityGroupRule, ServiceIPPool,
ServiceIPPoolType, Vpc,
};
#[tokio::test] #[tokio::test]
async fn test_vpc_crud() { async fn test_vpc_crud() {
@ -1311,6 +1458,68 @@ mod tests {
assert_eq!(subnets.len(), 1); assert_eq!(subnets.len(), 1);
} }
// End-to-end CRUD cycle for routers against the in-memory store:
// create → get → list → update → delete → verify gone. The steps are
// order-dependent, so they run in a single test.
#[tokio::test]
async fn test_router_crud() {
    let store = NetworkMetadataStore::new_in_memory();
    // A router needs an owning VPC in the same tenant.
    let vpc = Vpc::new("test-vpc", "org-1", "proj-1", "10.0.0.0/16");
    let vpc_id = store.create_vpc(vpc).await.unwrap();
    let mut router = Router::new(
        "edge",
        "org-1",
        "proj-1",
        vpc_id,
        "10.0.0.1/24",
        "02:00:00:00:00:01",
        "203.0.113.10",
    );
    // Simulate OVN-assigned identifiers; the store persists them opaquely.
    router.ovn_router_id = "lr-1".to_string();
    router.ovn_router_port_id = "lrp-1".to_string();
    let router_id = store.create_router(router).await.unwrap();
    // Read back and check the persisted fields survived serialization.
    let retrieved = store
        .get_router("org-1", "proj-1", &router_id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(retrieved.name, "edge");
    assert_eq!(retrieved.external_ip, "203.0.113.10");
    let routers = store.list_routers("org-1", "proj-1").await.unwrap();
    assert_eq!(routers.len(), 1);
    // Partial update: rename and set a description.
    store
        .update_router(
            "org-1",
            "proj-1",
            &router_id,
            Some("edge-renamed".to_string()),
            Some("tenant edge".to_string()),
        )
        .await
        .unwrap();
    let updated = store
        .get_router("org-1", "proj-1", &router_id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(updated.name, "edge-renamed");
    assert_eq!(updated.description.as_deref(), Some("tenant edge"));
    // Delete returns the removed record; a second read must find nothing.
    let deleted = store
        .delete_router("org-1", "proj-1", &router_id)
        .await
        .unwrap();
    assert!(deleted.is_some());
    assert!(store
        .get_router("org-1", "proj-1", &router_id)
        .await
        .unwrap()
        .is_none());
}
#[tokio::test] #[tokio::test]
async fn test_port_crud() { async fn test_port_crud() {
let store = NetworkMetadataStore::new_in_memory(); let store = NetworkMetadataStore::new_in_memory();
@ -1399,4 +1608,54 @@ mod tests {
// Gateway should be skipped // Gateway should be skipped
assert_ne!(ip1, "10.0.1.1"); assert_ne!(ip1, "10.0.1.1");
} }
// End-to-end CRUD cycle for Service IP Pools against the in-memory
// store: create → get → update → delete → verify gone.
#[tokio::test]
async fn test_service_ip_pool_crud() {
    let store = NetworkMetadataStore::new_in_memory();
    let pool = ServiceIPPool::new(
        "services",
        "org-1",
        "proj-1",
        "10.96.0.0/24",
        ServiceIPPoolType::LoadBalancer,
    );
    let pool_id = store.create_service_ip_pool(pool).await.unwrap();
    // Read back the freshly created pool.
    let retrieved = store
        .get_service_ip_pool("org-1", "proj-1", &pool_id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(retrieved.name, "services");
    // Partial update: rename and attach a description.
    store
        .update_service_ip_pool(
            "org-1",
            "proj-1",
            &pool_id,
            Some("services-updated".to_string()),
            Some("vip pool".to_string()),
        )
        .await
        .unwrap();
    let updated = store
        .get_service_ip_pool("org-1", "proj-1", &pool_id)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(updated.name, "services-updated");
    assert_eq!(updated.description.as_deref(), Some("vip pool"));
    // Delete returns the removed record; a second read must find nothing.
    let deleted = store
        .delete_service_ip_pool("org-1", "proj-1", &pool_id)
        .await
        .unwrap();
    assert!(deleted.is_some());
    assert!(store
        .get_service_ip_pool("org-1", "proj-1", &pool_id)
        .await
        .unwrap()
        .is_none());
}
} }

File diff suppressed because it is too large Load diff

View file

@ -1,6 +1,7 @@
//! IPAM gRPC service implementation for k8shost Service IP allocation //! IPAM gRPC service implementation for k8shost Service IP allocation
use std::net::IpAddr; use std::net::IpAddr;
use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
use tonic::{Request, Response, Status}; use tonic::{Request, Response, Status};
@ -8,6 +9,7 @@ use prismnet_api::{
ipam_service_server::IpamService, ipam_service_server::IpamService,
AllocateServiceIpRequest, AllocateServiceIpResponse, AllocateServiceIpRequest, AllocateServiceIpResponse,
CreateServiceIpPoolRequest, CreateServiceIpPoolResponse, CreateServiceIpPoolRequest, CreateServiceIpPoolResponse,
DeleteServiceIpPoolRequest, DeleteServiceIpPoolResponse,
GetIpAllocationRequest, GetIpAllocationResponse, GetIpAllocationRequest, GetIpAllocationResponse,
GetServiceIpPoolRequest, GetServiceIpPoolResponse, GetServiceIpPoolRequest, GetServiceIpPoolResponse,
IpAllocation as ProtoIPAllocation, IpAllocation as ProtoIPAllocation,
@ -16,6 +18,7 @@ use prismnet_api::{
ServiceIpPool as ProtoServiceIPPool, ServiceIpPool as ProtoServiceIPPool,
ServiceIpPoolStatus as ProtoServiceIPPoolStatus, ServiceIpPoolStatus as ProtoServiceIPPoolStatus,
ServiceIpPoolType as ProtoServiceIPPoolType, ServiceIpPoolType as ProtoServiceIPPoolType,
UpdateServiceIpPoolRequest, UpdateServiceIpPoolResponse,
}; };
use iam_service_auth::{ use iam_service_auth::{
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
@ -29,6 +32,8 @@ use crate::NetworkMetadataStore;
const ACTION_POOL_CREATE: &str = "network:ip-pools:create"; const ACTION_POOL_CREATE: &str = "network:ip-pools:create";
const ACTION_POOL_READ: &str = "network:ip-pools:read"; const ACTION_POOL_READ: &str = "network:ip-pools:read";
const ACTION_POOL_LIST: &str = "network:ip-pools:list"; const ACTION_POOL_LIST: &str = "network:ip-pools:list";
const ACTION_POOL_UPDATE: &str = "network:ip-pools:update";
const ACTION_POOL_DELETE: &str = "network:ip-pools:delete";
const ACTION_ALLOCATE_IP: &str = "network:ip-allocations:create"; const ACTION_ALLOCATE_IP: &str = "network:ip-allocations:create";
const ACTION_RELEASE_IP: &str = "network:ip-allocations:delete"; const ACTION_RELEASE_IP: &str = "network:ip-allocations:delete";
const ACTION_ALLOC_READ: &str = "network:ip-allocations:read"; const ACTION_ALLOC_READ: &str = "network:ip-allocations:read";
@ -45,6 +50,43 @@ impl IpamServiceImpl {
} }
} }
/// Parse `a.b.c.d/prefix` into its address and prefix length (0-32),
/// returning `InvalidArgument` for any malformed component.
fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> {
    let (addr_text, prefix_text) = match cidr.split_once('/') {
        Some(parts) => parts,
        None => {
            return Err(Status::invalid_argument(
                "cidr_block must be in a.b.c.d/prefix form",
            ))
        }
    };
    let address = addr_text
        .parse::<Ipv4Addr>()
        .map_err(|_| Status::invalid_argument("cidr_block must contain a valid IPv4 address"))?;
    let prefix_len = prefix_text
        .parse::<u8>()
        .map_err(|_| Status::invalid_argument("cidr_block prefix must be an integer"))?;
    if prefix_len > 32 {
        return Err(Status::invalid_argument(
            "cidr_block prefix must be between 0 and 32",
        ));
    }
    Ok((address, prefix_len))
}
/// Reject blank pool names and names already used by a different pool
/// in the tenant; `exclude` lets an update keep its current name.
fn ensure_unique_pool_name(
    existing: &[ServiceIPPool],
    desired_name: &str,
    exclude: Option<ServiceIPPoolId>,
) -> Result<(), Status> {
    if desired_name.trim().is_empty() {
        return Err(Status::invalid_argument("Service IP Pool name is required"));
    }
    let taken_by_other = existing
        .iter()
        .filter(|pool| exclude != Some(pool.id))
        .any(|pool| pool.name == desired_name);
    if taken_by_other {
        return Err(Status::already_exists(
            "Service IP Pool name already exists in the tenant",
        ));
    }
    Ok(())
}
// Proto conversion functions // Proto conversion functions
fn pool_to_proto(pool: &ServiceIPPool) -> ProtoServiceIPPool { fn pool_to_proto(pool: &ServiceIPPool) -> ProtoServiceIPPool {
@ -127,6 +169,13 @@ impl IpamService for IpamServiceImpl {
if req.cidr_block.is_empty() { if req.cidr_block.is_empty() {
return Err(Status::invalid_argument("cidr_block is required")); return Err(Status::invalid_argument("cidr_block is required"));
} }
parse_ipv4_cidr(&req.cidr_block)?;
let existing = self
.metadata
.list_service_ip_pools(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_pool_name(&existing, &req.name, None)?;
let pool_type = pool_type_from_proto(req.pool_type); let pool_type = pool_type_from_proto(req.pool_type);
let mut pool = ServiceIPPool::new( let mut pool = ServiceIPPool::new(
@ -230,6 +279,115 @@ impl IpamService for IpamServiceImpl {
})) }))
} }
/// Update a Service IP Pool's name and/or description.
///
/// Empty proto strings mean "leave unchanged" (proto3 has no optional
/// marker here). On a rename, the new name is checked for uniqueness
/// within the tenant before the store is touched. Missing pools map to
/// `NotFound`; store failures map to `Internal`.
async fn update_service_ip_pool(
    &self,
    request: Request<UpdateServiceIpPoolRequest>,
) -> Result<Response<UpdateServiceIpPoolResponse>, Status> {
    // Tenant scope comes from IAM metadata, reconciled with the request.
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let id = uuid::Uuid::parse_str(&req.id)
        .map_err(|_| Status::invalid_argument("Invalid pool ID"))?;
    let pool_id = ServiceIPPoolId::from_uuid(id);
    // Authorize against the concrete pool resource before any work.
    self.auth
        .authorize(
            &tenant,
            ACTION_POOL_UPDATE,
            &resource_for_tenant(
                "service-ip-pool",
                pool_id.to_string(),
                &org_id,
                &project_id,
            ),
        )
        .await?;
    // Empty string => field not supplied.
    let name = if !req.name.is_empty() {
        Some(req.name)
    } else {
        None
    };
    let description = if !req.description.is_empty() {
        Some(req.description)
    } else {
        None
    };
    // Renames must not collide with another pool (excluding this one).
    if let Some(name) = name.as_deref() {
        let existing = self
            .metadata
            .list_service_ip_pools(&org_id, &project_id)
            .await
            .map_err(|e| Status::internal(e.to_string()))?;
        ensure_unique_pool_name(&existing, name, Some(pool_id))?;
    }
    let pool = self
        .metadata
        .update_service_ip_pool(&org_id, &project_id, &pool_id, name, description)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Service IP Pool not found"))?;
    Ok(Response::new(UpdateServiceIpPoolResponse {
        pool: Some(pool_to_proto(&pool)),
    }))
}
/// Delete a Service IP Pool.
///
/// Refuses (FailedPrecondition) while the pool still has allocated IPs,
/// so live Service VIPs cannot be orphaned. Missing pools map to
/// `NotFound`; store failures map to `Internal`.
async fn delete_service_ip_pool(
    &self,
    request: Request<DeleteServiceIpPoolRequest>,
) -> Result<Response<DeleteServiceIpPoolResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let id = uuid::Uuid::parse_str(&req.id)
        .map_err(|_| Status::invalid_argument("Invalid pool ID"))?;
    let pool_id = ServiceIPPoolId::from_uuid(id);
    self.auth
        .authorize(
            &tenant,
            ACTION_POOL_DELETE,
            &resource_for_tenant(
                "service-ip-pool",
                pool_id.to_string(),
                &org_id,
                &project_id,
            ),
        )
        .await?;
    // Fetch first to inspect allocations before deleting.
    let pool = self
        .metadata
        .get_service_ip_pool(&org_id, &project_id, &pool_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Service IP Pool not found"))?;
    if !pool.allocated_ips.is_empty() {
        return Err(Status::failed_precondition(
            "cannot delete Service IP Pool with allocated IPs",
        ));
    }
    // NOTE(review): check-then-delete is not atomic; an allocation that
    // lands between the two calls would be silently removed with the
    // pool — confirm upstream serialization.
    self.metadata
        .delete_service_ip_pool(&org_id, &project_id, &pool_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Service IP Pool not found"))?;
    Ok(Response::new(DeleteServiceIpPoolResponse {}))
}
async fn allocate_service_ip( async fn allocate_service_ip(
&self, &self,
request: Request<AllocateServiceIpRequest>, request: Request<AllocateServiceIpRequest>,
@ -383,6 +541,30 @@ impl IpamService for IpamServiceImpl {
} }
} }
// Unit tests for the pure validation helpers in this module.
#[cfg(test)]
mod tests {
    use super::*;

    // /99 exceeds the 32-bit prefix limit enforced by parse_ipv4_cidr.
    #[test]
    fn rejects_invalid_pool_cidr() {
        let err = parse_ipv4_cidr("10.96.0.0/99").unwrap_err();
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
    }

    // A second pool reusing an existing name must be refused.
    #[test]
    fn rejects_duplicate_pool_name() {
        let existing = vec![ServiceIPPool::new(
            "svc",
            "org",
            "proj",
            "10.96.0.0/24",
            ServiceIPPoolType::ClusterIp,
        )];
        let err = ensure_unique_pool_name(&existing, "svc", None).unwrap_err();
        assert_eq!(err.code(), tonic::Code::AlreadyExists);
    }
}
impl IpamServiceImpl { impl IpamServiceImpl {
/// Allocate next available IP from pool's CIDR /// Allocate next available IP from pool's CIDR
async fn allocate_next_available_ip(&self, pool: &ServiceIPPool) -> Option<String> { async fn allocate_next_available_ip(&self, pool: &ServiceIPPool) -> Option<String> {

View file

@ -2,12 +2,14 @@
pub mod ipam; pub mod ipam;
pub mod port; pub mod port;
pub mod router;
pub mod security_group; pub mod security_group;
pub mod subnet; pub mod subnet;
pub mod vpc; pub mod vpc;
pub use ipam::IpamServiceImpl; pub use ipam::IpamServiceImpl;
pub use port::PortServiceImpl; pub use port::PortServiceImpl;
pub use router::RouterServiceImpl;
pub use security_group::SecurityGroupServiceImpl; pub use security_group::SecurityGroupServiceImpl;
pub use subnet::SubnetServiceImpl; pub use subnet::SubnetServiceImpl;
pub use vpc::VpcServiceImpl; pub use vpc::VpcServiceImpl;

View file

@ -1,5 +1,6 @@
//! Port gRPC service implementation //! Port gRPC service implementation
use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
use tonic::{Request, Response, Status}; use tonic::{Request, Response, Status};
@ -70,6 +71,34 @@ impl PortServiceImpl {
Ok(subnet) Ok(subnet)
} }
/// Resolve caller-supplied security-group id strings into typed ids,
/// verifying that each group exists inside the tenant (org/project).
///
/// All-or-nothing: a malformed UUID yields `InvalidArgument` and any
/// missing group yields `NotFound`, aborting the whole batch.
async fn resolve_security_groups_in_tenant(
    &self,
    org_id: &str,
    project_id: &str,
    security_group_ids: &[String],
) -> Result<Vec<SecurityGroupId>, Status> {
    let mut resolved = Vec::with_capacity(security_group_ids.len());
    for security_group_id in security_group_ids {
        let uuid = uuid::Uuid::parse_str(security_group_id)
            .map_err(|_| Status::invalid_argument("Invalid SecurityGroup ID"))?;
        // Shadow the string id with its typed form for the lookup below.
        let security_group_id = SecurityGroupId::from_uuid(uuid);
        // Existence check doubles as a tenant-scope check: groups from
        // another org/project are indistinguishable from missing ones.
        self.metadata
            .get_security_group(org_id, project_id, &security_group_id)
            .await
            .map_err(|e| Status::internal(e.to_string()))?
            .ok_or_else(|| {
                Status::not_found(format!(
                    "SecurityGroup {} not found",
                    security_group_id
                ))
            })?;
        resolved.push(security_group_id);
    }
    Ok(resolved)
}
} }
fn port_to_proto(port: &Port) -> ProtoPort { fn port_to_proto(port: &Port) -> ProtoPort {
@ -126,6 +155,91 @@ fn proto_to_device_type(device_type: i32) -> DeviceType {
} }
} }
/// Parse a subnet's `a.b.c.d/prefix` CIDR into address and prefix
/// length (0-32), mapping malformed input to `InvalidArgument`.
fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> {
    let pieces = match cidr.split_once('/') {
        Some(pieces) => pieces,
        None => {
            return Err(Status::invalid_argument(
                "subnet cidr_block must be in a.b.c.d/prefix form",
            ))
        }
    };
    let network = pieces.0.parse::<Ipv4Addr>().map_err(|_| {
        Status::invalid_argument("subnet cidr_block must contain a valid IPv4 address")
    })?;
    let bits = pieces
        .1
        .parse::<u8>()
        .map_err(|_| Status::invalid_argument("subnet cidr_block prefix must be an integer"))?;
    if bits > 32 {
        return Err(Status::invalid_argument(
            "subnet cidr_block prefix must be between 0 and 32",
        ));
    }
    Ok((network, bits))
}
/// Parse a dotted-quad IPv4 address, tagging the error with `field_name`.
fn parse_ipv4(ip: &str, field_name: &str) -> Result<Ipv4Addr, Status> {
    match ip.parse::<Ipv4Addr>() {
        Ok(parsed) => Ok(parsed),
        Err(_) => Err(Status::invalid_argument(format!(
            "{field_name} must be a valid IPv4 address"
        ))),
    }
}
/// Netmask with `prefix` leading one-bits; a zero-length prefix yields 0.
fn network_mask(prefix: u8) -> u32 {
    // `checked_shl` returns None for the full 32-bit shift, covering /0
    // without a special-case branch.
    u32::MAX.checked_shl(32 - u32::from(prefix)).unwrap_or(0)
}
/// True when `ip` lies inside the `(network, prefix)` CIDR block.
fn cidr_contains_ip(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool {
    let (network, prefix) = cidr;
    let mask = network_mask(prefix);
    (u32::from(network) & mask) == (u32::from(ip) & mask)
}
/// Validate a caller-requested fixed IP for a port.
///
/// Rejects addresses outside the subnet CIDR (`InvalidArgument`), the
/// subnet's gateway address (`InvalidArgument`), and addresses already
/// held by a port in `existing_ports` (`AlreadyExists`).
///
/// NOTE(review): the duplicate check only sees the snapshot passed in;
/// a concurrent port create could still race — confirm upstream
/// serialization of port writes per subnet.
fn validate_requested_ip(subnet: &Subnet, requested_ip: &str, existing_ports: &[Port]) -> Result<(), Status> {
    let subnet_cidr = parse_ipv4_cidr(&subnet.cidr_block)?;
    let requested_ip = parse_ipv4(requested_ip, "ip_address")?;
    // Canonical dotted-quad form so the string comparisons below are
    // stable regardless of how the caller formatted the address.
    let requested_ip_string = requested_ip.to_string();
    if !cidr_contains_ip(subnet_cidr, requested_ip) {
        return Err(Status::invalid_argument(
            "ip_address must fall within the subnet cidr_block",
        ));
    }
    if subnet
        .gateway_ip
        .as_deref()
        .map(|gateway_ip| gateway_ip == requested_ip_string.as_str())
        .unwrap_or(false)
    {
        return Err(Status::invalid_argument(
            "ip_address cannot reuse the subnet gateway_ip",
        ));
    }
    if existing_ports.iter().any(|port| {
        port.ip_address
            .as_deref()
            .map(|ip_address| ip_address == requested_ip_string.as_str())
            .unwrap_or(false)
    }) {
        return Err(Status::already_exists(
            "ip_address is already allocated in the subnet",
        ));
    }
    Ok(())
}
/// Reject blank port names and names already used by a different port
/// in the subnet; `exclude` lets an update keep its current name.
fn ensure_unique_port_name(existing_ports: &[Port], desired_name: &str, exclude: Option<PortId>) -> Result<(), Status> {
    if desired_name.trim().is_empty() {
        return Err(Status::invalid_argument("Port name is required"));
    }
    let taken_by_other = existing_ports
        .iter()
        .filter(|port| exclude != Some(port.id))
        .any(|port| port.name == desired_name);
    if taken_by_other {
        return Err(Status::already_exists("Port name already exists in the subnet"));
    }
    Ok(())
}
#[tonic::async_trait] #[tonic::async_trait]
impl PortService for PortServiceImpl { impl PortService for PortServiceImpl {
async fn create_port( async fn create_port(
@ -155,6 +269,13 @@ impl PortService for PortServiceImpl {
) )
.await?; .await?;
let existing_ports = self
.metadata
.list_ports(Some(&subnet_id), None)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_port_name(&existing_ports, &req.name, None)?;
let port = Port::new(&req.name, subnet_id); let port = Port::new(&req.name, subnet_id);
let mut port = port; let mut port = port;
if !req.description.is_empty() { if !req.description.is_empty() {
@ -163,6 +284,7 @@ impl PortService for PortServiceImpl {
// IP allocation: use provided IP or auto-allocate // IP allocation: use provided IP or auto-allocate
if !req.ip_address.is_empty() { if !req.ip_address.is_empty() {
validate_requested_ip(&subnet, &req.ip_address, &existing_ports)?;
port.ip_address = Some(req.ip_address); port.ip_address = Some(req.ip_address);
} else { } else {
// Auto-allocate IP from subnet CIDR // Auto-allocate IP from subnet CIDR
@ -171,15 +293,21 @@ impl PortService for PortServiceImpl {
.allocate_ip(&org_id, &project_id, &subnet_id) .allocate_ip(&org_id, &project_id, &subnet_id)
.await .await
.map_err(|e| Status::internal(e.to_string()))?; .map_err(|e| Status::internal(e.to_string()))?;
if port.ip_address.is_none() {
return Err(Status::resource_exhausted(
"no available IP addresses remain in the subnet",
));
}
} }
if !req.security_group_ids.is_empty() { if !req.security_group_ids.is_empty() {
port.security_groups = req port.security_groups = self
.security_group_ids .resolve_security_groups_in_tenant(
.iter() &org_id,
.filter_map(|id| uuid::Uuid::parse_str(id).ok()) &project_id,
.map(SecurityGroupId::from_uuid) &req.security_group_ids,
.collect(); )
.await?;
} }
self.metadata self.metadata
@ -192,10 +320,17 @@ impl PortService for PortServiceImpl {
.as_ref() .as_ref()
.ok_or_else(|| Status::internal("IP allocation failed"))?; .ok_or_else(|| Status::internal("IP allocation failed"))?;
self.ovn if let Err(error) = self
.ovn
.create_logical_switch_port(&port, &subnet.vpc_id, ip_address) .create_logical_switch_port(&port, &subnet.vpc_id, ip_address)
.await .await
.map_err(|e| Status::internal(e.to_string()))?; {
let _ = self
.metadata
.delete_port(&org_id, &project_id, &subnet_id, &port.id)
.await;
return Err(Status::internal(error.to_string()));
}
Ok(Response::new(CreatePortResponse { Ok(Response::new(CreatePortResponse {
port: Some(port_to_proto(&port)), port: Some(port_to_proto(&port)),
@ -332,15 +467,24 @@ impl PortService for PortServiceImpl {
}; };
let security_group_ids = if !req.security_group_ids.is_empty() { let security_group_ids = if !req.security_group_ids.is_empty() {
Some( Some(
req.security_group_ids self.resolve_security_groups_in_tenant(
.iter() &org_id,
.filter_map(|id| uuid::Uuid::parse_str(id).ok()) &project_id,
.map(SecurityGroupId::from_uuid) &req.security_group_ids,
.collect(), )
.await?,
) )
} else { } else {
None None
}; };
if let Some(name) = name.as_deref() {
let existing_ports = self
.metadata
.list_ports(Some(&subnet_id), None)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_port_name(&existing_ports, name, Some(port_id))?;
}
let port = self let port = self
.metadata .metadata
@ -490,3 +634,44 @@ impl PortService for PortServiceImpl {
})) }))
} }
} }
// Unit tests for the pure port-validation helpers in this module.
#[cfg(test)]
mod tests {
    use super::*;
    use prismnet_types::VpcId;

    // 10.0.2.x lies outside the subnet's 10.0.1.0/24 block.
    #[test]
    fn rejects_requested_ip_outside_subnet() {
        let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24");
        subnet.gateway_ip = Some("10.0.1.1".to_string());
        let err = validate_requested_ip(&subnet, "10.0.2.10", &[]).unwrap_err();
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
    }

    // The gateway address is reserved and cannot be assigned to a port.
    #[test]
    fn rejects_requested_ip_matching_gateway() {
        let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24");
        subnet.gateway_ip = Some("10.0.1.1".to_string());
        let err = validate_requested_ip(&subnet, "10.0.1.1", &[]).unwrap_err();
        assert_eq!(err.code(), tonic::Code::InvalidArgument);
    }

    // An IP already held by another port in the subnet is refused.
    #[test]
    fn rejects_duplicate_requested_ip() {
        let mut subnet = Subnet::new("subnet", VpcId::new(), "10.0.1.0/24");
        subnet.gateway_ip = Some("10.0.1.1".to_string());
        let mut existing_port = Port::new("existing", subnet.id);
        existing_port.ip_address = Some("10.0.1.10".to_string());
        let err = validate_requested_ip(&subnet, "10.0.1.10", &[existing_port]).unwrap_err();
        assert_eq!(err.code(), tonic::Code::AlreadyExists);
    }

    // Port names are unique per subnet.
    #[test]
    fn rejects_duplicate_port_name() {
        let existing = vec![Port::new("frontend", SubnetId::new())];
        let err = ensure_unique_port_name(&existing, "frontend", None).unwrap_err();
        assert_eq!(err.code(), tonic::Code::AlreadyExists);
    }
}

View file

@ -0,0 +1,455 @@
//! Router gRPC service implementation
use std::net::Ipv4Addr;
use std::sync::Arc;
use tonic::{Request, Response, Status};
use iam_service_auth::{
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
};
use prismnet_api::{
router_service_server::RouterService, CreateRouterRequest, CreateRouterResponse,
DeleteRouterRequest, DeleteRouterResponse, GetRouterRequest, GetRouterResponse,
ListRoutersRequest, ListRoutersResponse, Router as ProtoRouter,
RouterStatus as ProtoRouterStatus, UpdateRouterRequest, UpdateRouterResponse,
};
use prismnet_types::{Router, RouterId, RouterStatus, Vpc, VpcId};
use crate::{NetworkMetadataStore, OvnClient};
const ACTION_ROUTER_CREATE: &str = "network:routers:create";
const ACTION_ROUTER_READ: &str = "network:routers:read";
const ACTION_ROUTER_LIST: &str = "network:routers:list";
const ACTION_ROUTER_UPDATE: &str = "network:routers:update";
const ACTION_ROUTER_DELETE: &str = "network:routers:delete";
/// Tenant-scoped Router gRPC service, backed by the shared network
/// metadata store and the OVN client.
#[derive(Clone)]
pub struct RouterServiceImpl {
    // Durable router/VPC records.
    metadata: Arc<NetworkMetadataStore>,
    // OVN client used to realize logical routers, router ports, and SNAT.
    ovn: Arc<OvnClient>,
    // IAM authorizer consulted on every RPC.
    auth: Arc<AuthService>,
}
impl RouterServiceImpl {
    /// Build the service from its shared dependencies.
    pub fn new(
        metadata: Arc<NetworkMetadataStore>,
        ovn: Arc<OvnClient>,
        auth: Arc<AuthService>,
    ) -> Self {
        Self { metadata, ovn, auth }
    }

    /// Confirm the VPC exists inside the caller's org/project scope.
    ///
    /// A missing VPC is reported as `PermissionDenied` rather than
    /// `NotFound`; store failures map to `Internal`.
    async fn validate_vpc_in_tenant(
        &self,
        org_id: &str,
        project_id: &str,
        vpc_id: &VpcId,
    ) -> Result<Vpc, Status> {
        let lookup = self
            .metadata
            .get_vpc(org_id, project_id, vpc_id)
            .await
            .map_err(|e| Status::internal(e.to_string()))?;
        match lookup {
            Some(vpc) => Ok(vpc),
            None => Err(Status::permission_denied("VPC not in tenant scope")),
        }
    }
}
/// Convert an internal [`Router`] record into its protobuf form.
///
/// An absent description becomes an empty string (proto3 default).
/// The OVN-internal identifiers (`ovn_router_id`, `ovn_router_port_id`)
/// are not part of the public proto.
fn router_to_proto(router: &Router) -> ProtoRouter {
    ProtoRouter {
        id: router.id.to_string(),
        org_id: router.org_id.clone(),
        project_id: router.project_id.clone(),
        vpc_id: router.vpc_id.to_string(),
        name: router.name.clone(),
        description: router.description.clone().unwrap_or_default(),
        gateway_cidr: router.gateway_cidr.clone(),
        mac_address: router.mac_address.clone(),
        external_ip: router.external_ip.clone(),
        status: status_to_proto(&router.status) as i32,
        created_at: router.created_at,
        updated_at: router.updated_at,
    }
}
/// Map the internal router lifecycle status onto the proto enum.
/// Exhaustive match: adding a variant forces an update here.
fn status_to_proto(status: &RouterStatus) -> ProtoRouterStatus {
    match status {
        RouterStatus::Provisioning => ProtoRouterStatus::Provisioning,
        RouterStatus::Active => ProtoRouterStatus::Active,
        RouterStatus::Updating => ProtoRouterStatus::Updating,
        RouterStatus::Deleting => ProtoRouterStatus::Deleting,
        RouterStatus::Error => ProtoRouterStatus::Error,
    }
}
/// Parse `a.b.c.d/prefix` into an address and prefix length (0-32).
///
/// NOTE(review): near-identical helpers exist in the ipam and port
/// modules; consider hoisting a single shared implementation.
fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> {
    let (ip, prefix) = cidr
        .split_once('/')
        .ok_or_else(|| Status::invalid_argument("CIDR must be in a.b.c.d/prefix form"))?;
    let ip: Ipv4Addr = ip
        .parse()
        .map_err(|_| Status::invalid_argument("CIDR must contain a valid IPv4 address"))?;
    let prefix: u8 = prefix
        .parse()
        .map_err(|_| Status::invalid_argument("CIDR prefix must be an integer"))?;
    if prefix > 32 {
        return Err(Status::invalid_argument(
            "CIDR prefix must be between 0 and 32",
        ));
    }
    Ok((ip, prefix))
}
/// Parse a plain IPv4 address, tagging the error with the field name.
fn parse_ipv4(ip: &str, field_name: &str) -> Result<Ipv4Addr, Status> {
    ip.parse()
        .map_err(|_| Status::invalid_argument(format!("{field_name} must be a valid IPv4 address")))
}
/// Validate a colon-separated, six-octet MAC address
/// (e.g. `02:00:00:00:00:01`).
///
/// Each octet must be exactly two hexadecimal digits. The digits are
/// checked character-by-character instead of via `u8::from_str_radix`,
/// because that parser also accepts a leading `+` sign — an input like
/// `+1:+2:+3:+4:+5:+6` would otherwise pass as a "valid" MAC.
fn validate_mac_address(mac_address: &str) -> Result<(), Status> {
    let octets: Vec<_> = mac_address.split(':').collect();
    let well_formed = octets.len() == 6
        && octets
            .iter()
            .all(|octet| octet.len() == 2 && octet.chars().all(|c| c.is_ascii_hexdigit()));
    if !well_formed {
        return Err(Status::invalid_argument(
            "mac_address must be a valid MAC address",
        ));
    }
    Ok(())
}
/// Big-endian numeric form of an IPv4 address, for mask arithmetic.
fn ipv4_to_u32(ip: Ipv4Addr) -> u32 {
    u32::from_be_bytes(ip.octets())
}
/// Netmask with `prefix` leading one-bits; a zero-length prefix yields 0.
fn network_mask(prefix: u8) -> u32 {
    match prefix {
        // Shifting a u32 by 32 would overflow, so /0 is special-cased.
        0 => 0,
        bits => u32::MAX << (32 - bits),
    }
}
/// True when `ip` lies inside the `(network, prefix)` CIDR block.
fn cidr_contains(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool {
    let (network, prefix) = cidr;
    let mask = network_mask(prefix);
    (ipv4_to_u32(network) & mask) == (ipv4_to_u32(ip) & mask)
}
/// Validate user-supplied router parameters against the owning VPC.
///
/// Checks that the VPC CIDR and gateway CIDR parse, that `external_ip`
/// parses, that the MAC is well formed, and that the gateway address
/// lies inside the VPC's block. All failures map to `InvalidArgument`.
fn validate_router_inputs(vpc: &Vpc, gateway_cidr: &str, mac_address: &str, external_ip: &str) -> Result<(), Status> {
    let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block)?;
    let (gateway_ip, _) = parse_ipv4_cidr(gateway_cidr)?;
    // external_ip only needs to parse here; create_router feeds it to
    // the SNAT configuration later.
    let _ = parse_ipv4(external_ip, "external_ip")?;
    validate_mac_address(mac_address)?;
    if !cidr_contains(vpc_cidr, gateway_ip) {
        return Err(Status::invalid_argument(
            "gateway_cidr must use an address within the VPC cidr_block",
        ));
    }
    Ok(())
}
#[tonic::async_trait]
impl RouterService for RouterServiceImpl {
/// Create a tenant router: validate inputs, provision the OVN logical
/// router + router port + SNAT, then persist the metadata record.
///
/// OVN resources are created before the metadata write; each failure
/// after `create_logical_router` rolls back by deleting the logical
/// router (rollback errors are deliberately ignored — best effort).
/// One router per VPC is enforced against the current listing.
async fn create_router(
    &self,
    request: Request<CreateRouterRequest>,
) -> Result<Response<CreateRouterResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let vpc_uuid =
        uuid::Uuid::parse_str(&req.vpc_id).map_err(|_| Status::invalid_argument("Invalid VPC ID"))?;
    let vpc_id = VpcId::from_uuid(vpc_uuid);
    // NOTE(review): the VPC lookup runs before the authorize call, so an
    // unauthorized caller can learn whether a VPC id is in its tenant
    // scope — confirm this ordering is intended.
    let vpc = self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id).await?;
    self.auth
        .authorize(
            &tenant,
            ACTION_ROUTER_CREATE,
            &resource_for_tenant("router", "*", &org_id, &project_id),
        )
        .await?;
    if req.name.trim().is_empty() {
        return Err(Status::invalid_argument("router name is required"));
    }
    validate_router_inputs(&vpc, &req.gateway_cidr, &req.mac_address, &req.external_ip)?;
    // Enforce at most one router per VPC within the tenant.
    let existing = self
        .metadata
        .list_routers(&org_id, &project_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?;
    if existing.iter().any(|router| router.vpc_id == vpc_id) {
        return Err(Status::already_exists("VPC already has a router"));
    }
    let mut router = Router::new(
        &req.name,
        &org_id,
        &project_id,
        vpc_id,
        &req.gateway_cidr,
        &req.mac_address,
        &req.external_ip,
    );
    if !req.description.is_empty() {
        router.description = Some(req.description);
    }
    // Step 1: OVN logical router, named after the metadata id.
    let ovn_router_id = self
        .ovn
        .create_logical_router(&format!("router-{}", router.id))
        .await
        .map_err(|e| Status::internal(e.to_string()))?;
    // Step 2: gateway port on the VPC; roll back the router on failure.
    let ovn_router_port_id = match self
        .ovn
        .add_router_port(&ovn_router_id, &vpc_id, &router.gateway_cidr, &router.mac_address)
        .await
    {
        Ok(port_id) => port_id,
        Err(error) => {
            let _ = self.ovn.delete_logical_router(&ovn_router_id).await;
            return Err(Status::internal(error.to_string()));
        }
    };
    // Step 3: SNAT from the VPC block to the external IP; same rollback.
    if let Err(error) = self
        .ovn
        .configure_snat(&ovn_router_id, &router.external_ip, &vpc.cidr_block)
        .await
    {
        let _ = self.ovn.delete_logical_router(&ovn_router_id).await;
        return Err(Status::internal(error.to_string()));
    }
    router.ovn_router_id = ovn_router_id;
    router.ovn_router_port_id = ovn_router_port_id;
    // Step 4: persist; a metadata failure tears down the OVN router too.
    if let Err(error) = self.metadata.create_router(router.clone()).await {
        let _ = self.ovn.delete_logical_router(&router.ovn_router_id).await;
        return Err(Status::internal(error.to_string()));
    }
    Ok(Response::new(CreateRouterResponse {
        router: Some(router_to_proto(&router)),
    }))
}
/// Fetch a single router by id within the caller's tenant scope.
///
/// Missing routers map to `NotFound`; store failures map to `Internal`.
async fn get_router(
    &self,
    request: Request<GetRouterRequest>,
) -> Result<Response<GetRouterResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let id = uuid::Uuid::parse_str(&req.id)
        .map_err(|_| Status::invalid_argument("Invalid router ID"))?;
    let router_id = RouterId::from_uuid(id);
    // Authorize the read against the concrete router resource id.
    self.auth
        .authorize(
            &tenant,
            ACTION_ROUTER_READ,
            &resource_for_tenant("router", router_id.to_string(), &org_id, &project_id),
        )
        .await?;
    let router = self
        .metadata
        .get_router(&org_id, &project_id, &router_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Router not found"))?;
    Ok(Response::new(GetRouterResponse {
        router: Some(router_to_proto(&router)),
    }))
}
/// Lists routers in the tenant, optionally filtered to a single VPC.
async fn list_routers(
    &self,
    request: Request<ListRoutersRequest>,
) -> Result<Response<ListRoutersResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    // List authorization uses a wildcard resource for the tenant.
    self.auth
        .authorize(
            &tenant,
            ACTION_ROUTER_LIST,
            &resource_for_tenant("router", "*", &org_id, &project_id),
        )
        .await?;
    let req = request.into_inner();
    // An empty vpc_id means "no filter"; otherwise it must parse and belong
    // to the caller's tenant.
    let vpc_filter = match req.vpc_id.as_str() {
        "" => None,
        raw => {
            let vpc_uuid = uuid::Uuid::parse_str(raw)
                .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?;
            Some(VpcId::from_uuid(vpc_uuid))
        }
    };
    if let Some(vpc_id) = vpc_filter.as_ref() {
        self.validate_vpc_in_tenant(&org_id, &project_id, vpc_id).await?;
    }
    let mut routers = self
        .metadata
        .list_routers(&org_id, &project_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?;
    if let Some(vpc_id) = vpc_filter {
        routers.retain(|router| router.vpc_id == vpc_id);
    }
    Ok(Response::new(ListRoutersResponse {
        routers: routers.iter().map(router_to_proto).collect(),
        next_page_token: String::new(),
    }))
}
/// Updates a router's mutable metadata fields (name and/or description).
///
/// Empty strings on the wire mean "leave this field unchanged"; only the
/// metadata record is touched — no OVN reconfiguration happens here.
async fn update_router(
    &self,
    request: Request<UpdateRouterRequest>,
) -> Result<Response<UpdateRouterResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let router_uuid = uuid::Uuid::parse_str(&req.id)
        .map_err(|_| Status::invalid_argument("Invalid router ID"))?;
    let router_id = RouterId::from_uuid(router_uuid);
    self.auth
        .authorize(
            &tenant,
            ACTION_ROUTER_UPDATE,
            &resource_for_tenant("router", router_id.to_string(), &org_id, &project_id),
        )
        .await?;
    // Map empty strings to None so the store skips those fields.
    let name = (!req.name.is_empty()).then_some(req.name);
    let description = (!req.description.is_empty()).then_some(req.description);
    let router = self
        .metadata
        .update_router(&org_id, &project_id, &router_id, name, description)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Router not found"))?;
    Ok(Response::new(UpdateRouterResponse {
        router: Some(router_to_proto(&router)),
    }))
}
/// Deletes a router: OVN logical router first, then the metadata record.
///
/// OVN teardown runs before the metadata delete, so a failed OVN call
/// leaves the record in place and the operation can be retried.
async fn delete_router(
    &self,
    request: Request<DeleteRouterRequest>,
) -> Result<Response<DeleteRouterResponse>, Status> {
    let tenant = get_tenant_context(&request)?;
    let (org_id, project_id) = resolve_tenant_ids_from_context(
        &tenant,
        &request.get_ref().org_id,
        &request.get_ref().project_id,
    )?;
    let req = request.into_inner();
    let router_uuid = uuid::Uuid::parse_str(&req.id)
        .map_err(|_| Status::invalid_argument("Invalid router ID"))?;
    let router_id = RouterId::from_uuid(router_uuid);
    self.auth
        .authorize(
            &tenant,
            ACTION_ROUTER_DELETE,
            &resource_for_tenant("router", router_id.to_string(), &org_id, &project_id),
        )
        .await?;
    // Look the record up first so we know which OVN router to remove.
    let router = self
        .metadata
        .get_router(&org_id, &project_id, &router_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Router not found"))?;
    self.ovn
        .delete_logical_router(&router.ovn_router_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?;
    self.metadata
        .delete_router(&org_id, &project_id, &router_id)
        .await
        .map_err(|e| Status::internal(e.to_string()))?
        .ok_or_else(|| Status::not_found("Router not found"))?;
    Ok(Response::new(DeleteRouterResponse {}))
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rejects_invalid_mac_address() {
        // Five octets instead of six must be rejected.
        let status = validate_mac_address("02:00:00:00:00").unwrap_err();
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn rejects_gateway_outside_vpc_cidr() {
        let vpc = Vpc::new("test", "org", "proj", "10.0.0.0/16");
        // 10.1.0.1 falls outside 10.0.0.0/16.
        let status =
            validate_router_inputs(&vpc, "10.1.0.1/24", "02:00:00:00:00:01", "203.0.113.10")
                .unwrap_err();
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn accepts_router_inputs_inside_vpc_cidr() {
        let vpc = Vpc::new("test", "org", "proj", "10.0.0.0/16");
        validate_router_inputs(&vpc, "10.0.0.1/24", "02:00:00:00:00:01", "203.0.113.10")
            .unwrap();
    }
}

View file

@ -15,7 +15,9 @@ use prismnet_api::{
use iam_service_auth::{ use iam_service_auth::{
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService, get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
}; };
use prismnet_types::{IpProtocol, RuleDirection, SecurityGroup, SecurityGroupId, SecurityGroupRule}; use prismnet_types::{
IpProtocol, Port, RuleDirection, SecurityGroup, SecurityGroupId, SecurityGroupRule,
};
use crate::ovn::{build_acl_match, calculate_priority}; use crate::ovn::{build_acl_match, calculate_priority};
use crate::{NetworkMetadataStore, OvnClient}; use crate::{NetworkMetadataStore, OvnClient};
@ -49,6 +51,41 @@ impl SecurityGroupServiceImpl {
} }
} }
/// Ensures a security-group name is non-empty and unique within the tenant.
///
/// `exclude` skips one group from the uniqueness check — used when renaming
/// that same group so it does not collide with itself.
fn ensure_unique_security_group_name(
    existing: &[SecurityGroup],
    desired_name: &str,
    exclude: Option<SecurityGroupId>,
) -> Result<(), Status> {
    if desired_name.trim().is_empty() {
        return Err(Status::invalid_argument("SecurityGroup name is required"));
    }
    let clashes = existing
        .iter()
        .any(|sg| sg.name == desired_name && Some(sg.id) != exclude);
    if clashes {
        return Err(Status::already_exists(
            "SecurityGroup name already exists in the tenant",
        ));
    }
    Ok(())
}
/// Reports whether a security group is still referenced, either by a port's
/// security-group list or by another group's remote-group rules.
fn security_group_is_referenced(
    security_group_id: SecurityGroupId,
    security_groups: &[SecurityGroup],
    ports: &[Port],
) -> bool {
    let used_by_port = ports
        .iter()
        .any(|port| port.security_groups.contains(&security_group_id));
    let used_by_rule = security_groups.iter().any(|sg| {
        sg.id != security_group_id
            && sg
                .rules
                .iter()
                .any(|rule| rule.remote_group_id == Some(security_group_id))
    });
    used_by_port || used_by_rule
}
fn security_group_to_proto(sg: &SecurityGroup) -> ProtoSecurityGroup { fn security_group_to_proto(sg: &SecurityGroup) -> ProtoSecurityGroup {
ProtoSecurityGroup { ProtoSecurityGroup {
id: sg.id.to_string(), id: sg.id.to_string(),
@ -137,6 +174,12 @@ impl SecurityGroupService for SecurityGroupServiceImpl {
) )
.await?; .await?;
let req = request.into_inner(); let req = request.into_inner();
let existing = self
.metadata
.list_security_groups(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_security_group_name(&existing, &req.name, None)?;
let sg = SecurityGroup::new(&req.name, &org_id, &project_id); let sg = SecurityGroup::new(&req.name, &org_id, &project_id);
let mut sg = sg; let mut sg = sg;
@ -256,6 +299,14 @@ impl SecurityGroupService for SecurityGroupServiceImpl {
} else { } else {
None None
}; };
if let Some(name) = name.as_deref() {
let existing = self
.metadata
.list_security_groups(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_security_group_name(&existing, name, Some(sg_id))?;
}
let sg = self let sg = self
.metadata .metadata
@ -292,6 +343,38 @@ impl SecurityGroupService for SecurityGroupServiceImpl {
) )
.await?; .await?;
let security_groups = self
.metadata
.list_security_groups(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
let vpcs = self
.metadata
.list_vpcs(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
let mut ports = Vec::new();
for vpc in vpcs {
let subnets = self
.metadata
.list_subnets(&org_id, &project_id, &vpc.id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
for subnet in subnets {
ports.extend(
self.metadata
.list_ports(Some(&subnet.id), None)
.await
.map_err(|e| Status::internal(e.to_string()))?,
);
}
}
if security_group_is_referenced(sg_id, &security_groups, &ports) {
return Err(Status::failed_precondition(
"cannot delete SecurityGroup while it is referenced by ports or rules",
));
}
self.metadata self.metadata
.delete_security_group(&org_id, &project_id, &sg_id) .delete_security_group(&org_id, &project_id, &sg_id)
.await .await
@ -440,3 +523,25 @@ impl SecurityGroupService for SecurityGroupServiceImpl {
Ok(Response::new(RemoveRuleResponse {})) Ok(Response::new(RemoveRuleResponse {}))
} }
} }
#[cfg(test)]
mod tests {
    use super::*;
    use prismnet_types::SubnetId;

    #[test]
    fn rejects_duplicate_security_group_name() {
        let tenant_groups = vec![SecurityGroup::new("web", "org", "proj")];
        let status =
            ensure_unique_security_group_name(&tenant_groups, "web", None).unwrap_err();
        assert_eq!(status.code(), tonic::Code::AlreadyExists);
    }

    #[test]
    fn detects_security_group_references() {
        let group = SecurityGroup::new("web", "org", "proj");
        let group_id = group.id;
        // A port that lists the group keeps it referenced.
        let mut port = Port::new("port", SubnetId::new());
        port.security_groups = vec![group_id];
        assert!(security_group_is_referenced(group_id, &[group], &[port]));
    }
}

View file

@ -1,18 +1,19 @@
//! Subnet gRPC service implementation //! Subnet gRPC service implementation
use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
use tonic::{Request, Response, Status}; use tonic::{Request, Response, Status};
use iam_service_auth::{
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
};
use prismnet_api::{ use prismnet_api::{
subnet_service_server::SubnetService, CreateSubnetRequest, CreateSubnetResponse, subnet_service_server::SubnetService, CreateSubnetRequest, CreateSubnetResponse,
DeleteSubnetRequest, DeleteSubnetResponse, GetSubnetRequest, GetSubnetResponse, DeleteSubnetRequest, DeleteSubnetResponse, GetSubnetRequest, GetSubnetResponse,
ListSubnetsRequest, ListSubnetsResponse, Subnet as ProtoSubnet, ListSubnetsRequest, ListSubnetsResponse, Subnet as ProtoSubnet,
SubnetStatus as ProtoSubnetStatus, UpdateSubnetRequest, UpdateSubnetResponse, SubnetStatus as ProtoSubnetStatus, UpdateSubnetRequest, UpdateSubnetResponse,
}; };
use iam_service_auth::{ use prismnet_types::{Subnet, SubnetId, SubnetStatus, Vpc, VpcId};
get_tenant_context, resolve_tenant_ids_from_context, resource_for_tenant, AuthService,
};
use prismnet_types::{Subnet, SubnetId, SubnetStatus, VpcId};
use crate::NetworkMetadataStore; use crate::NetworkMetadataStore;
@ -38,19 +39,127 @@ impl SubnetServiceImpl {
org_id: &str, org_id: &str,
project_id: &str, project_id: &str,
vpc_id: &VpcId, vpc_id: &VpcId,
) -> Result<(), Status> { ) -> Result<Vpc, Status> {
if self self.metadata
.metadata
.get_vpc(org_id, project_id, vpc_id) .get_vpc(org_id, project_id, vpc_id)
.await .await
.map_err(|e| Status::internal(e.to_string()))? .map_err(|e| Status::internal(e.to_string()))?
.is_none() .ok_or_else(|| Status::permission_denied("VPC not in tenant scope"))
{ }
return Err(Status::permission_denied("VPC not in tenant scope")); }
/// Parses `a.b.c.d/len` into an address/prefix pair, mapping every failure to
/// an InvalidArgument status that names `field_name`.
fn parse_ipv4_cidr(cidr: &str, field_name: &str) -> Result<(Ipv4Addr, u8), Status> {
    let Some((ip_part, prefix_part)) = cidr.split_once('/') else {
        return Err(Status::invalid_argument(format!(
            "{field_name} must be in a.b.c.d/prefix form"
        )));
    };
    let ip: Ipv4Addr = ip_part.parse().map_err(|_| {
        Status::invalid_argument(format!("{field_name} must contain a valid IPv4 address"))
    })?;
    let prefix: u8 = prefix_part.parse().map_err(|_| {
        Status::invalid_argument(format!("{field_name} prefix must be an integer"))
    })?;
    if prefix > 32 {
        return Err(Status::invalid_argument(format!(
            "{field_name} prefix must be between 0 and 32"
        )));
    }
    Ok((ip, prefix))
}
/// Parses a bare IPv4 address, reporting a failure against `field_name`.
fn parse_ipv4(ip: &str, field_name: &str) -> Result<Ipv4Addr, Status> {
    match ip.parse::<Ipv4Addr>() {
        Ok(addr) => Ok(addr),
        Err(_) => Err(Status::invalid_argument(format!(
            "{field_name} must be a valid IPv4 address"
        ))),
    }
}
/// Network mask for an IPv4 prefix length in 0..=32; /0 maps to all-zeros.
/// (A shift by 32 would be UB-adjacent in Rust, hence the explicit zero arm.)
fn network_mask(prefix: u8) -> u32 {
    match prefix {
        0 => 0,
        p => u32::MAX << (32 - p),
    }
}
/// Returns the inclusive `(start, end)` u32 address range covered by a CIDR.
///
/// The upper bound is computed as `start | !mask` (host bits are exactly the
/// complement of the network mask). The previous `start + size - 1` form
/// panicked in debug builds for a /0 prefix: the 2^32 block size truncates to
/// 0 when cast to u32 and `0 - 1` then underflows. `start | !mask` is
/// equivalent for prefixes 1..=32 and also correct for /0 (full range).
fn cidr_range(cidr: (Ipv4Addr, u8)) -> (u32, u32) {
    let mask = network_mask(cidr.1);
    let start = u32::from(cidr.0) & mask;
    let end = start | !mask;
    (start, end)
}
/// True when `ip` lies inside the CIDR block (network bits match).
fn cidr_contains_ip(cidr: (Ipv4Addr, u8), ip: Ipv4Addr) -> bool {
    let mask = network_mask(cidr.1);
    (u32::from(ip) & mask) == (u32::from(cidr.0) & mask)
}
/// True when `child` is fully contained within `parent`.
fn cidr_contains_cidr(parent: (Ipv4Addr, u8), child: (Ipv4Addr, u8)) -> bool {
    let (parent_start, parent_end) = cidr_range(parent);
    let (child_start, child_end) = cidr_range(child);
    parent_start <= child_start && child_end <= parent_end
}
/// True when the two CIDR ranges share at least one address.
fn cidr_overlaps(a: (Ipv4Addr, u8), b: (Ipv4Addr, u8)) -> bool {
    let (a_start, a_end) = cidr_range(a);
    let (b_start, b_end) = cidr_range(b);
    // Disjoint iff one range ends before the other starts.
    !(a_end < b_start || b_end < a_start)
}
fn validate_subnet_inputs(
vpc: &Vpc,
cidr_block: &str,
gateway_ip: Option<&str>,
sibling_cidrs: impl IntoIterator<Item = String>,
) -> Result<(), Status> {
let vpc_cidr = parse_ipv4_cidr(&vpc.cidr_block, "vpc cidr_block")?;
let subnet_cidr = parse_ipv4_cidr(cidr_block, "cidr_block")?;
if !cidr_contains_cidr(vpc_cidr, subnet_cidr) {
return Err(Status::invalid_argument(
"subnet cidr_block must be contained within the VPC cidr_block",
));
}
if let Some(gateway_ip) = gateway_ip {
let gateway_ip = parse_ipv4(gateway_ip, "gateway_ip")?;
if !cidr_contains_ip(subnet_cidr, gateway_ip) {
return Err(Status::invalid_argument(
"gateway_ip must fall within the subnet cidr_block",
));
}
}
for sibling in sibling_cidrs {
let sibling_cidr = parse_ipv4_cidr(&sibling, "existing subnet cidr_block")?;
if cidr_overlaps(subnet_cidr, sibling_cidr) {
return Err(Status::already_exists(
"subnet cidr_block overlaps an existing subnet in the VPC",
));
}
} }
Ok(()) Ok(())
} }
fn ensure_unique_subnet_name(
existing: &[Subnet],
desired_name: &str,
exclude: Option<SubnetId>,
) -> Result<(), Status> {
if desired_name.trim().is_empty() {
return Err(Status::invalid_argument("Subnet name is required"));
}
if existing
.iter()
.any(|subnet| subnet.name == desired_name && Some(subnet.id) != exclude)
{
return Err(Status::already_exists(
"Subnet name already exists in the VPC",
));
}
Ok(())
} }
fn subnet_to_proto(subnet: &Subnet) -> ProtoSubnet { fn subnet_to_proto(subnet: &Subnet) -> ProtoSubnet {
@ -93,7 +202,8 @@ impl SubnetService for SubnetServiceImpl {
.map_err(|_| Status::invalid_argument("Invalid VPC ID"))?; .map_err(|_| Status::invalid_argument("Invalid VPC ID"))?;
let vpc_id = VpcId::from_uuid(vpc_id); let vpc_id = VpcId::from_uuid(vpc_id);
self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id) let vpc = self
.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id)
.await?; .await?;
self.auth self.auth
.authorize( .authorize(
@ -103,6 +213,22 @@ impl SubnetService for SubnetServiceImpl {
) )
.await?; .await?;
let existing_subnets = self
.metadata
.list_subnets(&org_id, &project_id, &vpc_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_subnet_name(&existing_subnets, &req.name, None)?;
validate_subnet_inputs(
&vpc,
&req.cidr_block,
(!req.gateway_ip.is_empty()).then_some(req.gateway_ip.as_str()),
existing_subnets
.into_iter()
.map(|subnet| subnet.cidr_block)
.collect::<Vec<_>>(),
)?;
let subnet = Subnet::new(&req.name, vpc_id, &req.cidr_block); let subnet = Subnet::new(&req.name, vpc_id, &req.cidr_block);
let mut subnet = subnet; let mut subnet = subnet;
if !req.description.is_empty() { if !req.description.is_empty() {
@ -112,6 +238,7 @@ impl SubnetService for SubnetServiceImpl {
subnet.gateway_ip = Some(req.gateway_ip); subnet.gateway_ip = Some(req.gateway_ip);
} }
subnet.dhcp_enabled = req.dhcp_enabled; subnet.dhcp_enabled = req.dhcp_enabled;
subnet.status = SubnetStatus::Active;
self.metadata self.metadata
.create_subnet(subnet.clone()) .create_subnet(subnet.clone())
@ -138,12 +265,6 @@ impl SubnetService for SubnetServiceImpl {
let id = uuid::Uuid::parse_str(&req.id) let id = uuid::Uuid::parse_str(&req.id)
.map_err(|_| Status::invalid_argument("Invalid Subnet ID"))?; .map_err(|_| Status::invalid_argument("Invalid Subnet ID"))?;
let subnet_id = SubnetId::from_uuid(id); let subnet_id = SubnetId::from_uuid(id);
let vpc_uuid = uuid::Uuid::parse_str(&req.vpc_id)
.map_err(|_| Status::invalid_argument("Invalid VPC ID"))?;
let vpc_id = VpcId::from_uuid(vpc_uuid);
self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id)
.await?;
self.auth self.auth
.authorize( .authorize(
&tenant, &tenant,
@ -152,12 +273,28 @@ impl SubnetService for SubnetServiceImpl {
) )
.await?; .await?;
let subnet = if req.vpc_id.trim().is_empty() {
let subnet = self let subnet = self
.metadata .metadata
.get_subnet(&vpc_id, &subnet_id) .find_subnet_by_id(&subnet_id)
.await .await
.map_err(|e| Status::internal(e.to_string()))? .map_err(|e| Status::internal(e.to_string()))?
.ok_or_else(|| Status::not_found("Subnet not found"))?; .ok_or_else(|| Status::not_found("Subnet not found"))?;
self.validate_vpc_in_tenant(&org_id, &project_id, &subnet.vpc_id)
.await?;
subnet
} else {
let vpc_uuid = uuid::Uuid::parse_str(&req.vpc_id)
.map_err(|_| Status::invalid_argument("Invalid VPC ID"))?;
let vpc_id = VpcId::from_uuid(vpc_uuid);
self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id)
.await?;
self.metadata
.get_subnet(&vpc_id, &subnet_id)
.await
.map_err(|e| Status::internal(e.to_string()))?
.ok_or_else(|| Status::not_found("Subnet not found"))?
};
Ok(Response::new(GetSubnetResponse { Ok(Response::new(GetSubnetResponse {
subnet: Some(subnet_to_proto(&subnet)), subnet: Some(subnet_to_proto(&subnet)),
@ -191,6 +328,9 @@ impl SubnetService for SubnetServiceImpl {
return Err(Status::invalid_argument("vpc_id is required")); return Err(Status::invalid_argument("vpc_id is required"));
}; };
self.validate_vpc_in_tenant(&org_id, &project_id, &vpc_id)
.await?;
let subnets = self let subnets = self
.metadata .metadata
.list_subnets(&org_id, &project_id, &vpc_id) .list_subnets(&org_id, &project_id, &vpc_id)
@ -242,6 +382,14 @@ impl SubnetService for SubnetServiceImpl {
} else { } else {
None None
}; };
if let Some(name) = name.as_deref() {
let existing_subnets = self
.metadata
.list_subnets(&org_id, &project_id, &vpc_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_subnet_name(&existing_subnets, name, Some(subnet_id))?;
}
let subnet = self let subnet = self
.metadata .metadata
@ -292,6 +440,17 @@ impl SubnetService for SubnetServiceImpl {
) )
.await?; .await?;
let ports = self
.metadata
.list_ports(Some(&subnet_id), None)
.await
.map_err(|e| Status::internal(e.to_string()))?;
if !ports.is_empty() {
return Err(Status::failed_precondition(
"cannot delete subnet while ports still exist",
));
}
self.metadata self.metadata
.delete_subnet(&org_id, &project_id, &vpc_id, &subnet_id) .delete_subnet(&org_id, &project_id, &vpc_id, &subnet_id)
.await .await
@ -300,3 +459,45 @@ impl SubnetService for SubnetServiceImpl {
Ok(Response::new(DeleteSubnetResponse {})) Ok(Response::new(DeleteSubnetResponse {}))
} }
} }
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rejects_subnet_outside_vpc() {
        let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16");
        // 10.1.0.0/24 is not contained in 10.0.0.0/16.
        let status =
            validate_subnet_inputs(&vpc, "10.1.0.0/24", None, Vec::<String>::new()).unwrap_err();
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn rejects_gateway_outside_subnet() {
        let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16");
        let status =
            validate_subnet_inputs(&vpc, "10.0.1.0/24", Some("10.0.2.1"), Vec::<String>::new())
                .unwrap_err();
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn rejects_overlapping_subnet() {
        let vpc = Vpc::new("vpc", "org", "proj", "10.0.0.0/16");
        // The sibling /25 sits inside the requested /24.
        let siblings = vec!["10.0.1.128/25".to_string()];
        let status = validate_subnet_inputs(&vpc, "10.0.1.0/24", Some("10.0.1.1"), siblings)
            .unwrap_err();
        assert_eq!(status.code(), tonic::Code::AlreadyExists);
    }

    #[test]
    fn rejects_duplicate_subnet_name() {
        let existing = vec![Subnet::new("app", VpcId::new(), "10.0.1.0/24")];
        let status = ensure_unique_subnet_name(&existing, "app", None).unwrap_err();
        assert_eq!(status.code(), tonic::Code::AlreadyExists);
    }
}

View file

@ -1,5 +1,6 @@
//! VPC gRPC service implementation //! VPC gRPC service implementation
use std::net::Ipv4Addr;
use std::sync::Arc; use std::sync::Arc;
use tonic::{Request, Response, Status}; use tonic::{Request, Response, Status};
@ -42,6 +43,37 @@ impl VpcServiceImpl {
} }
} }
/// Validates and splits a VPC `cidr_block` of the form `a.b.c.d/prefix`.
fn parse_ipv4_cidr(cidr: &str) -> Result<(Ipv4Addr, u8), Status> {
    let Some((ip_part, prefix_part)) = cidr.split_once('/') else {
        return Err(Status::invalid_argument(
            "cidr_block must be in a.b.c.d/prefix form",
        ));
    };
    let ip: Ipv4Addr = ip_part
        .parse()
        .map_err(|_| Status::invalid_argument("cidr_block must contain a valid IPv4 address"))?;
    let prefix: u8 = prefix_part
        .parse()
        .map_err(|_| Status::invalid_argument("cidr_block prefix must be an integer"))?;
    if prefix > 32 {
        return Err(Status::invalid_argument(
            "cidr_block prefix must be between 0 and 32",
        ));
    }
    Ok((ip, prefix))
}
/// Ensures a VPC name is non-empty and not already taken in the tenant.
///
/// `exclude` skips one VPC from the check — used when renaming that VPC so it
/// does not collide with itself.
fn ensure_unique_vpc_name(
    existing: &[Vpc],
    desired_name: &str,
    exclude: Option<VpcId>,
) -> Result<(), Status> {
    if desired_name.trim().is_empty() {
        return Err(Status::invalid_argument("VPC name is required"));
    }
    let taken = existing
        .iter()
        .any(|vpc| vpc.name == desired_name && Some(vpc.id) != exclude);
    if taken {
        return Err(Status::already_exists("VPC name already exists"));
    }
    Ok(())
}
fn vpc_to_proto(vpc: &Vpc) -> ProtoVpc { fn vpc_to_proto(vpc: &Vpc) -> ProtoVpc {
ProtoVpc { ProtoVpc {
id: vpc.id.to_string(), id: vpc.id.to_string(),
@ -86,6 +118,13 @@ impl VpcService for VpcServiceImpl {
) )
.await?; .await?;
let req = request.into_inner(); let req = request.into_inner();
let existing = self
.metadata
.list_vpcs(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_vpc_name(&existing, &req.name, None)?;
parse_ipv4_cidr(&req.cidr_block)?;
let vpc = Vpc::new(&req.name, &org_id, &project_id, &req.cidr_block); let vpc = Vpc::new(&req.name, &org_id, &project_id, &req.cidr_block);
let mut vpc = vpc; let mut vpc = vpc;
@ -93,15 +132,19 @@ impl VpcService for VpcServiceImpl {
vpc.description = Some(req.description); vpc.description = Some(req.description);
} }
self.metadata if let Err(error) = self
.create_vpc(vpc.clone()) .ovn
.await
.map_err(|e| Status::internal(e.to_string()))?;
self.ovn
.create_logical_switch(&vpc.id, &vpc.cidr_block) .create_logical_switch(&vpc.id, &vpc.cidr_block)
.await .await
.map_err(|e| Status::internal(e.to_string()))?; {
return Err(Status::internal(error.to_string()));
}
vpc.status = VpcStatus::Active;
if let Err(error) = self.metadata.create_vpc(vpc.clone()).await {
let _ = self.ovn.delete_logical_switch(&vpc.id).await;
return Err(Status::internal(error.to_string()));
}
Ok(Response::new(CreateVpcResponse { Ok(Response::new(CreateVpcResponse {
vpc: Some(vpc_to_proto(&vpc)), vpc: Some(vpc_to_proto(&vpc)),
@ -208,6 +251,14 @@ impl VpcService for VpcServiceImpl {
} else { } else {
Some(req.description) Some(req.description)
}; };
if let Some(name) = name.as_deref() {
let existing = self
.metadata
.list_vpcs(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
ensure_unique_vpc_name(&existing, name, Some(vpc_id))?;
}
let vpc = self let vpc = self
.metadata .metadata
@ -244,6 +295,28 @@ impl VpcService for VpcServiceImpl {
) )
.await?; .await?;
let subnets = self
.metadata
.list_subnets(&org_id, &project_id, &vpc_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
if !subnets.is_empty() {
return Err(Status::failed_precondition(
"cannot delete VPC while subnets still exist",
));
}
let routers = self
.metadata
.list_routers(&org_id, &project_id)
.await
.map_err(|e| Status::internal(e.to_string()))?;
if routers.iter().any(|router| router.vpc_id == vpc_id) {
return Err(Status::failed_precondition(
"cannot delete VPC while routers still exist",
));
}
self.metadata self.metadata
.delete_vpc(&org_id, &project_id, &vpc_id) .delete_vpc(&org_id, &project_id, &vpc_id)
.await .await
@ -258,3 +331,21 @@ impl VpcService for VpcServiceImpl {
Ok(Response::new(DeleteVpcResponse {})) Ok(Response::new(DeleteVpcResponse {}))
} }
} }
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn rejects_invalid_vpc_cidr() {
        // /99 is not a valid IPv4 prefix length.
        let status = parse_ipv4_cidr("10.0.0.0/99").unwrap_err();
        assert_eq!(status.code(), tonic::Code::InvalidArgument);
    }

    #[test]
    fn rejects_duplicate_vpc_name() {
        let existing = vec![Vpc::new("prod", "org", "proj", "10.0.0.0/16")];
        let status = ensure_unique_vpc_name(&existing, "prod", None).unwrap_err();
        assert_eq!(status.code(), tonic::Code::AlreadyExists);
    }
}

View file

@ -4,6 +4,7 @@
mod dhcp; mod dhcp;
mod port; mod port;
mod router;
mod security_group; mod security_group;
mod service_ip_pool; mod service_ip_pool;
mod subnet; mod subnet;
@ -11,6 +12,7 @@ mod vpc;
pub use dhcp::*; pub use dhcp::*;
pub use port::*; pub use port::*;
pub use router::*;
pub use security_group::*; pub use security_group::*;
pub use service_ip_pool::*; pub use service_ip_pool::*;
pub use subnet::*; pub use subnet::*;

View file

@ -0,0 +1,129 @@
//! Router types
use serde::{Deserialize, Serialize};
use uuid::Uuid;
use crate::VpcId;
/// Unique identifier for a Router (newtype over a UUID).
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
pub struct RouterId(Uuid);

impl RouterId {
    /// Generates a fresh random (v4) router ID.
    pub fn new() -> Self {
        RouterId(Uuid::new_v4())
    }

    /// Wraps an existing UUID, e.g. one parsed from an API request.
    pub fn from_uuid(uuid: Uuid) -> Self {
        RouterId(uuid)
    }

    /// Borrows the underlying UUID.
    pub fn as_uuid(&self) -> &Uuid {
        &self.0
    }
}
impl Default for RouterId {
fn default() -> Self {
Self::new()
}
}
impl std::fmt::Display for RouterId {
    /// Formats as the inner UUID's canonical string form.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate directly to the UUID's Display implementation.
        std::fmt::Display::fmt(&self.0, f)
    }
}
/// Router lifecycle status
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum RouterStatus {
    /// Initial state; also the `Default` variant.
    Provisioning,
    /// Router is provisioned and usable.
    Active,
    /// An update is in progress.
    Updating,
    /// Teardown is in progress.
    Deleting,
    /// The last operation failed.
    Error,
}
impl Default for RouterStatus {
fn default() -> Self {
Self::Provisioning
}
}
/// Tenant-scoped logical router with SNAT configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct Router {
    pub id: RouterId,
    /// Owning organization (tenant scope).
    pub org_id: String,
    /// Owning project within the organization.
    pub project_id: String,
    /// VPC this router is attached to.
    pub vpc_id: VpcId,
    pub name: String,
    pub description: Option<String>,
    /// Gateway address in CIDR form on the VPC side, e.g. "10.0.0.1/24".
    pub gateway_cidr: String,
    /// MAC address of the router's VPC-facing port.
    pub mac_address: String,
    /// External address used for SNAT.
    pub external_ip: String,
    /// Backing OVN logical-router ID; empty until the dataplane object exists.
    pub ovn_router_id: String,
    /// OVN router-port ID attaching the router to the VPC switch; empty until provisioned.
    pub ovn_router_port_id: String,
    pub status: RouterStatus,
    /// Creation time as Unix seconds.
    pub created_at: u64,
    /// Last-update time as Unix seconds.
    pub updated_at: u64,
}
impl Router {
    /// Builds a new router record with a generated ID and current timestamps.
    ///
    /// OVN identifiers start empty; the service fills them in once the
    /// dataplane objects exist. NOTE(review): the record is created directly
    /// in `Active` status even though `RouterStatus::default()` is
    /// `Provisioning` — confirm this is intentional.
    pub fn new(
        name: impl Into<String>,
        org_id: impl Into<String>,
        project_id: impl Into<String>,
        vpc_id: VpcId,
        gateway_cidr: impl Into<String>,
        mac_address: impl Into<String>,
        external_ip: impl Into<String>,
    ) -> Self {
        // Seconds since the Unix epoch; falls back to 0 if the clock is
        // somehow before the epoch.
        let timestamp = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs();
        Self {
            id: RouterId::new(),
            org_id: org_id.into(),
            project_id: project_id.into(),
            vpc_id,
            name: name.into(),
            description: None,
            gateway_cidr: gateway_cidr.into(),
            mac_address: mac_address.into(),
            external_ip: external_ip.into(),
            ovn_router_id: String::new(),
            ovn_router_port_id: String::new(),
            status: RouterStatus::Active,
            created_at: timestamp,
            updated_at: timestamp,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_router_creation() {
        let router = Router::new(
            "edge",
            "org-1",
            "proj-1",
            VpcId::new(),
            "10.0.0.1/24",
            "02:00:00:00:00:01",
            "203.0.113.10",
        );
        // Fresh routers carry no OVN identifier yet and report Active.
        assert_eq!(router.name, "edge");
        assert_eq!(router.external_ip, "203.0.113.10");
        assert!(router.ovn_router_id.is_empty());
        assert_eq!(router.status, RouterStatus::Active);
    }
}

View file

@ -0,0 +1,119 @@
#!/usr/bin/env python3
from __future__ import annotations
import re
import sys
import tomllib
from pathlib import Path
from typing import Any
def extract_workspace_source_roots(flake_path: Path) -> dict[str, list[str]]:
    """Parse the ``workspaceSourceRoots`` attrset out of a flake.nix file.

    Returns a mapping of workspace name -> list of configured source-root
    strings. Raises ValueError when the attrset cannot be located.
    """
    text = flake_path.read_text()
    block = re.search(r"workspaceSourceRoots\s*=\s*\{(.*?)\n\s*\};", text, re.S)
    if block is None:
        raise ValueError(f"Could not find workspaceSourceRoots in {flake_path}")
    entries = re.findall(r"\n\s*(\w+)\s*=\s*\[(.*?)\];", block.group(1), re.S)
    # Each entry body is a whitespace-separated list of quoted strings.
    return {name: re.findall(r'"([^"]+)"', body) for name, body in entries}
def collect_path_dependencies(value: Any) -> list[str]:
    """Recursively gather every ``path = "..."`` value from parsed Cargo.toml data.

    Dicts and lists are walked depth-first; any dict with a string ``path``
    key contributes that path. Other value types are ignored.
    """
    found: list[str] = []
    if isinstance(value, dict):
        candidate = value.get("path")
        if isinstance(candidate, str):
            found.append(candidate)
        for child in value.values():
            found.extend(collect_path_dependencies(child))
    elif isinstance(value, list):
        for item in value:
            found.extend(collect_path_dependencies(item))
    return found
def workspace_manifests(repo_root: Path, workspace_name: str) -> list[Path]:
workspace_manifest = repo_root / workspace_name / "Cargo.toml"
manifests = [workspace_manifest]
workspace_data = tomllib.loads(workspace_manifest.read_text())
members = workspace_data.get("workspace", {}).get("members", [])
for member in members:
for candidate in (workspace_manifest.parent).glob(member):
manifest = candidate if candidate.name == "Cargo.toml" else candidate / "Cargo.toml"
if manifest.is_file():
manifests.append(manifest)
unique_manifests: list[Path] = []
seen: set[Path] = set()
for manifest in manifests:
resolved = manifest.resolve()
if resolved in seen:
continue
seen.add(resolved)
unique_manifests.append(manifest)
return unique_manifests
def required_root(dep_rel: Path) -> str:
parts = dep_rel.parts
if not parts:
return ""
if parts[0] == "crates" and len(parts) >= 2:
return "/".join(parts[:2])
return parts[0]
def is_covered(dep_rel: str, configured_roots: list[str]) -> bool:
    """True when the dependency path equals a configured root or sits below one."""
    for root in configured_roots:
        if dep_rel == root or dep_rel.startswith(root + "/"):
            return True
    return False
def main() -> int:
    """Audit every configured workspace's Cargo path dependencies.

    Fails (exit code 1) when a path dependency resolves inside the repository
    but outside the workspace's configured workspaceSourceRoots; otherwise
    prints a success message and returns 0.
    """
    repo_root = Path(sys.argv[1]).resolve() if len(sys.argv) > 1 else Path.cwd().resolve()
    workspace_roots = extract_workspace_source_roots(repo_root / "flake.nix")
    failures: list[str] = []
    for workspace_name, configured_roots in sorted(workspace_roots.items()):
        if not (repo_root / workspace_name / "Cargo.toml").is_file():
            # Workspace listed in the flake but absent from this checkout.
            continue
        for manifest in workspace_manifests(repo_root, workspace_name):
            manifest_data = tomllib.loads(manifest.read_text())
            for dep_path in collect_path_dependencies(manifest_data):
                dependency_dir = (manifest.parent / dep_path).resolve()
                try:
                    dep_rel = dependency_dir.relative_to(repo_root)
                except ValueError:
                    # Dependency points outside the repository; not our concern.
                    continue
                dep_rel_str = dep_rel.as_posix()
                if is_covered(dep_rel_str, configured_roots):
                    continue
                needed = required_root(dep_rel)
                manifest_rel = manifest.relative_to(repo_root).as_posix()
                failures.append(
                    f"{workspace_name}: missing source root '{needed}' for dependency "
                    f"'{dep_rel_str}' referenced by {manifest_rel}"
                )
    if failures:
        print("workspaceSourceRoots is missing path dependencies:", file=sys.stderr)
        for failure in failures:
            print(f"  - {failure}", file=sys.stderr)
        return 1
    print("workspaceSourceRoots covers all workspace path dependencies.")
    return 0
if __name__ == "__main__":
    # Propagate main()'s exit code to the shell.
    raise SystemExit(main())