// photoncloud-monorepo/plasmavmc/crates/plasmavmc-server/tests/grpc_smoke.rs
//
// Provenance: centra 5c6eb04a46 T036: Add VM cluster deployment configs for nixos-anywhere
// - netboot-base.nix with SSH key auth
// - Launch scripts for node01/02/03
// - Node configuration.nix and disko.nix
// - Nix modules for first-boot automation
//
// Generated with [Claude Code](https://claude.com/claude-code)
// Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
// 2025-12-11 09:59:19 +09:00
use plasmavmc_api::proto::{
vm_service_client::VmServiceClient, CreateVmRequest, GetVmRequest, HypervisorType as ProtoHypervisorType,
ListVmsRequest, StartVmRequest, StopVmRequest, VmSpec,
};
use plasmavmc_server::{VmServiceImpl};
use plasmavmc_hypervisor::HypervisorRegistry;
use plasmavmc_kvm::KvmBackend;
use std::sync::Arc;
use std::time::Duration;
use tempfile::TempDir;
use tokio::time::sleep;
use tonic::transport::{Server, Channel};
use tonic::codegen::InterceptedService;
use tonic::service::Interceptor;
use tonic::Request;
/// Tenant identity attached to every outgoing gRPC request as metadata.
/// Used by the test clients to exercise org/project scoping on the server.
struct OrgProjectInterceptor {
    // Value inserted as the `org-id` metadata header.
    org: String,
    // Value inserted as the `project-id` metadata header.
    project: String,
}
impl Interceptor for OrgProjectInterceptor {
    /// Stamps `org-id` / `project-id` metadata onto every outgoing request.
    ///
    /// Returns `Status::invalid_argument` if either value is not a valid
    /// ASCII metadata value instead of panicking mid-test — the method
    /// already returns `Result`, so surface the error through it.
    fn call(&mut self, mut req: Request<()>) -> Result<Request<()>, tonic::Status> {
        let org = self
            .org
            .parse()
            .map_err(|_| tonic::Status::invalid_argument("invalid org-id metadata value"))?;
        let project = self
            .project
            .parse()
            .map_err(|_| tonic::Status::invalid_argument("invalid project-id metadata value"))?;
        req.metadata_mut().insert("org-id", org);
        req.metadata_mut().insert("project-id", project);
        Ok(req)
    }
}
/// Builds a `VmServiceClient` connected to `addr` whose every request
/// carries the given tenant metadata (`org-id` / `project-id`) via an
/// [`OrgProjectInterceptor`]. Panics on connection failure (test helper).
async fn client_with_meta(addr: &str, org: &str, project: &str) -> VmServiceClient<InterceptedService<Channel, OrgProjectInterceptor>> {
    let endpoint = Channel::from_shared(format!("http://{addr}")).unwrap();
    let channel = endpoint.connect().await.unwrap();
    let interceptor = OrgProjectInterceptor {
        org: org.to_string(),
        project: project.to_string(),
    };
    VmServiceClient::with_interceptor(channel, interceptor)
}
#[tokio::test]
#[ignore]
async fn grpc_create_start_status_stop() {
// Preconditions
let qemu = std::env::var("PLASMAVMC_QEMU_PATH").unwrap_or_else(|_| "/usr/bin/qemu-system-x86_64".into());
let qcow = match std::env::var("PLASMAVMC_QCOW2_PATH") {
Ok(path) => path,
Err(_) => {
eprintln!("Skipping grpc smoke: PLASMAVMC_QCOW2_PATH not set");
return;
}
};
if !std::path::Path::new(&qemu).exists() || !std::path::Path::new(&qcow).exists() {
eprintln!("Skipping grpc smoke: qemu or qcow2 missing");
return;
}
// Setup server
let registry = Arc::new(HypervisorRegistry::new());
registry.register(Arc::new(KvmBackend::with_defaults()));
let svc = VmServiceImpl::new(registry).await.unwrap();
let addr = "127.0.0.1:50071";
tokio::spawn(async move {
Server::builder()
.add_service(plasmavmc_api::proto::vm_service_server::VmServiceServer::new(svc))
.serve(addr.parse().unwrap())
.await
.unwrap();
});
tokio::time::sleep(std::time::Duration::from_millis(200)).await;
let mut client = client_with_meta(addr, "org1", "proj1").await;
let create = client.create_vm(CreateVmRequest {
name: "grpc-smoke".into(),
org_id: "org1".into(),
project_id: "proj1".into(),
spec: Some(VmSpec::default()),
hypervisor: ProtoHypervisorType::Kvm as i32,
metadata: Default::default(),
labels: Default::default(),
}).await.unwrap().into_inner();
let vm_id = create.id.clone();
let _ = client.start_vm(StartVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
}).await.unwrap();
let stopped = client.stop_vm(StopVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
force: false,
timeout_seconds: 2,
}).await.unwrap().into_inner();
assert_eq!(stopped.id, vm_id);
}
/// Helper to create a ChainFire test server configuration.
///
/// Derives the raft and gossip ports from `port` (+100 and +200) and
/// backs the node's storage with a fresh temp dir; the `TempDir` is
/// returned so the caller keeps it alive for the test's duration.
fn chainfire_test_config(port: u16) -> (chainfire_server::config::ServerConfig, TempDir) {
    use std::net::SocketAddr;
    use chainfire_server::config::{ClusterConfig, NetworkConfig, NodeConfig, RaftConfig, ServerConfig, StorageConfig};
    // All three listeners bind loopback; only the port varies.
    let local = |p: u16| -> SocketAddr { format!("127.0.0.1:{}", p).parse().unwrap() };
    let temp_dir = tempfile::tempdir().unwrap();
    let network = NetworkConfig {
        api_addr: local(port),
        raft_addr: local(port + 100),
        gossip_addr: local(port + 200),
        tls: None,
    };
    let node = NodeConfig {
        id: 1,
        name: format!("test-node-{}", port),
        role: "control_plane".to_string(),
    };
    // Single-node bootstrap cluster with no preconfigured members.
    let cluster = ClusterConfig {
        id: 1,
        bootstrap: true,
        initial_members: vec![],
    };
    let storage = StorageConfig {
        data_dir: temp_dir.path().to_path_buf(),
    };
    let config = ServerConfig {
        node,
        cluster,
        network,
        storage,
        raft: RaftConfig::default(),
    };
    (config, temp_dir)
}
#[tokio::test]
#[ignore]
async fn grpc_chainfire_restart_smoke() {
// Preconditions
let qemu = std::env::var("PLASMAVMC_QEMU_PATH").unwrap_or_else(|_| "/usr/bin/qemu-system-x86_64".into());
let qcow = match std::env::var("PLASMAVMC_QCOW2_PATH") {
Ok(path) => path,
Err(_) => {
eprintln!("Skipping ChainFire restart smoke: PLASMAVMC_QCOW2_PATH not set");
return;
}
};
if !std::path::Path::new(&qemu).exists() || !std::path::Path::new(&qcow).exists() {
eprintln!("Skipping ChainFire restart smoke: qemu or qcow2 missing");
return;
}
// Start ChainFire server
let (chainfire_config, _chainfire_temp_dir) = chainfire_test_config(25051);
let chainfire_api_addr = chainfire_config.network.api_addr;
let chainfire_server = chainfire_server::server::Server::new(chainfire_config).await.unwrap();
let chainfire_handle = tokio::spawn(async move {
let _ = chainfire_server.run().await;
});
// Wait for ChainFire to start
sleep(Duration::from_millis(500)).await;
// Setup PlasmaVMC server with ChainFire backend
std::env::set_var("PLASMAVMC_STORAGE_BACKEND", "chainfire");
std::env::set_var("PLASMAVMC_CHAINFIRE_ENDPOINT", format!("http://{}", chainfire_api_addr));
let registry1 = Arc::new(HypervisorRegistry::new());
registry1.register(Arc::new(KvmBackend::with_defaults()));
let svc1 = VmServiceImpl::new(registry1).await.unwrap();
let addr = "127.0.0.1:50072";
let server1_handle = tokio::spawn(async move {
Server::builder()
.add_service(plasmavmc_api::proto::vm_service_server::VmServiceServer::new(svc1))
.serve(addr.parse().unwrap())
.await
.unwrap();
});
sleep(Duration::from_millis(200)).await;
let mut client1 = client_with_meta(addr, "org1", "proj1").await;
// Create VM
let create = client1.create_vm(CreateVmRequest {
name: "chainfire-restart-smoke".into(),
org_id: "org1".into(),
project_id: "proj1".into(),
spec: Some(VmSpec::default()),
hypervisor: ProtoHypervisorType::Kvm as i32,
metadata: Default::default(),
labels: Default::default(),
}).await.unwrap().into_inner();
let vm_id = create.id.clone();
assert_eq!(create.name, "chainfire-restart-smoke");
// Start VM
let _started = client1.start_vm(StartVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
}).await.unwrap();
// Get VM status
let status1 = client1.get_vm(GetVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
}).await.unwrap().into_inner();
assert_eq!(status1.id, vm_id);
// Stop VM
let stopped = client1.stop_vm(StopVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
force: false,
timeout_seconds: 2,
}).await.unwrap().into_inner();
assert_eq!(stopped.id, vm_id);
// Shutdown first PlasmaVMC server
server1_handle.abort();
sleep(Duration::from_millis(200)).await;
// Restart PlasmaVMC server (same ChainFire backend)
let registry2 = Arc::new(HypervisorRegistry::new());
registry2.register(Arc::new(KvmBackend::with_defaults()));
let svc2 = VmServiceImpl::new(registry2).await.unwrap();
let server2_handle = tokio::spawn(async move {
Server::builder()
.add_service(plasmavmc_api::proto::vm_service_server::VmServiceServer::new(svc2))
.serve(addr.parse().unwrap())
.await
.unwrap();
});
sleep(Duration::from_millis(200)).await;
// Verify VM state persisted across restart
let mut client2 = client_with_meta(addr, "org1", "proj1").await;
let status2 = client2.get_vm(GetVmRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
vm_id: vm_id.clone(),
}).await.unwrap().into_inner();
assert_eq!(status2.id, vm_id);
assert_eq!(status2.name, "chainfire-restart-smoke");
// Verify list_vms includes the VM
let list = client2.list_vms(ListVmsRequest {
org_id: "org1".into(),
project_id: "proj1".into(),
page_size: 10,
page_token: String::new(),
filter: String::new(),
}).await.unwrap().into_inner();
assert_eq!(list.vms.len(), 1);
assert_eq!(list.vms[0].id, vm_id);
// Verify tenant scoping: different tenant cannot see the VM
let mut client_other = client_with_meta(addr, "org2", "proj2").await;
let list_other = client_other.list_vms(ListVmsRequest {
org_id: "org2".into(),
project_id: "proj2".into(),
page_size: 10,
page_token: String::new(),
filter: String::new(),
}).await.unwrap().into_inner();
assert_eq!(list_other.vms.len(), 0, "Other tenant should not see VM");
// Cleanup
server2_handle.abort();
chainfire_handle.abort();
}