// photoncloud-monorepo/plasmavmc/crates/plasmavmc-server/tests/common/mod.rs

#![allow(dead_code)]
use async_trait::async_trait;
use plasmavmc_api::proto::vm_service_client::VmServiceClient;
use plasmavmc_hypervisor::{BackendCapabilities, HypervisorBackend, UnsupportedReason};
use plasmavmc_types::{
DiskSpec, HypervisorType as VmHypervisorType, NetworkSpec, Result as VmResult, VmHandle,
VmState, VmStatus, VirtualMachine,
};
use std::time::Duration;
use tonic::codegen::InterceptedService;
use tonic::service::Interceptor;
use tonic::transport::Channel;
use tonic::Request;
/// Global lock to serialize tests that mutate process-wide environment variables.
///
/// Many of our integration tests rely on env-based configuration (endpoints, storage backend,
/// runtime paths). Rust tests run in parallel by default, so we guard those mutations.
pub async fn env_lock() -> tokio::sync::MutexGuard<'static, ()> {
    use std::sync::OnceLock;
    use tokio::sync::Mutex;
    // Lazily-initialized process-wide mutex; `OnceLock` makes the init race-free.
    static ENV_MUTEX: OnceLock<Mutex<()>> = OnceLock::new();
    let mutex = ENV_MUTEX.get_or_init(|| Mutex::new(()));
    mutex.lock().await
}
/// Set per-test env defaults so PlasmaVMC can run in a fast, local-only mode.
///
/// - Uses file-backed storage to avoid external dependencies
/// - Stores runtime/state under `/tmp` to avoid permission issues
pub fn set_plasmavmc_fast_test_env() {
    use std::time::{SystemTime, UNIX_EPOCH};
    // Force file backend to avoid ChainFire/FlareDB connections in the fast lane.
    std::env::set_var("PLASMAVMC_STORAGE_BACKEND", "file");
    // Nanosecond timestamp gives each test invocation its own runtime/state paths,
    // so parallel tests never stomp on each other's files.
    let stamp = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_nanos();
    let tmp = std::path::PathBuf::from("/tmp");
    let runtime_dir = tmp.join(format!("pvmc-runtime-{stamp}"));
    let state_path = tmp.join(format!("pvmc-state-{stamp}.json"));
    std::env::set_var("PLASMAVMC_RUNTIME_DIR", runtime_dir.to_str().unwrap());
    std::env::set_var("PLASMAVMC_STATE_PATH", state_path.to_str().unwrap());
}
/// Allocate an ephemeral localhost port for test servers.
///
/// Binds port 0 so the OS picks a free port, then drops the listener and
/// returns the chosen port number. (There is a small race window before the
/// test server re-binds, which is acceptable for tests.)
pub fn allocate_port() -> u16 {
    let listener = std::net::TcpListener::bind("127.0.0.1:0").expect("bind ephemeral port");
    let addr = listener.local_addr().unwrap();
    addr.port()
}
/// Common interceptor to attach org/project metadata to PlasmaVMC requests.
///
/// The values are inserted as `org-id` / `project-id` request metadata; both
/// must parse as valid tonic metadata values (ASCII) or the insert panics.
pub struct OrgProjectInterceptor {
    /// Organization id sent as the `org-id` metadata key.
    pub org: String,
    /// Project id sent as the `project-id` metadata key.
    pub project: String,
}
impl Interceptor for OrgProjectInterceptor {
    /// Stamp every outgoing request with `org-id` and `project-id` metadata.
    ///
    /// Panics if either value cannot be parsed as a metadata value — fine for
    /// tests, where the ids are ASCII constants.
    fn call(&mut self, mut req: Request<()>) -> Result<Request<()>, tonic::Status> {
        let meta = req.metadata_mut();
        meta.insert("org-id", self.org.parse().unwrap());
        meta.insert("project-id", self.project.parse().unwrap());
        Ok(req)
    }
}
/// Connect a `VmServiceClient` to `addr` that stamps every request with the
/// given org/project metadata via [`OrgProjectInterceptor`].
///
/// Panics on connection failure — callers are tests that expect a live server.
pub async fn vm_client_with_meta(
    addr: &str,
    org: &str,
    project: &str,
) -> VmServiceClient<InterceptedService<Channel, OrgProjectInterceptor>> {
    let endpoint = Channel::from_shared(format!("http://{addr}")).unwrap();
    let channel = endpoint.connect().await.unwrap();
    let interceptor = OrgProjectInterceptor {
        org: org.to_string(),
        project: project.to_string(),
    };
    VmServiceClient::with_interceptor(channel, interceptor)
}
/// No-op hypervisor backend for tests (avoids QEMU dependency).
///
/// It reports itself as KVM and returns a stub `VmHandle`, allowing PlasmaVMC API
/// semantics and integrations to be tested without a real hypervisor.
/// Stateless unit struct — all behavior lives in the `HypervisorBackend` impl.
pub struct NoopHypervisor;
#[async_trait]
impl HypervisorBackend for NoopHypervisor {
    /// Pretend to be KVM so production code paths that branch on backend type run.
    fn backend_type(&self) -> VmHypervisorType {
        VmHypervisorType::Kvm
    }
    fn capabilities(&self) -> BackendCapabilities {
        BackendCapabilities::default()
    }
    /// The no-op backend accepts every spec unconditionally.
    fn supports(&self, _spec: &plasmavmc_types::VmSpec) -> std::result::Result<(), UnsupportedReason> {
        Ok(())
    }
    /// Produce a stub handle; no process is spawned, so the pid is a fake 0.
    async fn create(&self, vm: &VirtualMachine) -> VmResult<VmHandle> {
        // Reuse the per-test runtime dir when set (see set_plasmavmc_fast_test_env),
        // falling back to a fixed /tmp path otherwise.
        let dir = match std::env::var("PLASMAVMC_RUNTIME_DIR") {
            Ok(d) => d,
            Err(_) => "/tmp/plasmavmc-noop".into(),
        };
        Ok(VmHandle {
            vm_id: vm.id,
            runtime_dir: dir,
            pid: Some(0),
            backend_state: Default::default(),
        })
    }
    // Lifecycle operations are all successful no-ops.
    async fn start(&self, _handle: &VmHandle) -> VmResult<()> {
        Ok(())
    }
    async fn stop(&self, _handle: &VmHandle, _timeout: Duration) -> VmResult<()> {
        Ok(())
    }
    async fn kill(&self, _handle: &VmHandle) -> VmResult<()> {
        Ok(())
    }
    async fn reboot(&self, _handle: &VmHandle) -> VmResult<()> {
        Ok(())
    }
    async fn delete(&self, _handle: &VmHandle) -> VmResult<()> {
        Ok(())
    }
    /// Always reports `Stopped` with the same fake host pid used by `create`.
    async fn status(&self, _handle: &VmHandle) -> VmResult<VmStatus> {
        Ok(VmStatus {
            host_pid: Some(0),
            actual_state: VmState::Stopped,
            ..VmStatus::default()
        })
    }
    // Device hot-plug operations are likewise successful no-ops.
    async fn attach_disk(&self, _handle: &VmHandle, _disk: &DiskSpec) -> VmResult<()> {
        Ok(())
    }
    async fn detach_disk(&self, _handle: &VmHandle, _disk_id: &str) -> VmResult<()> {
        Ok(())
    }
    async fn attach_nic(&self, _handle: &VmHandle, _nic: &NetworkSpec) -> VmResult<()> {
        Ok(())
    }
    async fn detach_nic(&self, _handle: &VmHandle, _nic_id: &str) -> VmResult<()> {
        Ok(())
    }
}