// Source: photoncloud-monorepo/plasmavmc/crates/plasmavmc-server/src/rest.rs
// (extraction metadata — 785 lines, 24 KiB, Rust — retained as a comment so
// the file parses as valid Rust)

//! REST HTTP API handlers for PlasmaVMC
//!
//! Implements REST endpoints as specified in T050.S5:
//! - GET /api/v1/vms - List VMs
//! - POST /api/v1/vms - Create VM
//! - GET /api/v1/vms/{id} - Get VM details
//! - DELETE /api/v1/vms/{id} - Delete VM
//! - POST /api/v1/vms/{id}/start - Start VM
//! - POST /api/v1/vms/{id}/stop - Stop VM
//! - GET /health - Health check
use axum::{
extract::{Path, State},
http::HeaderMap,
http::StatusCode,
routing::{get, post},
Json, Router,
};
use plasmavmc_api::proto::{
vm_service_server::VmService, CreateVmRequest, DeleteVmRequest, GetVmRequest, ListVmsRequest,
MigrateVmRequest, StartVmRequest, StopVmRequest, VirtualMachine as ProtoVm,
};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use tonic::Code;
use tonic::Request;
use crate::VmServiceImpl;
use iam_service_auth::{resolve_tenant_ids_from_context, AuthService, TenantContext};
/// REST API state
///
/// Shared handler state injected into every route via axum's `State`
/// extractor. Both services are behind `Arc`, so cloning the state per
/// request is cheap.
#[derive(Clone)]
pub struct RestApiState {
    /// gRPC-facing VM service implementation the REST layer delegates to.
    pub vm_service: Arc<VmServiceImpl>,
    /// Authenticates incoming HTTP headers and yields a `TenantContext`.
    pub auth_service: Arc<AuthService>,
}
/// Standard REST error response
///
/// Envelope serialized for every non-2xx REST reply: a structured error
/// plus per-response metadata (request id, timestamp).
#[derive(Debug, Serialize)]
pub struct ErrorResponse {
    pub error: ErrorDetail,
    pub meta: ResponseMeta,
}
/// Machine-readable error payload inside [`ErrorResponse`].
#[derive(Debug, Serialize)]
pub struct ErrorDetail {
    /// Symbolic code, e.g. "NOT_FOUND" or "INVALID_ARGUMENT".
    pub code: String,
    /// Human-readable message (taken from the gRPC status message).
    pub message: String,
    /// Optional structured detail; omitted from JSON when absent.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub details: Option<serde_json::Value>,
}
/// Per-response metadata attached to both success and error envelopes.
#[derive(Debug, Serialize)]
pub struct ResponseMeta {
    /// Fresh UUID generated per response (not propagated from the request).
    pub request_id: String,
    /// RFC 3339 UTC timestamp of response creation.
    pub timestamp: String,
}
impl ResponseMeta {
    /// Build metadata with a fresh random request id and the current UTC
    /// timestamp in RFC 3339 form.
    fn new() -> Self {
        let request_id = uuid::Uuid::new_v4().to_string();
        let timestamp = chrono::Utc::now().to_rfc3339();
        Self {
            request_id,
            timestamp,
        }
    }
}
/// Standard REST success response
///
/// Envelope serialized for every 2xx REST reply: the payload under `data`
/// plus per-response metadata.
#[derive(Debug, Serialize)]
pub struct SuccessResponse<T> {
    pub data: T,
    pub meta: ResponseMeta,
}
impl<T> SuccessResponse<T> {
fn new(data: T) -> Self {
Self {
data,
meta: ResponseMeta::new(),
}
}
}
/// VM creation request
///
/// REST body for `POST /api/v1/vms`. Optional fields fall back to defaults
/// when converted to the gRPC request (1 vCPU, 512 MiB, KVM hypervisor).
#[derive(Debug, Deserialize)]
pub struct CreateVmRequestRest {
    pub name: String,
    /// Explicit tenant override; otherwise resolved from auth context.
    pub org_id: Option<String>,
    pub project_id: Option<String>,
    pub vcpus: Option<u32>,
    pub memory_mib: Option<u64>,
    /// Hypervisor backend name; only "kvm" (or absent) is accepted.
    pub hypervisor: Option<String>,
    #[serde(default)]
    pub disks: Vec<DiskSpecRest>,
    #[serde(default)]
    pub network: Vec<NetworkSpecRest>,
}
/// REST disk specification used inside [`CreateVmRequestRest`].
#[derive(Debug, Deserialize)]
pub struct DiskSpecRest {
    pub id: String,
    pub source: DiskSourceRest,
    /// Disk size; defaults to 10 GiB when absent.
    pub size_gib: Option<u64>,
    /// "scsi", "ide", "sata"; anything else falls back to virtio.
    pub bus: Option<String>,
    /// "writeback" or "writethrough"; defaults to writeback.
    pub cache: Option<String>,
    pub boot_index: Option<u32>,
}
/// Disk backing source, discriminated by a `"type"` JSON tag
/// (`image`, `volume`, or `blank` in snake_case).
#[derive(Debug, Deserialize)]
#[serde(tag = "type", rename_all = "snake_case")]
pub enum DiskSourceRest {
    Image { image_id: String },
    Volume { volume_id: String },
    Blank,
}
/// REST NIC specification used inside [`CreateVmRequestRest`].
///
/// All fields are optional; conversion to the gRPC spec supplies defaults
/// (id `nic{index}`, network `"default"`, virtio-net model, DHCP off).
#[derive(Debug, Deserialize)]
pub struct NetworkSpecRest {
    pub id: Option<String>,
    pub network_id: Option<String>,
    pub subnet_id: Option<String>,
    pub port_id: Option<String>,
    pub mac_address: Option<String>,
    pub ip_address: Option<String>,
    pub cidr_block: Option<String>,
    pub gateway_ip: Option<String>,
    pub dhcp_enabled: Option<bool>,
    /// "e1000" selects the e1000 model; anything else is virtio-net.
    pub model: Option<String>,
    #[serde(default)]
    pub security_groups: Vec<String>,
}
/// VM migration request
///
/// REST body for `POST /api/v1/vms/{id}/migrate`.
#[derive(Debug, Deserialize)]
pub struct MigrateVmRequestRest {
    pub destination_node_id: String,
    /// Defaults to 0 when absent (service-side default applies).
    pub timeout_seconds: Option<u32>,
    /// When true, the call waits for the migration to finish; defaults to false.
    pub wait: Option<bool>,
}
/// VM response
///
/// REST view of a VM, built from the gRPC `VirtualMachine` proto via
/// `From<ProtoVm>` below.
#[derive(Debug, Serialize)]
pub struct VmResponse {
    pub id: String,
    pub name: String,
    pub org_id: String,
    pub project_id: String,
    /// Omitted from JSON when the proto node_id is empty.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub node_id: Option<String>,
    /// Debug rendering of the proto `VmState` enum variant.
    pub state: String,
    pub hypervisor: String,
    pub cpus: u32,
    pub memory_mb: u64,
    pub network: Vec<VmNetworkResponse>,
}
/// REST view of a VM NIC; empty proto strings become `None` and are
/// omitted from the serialized JSON.
#[derive(Debug, Serialize)]
pub struct VmNetworkResponse {
    pub id: String,
    pub network_id: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub subnet_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub port_id: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub mac_address: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub ip_address: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub cidr_block: Option<String>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub gateway_ip: Option<String>,
    pub dhcp_enabled: bool,
    pub model: String,
    pub security_groups: Vec<String>,
}
/// Shared prefix for every error message rejecting a non-KVM hypervisor on
/// the public REST surface.
const PUBLIC_KVM_ONLY_MESSAGE: &str =
    "PlasmaVMC public VM APIs support only the KVM backend";
fn nic_model_to_string(model: i32) -> String {
match plasmavmc_api::proto::NicModel::try_from(model)
.unwrap_or(plasmavmc_api::proto::NicModel::Unspecified)
{
plasmavmc_api::proto::NicModel::VirtioNet => "virtio-net".to_string(),
plasmavmc_api::proto::NicModel::E1000 => "e1000".to_string(),
plasmavmc_api::proto::NicModel::Unspecified => "unspecified".to_string(),
}
}
fn hypervisor_to_string(hypervisor: i32) -> String {
match plasmavmc_api::proto::HypervisorType::try_from(hypervisor)
.unwrap_or(plasmavmc_api::proto::HypervisorType::Unspecified)
{
plasmavmc_api::proto::HypervisorType::Kvm => "kvm".to_string(),
plasmavmc_api::proto::HypervisorType::Firecracker => {
"legacy-unsupported-firecracker".to_string()
}
plasmavmc_api::proto::HypervisorType::Mvisor => "legacy-unsupported-mvisor".to_string(),
plasmavmc_api::proto::HypervisorType::Unspecified => "kvm".to_string(),
}
}
/// Validate a caller-supplied hypervisor name against the public surface.
///
/// `None`, empty/whitespace-only strings, and "kvm" all resolve to KVM;
/// "firecracker" and "mvisor" are rejected with backend-specific messages,
/// and any other value is rejected as unsupported.
fn parse_supported_public_hypervisor(
    hypervisor: Option<&str>,
) -> Result<plasmavmc_api::proto::HypervisorType, String> {
    // Absent or blank input defaults to KVM.
    let Some(value) = hypervisor.map(str::trim).filter(|v| !v.is_empty()) else {
        return Ok(plasmavmc_api::proto::HypervisorType::Kvm);
    };
    match value {
        "kvm" => Ok(plasmavmc_api::proto::HypervisorType::Kvm),
        "firecracker" => Err(format!(
            "{PUBLIC_KVM_ONLY_MESSAGE}; firecracker remains outside the supported surface"
        )),
        "mvisor" => Err(format!(
            "{PUBLIC_KVM_ONLY_MESSAGE}; mvisor remains outside the supported surface"
        )),
        other => Err(format!(
            "{PUBLIC_KVM_ONLY_MESSAGE}; unsupported value `{other}`"
        )),
    }
}
impl From<plasmavmc_api::proto::NetworkSpec> for VmNetworkResponse {
    /// Build the REST NIC view, turning empty proto strings into `None`
    /// so they are omitted from the serialized JSON.
    fn from(network: plasmavmc_api::proto::NetworkSpec) -> Self {
        // Proto3 represents "unset" strings as "".
        fn opt(value: String) -> Option<String> {
            if value.is_empty() {
                None
            } else {
                Some(value)
            }
        }
        Self {
            id: network.id,
            network_id: network.network_id,
            subnet_id: opt(network.subnet_id),
            port_id: opt(network.port_id),
            mac_address: opt(network.mac_address),
            ip_address: opt(network.ip_address),
            cidr_block: opt(network.cidr_block),
            gateway_ip: opt(network.gateway_ip),
            dhcp_enabled: network.dhcp_enabled,
            model: nic_model_to_string(network.model),
            security_groups: network.security_groups,
        }
    }
}
impl From<ProtoVm> for VmResponse {
fn from(vm: ProtoVm) -> Self {
let cpus = vm
.spec
.as_ref()
.and_then(|s| s.cpu.as_ref())
.map(|c| c.vcpus)
.unwrap_or(1);
let memory_mb = vm
.spec
.as_ref()
.and_then(|s| s.memory.as_ref())
.map(|m| m.size_mib)
.unwrap_or(512);
let state = format!("{:?}", vm.state());
let network = vm
.spec
.as_ref()
.map(|spec| {
spec.network
.clone()
.into_iter()
.map(VmNetworkResponse::from)
.collect()
})
.unwrap_or_default();
Self {
id: vm.id,
name: vm.name,
org_id: vm.org_id,
project_id: vm.project_id,
node_id: (!vm.node_id.is_empty()).then_some(vm.node_id),
state,
hypervisor: hypervisor_to_string(vm.hypervisor),
cpus,
memory_mb,
network,
}
}
}
/// VMs list response
///
/// Payload of `GET /api/v1/vms` inside the standard success envelope.
#[derive(Debug, Serialize)]
pub struct VmsResponse {
    pub vms: Vec<VmResponse>,
}
/// Build the REST API router
///
/// Wires the health endpoint plus the T050.S5 VM routes to their handlers
/// and attaches the shared [`RestApiState`].
pub fn build_router(state: RestApiState) -> Router {
    let router = Router::new()
        .route("/health", get(health_check))
        .route("/api/v1/vms", get(list_vms).post(create_vm))
        .route("/api/v1/vms/{id}", get(get_vm).delete(delete_vm))
        .route("/api/v1/vms/{id}/start", post(start_vm))
        .route("/api/v1/vms/{id}/stop", post(stop_vm))
        .route("/api/v1/vms/{id}/migrate", post(migrate_vm));
    router.with_state(state)
}
/// Health check endpoint
///
/// Unauthenticated; always answers `200 OK` with `{"status": "healthy"}`.
async fn health_check() -> (StatusCode, Json<SuccessResponse<serde_json::Value>>) {
    let body = SuccessResponse::new(serde_json::json!({ "status": "healthy" }));
    (StatusCode::OK, Json(body))
}
/// GET /api/v1/vms - List VMs
///
/// Authenticates the caller, then lists up to 100 VMs scoped to the
/// resolved tenant (no pagination token or filter is exposed yet).
async fn list_vms(
    State(state): State<RestApiState>,
    headers: HeaderMap,
) -> Result<Json<SuccessResponse<VmsResponse>>, (StatusCode, Json<ErrorResponse>)> {
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(ListVmsRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        page_size: 100,
        page_token: String::new(),
        filter: String::new(),
    });
    // The gRPC layer reads the tenant back out of request extensions.
    grpc_req.extensions_mut().insert(tenant);
    let reply = state
        .vm_service
        .list_vms(grpc_req)
        .await
        .map_err(map_tonic_status)?
        .into_inner();
    let vms: Vec<VmResponse> = reply.vms.into_iter().map(VmResponse::from).collect();
    Ok(Json(SuccessResponse::new(VmsResponse { vms })))
}
/// POST /api/v1/vms - Create VM
async fn create_vm(
State(state): State<RestApiState>,
headers: HeaderMap,
Json(req): Json<CreateVmRequestRest>,
) -> Result<(StatusCode, Json<SuccessResponse<VmResponse>>), (StatusCode, Json<ErrorResponse>)> {
use plasmavmc_api::proto::{
disk_source, CpuSpec, DiskBus, DiskCache, DiskSource, DiskSpec, MemorySpec,
NicModel as ProtoNicModel,
};
let CreateVmRequestRest {
name,
org_id,
project_id,
vcpus,
memory_mib,
hypervisor,
disks,
network,
} = req;
let hypervisor_type = parse_supported_public_hypervisor(hypervisor.as_deref())
.map_err(|message| error_response(StatusCode::BAD_REQUEST, "INVALID_ARGUMENT", &message))?;
let disks = disks
.into_iter()
.map(|disk| DiskSpec {
id: disk.id,
source: Some(DiskSource {
source: Some(match disk.source {
DiskSourceRest::Image { image_id } => disk_source::Source::ImageId(image_id),
DiskSourceRest::Volume { volume_id } => {
disk_source::Source::VolumeId(volume_id)
}
DiskSourceRest::Blank => disk_source::Source::Blank(true),
}),
}),
size_gib: disk.size_gib.unwrap_or(10),
bus: match disk.bus.as_deref() {
Some("scsi") => DiskBus::Scsi as i32,
Some("ide") => DiskBus::Ide as i32,
Some("sata") => DiskBus::Sata as i32,
_ => DiskBus::Virtio as i32,
},
cache: match disk.cache.as_deref() {
Some("writeback") => DiskCache::Writeback as i32,
Some("writethrough") => DiskCache::Writethrough as i32,
_ => DiskCache::Writeback as i32,
},
boot_index: disk.boot_index.unwrap_or_default(),
})
.collect();
let network = network
.into_iter()
.enumerate()
.map(|(index, nic)| plasmavmc_api::proto::NetworkSpec {
id: nic.id.unwrap_or_else(|| format!("nic{}", index)),
network_id: nic.network_id.unwrap_or_else(|| "default".to_string()),
subnet_id: nic.subnet_id.unwrap_or_default(),
port_id: nic.port_id.unwrap_or_default(),
mac_address: nic.mac_address.unwrap_or_default(),
ip_address: nic.ip_address.unwrap_or_default(),
cidr_block: nic.cidr_block.unwrap_or_default(),
gateway_ip: nic.gateway_ip.unwrap_or_default(),
dhcp_enabled: nic.dhcp_enabled.unwrap_or(false),
model: match nic.model.as_deref() {
Some("e1000") => ProtoNicModel::E1000 as i32,
_ => ProtoNicModel::VirtioNet as i32,
},
security_groups: nic.security_groups,
})
.collect();
let tenant =
resolve_rest_tenant(&state, &headers, org_id.as_deref(), project_id.as_deref()).await?;
let mut grpc_req = Request::new(CreateVmRequest {
name,
org_id: tenant.org_id.clone(),
project_id: tenant.project_id.clone(),
spec: Some(plasmavmc_api::proto::VmSpec {
cpu: Some(CpuSpec {
vcpus: vcpus.unwrap_or(1),
cores_per_socket: 1,
sockets: 1,
cpu_model: String::new(),
}),
memory: Some(MemorySpec {
size_mib: memory_mib.unwrap_or(512),
hugepages: false,
}),
disks,
network,
boot: None,
security: None,
}),
hypervisor: hypervisor_type as i32,
metadata: Default::default(),
labels: Default::default(),
});
grpc_req.extensions_mut().insert(tenant);
let response = state
.vm_service
.create_vm(grpc_req)
.await
.map_err(map_tonic_status)?;
Ok((
StatusCode::CREATED,
Json(SuccessResponse::new(VmResponse::from(
response.into_inner(),
))),
))
}
/// GET /api/v1/vms/{id} - Get VM details
///
/// Authenticates the caller and fetches the VM by id within the resolved
/// tenant scope.
async fn get_vm(
    State(state): State<RestApiState>,
    Path(id): Path<String>,
    headers: HeaderMap,
) -> Result<Json<SuccessResponse<VmResponse>>, (StatusCode, Json<ErrorResponse>)> {
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(GetVmRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        vm_id: id,
    });
    grpc_req.extensions_mut().insert(tenant);
    let vm = state
        .vm_service
        .get_vm(grpc_req)
        .await
        .map_err(map_tonic_status)?
        .into_inner();
    Ok(Json(SuccessResponse::new(VmResponse::from(vm))))
}
/// DELETE /api/v1/vms/{id} - Delete VM
///
/// Non-forced delete within the resolved tenant scope; answers `200 OK`
/// with `{ "id": ..., "deleted": true }` on success.
async fn delete_vm(
    State(state): State<RestApiState>,
    Path(id): Path<String>,
    headers: HeaderMap,
) -> Result<(StatusCode, Json<SuccessResponse<serde_json::Value>>), (StatusCode, Json<ErrorResponse>)>
{
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(DeleteVmRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        vm_id: id.clone(),
        force: false,
    });
    grpc_req.extensions_mut().insert(tenant);
    state
        .vm_service
        .delete_vm(grpc_req)
        .await
        .map_err(map_tonic_status)?;
    let body = SuccessResponse::new(serde_json::json!({ "id": id, "deleted": true }));
    Ok((StatusCode::OK, Json(body)))
}
/// POST /api/v1/vms/{id}/start - Start VM
///
/// Answers `200 OK` with `{ "id": ..., "action": "started" }` on success.
async fn start_vm(
    State(state): State<RestApiState>,
    Path(id): Path<String>,
    headers: HeaderMap,
) -> Result<Json<SuccessResponse<serde_json::Value>>, (StatusCode, Json<ErrorResponse>)> {
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(StartVmRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        vm_id: id.clone(),
    });
    grpc_req.extensions_mut().insert(tenant);
    state
        .vm_service
        .start_vm(grpc_req)
        .await
        .map_err(map_tonic_status)?;
    let body = serde_json::json!({ "id": id, "action": "started" });
    Ok(Json(SuccessResponse::new(body)))
}
/// POST /api/v1/vms/{id}/stop - Stop VM
///
/// Graceful (non-forced) stop with a fixed 30-second timeout; answers
/// `200 OK` with `{ "id": ..., "action": "stopped" }` on success.
async fn stop_vm(
    State(state): State<RestApiState>,
    Path(id): Path<String>,
    headers: HeaderMap,
) -> Result<Json<SuccessResponse<serde_json::Value>>, (StatusCode, Json<ErrorResponse>)> {
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(StopVmRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        vm_id: id.clone(),
        force: false,
        timeout_seconds: 30,
    });
    grpc_req.extensions_mut().insert(tenant);
    state
        .vm_service
        .stop_vm(grpc_req)
        .await
        .map_err(map_tonic_status)?;
    let body = serde_json::json!({ "id": id, "action": "stopped" });
    Ok(Json(SuccessResponse::new(body)))
}
/// POST /api/v1/vms/{id}/migrate - Migrate VM
///
/// Forwards a migration request to the VM service; `timeout_seconds`
/// defaults to 0 and `wait` to false when omitted from the body.
async fn migrate_vm(
    State(state): State<RestApiState>,
    headers: HeaderMap,
    Path(id): Path<String>,
    Json(req): Json<MigrateVmRequestRest>,
) -> Result<Json<SuccessResponse<VmResponse>>, (StatusCode, Json<ErrorResponse>)> {
    let tenant = resolve_rest_tenant(&state, &headers, None, None).await?;
    let mut grpc_req = Request::new(MigrateVmRequest {
        org_id: tenant.org_id.clone(),
        project_id: tenant.project_id.clone(),
        vm_id: id,
        destination_node_id: req.destination_node_id,
        timeout_seconds: req.timeout_seconds.unwrap_or(0),
        wait: req.wait.unwrap_or(false),
    });
    grpc_req.extensions_mut().insert(tenant);
    let vm = state
        .vm_service
        .migrate_vm(grpc_req)
        .await
        .map_err(map_tonic_status)?
        .into_inner();
    Ok(Json(SuccessResponse::new(VmResponse::from(vm))))
}
/// Helper to create error response
fn error_response(
status: StatusCode,
code: &str,
message: &str,
) -> (StatusCode, Json<ErrorResponse>) {
(
status,
Json(ErrorResponse {
error: ErrorDetail {
code: code.to_string(),
message: message.to_string(),
details: None,
},
meta: ResponseMeta::new(),
}),
)
}
/// Authenticate the request headers and validate tenant scoping.
///
/// Authenticates via the shared `AuthService`, then validates that the
/// request-level org/project (empty string when absent) are permitted for
/// the authenticated context. Both failure paths map to REST errors via
/// `map_auth_status`.
///
/// NOTE(review): the value returned by `resolve_tenant_ids_from_context` is
/// discarded and the original `tenant` is returned unchanged — presumably
/// the call is used purely for validation; confirm the resolved ids never
/// need to be written back into the returned context.
async fn resolve_rest_tenant(
    state: &RestApiState,
    headers: &HeaderMap,
    req_org_id: Option<&str>,
    req_project_id: Option<&str>,
) -> Result<TenantContext, (StatusCode, Json<ErrorResponse>)> {
    let tenant = state
        .auth_service
        .authenticate_headers(headers)
        .await
        .map_err(map_auth_status)?;
    resolve_tenant_ids_from_context(
        &tenant,
        req_org_id.unwrap_or(""),
        req_project_id.unwrap_or(""),
    )
    .map_err(map_auth_status)?;
    Ok(tenant)
}
/// Map authentication/authorization failures to REST errors.
///
/// Currently identical to `map_tonic_status`; kept as a separate entry
/// point so auth-specific mapping can diverge without touching callers.
fn map_auth_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
    map_tonic_status(status)
}
/// Translate a gRPC `tonic::Status` into an HTTP status plus the standard
/// REST error body.
///
/// Client-error categories are preserved (404, 400, 401, 403, 409, 422,
/// 429); every other gRPC code collapses to `500 INTERNAL` so server-side
/// detail does not leak category information through the REST surface.
fn map_tonic_status(status: tonic::Status) -> (StatusCode, Json<ErrorResponse>) {
    // A single match keeps the HTTP status and the symbolic code in
    // lockstep; the previous pair of parallel matches could silently drift
    // apart when a new gRPC code mapping was added to only one of them.
    let (status_code, code) = match status.code() {
        Code::Unauthenticated => (StatusCode::UNAUTHORIZED, "UNAUTHENTICATED"),
        Code::PermissionDenied => (StatusCode::FORBIDDEN, "FORBIDDEN"),
        Code::InvalidArgument => (StatusCode::BAD_REQUEST, "INVALID_ARGUMENT"),
        Code::NotFound => (StatusCode::NOT_FOUND, "NOT_FOUND"),
        Code::AlreadyExists => (StatusCode::CONFLICT, "ALREADY_EXISTS"),
        Code::ResourceExhausted => (StatusCode::TOO_MANY_REQUESTS, "RESOURCE_EXHAUSTED"),
        Code::FailedPrecondition => (StatusCode::UNPROCESSABLE_ENTITY, "FAILED_PRECONDITION"),
        _ => (StatusCode::INTERNAL_SERVER_ERROR, "INTERNAL"),
    };
    error_response(status_code, code, status.message())
}
#[cfg(test)]
mod tests {
    use super::*;
    use plasmavmc_api::proto::{
        CpuSpec, HypervisorType, MemorySpec, NetworkSpec, NicModel, VirtualMachine as ProtoVm,
        VmSpec,
    };
    // Client-visible gRPC error categories must map to matching HTTP
    // statuses rather than collapsing to 500.
    #[test]
    fn map_tonic_status_preserves_client_error_categories() {
        let (status, Json(body)) = map_tonic_status(tonic::Status::not_found("missing vm"));
        assert_eq!(status, StatusCode::NOT_FOUND);
        assert_eq!(body.error.code, "NOT_FOUND");
        let (status, Json(body)) = map_tonic_status(tonic::Status::invalid_argument("bad nic"));
        assert_eq!(status, StatusCode::BAD_REQUEST);
        assert_eq!(body.error.code, "INVALID_ARGUMENT");
        let (status, Json(body)) =
            map_tonic_status(tonic::Status::failed_precondition("network attach failed"));
        assert_eq!(status, StatusCode::UNPROCESSABLE_ENTITY);
        assert_eq!(body.error.code, "FAILED_PRECONDITION");
    }
    // The proto-to-REST conversion must surface NIC details and turn
    // non-empty proto strings into Some(...) fields.
    #[test]
    fn vm_response_exposes_network_details() {
        let response = VmResponse::from(ProtoVm {
            id: "vm-1".to_string(),
            name: "vm-1".to_string(),
            org_id: "org-1".to_string(),
            project_id: "proj-1".to_string(),
            state: plasmavmc_api::proto::VmState::Running as i32,
            spec: Some(VmSpec {
                cpu: Some(CpuSpec {
                    vcpus: 2,
                    cores_per_socket: 1,
                    sockets: 1,
                    cpu_model: String::new(),
                }),
                memory: Some(MemorySpec {
                    size_mib: 2048,
                    hugepages: false,
                }),
                disks: vec![],
                network: vec![NetworkSpec {
                    id: "nic0".to_string(),
                    network_id: "default".to_string(),
                    mac_address: "02:00:00:00:00:01".to_string(),
                    ip_address: "10.62.10.15".to_string(),
                    cidr_block: "10.62.10.0/24".to_string(),
                    gateway_ip: "10.62.10.1".to_string(),
                    dhcp_enabled: true,
                    model: NicModel::VirtioNet as i32,
                    security_groups: vec!["sg-1".to_string()],
                    port_id: "port-1".to_string(),
                    subnet_id: "subnet-1".to_string(),
                }],
                boot: None,
                security: None,
            }),
            status: None,
            node_id: "node04".to_string(),
            hypervisor: HypervisorType::Kvm as i32,
            created_at: 0,
            updated_at: 0,
            created_by: String::new(),
            metadata: Default::default(),
            labels: Default::default(),
        });
        assert_eq!(response.hypervisor, "kvm");
        assert_eq!(response.node_id.as_deref(), Some("node04"));
        assert_eq!(response.network.len(), 1);
        assert_eq!(response.network[0].port_id.as_deref(), Some("port-1"));
        assert_eq!(response.network[0].subnet_id.as_deref(), Some("subnet-1"));
        assert_eq!(
            response.network[0].ip_address.as_deref(),
            Some("10.62.10.15")
        );
        assert_eq!(
            response.network[0].cidr_block.as_deref(),
            Some("10.62.10.0/24")
        );
        assert_eq!(
            response.network[0].gateway_ip.as_deref(),
            Some("10.62.10.1")
        );
        assert!(response.network[0].dhcp_enabled);
    }
    // Absent or explicit "kvm" hypervisor values both resolve to KVM.
    #[test]
    fn parse_supported_public_hypervisor_defaults_to_kvm() {
        assert_eq!(
            parse_supported_public_hypervisor(None).unwrap(),
            HypervisorType::Kvm
        );
        assert_eq!(
            parse_supported_public_hypervisor(Some("kvm")).unwrap(),
            HypervisorType::Kvm
        );
    }
    // Legacy backends are rejected with the shared KVM-only message.
    #[test]
    fn parse_supported_public_hypervisor_rejects_non_kvm_backends() {
        assert!(parse_supported_public_hypervisor(Some("firecracker"))
            .unwrap_err()
            .contains(PUBLIC_KVM_ONLY_MESSAGE));
        assert!(parse_supported_public_hypervisor(Some("mvisor"))
            .unwrap_err()
            .contains(PUBLIC_KVM_ONLY_MESSAGE));
    }
    // Legacy hypervisor enum values render as explicit "unsupported" labels.
    #[test]
    fn hypervisor_to_string_marks_legacy_backends_as_unsupported() {
        assert_eq!(
            hypervisor_to_string(HypervisorType::Firecracker as i32),
            "legacy-unsupported-firecracker"
        );
        assert_eq!(
            hypervisor_to_string(HypervisorType::Mvisor as i32),
            "legacy-unsupported-mvisor"
        );
    }
}