//! Integration tests for PlasmaVMC with FlareDB metadata storage and IAM authentication

use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

use chrono::Utc;
use flaredb_proto::kvrpc::{
    kv_raw_server::{KvRaw, KvRawServer},
    RawDeleteRequest, RawDeleteResponse, RawGetRequest, RawGetResponse, RawPutRequest,
    RawPutResponse, RawScanRequest, RawScanResponse,
};
use iam_api::proto::{
    iam_token_server::{IamToken, IamTokenServer},
    InternalTokenClaims, IssueTokenRequest, IssueTokenResponse, PrincipalKind,
    RefreshTokenRequest, RefreshTokenResponse, RevokeTokenRequest, RevokeTokenResponse,
    ValidateTokenRequest, ValidateTokenResponse,
};
use plasmavmc_api::proto::{
    vm_service_client::VmServiceClient, CreateVmRequest, DeleteVmRequest, GetVmRequest,
    HypervisorType as ProtoHypervisorType, ListVmsRequest, VmSpec,
};
use plasmavmc_hypervisor::HypervisorRegistry;
use plasmavmc_kvm::KvmBackend;
use plasmavmc_server::VmServiceImpl;
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio::time::sleep;
use tonic::transport::{Channel, Server};
use tonic::{Request, Response, Status};

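// The integration tests below are all marked #[ignore] because they need a
// KVM-capable host (or a mock hypervisor mode); presumably they are run
// explicitly with `cargo test -- --ignored`.
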
// ============================================================================
// Mock FlareDB KV Service
// ============================================================================

/// Mock FlareDB service with an in-memory HashMap-based KV store
#[derive(Clone)]
struct MockFlareDbService {
    store: Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>,
}

impl MockFlareDbService {
    fn new() -> Self {
        Self {
            store: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Get a snapshot of stored keys for verification
    async fn get_all_keys(&self) -> Vec<Vec<u8>> {
        let store = self.store.read().await;
        store.keys().cloned().collect()
    }

    /// Get a value for verification
    async fn get_value(&self, key: &[u8]) -> Option<Vec<u8>> {
        let store = self.store.read().await;
        store.get(key).cloned()
    }
}

#[tonic::async_trait]
impl KvRaw for MockFlareDbService {
    async fn raw_put(
        &self,
        request: Request<RawPutRequest>,
    ) -> Result<Response<RawPutResponse>, Status> {
        let req = request.into_inner();
        let mut store = self.store.write().await;
        store.insert(req.key, req.value);

        Ok(Response::new(RawPutResponse { success: true }))
    }

    async fn raw_get(
        &self,
        request: Request<RawGetRequest>,
    ) -> Result<Response<RawGetResponse>, Status> {
        let req = request.into_inner();
        let store = self.store.read().await;

        match store.get(&req.key) {
            Some(value) => Ok(Response::new(RawGetResponse {
                found: true,
                value: value.clone(),
            })),
            None => Ok(Response::new(RawGetResponse {
                found: false,
                value: vec![],
            })),
        }
    }

    async fn raw_scan(
        &self,
        request: Request<RawScanRequest>,
    ) -> Result<Response<RawScanResponse>, Status> {
        let req = request.into_inner();
        let store = self.store.read().await;

        // Treat a zero limit as "use the default page size".
        let limit = (if req.limit == 0 { 100 } else { req.limit }) as usize;

        // Collect and sort keys
        let mut keys: Vec<_> = store.keys().cloned().collect();
        keys.sort();

        // Filter by start_key (inclusive) and end_key (exclusive)
        let filtered_keys: Vec<_> = keys
            .into_iter()
            .filter(|k| {
                (req.start_key.is_empty() || k >= &req.start_key)
                    && (req.end_key.is_empty() || k < &req.end_key)
            })
            .take(limit)
            .collect();

        let mut result_keys = Vec::new();
        let mut result_values = Vec::new();

        for key in &filtered_keys {
            if let Some(value) = store.get(key) {
                result_keys.push(key.clone());
                result_values.push(value.clone());
            }
        }

        // Note: this over-reports has_more when the matching set ends exactly
        // at `limit`; that imprecision is acceptable for a test mock.
        let has_more = result_keys.len() >= limit;
        let next_key = if has_more {
            result_keys.last().cloned().unwrap_or_default()
        } else {
            vec![]
        };

        Ok(Response::new(RawScanResponse {
            keys: result_keys,
            values: result_values,
            has_more,
            next_key,
        }))
    }

    async fn raw_delete(
        &self,
        request: Request<RawDeleteRequest>,
    ) -> Result<Response<RawDeleteResponse>, Status> {
        let req = request.into_inner();
        let mut store = self.store.write().await;

        let existed = store.remove(&req.key).is_some();

        Ok(Response::new(RawDeleteResponse {
            success: true,
            existed,
        }))
    }
}

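// A minimal self-check of the mock's scan bounds (start inclusive, end
// exclusive), matching what the integration tests below rely on. This test is
// an added sketch, not part of the original suite; it assumes the
// prost-generated request types implement Default (prost messages do).
#[tokio::test]
async fn mock_flaredb_scan_bounds() {
    let svc = MockFlareDbService::new();
    for k in [b"a".to_vec(), b"b".to_vec(), b"c".to_vec()] {
        svc.raw_put(Request::new(RawPutRequest {
            key: k.clone(),
            value: k,
            ..Default::default()
        }))
        .await
        .unwrap();
    }

    let resp = svc
        .raw_scan(Request::new(RawScanRequest {
            start_key: b"a".to_vec(),
            end_key: b"c".to_vec(),
            limit: 10,
            ..Default::default()
        }))
        .await
        .unwrap()
        .into_inner();

    // "a" and "b" fall in [a, c); "c" is excluded by the end bound.
    assert_eq!(resp.keys, vec![b"a".to_vec(), b"b".to_vec()]);
    assert!(!resp.has_more);
}
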
// ============================================================================
// Mock IAM Token Service
// ============================================================================

/// Token claims for validation
#[derive(Clone, Debug)]
struct TokenClaims {
    principal_id: String,
    principal_kind: PrincipalKind,
    roles: Vec<String>,
    org_id: Option<String>,
    project_id: Option<String>,
    expires_at: u64,
    session_id: String,
}

/// Mock IAM Token service
#[derive(Clone)]
struct MockIamTokenService {
    tokens: Arc<RwLock<HashMap<String, TokenClaims>>>,
}

impl MockIamTokenService {
    fn new() -> Self {
        Self {
            tokens: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Pre-register a valid token for testing
    async fn register_token(&self, token: String, claims: TokenClaims) {
        let mut tokens = self.tokens.write().await;
        tokens.insert(token, claims);
    }

    fn now_ts() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs()
    }

    /// Generate an opaque token string. This is a hash-based placeholder for
    /// tests only; it is not a JWT and has no cryptographic strength.
    fn generate_token(principal_id: &str) -> String {
        use std::collections::hash_map::RandomState;
        use std::hash::{BuildHasher, Hash, Hasher};

        let random_state = RandomState::new();
        let mut hasher = random_state.build_hasher();
        principal_id.hash(&mut hasher);
        Self::now_ts().hash(&mut hasher);

        format!(
            "mock_token_{}_{}_{:x}",
            principal_id,
            Self::now_ts(),
            hasher.finish()
        )
    }
}

#[tonic::async_trait]
impl IamToken for MockIamTokenService {
    async fn issue_token(
        &self,
        request: Request<IssueTokenRequest>,
    ) -> Result<Response<IssueTokenResponse>, Status> {
        let req = request.into_inner();

        // Fall back to a one-hour TTL when the caller does not specify one.
        let ttl = if req.ttl_seconds > 0 {
            req.ttl_seconds
        } else {
            3600
        };

        let now = Self::now_ts();
        let expires_at = now + ttl;
        let token = Self::generate_token(&req.principal_id);
        let session_id = format!("session_{}_{}", req.principal_id, now);

        // Store token claims
        let claims = TokenClaims {
            principal_id: req.principal_id.clone(),
            principal_kind: PrincipalKind::try_from(req.principal_kind)
                .unwrap_or(PrincipalKind::User),
            roles: req.roles.clone(),
            org_id: req.scope.as_ref().and_then(|s| {
                if let Some(iam_api::proto::scope::Scope::Org(org)) = &s.scope {
                    Some(org.id.clone())
                } else {
                    None
                }
            }),
            project_id: req.scope.as_ref().and_then(|s| {
                if let Some(iam_api::proto::scope::Scope::Project(proj)) = &s.scope {
                    Some(proj.id.clone())
                } else {
                    None
                }
            }),
            expires_at,
            session_id: session_id.clone(),
        };

        let mut tokens = self.tokens.write().await;
        tokens.insert(token.clone(), claims);

        Ok(Response::new(IssueTokenResponse {
            token,
            expires_at,
            session_id,
        }))
    }

    async fn validate_token(
        &self,
        request: Request<ValidateTokenRequest>,
    ) -> Result<Response<ValidateTokenResponse>, Status> {
        let req = request.into_inner();
        let tokens = self.tokens.read().await;

        match tokens.get(&req.token) {
            Some(claims) => {
                // Check if the token is expired
                let now = Self::now_ts();
                if now > claims.expires_at {
                    return Ok(Response::new(ValidateTokenResponse {
                        valid: false,
                        claims: None,
                        reason: "token expired".to_string(),
                    }));
                }

                // Return valid token with claims
                Ok(Response::new(ValidateTokenResponse {
                    valid: true,
                    claims: Some(InternalTokenClaims {
                        principal_id: claims.principal_id.clone(),
                        principal_kind: claims.principal_kind as i32,
                        principal_name: claims.principal_id.clone(),
                        roles: claims.roles.clone(),
                        scope: None,
                        org_id: claims.org_id.clone(),
                        project_id: claims.project_id.clone(),
                        node_id: None,
                        // Approximate issue time: the mock assumes the default
                        // one-hour TTL rather than storing the real iat.
                        iat: claims.expires_at - 3600,
                        exp: claims.expires_at,
                        session_id: claims.session_id.clone(),
                        auth_method: "mock".to_string(),
                    }),
                    reason: String::new(),
                }))
            }
            None => Ok(Response::new(ValidateTokenResponse {
                valid: false,
                claims: None,
                reason: "token not found".to_string(),
            })),
        }
    }

    async fn revoke_token(
        &self,
        request: Request<RevokeTokenRequest>,
    ) -> Result<Response<RevokeTokenResponse>, Status> {
        let req = request.into_inner();
        let mut tokens = self.tokens.write().await;

        let success = tokens.remove(&req.token).is_some();

        Ok(Response::new(RevokeTokenResponse { success }))
    }

    async fn refresh_token(
        &self,
        request: Request<RefreshTokenRequest>,
    ) -> Result<Response<RefreshTokenResponse>, Status> {
        let req = request.into_inner();
        let mut tokens = self.tokens.write().await;

        // Refresh rotates the token: the old one is removed and a new token
        // is issued with the same claims and a fresh expiry.
        match tokens.remove(&req.token) {
            Some(old_claims) => {
                let ttl = if req.ttl_seconds > 0 {
                    req.ttl_seconds
                } else {
                    3600
                };

                let now = Self::now_ts();
                let expires_at = now + ttl;
                let new_token = Self::generate_token(&old_claims.principal_id);

                let new_claims = TokenClaims {
                    expires_at,
                    ..old_claims
                };

                tokens.insert(new_token.clone(), new_claims);

                Ok(Response::new(RefreshTokenResponse {
                    token: new_token,
                    expires_at,
                }))
            }
            None => Err(Status::not_found("token not found")),
        }
    }
}

// ============================================================================
// Test Helpers
// ============================================================================

/// Start mock FlareDB server
async fn start_flaredb_server(
    addr: &str,
) -> (tokio::task::JoinHandle<()>, MockFlareDbService) {
    let service = MockFlareDbService::new();
    let service_clone = service.clone();
    let addr_parsed = addr.parse().unwrap();

    let handle = tokio::spawn(async move {
        Server::builder()
            .add_service(KvRawServer::new(service_clone))
            .serve(addr_parsed)
            .await
            .unwrap();
    });

    (handle, service)
}

/// Start mock IAM server
async fn start_iam_server(addr: &str) -> (tokio::task::JoinHandle<()>, MockIamTokenService) {
    let service = MockIamTokenService::new();
    let service_clone = service.clone();
    let addr_parsed = addr.parse().unwrap();

    let handle = tokio::spawn(async move {
        Server::builder()
            .add_service(IamTokenServer::new(service_clone))
            .serve(addr_parsed)
            .await
            .unwrap();
    });

    (handle, service)
}

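// The tests below wait for servers to come up with fixed 300 ms sleeps, which
// is simple but race-prone on a slow machine. A retry-based alternative is
// sketched here for illustration only; the tests still use the sleeps, and
// `wait_for_endpoint` is a hypothetical helper, not part of the original code.
#[allow(dead_code)]
async fn wait_for_endpoint(endpoint: &str) -> Channel {
    for _ in 0..50 {
        let ep = Channel::from_shared(endpoint.to_string()).expect("valid URI");
        if let Ok(channel) = ep.connect().await {
            return channel;
        }
        sleep(Duration::from_millis(100)).await;
    }
    panic!("server at {} did not become ready", endpoint);
}
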
/// Start PlasmaVMC server with FlareDB and IAM integration
async fn start_plasmavmc_server(
    addr: &str,
    flaredb_endpoint: String,
    iam_endpoint: String,
    storage_dir: &str,
) -> tokio::task::JoinHandle<()> {
    // NOTE: std::env::set_var is process-wide, so tests that call this helper
    // must not run concurrently with each other.
    std::env::set_var("FLAREDB_ENDPOINT", flaredb_endpoint);
    std::env::set_var("IAM_ENDPOINT", iam_endpoint);
    std::env::set_var("PLASMAVMC_STORAGE_BACKEND", "file");
    std::env::set_var("PLASMAVMC_STORAGE_DIR", storage_dir);

    let registry = Arc::new(HypervisorRegistry::new());
    registry.register(Arc::new(KvmBackend::with_defaults()));
    let svc = VmServiceImpl::new(registry).await.unwrap();

    let addr_parsed = addr.parse().unwrap();
    tokio::spawn(async move {
        Server::builder()
            .add_service(plasmavmc_api::proto::vm_service_server::VmServiceServer::new(svc))
            .serve(addr_parsed)
            .await
            .unwrap();
    })
}

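// Every authenticated request below attaches a bearer token by hand. A small
// helper like this would cut the repetition (a sketch only; `with_bearer` is
// a hypothetical name, not part of the original test code):
#[allow(dead_code)]
fn with_bearer<T>(mut request: Request<T>, token: &str) -> Request<T> {
    let value = format!("Bearer {}", token)
        .parse()
        .expect("bearer token should be valid ASCII metadata");
    request.metadata_mut().insert("authorization", value);
    request
}
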
// ============================================================================
// Test Cases
// ============================================================================

#[tokio::test]
#[ignore] // Requires mock hypervisor mode
async fn test_vm_crud_with_flaredb_metadata() {
    // Create temporary storage directory
    let temp_dir = TempDir::new().unwrap();
    let storage_path = temp_dir.path().to_str().unwrap();

    // Start mock FlareDB server
    let flaredb_addr = "127.0.0.1:50091";
    let (flaredb_handle, flaredb_service) = start_flaredb_server(flaredb_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Start mock IAM server (not used in this test, but required by plasmavmc)
    let iam_addr = "127.0.0.1:50092";
    let (iam_handle, _iam_service) = start_iam_server(iam_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Start PlasmaVMC server
    let plasmavmc_addr = "127.0.0.1:50093";
    let flaredb_endpoint = format!("http://{}", flaredb_addr);
    let iam_endpoint = format!("http://{}", iam_addr);
    let plasmavmc_handle =
        start_plasmavmc_server(plasmavmc_addr, flaredb_endpoint, iam_endpoint, storage_path)
            .await;
    sleep(Duration::from_millis(300)).await;

    // Create PlasmaVMC client
    let plasmavmc_channel = Channel::from_shared(format!("http://{}", plasmavmc_addr))
        .unwrap()
        .connect()
        .await
        .unwrap();
    let mut vm_client = VmServiceClient::new(plasmavmc_channel);

    let org_id = "test-org";
    let project_id = "test-project";

    // 1. Create VM
    let vm_spec = VmSpec {
        cpu: None,
        memory: None,
        disks: vec![],
        network: vec![],
        boot: None,
        security: None,
    };

    let create_resp = vm_client
        .create_vm(Request::new(CreateVmRequest {
            name: "test-vm-flaredb".to_string(),
            org_id: org_id.to_string(),
            project_id: project_id.to_string(),
            spec: Some(vm_spec),
            hypervisor: ProtoHypervisorType::Kvm as i32,
            metadata: [("environment".to_string(), "test".to_string())]
                .iter()
                .cloned()
                .collect(),
            labels: [("app".to_string(), "web".to_string())]
                .iter()
                .cloned()
                .collect(),
        }))
        .await
        .unwrap()
        .into_inner();

    let vm_id = create_resp.id.clone();
    assert_eq!(create_resp.name, "test-vm-flaredb");
    assert!(!vm_id.is_empty());

    sleep(Duration::from_millis(300)).await;

    // 2. Verify metadata stored in FlareDB
    let stored_keys = flaredb_service.get_all_keys().await;
    assert!(!stored_keys.is_empty(), "FlareDB should have metadata stored");

    // Check that VM metadata exists. This relies on the server writing VM
    // records under the key scheme "vm:{org_id}:{project_id}:{vm_id}".
    let vm_key = format!("vm:{}:{}:{}", org_id, project_id, vm_id);
    let vm_metadata = flaredb_service.get_value(vm_key.as_bytes()).await;
    assert!(
        vm_metadata.is_some(),
        "VM metadata should be stored in FlareDB"
    );

    // 3. Get VM and verify metadata persistence
    let get_resp = vm_client
        .get_vm(Request::new(GetVmRequest {
            org_id: org_id.to_string(),
            project_id: project_id.to_string(),
            vm_id: vm_id.clone(),
        }))
        .await
        .unwrap()
        .into_inner();

    assert_eq!(get_resp.id, vm_id);
    assert_eq!(get_resp.name, "test-vm-flaredb");
    assert_eq!(get_resp.metadata.get("environment"), Some(&"test".to_string()));
    assert_eq!(get_resp.labels.get("app"), Some(&"web".to_string()));

    // 4. List VMs
    let list_resp = vm_client
        .list_vms(Request::new(ListVmsRequest {
            org_id: org_id.to_string(),
            project_id: project_id.to_string(),
            page_size: 10,
            page_token: String::new(),
            filter: String::new(),
        }))
        .await
        .unwrap()
        .into_inner();

    assert_eq!(list_resp.vms.len(), 1);
    assert_eq!(list_resp.vms[0].id, vm_id);
    assert_eq!(list_resp.vms[0].name, "test-vm-flaredb");

    // 5. Delete VM
    vm_client
        .delete_vm(Request::new(DeleteVmRequest {
            org_id: org_id.to_string(),
            project_id: project_id.to_string(),
            vm_id: vm_id.clone(),
            force: true,
        }))
        .await
        .unwrap();

    sleep(Duration::from_millis(300)).await;

    // 6. Verify VM deleted from FlareDB
    let vm_metadata_after_delete = flaredb_service.get_value(vm_key.as_bytes()).await;
    assert!(
        vm_metadata_after_delete.is_none(),
        "VM metadata should be deleted from FlareDB"
    );

    // 7. Verify Get returns not found
    let get_after_delete = vm_client
        .get_vm(Request::new(GetVmRequest {
            org_id: org_id.to_string(),
            project_id: project_id.to_string(),
            vm_id: vm_id.clone(),
        }))
        .await;

    assert!(
        get_after_delete.is_err(),
        "Get VM should fail after deletion"
    );

    // Cleanup
    flaredb_handle.abort();
    iam_handle.abort();
    plasmavmc_handle.abort();
}

#[tokio::test]
#[ignore] // Requires mock hypervisor mode
async fn test_vm_auth_validation() {
    // Create temporary storage directory
    let temp_dir = TempDir::new().unwrap();
    let storage_path = temp_dir.path().to_str().unwrap();

    // Start mock FlareDB server
    let flaredb_addr = "127.0.0.1:50094";
    let (flaredb_handle, _flaredb_service) = start_flaredb_server(flaredb_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Start mock IAM server
    let iam_addr = "127.0.0.1:50095";
    let (iam_handle, iam_service) = start_iam_server(iam_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Pre-register valid tokens
    let valid_token = "valid_test_token_abc123";
    iam_service
        .register_token(
            valid_token.to_string(),
            TokenClaims {
                principal_id: "user-alice".to_string(),
                principal_kind: PrincipalKind::User,
                roles: vec!["roles/VmAdmin".to_string()],
                org_id: Some("test-org".to_string()),
                project_id: Some("test-project".to_string()),
                expires_at: MockIamTokenService::now_ts() + 3600,
                session_id: "session-1".to_string(),
            },
        )
        .await;

    // Start PlasmaVMC server
    let plasmavmc_addr = "127.0.0.1:50096";
    let flaredb_endpoint = format!("http://{}", flaredb_addr);
    let iam_endpoint = format!("http://{}", iam_addr);
    let plasmavmc_handle =
        start_plasmavmc_server(plasmavmc_addr, flaredb_endpoint, iam_endpoint, storage_path)
            .await;
    sleep(Duration::from_millis(300)).await;

    // Create PlasmaVMC client
    let plasmavmc_channel = Channel::from_shared(format!("http://{}", plasmavmc_addr))
        .unwrap()
        .connect()
        .await
        .unwrap();
    let mut vm_client = VmServiceClient::new(plasmavmc_channel);

    let org_id = "test-org";
    let project_id = "test-project";

    // Test 1: Request with INVALID token should fail
    let invalid_token = "invalid_token_xyz";
    let mut invalid_request = Request::new(CreateVmRequest {
        name: "unauthorized-vm".to_string(),
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        spec: Some(VmSpec {
            cpu: None,
            memory: None,
            disks: vec![],
            network: vec![],
            boot: None,
            security: None,
        }),
        hypervisor: ProtoHypervisorType::Kvm as i32,
        metadata: Default::default(),
        labels: Default::default(),
    });
    invalid_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", invalid_token).parse().unwrap());

    let invalid_result = vm_client.create_vm(invalid_request).await;
    assert!(
        invalid_result.is_err(),
        "CreateVM with invalid token should fail"
    );
    let err = invalid_result.unwrap_err();
    assert!(
        err.code() == tonic::Code::Unauthenticated || err.code() == tonic::Code::PermissionDenied,
        "Should return auth error, got: {:?}",
        err
    );

    // Test 2: Request with VALID token should succeed
    let mut valid_request = Request::new(CreateVmRequest {
        name: "authorized-vm".to_string(),
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        spec: Some(VmSpec {
            cpu: None,
            memory: None,
            disks: vec![],
            network: vec![],
            boot: None,
            security: None,
        }),
        hypervisor: ProtoHypervisorType::Kvm as i32,
        metadata: Default::default(),
        labels: Default::default(),
    });
    valid_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", valid_token).parse().unwrap());

    let valid_result = vm_client.create_vm(valid_request).await;
    assert!(
        valid_result.is_ok(),
        "CreateVM with valid token should succeed"
    );
    let create_resp = valid_result.unwrap().into_inner();
    assert_eq!(create_resp.name, "authorized-vm");

    // Test 3: Revoke token and verify subsequent requests fail
    let vm_id = create_resp.id;
    iam_service
        .revoke_token(Request::new(RevokeTokenRequest {
            token: valid_token.to_string(),
            reason: "test revocation".to_string(),
        }))
        .await
        .unwrap();

    let mut revoked_request = Request::new(GetVmRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        vm_id: vm_id.clone(),
    });
    revoked_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", valid_token).parse().unwrap());

    let revoked_result = vm_client.get_vm(revoked_request).await;
    assert!(
        revoked_result.is_err(),
        "GetVM with revoked token should fail"
    );

    // Cleanup
    flaredb_handle.abort();
    iam_handle.abort();
    plasmavmc_handle.abort();
}

#[tokio::test]
#[ignore] // Requires mock hypervisor mode
async fn test_full_vm_lifecycle_e2e() {
    // Create temporary storage directory
    let temp_dir = TempDir::new().unwrap();
    let storage_path = temp_dir.path().to_str().unwrap();

    // Start mock FlareDB server
    let flaredb_addr = "127.0.0.1:50097";
    let (flaredb_handle, flaredb_service) = start_flaredb_server(flaredb_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Start mock IAM server (exercised via token_client below)
    let iam_addr = "127.0.0.1:50098";
    let (iam_handle, _iam_service) = start_iam_server(iam_addr).await;
    sleep(Duration::from_millis(300)).await;

    // Create IAM token client
    let iam_channel = Channel::from_shared(format!("http://{}", iam_addr))
        .unwrap()
        .connect()
        .await
        .unwrap();
    let mut token_client = iam_api::proto::iam_token_client::IamTokenClient::new(iam_channel);

    // Start PlasmaVMC server
    let plasmavmc_addr = "127.0.0.1:50099";
    let flaredb_endpoint = format!("http://{}", flaredb_addr);
    let iam_endpoint = format!("http://{}", iam_addr);
    let plasmavmc_handle =
        start_plasmavmc_server(plasmavmc_addr, flaredb_endpoint, iam_endpoint, storage_path)
            .await;
    sleep(Duration::from_millis(300)).await;

    // Create PlasmaVMC client
    let plasmavmc_channel = Channel::from_shared(format!("http://{}", plasmavmc_addr))
        .unwrap()
        .connect()
        .await
        .unwrap();
    let mut vm_client = VmServiceClient::new(plasmavmc_channel);

    let org_id = "e2e-org";
    let project_id = "e2e-project";

    // === Step 1: Issue IAM Token ===
    let issue_resp = token_client
        .issue_token(Request::new(IssueTokenRequest {
            principal_id: "e2e-user".to_string(),
            principal_kind: PrincipalKind::User as i32,
            roles: vec!["roles/VmAdmin".to_string(), "roles/VmViewer".to_string()],
            scope: Some(iam_api::proto::Scope {
                scope: Some(iam_api::proto::scope::Scope::Project(
                    iam_api::proto::ProjectScope {
                        id: project_id.to_string(),
                        org_id: org_id.to_string(),
                    },
                )),
            }),
            ttl_seconds: 3600,
        }))
        .await
        .unwrap()
        .into_inner();

    let auth_token = issue_resp.token;
    assert!(!auth_token.is_empty(), "Token should be issued");

    sleep(Duration::from_millis(200)).await;

    // === Step 2: Validate Token ===
    let validate_resp = token_client
        .validate_token(Request::new(ValidateTokenRequest {
            token: auth_token.clone(),
        }))
        .await
        .unwrap()
        .into_inner();

    assert!(validate_resp.valid, "Token should be valid");
    let claims = validate_resp.claims.unwrap();
    assert_eq!(claims.principal_id, "e2e-user");
    assert_eq!(claims.roles.len(), 2);
    assert!(claims.roles.contains(&"roles/VmAdmin".to_string()));

    // === Step 3: Create VM with Authentication ===
    let mut create_request = Request::new(CreateVmRequest {
        name: "e2e-test-vm".to_string(),
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        spec: Some(VmSpec {
            cpu: None,
            memory: None,
            disks: vec![],
            network: vec![],
            boot: None,
            security: None,
        }),
        hypervisor: ProtoHypervisorType::Kvm as i32,
        metadata: [
            ("created_by".to_string(), "e2e-user".to_string()),
            ("test_run".to_string(), Utc::now().to_rfc3339()),
        ]
        .iter()
        .cloned()
        .collect(),
        labels: [
            ("tier".to_string(), "frontend".to_string()),
            ("env".to_string(), "e2e-test".to_string()),
        ]
        .iter()
        .cloned()
        .collect(),
    });
    create_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", auth_token).parse().unwrap());

    let create_resp = vm_client
        .create_vm(create_request)
        .await
        .unwrap()
        .into_inner();

    let vm_id = create_resp.id.clone();
    assert_eq!(create_resp.name, "e2e-test-vm");
    assert!(!vm_id.is_empty());

    sleep(Duration::from_millis(300)).await;

    // === Step 4: Verify FlareDB Metadata Storage ===
    let stored_keys = flaredb_service.get_all_keys().await;
    assert!(!stored_keys.is_empty(), "FlareDB should contain VM metadata");

    let vm_key = format!("vm:{}:{}:{}", org_id, project_id, vm_id);
    let stored_metadata = flaredb_service.get_value(vm_key.as_bytes()).await;
    assert!(
        stored_metadata.is_some(),
        "VM metadata should be persisted in FlareDB"
    );

    // Parse and verify stored JSON metadata
    let metadata_json = String::from_utf8(stored_metadata.unwrap()).unwrap();
    assert!(
        metadata_json.contains("e2e-test-vm"),
        "Metadata should contain VM name"
    );
    assert!(
        metadata_json.contains(&vm_id),
        "Metadata should contain VM ID"
    );

    // === Step 5: Retrieve VM ===
    let mut get_request = Request::new(GetVmRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        vm_id: vm_id.clone(),
    });
    get_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", auth_token).parse().unwrap());

    let get_resp = vm_client.get_vm(get_request).await.unwrap().into_inner();

    assert_eq!(get_resp.id, vm_id);
    assert_eq!(get_resp.name, "e2e-test-vm");
    assert_eq!(get_resp.org_id, org_id);
    assert_eq!(get_resp.project_id, project_id);
    assert_eq!(
        get_resp.metadata.get("created_by"),
        Some(&"e2e-user".to_string())
    );
    assert_eq!(get_resp.labels.get("tier"), Some(&"frontend".to_string()));

    // === Step 6: List VMs ===
    let mut list_request = Request::new(ListVmsRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        page_size: 10,
        page_token: String::new(),
        filter: String::new(),
    });
    list_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", auth_token).parse().unwrap());

    let list_resp = vm_client.list_vms(list_request).await.unwrap().into_inner();

    assert_eq!(list_resp.vms.len(), 1);
    assert_eq!(list_resp.vms[0].id, vm_id);
    assert_eq!(list_resp.vms[0].name, "e2e-test-vm");

    // === Step 7: Delete VM ===
    let mut delete_request = Request::new(DeleteVmRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        vm_id: vm_id.clone(),
        force: true,
    });
    delete_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", auth_token).parse().unwrap());

    vm_client.delete_vm(delete_request).await.unwrap();

    sleep(Duration::from_millis(300)).await;

    // === Step 8: Verify VM Deleted from FlareDB ===
    let deleted_metadata = flaredb_service.get_value(vm_key.as_bytes()).await;
    assert!(
        deleted_metadata.is_none(),
        "VM metadata should be removed from FlareDB after deletion"
    );

    // === Step 9: Verify VM No Longer Exists ===
    let mut get_deleted_request = Request::new(GetVmRequest {
        org_id: org_id.to_string(),
        project_id: project_id.to_string(),
        vm_id: vm_id.clone(),
    });
    get_deleted_request
        .metadata_mut()
        .insert("authorization", format!("Bearer {}", auth_token).parse().unwrap());

    let get_deleted_result = vm_client.get_vm(get_deleted_request).await;
    assert!(
        get_deleted_result.is_err(),
        "GetVM should fail for deleted VM"
    );

    // === Step 10: Verify Token Still Valid ===
    let final_validate = token_client
        .validate_token(Request::new(ValidateTokenRequest {
            token: auth_token.clone(),
        }))
        .await
        .unwrap()
        .into_inner();

    assert!(final_validate.valid, "Token should still be valid after operations");

    // Cleanup
    flaredb_handle.abort();
    iam_handle.abort();
    plasmavmc_handle.abort();
}